Diffstat (limited to 'drivers')
1196 files changed, 43965 insertions, 26320 deletions
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c index c1ec087dca13..023172ca22ef 100644 --- a/drivers/accessibility/speakup/speakup_acntpc.c +++ b/drivers/accessibility/speakup/speakup_acntpc.c @@ -247,7 +247,7 @@ static void synth_flush(struct spk_synth *synth) static int synth_probe(struct spk_synth *synth) { unsigned int port_val = 0; - int i = 0; + int i; pr_info("Probing for %s.\n", synth->long_name); if (port_forced) { diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c index 92838d3ae9eb..a9dd5c45d237 100644 --- a/drivers/accessibility/speakup/speakup_dtlk.c +++ b/drivers/accessibility/speakup/speakup_dtlk.c @@ -316,7 +316,7 @@ static struct synth_settings *synth_interrogate(struct spk_synth *synth) static int synth_probe(struct spk_synth *synth) { unsigned int port_val = 0; - int i = 0; + int i; struct synth_settings *sp; pr_info("Probing for DoubleTalk.\n"); diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c index 311f4aa0be22..1618be87bff1 100644 --- a/drivers/accessibility/speakup/speakup_keypc.c +++ b/drivers/accessibility/speakup/speakup_keypc.c @@ -254,7 +254,7 @@ static void synth_flush(struct spk_synth *synth) static int synth_probe(struct spk_synth *synth) { unsigned int port_val = 0; - int i = 0; + int i; pr_info("Probing for %s.\n", synth->long_name); if (port_forced) { diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 705331e3d87a..ba45541b1f1f 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -520,6 +520,28 @@ config ACPI_CONFIGFS userspace. The configurable ACPI groups will be visible under /config/acpi, assuming configfs is mounted under /config. +config ACPI_PFRUT + tristate "ACPI Platform Firmware Runtime Update and Telemetry" + depends on 64BIT + help + This mechanism allows certain pieces of the platform firmware + to be updated on the fly while the system is running (runtime) + without the need to restart it, which is key in the cases when + the system needs to be available 100% of the time and it cannot + afford the downtime related to restarting it, or when the work + carried out by the system is particularly important, so it cannot + be interrupted, and it is not practical to wait until it is complete. + + The existing firmware code can be modified (driver update) or + extended by adding new code to the firmware (code injection). + + Besides, the telemetry driver allows user space to fetch telemetry + data from the firmware with the help of the Platform Firmware Runtime + Telemetry interface. + + To compile the drivers as modules, choose M here: + the modules will be called pfr_update and pfr_telemetry. + if ARM64 source "drivers/acpi/arm64/Kconfig" diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 08c2d985c57c..bb757148e7ba 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -9,7 +9,7 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # ACPI Boot-Time Table Parsing # ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y) -tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ; +tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ; endif @@ -103,6 +103,7 @@ obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o obj-$(CONFIG_ACPI_PPTT) += pptt.o +obj-$(CONFIG_ACPI_PFRUT) += pfr_update.o pfr_telemetry.o # processor has its own "processor." 
module_param namespace processor-y := processor_driver.o diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 2b958b426b03..e7934ba79b02 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -102,6 +102,8 @@ static int fch_misc_setup(struct apd_private_data *pdata) resource_size(rentry->res)); break; } + if (!clk_data->base) + return -ENOMEM; acpi_dev_free_resource_list(&resource_list); diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c index 41e3ebd204ff..a12b55d81209 100644 --- a/drivers/acpi/acpi_pcc.c +++ b/drivers/acpi/acpi_pcc.c @@ -31,7 +31,7 @@ struct pcc_data { struct acpi_pcc_info ctx; }; -struct acpi_pcc_info pcc_ctx; +static struct acpi_pcc_info pcc_ctx; static void pcc_rx_callback(struct mbox_client *cl, void *m) { diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 1db3a2f81763..457e11d851b8 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -14,7 +14,7 @@ int early_acpi_osi_init(void); int acpi_osi_init(void); acpi_status acpi_os_initialize1(void); -int acpi_scan_init(void); +void acpi_scan_init(void); #ifdef CONFIG_PCI void acpi_pci_root_init(void); void acpi_pci_link_init(void); diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c new file mode 100644 index 000000000000..9abf350bd7a5 --- /dev/null +++ b/drivers/acpi/pfr_telemetry.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI Platform Firmware Runtime Telemetry driver + * + * Copyright (C) 2021 Intel Corporation + * Author: Chen Yu <yu.c.chen@intel.com> + * + * This driver allows user space to fetch telemetry data from the + * firmware with the help of the Platform Firmware Runtime Telemetry + * interface. + */ +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/uio.h> +#include <linux/uuid.h> + +#include <uapi/linux/pfrut.h> + +#define PFRT_LOG_EXEC_IDX 0 +#define PFRT_LOG_HISTORY_IDX 1 + +#define PFRT_LOG_ERR 0 +#define PFRT_LOG_WARN 1 +#define PFRT_LOG_INFO 2 +#define PFRT_LOG_VERB 4 + +#define PFRT_FUNC_SET_LEV 1 +#define PFRT_FUNC_GET_LEV 2 +#define PFRT_FUNC_GET_DATA 3 + +#define PFRT_REVID_1 1 +#define PFRT_REVID_2 2 +#define PFRT_DEFAULT_REV_ID PFRT_REVID_1 + +enum log_index { + LOG_STATUS_IDX = 0, + LOG_EXT_STATUS_IDX = 1, + LOG_MAX_SZ_IDX = 2, + LOG_CHUNK1_LO_IDX = 3, + LOG_CHUNK1_HI_IDX = 4, + LOG_CHUNK1_SZ_IDX = 5, + LOG_CHUNK2_LO_IDX = 6, + LOG_CHUNK2_HI_IDX = 7, + LOG_CHUNK2_SZ_IDX = 8, + LOG_ROLLOVER_CNT_IDX = 9, + LOG_RESET_CNT_IDX = 10, + LOG_NR_IDX +}; + +struct pfrt_log_device { + int index; + struct pfrt_log_info info; + struct device *parent_dev; + struct miscdevice miscdev; +}; + +/* pfrt_guid is the parameter for _DSM method */ +static const guid_t pfrt_log_guid = + GUID_INIT(0x75191659, 0x8178, 0x4D9D, 0xB8, 0x8F, 0xAC, 0x5E, + 0x5E, 0x93, 0xE8, 0xBF); + +static DEFINE_IDA(pfrt_log_ida); + +static inline struct pfrt_log_device *to_pfrt_log_dev(struct file *file) +{ + return container_of(file->private_data, struct pfrt_log_device, miscdev); +} + +static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info, + struct pfrt_log_device *pfrt_log_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev); + union acpi_object *out_obj, in_obj, in_buf; + int ret = -EBUSY; + + 
memset(data_info, 0, sizeof(*data_info)); + memset(&in_obj, 0, sizeof(in_obj)); + memset(&in_buf, 0, sizeof(in_buf)); + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_INTEGER; + in_buf.integer.value = pfrt_log_dev->info.log_type; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid, + pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_DATA, + &in_obj, ACPI_TYPE_PACKAGE); + if (!out_obj) + return -EINVAL; + + if (out_obj->package.count < LOG_NR_IDX || + out_obj->package.elements[LOG_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_MAX_SZ_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK1_LO_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK1_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK1_SZ_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK2_LO_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK2_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK2_SZ_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_RESET_CNT_IDX].type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + data_info->status = out_obj->package.elements[LOG_STATUS_IDX].integer.value; + data_info->ext_status = + out_obj->package.elements[LOG_EXT_STATUS_IDX].integer.value; + if (data_info->status != DSM_SUCCEED) { + dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", data_info->status); + dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", + data_info->ext_status); + goto free_acpi_buffer; + } + + data_info->max_data_size = + out_obj->package.elements[LOG_MAX_SZ_IDX].integer.value; + data_info->chunk1_addr_lo = + out_obj->package.elements[LOG_CHUNK1_LO_IDX].integer.value; + data_info->chunk1_addr_hi = + out_obj->package.elements[LOG_CHUNK1_HI_IDX].integer.value; + data_info->chunk1_size = + out_obj->package.elements[LOG_CHUNK1_SZ_IDX].integer.value; + data_info->chunk2_addr_lo = + out_obj->package.elements[LOG_CHUNK2_LO_IDX].integer.value; + data_info->chunk2_addr_hi = + out_obj->package.elements[LOG_CHUNK2_HI_IDX].integer.value; + data_info->chunk2_size = + out_obj->package.elements[LOG_CHUNK2_SZ_IDX].integer.value; + data_info->rollover_cnt = + out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].integer.value; + data_info->reset_cnt = + out_obj->package.elements[LOG_RESET_CNT_IDX].integer.value; + + ret = 0; + +free_acpi_buffer: + kfree(out_obj); + + return ret; +} + +static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev); + union acpi_object *out_obj, *obj, in_obj, in_buf; + enum pfru_dsm_status status, ext_status; + int ret = 0; + + memset(&in_obj, 0, sizeof(in_obj)); + memset(&in_buf, 0, sizeof(in_buf)); + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_INTEGER; + in_buf.integer.value = level; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid, + pfrt_log_dev->info.log_revid, PFRT_FUNC_SET_LEV, + &in_obj, ACPI_TYPE_PACKAGE); + if (!out_obj) + return -EINVAL; + + obj = &out_obj->package.elements[0]; + status = obj->integer.value; + if (status != DSM_SUCCEED) { + obj = &out_obj->package.elements[1]; + ext_status = obj->integer.value; + 
dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status); + dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status); + ret = -EBUSY; + } + + kfree(out_obj); + + return ret; +} + +static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev); + union acpi_object *out_obj, *obj; + enum pfru_dsm_status status, ext_status; + int ret = -EBUSY; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid, + pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_LEV, + NULL, ACPI_TYPE_PACKAGE); + if (!out_obj) + return -EINVAL; + + obj = &out_obj->package.elements[0]; + if (obj->type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + status = obj->integer.value; + if (status != DSM_SUCCEED) { + obj = &out_obj->package.elements[1]; + ext_status = obj->integer.value; + dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status); + dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status); + goto free_acpi_buffer; + } + + obj = &out_obj->package.elements[2]; + if (obj->type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + ret = obj->integer.value; + +free_acpi_buffer: + kfree(out_obj); + + return ret; +} + +static int valid_log_level(u32 level) +{ + return level == PFRT_LOG_ERR || level == PFRT_LOG_WARN || + level == PFRT_LOG_INFO || level == PFRT_LOG_VERB; +} + +static int valid_log_type(u32 type) +{ + return type == PFRT_LOG_EXEC_IDX || type == PFRT_LOG_HISTORY_IDX; +} + +static inline int valid_log_revid(u32 id) +{ + return id == PFRT_REVID_1 || id == PFRT_REVID_2; +} + +static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pfrt_log_device *pfrt_log_dev = to_pfrt_log_dev(file); + struct pfrt_log_data_info data_info; + struct pfrt_log_info info; + void __user *p; + int ret = 0; + + p = (void __user *)arg; + + switch (cmd) { + case PFRT_LOG_IOC_SET_INFO: + if (copy_from_user(&info, p, sizeof(info))) + return -EFAULT; + + if (valid_log_revid(info.log_revid)) + pfrt_log_dev->info.log_revid = info.log_revid; + + if (valid_log_level(info.log_level)) { + ret = set_pfrt_log_level(info.log_level, pfrt_log_dev); + if (ret < 0) + return ret; + + pfrt_log_dev->info.log_level = info.log_level; + } + + if (valid_log_type(info.log_type)) + pfrt_log_dev->info.log_type = info.log_type; + + return 0; + + case PFRT_LOG_IOC_GET_INFO: + ret = get_pfrt_log_level(pfrt_log_dev); + if (ret < 0) + return ret; + + info.log_level = ret; + info.log_type = pfrt_log_dev->info.log_type; + info.log_revid = pfrt_log_dev->info.log_revid; + if (copy_to_user(p, &info, sizeof(info))) + return -EFAULT; + + return 0; + + case PFRT_LOG_IOC_GET_DATA_INFO: + ret = get_pfrt_log_data_info(&data_info, pfrt_log_dev); + if (ret) + return ret; + + if (copy_to_user(p, &data_info, sizeof(struct pfrt_log_data_info))) + return -EFAULT; + + return 0; + + default: + return -ENOTTY; + } +} + +static int +pfrt_log_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct pfrt_log_device *pfrt_log_dev; + struct pfrt_log_data_info info; + unsigned long psize, vsize; + phys_addr_t base_addr; + int ret; + + if (vma->vm_flags & VM_WRITE) + return -EROFS; + + /* changing from read to write with mprotect is not allowed */ + vma->vm_flags &= ~VM_MAYWRITE; + + pfrt_log_dev = to_pfrt_log_dev(file); + + ret = get_pfrt_log_data_info(&info, pfrt_log_dev); + if (ret) + return ret; + + base_addr = (phys_addr_t)((info.chunk2_addr_hi << 32) | info.chunk2_addr_lo); + /* pfrt update has not been launched yet */ + if (!base_addr) +
return -ENODEV; + + psize = info.max_data_size; + /* base address and total buffer size must be page aligned */ + if (!PAGE_ALIGNED(base_addr) || !PAGE_ALIGNED(psize)) + return -ENODEV; + + vsize = vma->vm_end - vma->vm_start; + if (vsize > psize) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (io_remap_pfn_range(vma, vma->vm_start, PFN_DOWN(base_addr), + vsize, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static const struct file_operations acpi_pfrt_log_fops = { + .owner = THIS_MODULE, + .mmap = pfrt_log_mmap, + .unlocked_ioctl = pfrt_log_ioctl, + .llseek = noop_llseek, +}; + +static int acpi_pfrt_log_remove(struct platform_device *pdev) +{ + struct pfrt_log_device *pfrt_log_dev = platform_get_drvdata(pdev); + + misc_deregister(&pfrt_log_dev->miscdev); + + return 0; +} + +static void pfrt_log_put_idx(void *data) +{ + struct pfrt_log_device *pfrt_log_dev = data; + + ida_free(&pfrt_log_ida, pfrt_log_dev->index); +} + +static int acpi_pfrt_log_probe(struct platform_device *pdev) +{ + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + struct pfrt_log_device *pfrt_log_dev; + int ret; + + if (!acpi_has_method(handle, "_DSM")) { + dev_dbg(&pdev->dev, "Missing _DSM\n"); + return -ENODEV; + } + + pfrt_log_dev = devm_kzalloc(&pdev->dev, sizeof(*pfrt_log_dev), GFP_KERNEL); + if (!pfrt_log_dev) + return -ENOMEM; + + ret = ida_alloc(&pfrt_log_ida, GFP_KERNEL); + if (ret < 0) + return ret; + + pfrt_log_dev->index = ret; + ret = devm_add_action_or_reset(&pdev->dev, pfrt_log_put_idx, pfrt_log_dev); + if (ret) + return ret; + + pfrt_log_dev->info.log_revid = PFRT_DEFAULT_REV_ID; + pfrt_log_dev->parent_dev = &pdev->dev; + + pfrt_log_dev->miscdev.minor = MISC_DYNAMIC_MINOR; + pfrt_log_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pfrt%d", + pfrt_log_dev->index); + if (!pfrt_log_dev->miscdev.name) + return -ENOMEM; + + pfrt_log_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "acpi_pfr_telemetry%d", + pfrt_log_dev->index); + if (!pfrt_log_dev->miscdev.nodename) + return -ENOMEM; + + pfrt_log_dev->miscdev.fops = &acpi_pfrt_log_fops; + pfrt_log_dev->miscdev.parent = &pdev->dev; + + ret = misc_register(&pfrt_log_dev->miscdev); + if (ret) + return ret; + + platform_set_drvdata(pdev, pfrt_log_dev); + + return 0; +} + +static const struct acpi_device_id acpi_pfrt_log_ids[] = { + {"INTC1081"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, acpi_pfrt_log_ids); + +static struct platform_driver acpi_pfrt_log_driver = { + .driver = { + .name = "pfr_telemetry", + .acpi_match_table = acpi_pfrt_log_ids, + }, + .probe = acpi_pfrt_log_probe, + .remove = acpi_pfrt_log_remove, +}; +module_platform_driver(acpi_pfrt_log_driver); + +MODULE_DESCRIPTION("Platform Firmware Runtime Update Telemetry driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c new file mode 100644 index 000000000000..6bb0b778b5da --- /dev/null +++ b/drivers/acpi/pfr_update.c @@ -0,0 +1,575 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI Platform Firmware Runtime Update Device driver + * + * Copyright (C) 2021 Intel Corporation + * Author: Chen Yu <yu.c.chen@intel.com> + * + * pfr_update driver is used for Platform Firmware Runtime + * Update, which includes the code injection and driver update. 
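For orientation, the ioctl surface this driver exposes can be driven from user space roughly as follows. This is a hedged, untested sketch rather than part of the patch: it assumes the series' uapi header installs as <linux/pfrut.h>, that the caller has already read a capsule file into image/len, and it takes the /dev/acpi_pfr_update0 node name from the miscdevice nodename registered in the probe path further down.

/* Hypothetical user-space sequence; error handling trimmed. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pfrut.h>	/* assumed install path of uapi/linux/pfrut.h */

static int pfru_stage_and_activate(const void *image, size_t len)
{
	struct pfru_update_cap_info cap;
	int fd = open("/dev/acpi_pfr_update0", O_RDWR);

	if (fd < 0)
		return -1;

	/* Optional: check what the platform claims to support first. */
	if (ioctl(fd, PFRU_IOC_QUERY_CAP, &cap) == 0)
		printf("fw_version %llu, code_rt_version %llu\n",
		       (unsigned long long)cap.fw_version,
		       (unsigned long long)cap.code_rt_version);

	/* write() copies and sanity-checks the capsule in the comm buffer. */
	if (write(fd, image, len) != (ssize_t)len ||
	    ioctl(fd, PFRU_IOC_STAGE_ACTIVATE)) {	/* stage + activate */
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}

PFRU_IOC_STAGE and PFRU_IOC_ACTIVATE are also exposed separately for callers that want to stage the image and trigger it at different times.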
+ */ +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/efi.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/uio.h> +#include <linux/uuid.h> + +#include <uapi/linux/pfrut.h> + +#define PFRU_FUNC_STANDARD_QUERY 0 +#define PFRU_FUNC_QUERY_UPDATE_CAP 1 +#define PFRU_FUNC_QUERY_BUF 2 +#define PFRU_FUNC_START 3 + +#define PFRU_CODE_INJECT_TYPE 1 +#define PFRU_DRIVER_UPDATE_TYPE 2 + +#define PFRU_REVID_1 1 +#define PFRU_REVID_2 2 +#define PFRU_DEFAULT_REV_ID PFRU_REVID_1 + +enum cap_index { + CAP_STATUS_IDX = 0, + CAP_UPDATE_IDX = 1, + CAP_CODE_TYPE_IDX = 2, + CAP_FW_VER_IDX = 3, + CAP_CODE_RT_VER_IDX = 4, + CAP_DRV_TYPE_IDX = 5, + CAP_DRV_RT_VER_IDX = 6, + CAP_DRV_SVN_IDX = 7, + CAP_PLAT_ID_IDX = 8, + CAP_OEM_ID_IDX = 9, + CAP_OEM_INFO_IDX = 10, + CAP_NR_IDX +}; + +enum buf_index { + BUF_STATUS_IDX = 0, + BUF_EXT_STATUS_IDX = 1, + BUF_ADDR_LOW_IDX = 2, + BUF_ADDR_HI_IDX = 3, + BUF_SIZE_IDX = 4, + BUF_NR_IDX +}; + +enum update_index { + UPDATE_STATUS_IDX = 0, + UPDATE_EXT_STATUS_IDX = 1, + UPDATE_AUTH_TIME_LOW_IDX = 2, + UPDATE_AUTH_TIME_HI_IDX = 3, + UPDATE_EXEC_TIME_LOW_IDX = 4, + UPDATE_EXEC_TIME_HI_IDX = 5, + UPDATE_NR_IDX +}; + +enum pfru_start_action { + START_STAGE = 0, + START_ACTIVATE = 1, + START_STAGE_ACTIVATE = 2, +}; + +struct pfru_device { + u32 rev_id, index; + struct device *parent_dev; + struct miscdevice miscdev; +}; + +static DEFINE_IDA(pfru_ida); + +/* + * Manual reference: + * https://uefi.org/sites/default/files/resources/Intel_MM_OS_Interface_Spec_Rev100.pdf + * + * pfru_guid is the parameter for _DSM method + */ +static const guid_t pfru_guid = + GUID_INIT(0xECF9533B, 0x4A3C, 0x4E89, 0x93, 0x9E, 0xC7, 0x71, + 0x12, 0x60, 0x1C, 0x6D); + +/* pfru_code_inj_guid is the UUID to identify code injection EFI capsule file */ +static const guid_t pfru_code_inj_guid = + GUID_INIT(0xB2F84B79, 0x7B6E, 0x4E45, 0x88, 0x5F, 0x3F, 0xB9, + 0xBB, 0x18, 0x54, 0x02); + +/* pfru_drv_update_guid is the UUID to identify driver update EFI capsule file */ +static const guid_t pfru_drv_update_guid = + GUID_INIT(0x4569DD8C, 0x75F1, 0x429A, 0xA3, 0xD6, 0x24, 0xDE, + 0x80, 0x97, 0xA0, 0xDF); + +static inline int pfru_valid_revid(u32 id) +{ + return id == PFRU_REVID_1 || id == PFRU_REVID_2; +} + +static inline struct pfru_device *to_pfru_dev(struct file *file) +{ + return container_of(file->private_data, struct pfru_device, miscdev); +} + +static int query_capability(struct pfru_update_cap_info *cap_hdr, + struct pfru_device *pfru_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev); + union acpi_object *out_obj; + int ret = -EINVAL; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, + pfru_dev->rev_id, + PFRU_FUNC_QUERY_UPDATE_CAP, + NULL, ACPI_TYPE_PACKAGE); + if (!out_obj) + return ret; + + if (out_obj->package.count < CAP_NR_IDX || + out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_UPDATE_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_CODE_TYPE_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_FW_VER_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_CODE_RT_VER_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_DRV_TYPE_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_DRV_RT_VER_IDX].type != 
ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) + goto free_acpi_buffer; + + cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value; + if (cap_hdr->status != DSM_SUCCEED) { + ret = -EBUSY; + dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status); + goto free_acpi_buffer; + } + + cap_hdr->update_cap = out_obj->package.elements[CAP_UPDATE_IDX].integer.value; + memcpy(&cap_hdr->code_type, + out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.pointer, + out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.length); + cap_hdr->fw_version = + out_obj->package.elements[CAP_FW_VER_IDX].integer.value; + cap_hdr->code_rt_version = + out_obj->package.elements[CAP_CODE_RT_VER_IDX].integer.value; + memcpy(&cap_hdr->drv_type, + out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.pointer, + out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.length); + cap_hdr->drv_rt_version = + out_obj->package.elements[CAP_DRV_RT_VER_IDX].integer.value; + cap_hdr->drv_svn = + out_obj->package.elements[CAP_DRV_SVN_IDX].integer.value; + memcpy(&cap_hdr->platform_id, + out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.pointer, + out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.length); + memcpy(&cap_hdr->oem_id, + out_obj->package.elements[CAP_OEM_ID_IDX].buffer.pointer, + out_obj->package.elements[CAP_OEM_ID_IDX].buffer.length); + cap_hdr->oem_info_len = + out_obj->package.elements[CAP_OEM_INFO_IDX].buffer.length; + + ret = 0; + +free_acpi_buffer: + kfree(out_obj); + + return ret; +} + +static int query_buffer(struct pfru_com_buf_info *info, + struct pfru_device *pfru_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev); + union acpi_object *out_obj; + int ret = -EINVAL; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, + pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF, + NULL, ACPI_TYPE_PACKAGE); + if (!out_obj) + return ret; + + if (out_obj->package.count < BUF_NR_IDX || + out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value; + info->ext_status = + out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value; + if (info->status != DSM_SUCCEED) { + ret = -EBUSY; + dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status); + dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status); + + goto free_acpi_buffer; + } + + info->addr_lo = + out_obj->package.elements[BUF_ADDR_LOW_IDX].integer.value; + info->addr_hi = + out_obj->package.elements[BUF_ADDR_HI_IDX].integer.value; + info->buf_size = out_obj->package.elements[BUF_SIZE_IDX].integer.value; + + ret = 0; + +free_acpi_buffer: + kfree(out_obj); + + return ret; +} + +static int get_image_type(const struct efi_manage_capsule_image_header *img_hdr, + struct pfru_device *pfru_dev) +{ + const efi_guid_t *image_type_id = &img_hdr->image_type_id; + + /* check whether this is a code injection or driver update */ + if (guid_equal(image_type_id, 
&pfru_code_inj_guid)) + return PFRU_CODE_INJECT_TYPE; + + if (guid_equal(image_type_id, &pfru_drv_update_guid)) + return PFRU_DRIVER_UPDATE_TYPE; + + return -EINVAL; +} + +static int adjust_efi_size(const struct efi_manage_capsule_image_header *img_hdr, + int size) +{ + /* + * The (u64 hw_ins) was introduced in UEFI spec version 2, + * and (u64 capsule_support) was introduced in version 3. + * The size needs to be adjusted accordingly. That is to + * say, version 1 should subtract the size of hw_ins+capsule_support, + * and version 2 should subtract the size of capsule_support. + */ + size += sizeof(struct efi_manage_capsule_image_header); + switch (img_hdr->ver) { + case 1: + return size - 2 * sizeof(u64); + + case 2: + return size - sizeof(u64); + + default: + /* only support version 1 and 2 */ + return -EINVAL; + } +} + +static bool applicable_image(const void *data, struct pfru_update_cap_info *cap, + struct pfru_device *pfru_dev) +{ + struct pfru_payload_hdr *payload_hdr; + const efi_capsule_header_t *cap_hdr = data; + const struct efi_manage_capsule_header *m_hdr; + const struct efi_manage_capsule_image_header *m_img_hdr; + const struct efi_image_auth *auth; + int type, size; + + /* + * If the code in the capsule is older than the current + * firmware code, the update will be rejected by the firmware, + * so check the version of it upfront without engaging the + * Management Mode update mechanism which may be costly. + */ + size = cap_hdr->headersize; + m_hdr = data + size; + /* + * Current data structure size plus variable array indicated + * by number of (emb_drv_cnt + payload_cnt) + */ + size += offsetof(struct efi_manage_capsule_header, offset_list) + + (m_hdr->emb_drv_cnt + m_hdr->payload_cnt) * sizeof(u64); + m_img_hdr = data + size; + + type = get_image_type(m_img_hdr, pfru_dev); + if (type < 0) + return false; + + size = adjust_efi_size(m_img_hdr, size); + if (size < 0) + return false; + + auth = data + size; + size += sizeof(u64) + auth->auth_info.hdr.len; + payload_hdr = (struct pfru_payload_hdr *)(data + size); + + /* finally compare the version */ + if (type == PFRU_CODE_INJECT_TYPE) + return payload_hdr->rt_ver >= cap->code_rt_version; + + return payload_hdr->rt_ver >= cap->drv_rt_version; +} + +static void print_update_debug_info(struct pfru_updated_result *result, + struct pfru_device *pfru_dev) +{ + dev_dbg(pfru_dev->parent_dev, "Update result:\n"); + dev_dbg(pfru_dev->parent_dev, "Authentication Time Low:%lld\n", + result->low_auth_time); + dev_dbg(pfru_dev->parent_dev, "Authentication Time High:%lld\n", + result->high_auth_time); + dev_dbg(pfru_dev->parent_dev, "Execution Time Low:%lld\n", + result->low_exec_time); + dev_dbg(pfru_dev->parent_dev, "Execution Time High:%lld\n", + result->high_exec_time); +} + +static int start_update(int action, struct pfru_device *pfru_dev) +{ + union acpi_object *out_obj, in_obj, in_buf; + struct pfru_updated_result update_result; + acpi_handle handle; + int ret = -EINVAL; + + memset(&in_obj, 0, sizeof(in_obj)); + memset(&in_buf, 0, sizeof(in_buf)); + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_INTEGER; + in_buf.integer.value = action; + + handle = ACPI_HANDLE(pfru_dev->parent_dev); + out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, + pfru_dev->rev_id, PFRU_FUNC_START, + &in_obj, ACPI_TYPE_PACKAGE); + if (!out_obj) + return ret; + + if (out_obj->package.count < UPDATE_NR_IDX || + out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER
|| + out_obj->package.elements[UPDATE_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + update_result.status = + out_obj->package.elements[UPDATE_STATUS_IDX].integer.value; + update_result.ext_status = + out_obj->package.elements[UPDATE_EXT_STATUS_IDX].integer.value; + + if (update_result.status != DSM_SUCCEED) { + ret = -EBUSY; + dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status); + dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", + update_result.ext_status); + + goto free_acpi_buffer; + } + + update_result.low_auth_time = + out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].integer.value; + update_result.high_auth_time = + out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].integer.value; + update_result.low_exec_time = + out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].integer.value; + update_result.high_exec_time = + out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].integer.value; + + print_update_debug_info(&update_result, pfru_dev); + ret = 0; + +free_acpi_buffer: + kfree(out_obj); + + return ret; +} + +static long pfru_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pfru_update_cap_info cap_hdr; + struct pfru_device *pfru_dev = to_pfru_dev(file); + void __user *p = (void __user *)arg; + u32 rev; + int ret; + + switch (cmd) { + case PFRU_IOC_QUERY_CAP: + ret = query_capability(&cap_hdr, pfru_dev); + if (ret) + return ret; + + if (copy_to_user(p, &cap_hdr, sizeof(cap_hdr))) + return -EFAULT; + + return 0; + + case PFRU_IOC_SET_REV: + if (copy_from_user(&rev, p, sizeof(rev))) + return -EFAULT; + + if (!pfru_valid_revid(rev)) + return -EINVAL; + + pfru_dev->rev_id = rev; + + return 0; + + case PFRU_IOC_STAGE: + return start_update(START_STAGE, pfru_dev); + + case PFRU_IOC_ACTIVATE: + return start_update(START_ACTIVATE, pfru_dev); + + case PFRU_IOC_STAGE_ACTIVATE: + return start_update(START_STAGE_ACTIVATE, pfru_dev); + + default: + return -ENOTTY; + } +} + +static ssize_t pfru_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct pfru_device *pfru_dev = to_pfru_dev(file); + struct pfru_update_cap_info cap; + struct pfru_com_buf_info buf_info; + phys_addr_t phy_addr; + struct iov_iter iter; + struct iovec iov; + char *buf_ptr; + int ret; + + ret = query_buffer(&buf_info, pfru_dev); + if (ret) + return ret; + + if (len > buf_info.buf_size) + return -EINVAL; + + iov.iov_base = (void __user *)buf; + iov.iov_len = len; + iov_iter_init(&iter, WRITE, &iov, 1, len); + + /* map the communication buffer */ + phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo); + buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB); + if (!buf_ptr) + return -ENOMEM; + + if (!copy_from_iter_full(buf_ptr, len, &iter)) { + ret = -EINVAL; + goto unmap; + } + + /* check if the capsule header has a valid version number */ + ret = query_capability(&cap, pfru_dev); + if (ret) + goto unmap; + + if (!applicable_image(buf_ptr, &cap, pfru_dev)) + ret = -EINVAL; + +unmap: + memunmap(buf_ptr); + + return ret ?: len; +} + +static const struct file_operations acpi_pfru_fops = { + .owner = THIS_MODULE, + .write = pfru_write, + .unlocked_ioctl = pfru_ioctl, + 
.llseek = noop_llseek, +}; + +static int acpi_pfru_remove(struct platform_device *pdev) +{ + struct pfru_device *pfru_dev = platform_get_drvdata(pdev); + + misc_deregister(&pfru_dev->miscdev); + + return 0; +} + +static void pfru_put_idx(void *data) +{ + struct pfru_device *pfru_dev = data; + + ida_free(&pfru_ida, pfru_dev->index); +} + +static int acpi_pfru_probe(struct platform_device *pdev) +{ + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + struct pfru_device *pfru_dev; + int ret; + + if (!acpi_has_method(handle, "_DSM")) { + dev_dbg(&pdev->dev, "Missing _DSM\n"); + return -ENODEV; + } + + pfru_dev = devm_kzalloc(&pdev->dev, sizeof(*pfru_dev), GFP_KERNEL); + if (!pfru_dev) + return -ENOMEM; + + ret = ida_alloc(&pfru_ida, GFP_KERNEL); + if (ret < 0) + return ret; + + pfru_dev->index = ret; + ret = devm_add_action_or_reset(&pdev->dev, pfru_put_idx, pfru_dev); + if (ret) + return ret; + + pfru_dev->rev_id = PFRU_DEFAULT_REV_ID; + pfru_dev->parent_dev = &pdev->dev; + + pfru_dev->miscdev.minor = MISC_DYNAMIC_MINOR; + pfru_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pfru%d", pfru_dev->index); + if (!pfru_dev->miscdev.name) + return -ENOMEM; + + pfru_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "acpi_pfr_update%d", pfru_dev->index); + if (!pfru_dev->miscdev.nodename) + return -ENOMEM; + + pfru_dev->miscdev.fops = &acpi_pfru_fops; + pfru_dev->miscdev.parent = &pdev->dev; + + ret = misc_register(&pfru_dev->miscdev); + if (ret) + return ret; + + platform_set_drvdata(pdev, pfru_dev); + + return 0; +} + +static const struct acpi_device_id acpi_pfru_ids[] = { + {"INTC1080"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, acpi_pfru_ids); + +static struct platform_driver acpi_pfru_driver = { + .driver = { + .name = "pfr_update", + .acpi_match_table = acpi_pfru_ids, + }, + .probe = acpi_pfru_probe, + .remove = acpi_pfru_remove, +}; +module_platform_driver(acpi_pfru_driver); + +MODULE_DESCRIPTION("Platform Firmware Runtime Update device driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index c215bc8723d0..1331756d4cfc 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2502,42 +2502,33 @@ int acpi_bus_register_early_device(int type) } EXPORT_SYMBOL_GPL(acpi_bus_register_early_device); -static int acpi_bus_scan_fixed(void) +static void acpi_bus_scan_fixed(void) { - int result = 0; - - /* - * Enumerate all fixed-feature devices. 
- */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) { - struct acpi_device *device = NULL; - - result = acpi_add_single_object(&device, NULL, - ACPI_BUS_TYPE_POWER_BUTTON, false); - if (result) - return result; - - device->flags.match_driver = true; - result = device_attach(&device->dev); - if (result < 0) - return result; - - device_init_wakeup(&device->dev, true); + struct acpi_device *adev = NULL; + + acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_POWER_BUTTON, + false); + if (adev) { + adev->flags.match_driver = true; + if (device_attach(&adev->dev) >= 0) + device_init_wakeup(&adev->dev, true); + else + dev_dbg(&adev->dev, "No driver\n"); + } } if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) { - struct acpi_device *device = NULL; - - result = acpi_add_single_object(&device, NULL, - ACPI_BUS_TYPE_SLEEP_BUTTON, false); - if (result) - return result; - - device->flags.match_driver = true; - result = device_attach(&device->dev); + struct acpi_device *adev = NULL; + + acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON, + false); + if (adev) { + adev->flags.match_driver = true; + if (device_attach(&adev->dev) < 0) + dev_dbg(&adev->dev, "No driver\n"); + } } - - return result < 0 ? result : 0; } static void __init acpi_get_spcr_uart_addr(void) @@ -2558,9 +2549,8 @@ static void __init acpi_get_spcr_uart_addr(void) static bool acpi_scan_initialized; -int __init acpi_scan_init(void) +void __init acpi_scan_init(void) { - int result; acpi_status status; struct acpi_table_stao *stao_ptr; @@ -2610,33 +2600,23 @@ int __init acpi_scan_init(void) /* * Enumerate devices in the ACPI namespace. */ - result = acpi_bus_scan(ACPI_ROOT_OBJECT); - if (result) - goto out; + if (acpi_bus_scan(ACPI_ROOT_OBJECT)) + goto unlock; acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT); if (!acpi_root) - goto out; + goto unlock; /* Fixed feature devices do not exist on HW-reduced platform */ - if (!acpi_gbl_reduced_hardware) { - result = acpi_bus_scan_fixed(); - if (result) { - acpi_detach_data(acpi_root->handle, - acpi_scan_drop_device); - acpi_device_del(acpi_root); - acpi_bus_put_acpi_device(acpi_root); - goto out; - } - } + if (!acpi_gbl_reduced_hardware) + acpi_bus_scan_fixed(); acpi_turn_off_unused_power_resources(); acpi_scan_initialized = true; - out: +unlock: mutex_unlock(&acpi_scan_lock); - return result; } static struct acpi_probe_entry *ape; diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 25c2d0be953e..d589543875b8 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -107,8 +107,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) pr_info("SPCR table version %d\n", table->header.revision); if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - switch (ACPI_ACCESS_BIT_WIDTH(( - table->serial_port.access_width))) { + u32 bit_width = table->serial_port.access_width; + + if (bit_width > ACPI_ACCESS_BIT_MAX) { + pr_err("Unacceptable wide SPCR Access Width. Defaulting to byte size\n"); + bit_width = ACPI_ACCESS_BIT_DEFAULT; + } + switch (ACPI_ACCESS_BIT_WIDTH((bit_width))) { default: pr_err("Unexpected SPCR Access Width. 
Defaulting to byte size\n"); fallthrough; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index c75fb600740c..8351c5638880 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -69,7 +69,7 @@ #include <uapi/linux/android/binder.h> -#include <asm/cacheflush.h> +#include <linux/cacheflush.h> #include "binder_internal.h" #include "binder_trace.h" @@ -1608,15 +1608,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t, /** * binder_get_object() - gets object and checks for valid metadata * @proc: binder_proc owning the buffer + * @u: sender's user pointer to base of buffer * @buffer: binder_buffer that we're parsing. * @offset: offset in the @buffer at which to validate an object. * @object: struct binder_object to read into * - * Return: If there's a valid metadata object at @offset in @buffer, the + * Copy the binder object at the given offset into @object. If @u is + * provided then the copy is from the sender's buffer. If not, then + * it is copied from the target's @buffer. + * + * Return: If there's a valid metadata object at @offset, the * size of that object. Otherwise, it returns zero. The object * is read into the struct binder_object pointed to by @object. */ static size_t binder_get_object(struct binder_proc *proc, + const void __user *u, struct binder_buffer *buffer, unsigned long offset, struct binder_object *object) @@ -1626,10 +1632,16 @@ static size_t binder_get_object(struct binder_proc *proc, size_t object_size = 0; read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); - if (offset > buffer->data_size || read_size < sizeof(*hdr) || - binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, - offset, read_size)) + if (offset > buffer->data_size || read_size < sizeof(*hdr)) return 0; + if (u) { + if (copy_from_user(object, u + offset, read_size)) + return 0; + } else { + if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, + offset, read_size)) + return 0; + } /* Ok, now see if we read a complete object. */ hdr = &object->hdr; @@ -1702,7 +1714,7 @@ static struct binder_buffer_object *binder_validate_ptr( b, buffer_offset, sizeof(object_offset))) return NULL; - object_size = binder_get_object(proc, b, object_offset, object); + object_size = binder_get_object(proc, NULL, b, object_offset, object); if (!object_size || object->hdr.type != BINDER_TYPE_PTR) return NULL; if (object_offsetp) @@ -1767,7 +1779,8 @@ static bool binder_validate_fixup(struct binder_proc *proc, unsigned long buffer_offset; struct binder_object last_object; struct binder_buffer_object *last_bbo; - size_t object_size = binder_get_object(proc, b, last_obj_offset, + size_t object_size = binder_get_object(proc, NULL, b, + last_obj_offset, &last_object); if (object_size != sizeof(*last_bbo)) return false; @@ -1882,7 +1895,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, buffer, buffer_offset, sizeof(object_offset))) - object_size = binder_get_object(proc, buffer, + object_size = binder_get_object(proc, NULL, buffer, object_offset, &object); if (object_size == 0) { pr_err("transaction release %d bad object at offset %lld, size %zd\n", @@ -1933,7 +1946,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, case BINDER_TYPE_FD: { /* * No need to close the file here since user-space - * closes it for for successfully delivered + * closes it for successfully delivered * transactions. 
For transactions that weren't * delivered, the new fd was never allocated so * there is no need to close and the fput on the @@ -2220,16 +2233,258 @@ err_fd_not_accepted: return ret; } -static int binder_translate_fd_array(struct binder_fd_array_object *fda, +/** + * struct binder_ptr_fixup - data to be fixed-up in target buffer + * @offset offset in target buffer to fixup + * @skip_size bytes to skip in copy (fixup will be written later) + * @fixup_data data to write at fixup offset + * @node list node + * + * This is used for the pointer fixup list (pf) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_ptr_fixup { + binder_size_t offset; + size_t skip_size; + binder_uintptr_t fixup_data; + struct list_head node; +}; + +/** + * struct binder_sg_copy - scatter-gather data to be copied + * @offset offset in target buffer + * @sender_uaddr user address in source buffer + * @length bytes to copy + * @node list node + * + * This is used for the sg copy list (sgc) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_sg_copy { + binder_size_t offset; + const void __user *sender_uaddr; + size_t length; + struct list_head node; +}; + +/** + * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data + * @alloc: binder_alloc associated with @buffer + * @buffer: binder buffer in target process + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Processes all elements of @sgc_head, applying fixups from @pf_head + * and copying the scatter-gather data from the source process' user + * buffer to the target's buffer. It is expected that the list creation + * and processing all occurs during binder_transaction() so these lists + * are only accessed in local context. + * + * Return: 0=success, else -errno + */ +static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, + struct binder_buffer *buffer, + struct list_head *sgc_head, + struct list_head *pf_head) +{ + int ret = 0; + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *pf = + list_first_entry_or_null(pf_head, struct binder_ptr_fixup, + node); + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + size_t bytes_copied = 0; + + while (bytes_copied < sgc->length) { + size_t copy_size; + size_t bytes_left = sgc->length - bytes_copied; + size_t offset = sgc->offset + bytes_copied; + + /* + * We copy up to the fixup (pointed to by pf) + */ + copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) + : bytes_left; + if (!ret && copy_size) + ret = binder_alloc_copy_user_to_buffer( + alloc, buffer, + offset, + sgc->sender_uaddr + bytes_copied, + copy_size); + bytes_copied += copy_size; + if (copy_size != bytes_left) { + BUG_ON(!pf); + /* we stopped at a fixup offset */ + if (pf->skip_size) { + /* + * we are just skipping. This is for + * BINDER_TYPE_FDA where the translated + * fds will be fixed up when we get + * to target context. 
+ */ + bytes_copied += pf->skip_size; + } else { + /* apply the fixup indicated by pf */ + if (!ret) + ret = binder_alloc_copy_to_buffer( + alloc, buffer, + pf->offset, + &pf->fixup_data, + sizeof(pf->fixup_data)); + bytes_copied += sizeof(pf->fixup_data); + } + list_del(&pf->node); + kfree(pf); + pf = list_first_entry_or_null(pf_head, + struct binder_ptr_fixup, node); + } + } + list_del(&sgc->node); + kfree(sgc); + } + BUG_ON(!list_empty(pf_head)); + BUG_ON(!list_empty(sgc_head)); + + return ret > 0 ? -EINVAL : ret; +} + +/** + * binder_cleanup_deferred_txn_lists() - free specified lists + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Called to clean up @sgc_head and @pf_head if there is an + * error. + */ +static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, + struct list_head *pf_head) +{ + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *pf, *tmppf; + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + list_del(&pf->node); + kfree(pf); + } +} + +/** + * binder_defer_copy() - queue a scatter-gather buffer for copy + * @sgc_head: list_head of scatter-gather copy list + * @offset: binder buffer offset in target process + * @sender_uaddr: user address in source process + * @length: bytes to copy + * + * Specify a scatter-gather block to be copied. The actual copy must + * be deferred until all the needed fixups are identified and queued. + * Then the copy and fixups are done together so un-translated values + * from the source are never visible in the target buffer. + * + * We are guaranteed that repeated calls to this function will have + * monotonically increasing @offset values so the list will naturally + * be ordered. + * + * Return: 0=success, else -errno + */ +static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, + const void __user *sender_uaddr, size_t length) +{ + struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); + + if (!bc) + return -ENOMEM; + + bc->offset = offset; + bc->sender_uaddr = sender_uaddr; + bc->length = length; + INIT_LIST_HEAD(&bc->node); + + /* + * We are guaranteed that the deferred copies are in-order + * so just add to the tail. + */ + list_add_tail(&bc->node, sgc_head); + + return 0; +} + +/** + * binder_add_fixup() - queue a fixup to be applied to sg copy + * @pf_head: list_head of binder ptr fixup list + * @offset: binder buffer offset in target process + * @fixup: bytes to be copied for fixup + * @skip_size: bytes to skip when copying (fixup will be applied later) + * + * Add the specified fixup to a list ordered by @offset. When copying + * the scatter-gather buffers, the fixup will be copied instead of + * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup + * will be applied later (in target process context), so we just skip + * the bytes specified by @skip_size. If @skip_size is 0, we copy the + * value in @fixup. + * + * This function is called *mostly* in @offset order, but there are + * exceptions. Since out-of-order inserts are relatively uncommon, + * we insert the new element by searching backward from the tail of + * the list. 
+ * + * Return: 0=success, else -errno + */ +static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, + binder_uintptr_t fixup, size_t skip_size) +{ + struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); + struct binder_ptr_fixup *tmppf; + + if (!pf) + return -ENOMEM; + + pf->offset = offset; + pf->fixup_data = fixup; + pf->skip_size = skip_size; + INIT_LIST_HEAD(&pf->node); + + /* Fixups are *mostly* added in-order, but there are some + * exceptions. Look backwards through list for insertion point. + */ + list_for_each_entry_reverse(tmppf, pf_head, node) { + if (tmppf->offset < pf->offset) { + list_add(&pf->node, &tmppf->node); + return 0; + } + } + /* + * if we get here, then the new offset is the lowest so + * insert at the head + */ + list_add(&pf->node, pf_head); + return 0; +} + +static int binder_translate_fd_array(struct list_head *pf_head, + struct binder_fd_array_object *fda, + const void __user *sender_ubuffer, struct binder_buffer_object *parent, + struct binder_buffer_object *sender_uparent, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { binder_size_t fdi, fd_buf_size; binder_size_t fda_offset; + const void __user *sender_ufda_base; struct binder_proc *proc = thread->proc; - struct binder_proc *target_proc = t->to_proc; + int ret; fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { @@ -2253,29 +2508,36 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, */ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + fda->parent_offset; - if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { + sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + + fda->parent_offset; + + if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || + !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); return -EINVAL; } + ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); + if (ret) + return ret; + for (fdi = 0; fdi < fda->num_fds; fdi++) { u32 fd; - int ret; binder_size_t offset = fda_offset + fdi * sizeof(fd); + binder_size_t sender_uoffset = fdi * sizeof(fd); - ret = binder_alloc_copy_from_buffer(&target_proc->alloc, - &fd, t->buffer, - offset, sizeof(fd)); + ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); if (!ret) ret = binder_translate_fd(fd, offset, t, thread, in_reply_to); - if (ret < 0) - return ret; + if (ret) + return ret > 0 ? 
-EINVAL : ret; } return 0; } -static int binder_fixup_parent(struct binder_transaction *t, +static int binder_fixup_parent(struct list_head *pf_head, + struct binder_transaction *t, struct binder_thread *thread, struct binder_buffer_object *bp, binder_size_t off_start_offset, @@ -2321,14 +2583,7 @@ static int binder_fixup_parent(struct binder_transaction *t, } buffer_offset = bp->parent_offset + (uintptr_t)parent->buffer - (uintptr_t)b->user_data; - if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, - &bp->buffer, sizeof(bp->buffer))) { - binder_user_error("%d:%d got transaction with invalid parent offset\n", - proc->pid, thread->pid); - return -EINVAL; - } - - return 0; + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); } /** @@ -2455,6 +2710,7 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t off_start_offset, off_end_offset; binder_size_t off_min; binder_size_t sg_buf_offset, sg_buf_end_offset; + binder_size_t user_offset = 0; struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; @@ -2469,6 +2725,12 @@ static void binder_transaction(struct binder_proc *proc, int t_debug_id = atomic_inc_return(&binder_last_id); char *secctx = NULL; u32 secctx_sz = 0; + struct list_head sgc_head; + struct list_head pf_head; + const void __user *user_buffer = (const void __user *) + (uintptr_t)tr->data.ptr.buffer; + INIT_LIST_HEAD(&sgc_head); + INIT_LIST_HEAD(&pf_head); e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -2782,19 +3044,6 @@ static void binder_transaction(struct binder_proc *proc, if (binder_alloc_copy_user_to_buffer( &target_proc->alloc, - t->buffer, 0, - (const void __user *) - (uintptr_t)tr->data.ptr.buffer, - tr->data_size)) { - binder_user_error("%d:%d got transaction with invalid data ptr\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - return_error_param = -EFAULT; - return_error_line = __LINE__; - goto err_copy_data_failed; - } - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, t->buffer, ALIGN(tr->data_size, sizeof(void *)), (const void __user *) @@ -2837,6 +3086,7 @@ static void binder_transaction(struct binder_proc *proc, size_t object_size; struct binder_object object; binder_size_t object_offset; + binder_size_t copy_size; if (binder_alloc_copy_from_buffer(&target_proc->alloc, &object_offset, @@ -2848,8 +3098,27 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - object_size = binder_get_object(target_proc, t->buffer, - object_offset, &object); + + /* + * Copy the source user buffer up to the next object + * that will be processed. 
+ */ + copy_size = object_offset - user_offset; + if (copy_size && (user_offset > object_offset || + binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + copy_size))) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + object_size = binder_get_object(target_proc, user_buffer, + t->buffer, object_offset, &object); if (object_size == 0 || object_offset < off_min) { binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", proc->pid, thread->pid, @@ -2861,6 +3130,11 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } + /* + * Set offset to the next buffer fragment to be + * copied + */ + user_offset = object_offset + object_size; hdr = &object.hdr; off_min = object_offset + object_size; @@ -2923,6 +3197,8 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_FDA: { struct binder_object ptr_object; binder_size_t parent_offset; + struct binder_object user_object; + size_t user_parent_size; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); size_t num_valid = (buffer_offset - off_start_offset) / @@ -2954,11 +3230,35 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_parent; } - ret = binder_translate_fd_array(fda, parent, t, thread, - in_reply_to); - if (ret < 0) { + /* + * We need to read the user version of the parent + * object to get the original user offset + */ + user_parent_size = + binder_get_object(proc, user_buffer, t->buffer, + parent_offset, &user_object); + if (user_parent_size != sizeof(user_object.bbo)) { + binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", + proc->pid, thread->pid, + user_parent_size, + sizeof(user_object.bbo)); return_error = BR_FAILED_REPLY; - return_error_param = ret; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_parent; + } + ret = binder_translate_fd_array(&pf_head, fda, + user_buffer, parent, + &user_object.bbo, t, + thread, in_reply_to); + if (!ret) + ret = binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fda, sizeof(*fda)); + if (ret) { + return_error = BR_FAILED_REPLY; + return_error_param = ret > 0 ? 
-EINVAL : ret; return_error_line = __LINE__; goto err_translate_failed; } @@ -2980,19 +3280,14 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, - t->buffer, - sg_buf_offset, - (const void __user *) - (uintptr_t)bp->buffer, - bp->length)) { - binder_user_error("%d:%d got transaction with invalid offsets ptr\n", - proc->pid, thread->pid); - return_error_param = -EFAULT; + ret = binder_defer_copy(&sgc_head, sg_buf_offset, + (const void __user *)(uintptr_t)bp->buffer, + bp->length); + if (ret) { return_error = BR_FAILED_REPLY; + return_error_param = ret; return_error_line = __LINE__; - goto err_copy_data_failed; + goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ bp->buffer = (uintptr_t) @@ -3001,7 +3296,8 @@ static void binder_transaction(struct binder_proc *proc, num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); - ret = binder_fixup_parent(t, thread, bp, + ret = binder_fixup_parent(&pf_head, t, + thread, bp, off_start_offset, num_valid, last_fixup_obj_off, @@ -3028,6 +3324,30 @@ static void binder_transaction(struct binder_proc *proc, goto err_bad_object_type; } } + /* Done processing objects, copy the rest of the buffer */ + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + tr->data_size - user_offset)) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + + ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, + &sgc_head, &pf_head); + if (ret) { + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_copy_data_failed; + } if (t->buffer->oneway_spam_suspect) tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; else @@ -3101,6 +3421,7 @@ err_bad_object_type: err_bad_offset: err_bad_parent: err_copy_data_failed: + binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, NULL, t->buffer, diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index a7da8ea7b3ed..cb54631fd950 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -146,7 +146,7 @@ config SATA_AHCI_PLATFORM config AHCI_BRCM tristate "Broadcom AHCI SATA support" depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \ - ARCH_BCM_63XX + ARCH_BCM_63XX || COMPILE_TEST select SATA_HOST help This option enables support for the AHCI SATA3 controller found on @@ -156,7 +156,7 @@ config AHCI_BRCM config AHCI_DA850 tristate "DaVinci DA850 AHCI SATA support" - depends on ARCH_DAVINCI_DA850 + depends on ARCH_DAVINCI_DA850 || COMPILE_TEST select SATA_HOST help This option enables support for the DaVinci DA850 SoC's @@ -166,7 +166,7 @@ config AHCI_DA850 config AHCI_DM816 tristate "DaVinci DM816 AHCI SATA support" - depends on ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS || COMPILE_TEST select SATA_HOST help This option enables support for the DaVinci DM816 SoC's @@ -206,7 +206,7 @@ config AHCI_CEVA config AHCI_MTK tristate "MediaTek AHCI SATA support" - depends on ARCH_MEDIATEK + depends on ARCH_MEDIATEK || 
COMPILE_TEST select MFD_SYSCON select SATA_HOST help @@ -217,7 +217,7 @@ config AHCI_MTK config AHCI_MVEBU tristate "Marvell EBU AHCI SATA support" - depends on ARCH_MVEBU + depends on ARCH_MVEBU || COMPILE_TEST select SATA_HOST help This option enables support for the Marvebu EBU SoC's @@ -236,7 +236,7 @@ config AHCI_OCTEON config AHCI_SUNXI tristate "Allwinner sunxi AHCI SATA support" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST select SATA_HOST help This option enables support for the Allwinner sunxi SoC's @@ -246,7 +246,7 @@ config AHCI_SUNXI config AHCI_TEGRA tristate "NVIDIA Tegra AHCI SATA support" - depends on ARCH_TEGRA + depends on ARCH_TEGRA || COMPILE_TEST select SATA_HOST help This option enables support for the NVIDIA Tegra SoC's @@ -256,7 +256,7 @@ config AHCI_TEGRA config AHCI_XGENE tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support" - depends on PHY_XGENE + depends on PHY_XGENE || COMPILE_TEST select SATA_HOST help This option enables support for APM X-Gene SoC SATA host controller. @@ -273,7 +273,7 @@ config AHCI_QORIQ config SATA_FSL tristate "Freescale 3.0Gbps SATA support" - depends on FSL_SOC + depends on FSL_SOC || COMPILE_TEST select SATA_HOST help This option enables support for Freescale 3.0Gbps SATA controller. @@ -294,7 +294,7 @@ config SATA_GEMINI config SATA_AHCI_SEATTLE tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support" - depends on ARCH_SEATTLE + depends on ARCH_SEATTLE || COMPILE_TEST select SATA_HOST help This option enables support for AMD Seattle SATA host controller. @@ -432,18 +432,6 @@ config SATA_DWC_OLD_DMA This option enables support for old device trees without the "dmas" property. -config SATA_DWC_DEBUG - bool "Debugging driver version" - depends on SATA_DWC - help - This option enables debugging output in the driver. - -config SATA_DWC_VDEBUG - bool "Verbose debug output" - depends on SATA_DWC_DEBUG - help - This option enables the taskfile dumping and NCQ debugging. - config SATA_HIGHBANK tristate "Calxeda Highbank SATA support" depends on ARCH_HIGHBANK || COMPILE_TEST @@ -611,7 +599,7 @@ config PATA_ATP867X config PATA_BK3710 tristate "Palmchip BK3710 PATA support" - depends on ARCH_DAVINCI + depends on ARCH_DAVINCI || COMPILE_TEST select PATA_TIMINGS help This option enables support for the integrated IDE controller on @@ -649,7 +637,7 @@ config PATA_CS5530 config PATA_CS5535 tristate "CS5535 PATA support (Experimental)" - depends on PCI && X86_32 + depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST)) help This option enables support for the NatSemi/AMD CS5535 companion chip used with the Geode processor family. @@ -697,7 +685,7 @@ config PATA_EP93XX config PATA_FTIDE010 tristate "Faraday Technology FTIDE010 PATA support" depends on OF - depends on ARM + depends on ARM || COMPILE_TEST depends on SATA_GEMINI help This option enables support for the Faraday FTIDE010 @@ -760,7 +748,7 @@ config PATA_ICSIDE config PATA_IMX tristate "PATA support for Freescale iMX" - depends on ARCH_MXC + depends on ARCH_MXC || COMPILE_TEST select PATA_TIMINGS help This option enables support for the PATA host available on Freescale @@ -981,7 +969,7 @@ config PATA_VIA config PATA_PXA tristate "PXA DMA-capable PATA support" - depends on ARCH_PXA + depends on ARCH_PXA || COMPILE_TEST help This option enables support for harddrive attached to PXA CPU's bus. 
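The recurring `depends on <ARCH> || COMPILE_TEST` change in the hunks above lets allmodconfig/allyesconfig builds on other architectures compile these drivers, so build regressions surface without needing the target hardware. For that to work the driver has to stick to portable interfaces (readl/writel, ioremap) rather than arch-private headers; behaviour that only makes sense on the real SoC can be fenced off with IS_ENABLED(). A minimal sketch of the idea, assuming a hypothetical quirk function and register offset (neither is taken from the patch):

#include <linux/io.h>
#include <linux/kconfig.h>

#define EXAMPLE_QUIRK_REG	0x100	/* illustrative offset, not from the patch */

static void example_quirk_init(void __iomem *mmio)
{
	/*
	 * This compiles on every architecture under COMPILE_TEST;
	 * the register write is only issued when the real platform
	 * config symbol is actually selected.
	 */
	if (!IS_ENABLED(CONFIG_ARCH_BRCMSTB))
		return;

	writel(0x1, mmio + EXAMPLE_QUIRK_REG);
}

The same reasoning is why PATA_CS5535 can now gate on `X86_32 || (X86_64 && COMPILE_TEST)`: the C code itself is portable, only the hardware is not.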
@@ -1157,7 +1145,7 @@ config PATA_RZ1000 config PATA_SAMSUNG_CF tristate "Samsung SoC PATA support" - depends on SAMSUNG_DEV_IDE + depends on SAMSUNG_DEV_IDE || COMPILE_TEST select PATA_TIMINGS help This option enables basic support for Samsung's S3C/S5P board diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 2a04e8abd397..536d4cb8f08b 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -185,8 +185,6 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; unsigned int si, last_si = 0; - VPRINTK("ENTER\n"); - /* * Next, the S/G list. */ @@ -362,8 +360,6 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id struct ata_host *host; int n_ports, i, rc; - VPRINTK("ENTER\n"); - WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS); ata_print_version_once(&pdev->dev, DRV_VERSION); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 1e1167e725a4..ab5811ef5a53 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -51,6 +51,7 @@ enum board_ids { board_ahci, board_ahci_ign_iferr, board_ahci_mobile, + board_ahci_no_debounce_delay, board_ahci_nomsi, board_ahci_noncq, board_ahci_nosntf, @@ -141,6 +142,13 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + [board_ahci_no_debounce_delay] = { + .flags = AHCI_FLAG_COMMON, + .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, [board_ahci_nomsi] = { AHCI_HFLAGS (AHCI_HFLAG_NO_MSI), .flags = AHCI_FLAG_COMMON, @@ -437,6 +445,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { board_ahci_al }, /* AMD */ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ + { PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */ /* AMD is using RAID class only for ahci controllers */ @@ -684,7 +693,7 @@ static void ahci_pci_init_controller(struct ata_host *host) /* clear port IRQ */ tmp = readl(port_mmio + PORT_IRQ_STAT); - VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); + dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp); if (tmp) writel(tmp, port_mmio + PORT_IRQ_STAT); } @@ -700,8 +709,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, bool online; int rc; - DPRINTK("ENTER\n"); - hpriv->stop_engine(ap); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), @@ -709,8 +716,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, hpriv->start_engine(ap); - DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); - /* vt8251 doesn't clear BSY on signature FIS reception, * request follow-up softreset. 
*/ @@ -790,8 +795,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, bool online; int rc, i; - DPRINTK("ENTER\n"); - hpriv->stop_engine(ap); for (i = 0; i < 2; i++) { @@ -829,7 +832,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, if (online) *class = ahci_dev_classify(ap); - DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); return rc; } @@ -1476,7 +1478,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) u32 irq_stat, irq_masked; unsigned int handled = 1; - VPRINTK("ENTER\n"); hpriv = host->private_data; mmio = hpriv->mmio; irq_stat = readl(mmio + HOST_IRQ_STAT); @@ -1493,7 +1494,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) irq_stat = readl(mmio + HOST_IRQ_STAT); spin_unlock(&host->lock); } while (irq_stat); - VPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } @@ -1657,7 +1657,7 @@ static ssize_t remapped_nvme_show(struct device *dev, struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; - return sprintf(buf, "%u\n", hpriv->remapped_nvme); + return sysfs_emit(buf, "%u\n", hpriv->remapped_nvme); } static DEVICE_ATTR_RO(remapped_nvme); @@ -1673,8 +1673,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) int n_ports, i, rc; int ahci_pci_bar = AHCI_PCI_BAR_STANDARD; - VPRINTK("ENTER\n"); - WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS); ata_print_version_once(&pdev->dev, DRV_VERSION); diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 6e9c5ade4c2e..64dd8aa397d5 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -246,7 +246,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv) } static unsigned int brcm_ahci_read_id(struct ata_device *dev, - struct ata_taskfile *tf, u16 *id) + struct ata_taskfile *tf, __le16 *id) { struct ata_port *ap = dev->link->ap; struct ata_host *host = ap->host; @@ -333,7 +333,7 @@ static struct ata_port_operations ahci_brcm_platform_ops = { static const struct ata_port_info ahci_brcm_port_info = { .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, - .link_flags = ATA_LFLAG_NO_DB_DELAY, + .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_brcm_platform_ops, diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c index e9c7c07fd84c..acf59f51b356 100644 --- a/drivers/ata/ahci_ceva.c +++ b/drivers/ata/ahci_ceva.c @@ -92,9 +92,8 @@ struct ceva_ahci_priv { }; static unsigned int ceva_ahci_read_id(struct ata_device *dev, - struct ata_taskfile *tf, u16 *id) + struct ata_taskfile *tf, __le16 *id) { - __le16 *__id = (__le16 *)id; u32 err_mask; err_mask = ata_do_dev_read_id(dev, tf, id); @@ -104,7 +103,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev, * Since CEVA controller does not support device sleep feature, we * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data. 
*/ - __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8)); + id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8)); return 0; } diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 5b46fc9aeb4a..bf5b388bd4e0 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -103,8 +103,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, int rc; bool ls1021a_workaround = (qoriq_priv->type == AHCI_LS1021A); - DPRINTK("ENTER\n"); - hpriv->stop_engine(ap); /* @@ -146,8 +144,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, if (online) *class = ahci_dev_classify(ap); - - DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); return rc; } diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index dffc432b9d54..8e206379d699 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -193,7 +193,7 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) struct xgene_ahci_context *ctx = hpriv->plat_data; int rc = 0; u32 port_fbs; - void *port_mmio = ahci_port_base(ap); + void __iomem *port_mmio = ahci_port_base(ap); /* * Write the pmp value to PxFBS.DEV @@ -237,7 +237,7 @@ static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx) * does not support DEVSLP. */ static unsigned int xgene_ahci_read_id(struct ata_device *dev, - struct ata_taskfile *tf, u16 *id) + struct ata_taskfile *tf, __le16 *id) { u32 err_mask; @@ -454,7 +454,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class, int pmp = sata_srst_pmp(link); struct ata_port *ap = link->ap; u32 rc; - void *port_mmio = ahci_port_base(ap); + void __iomem *port_mmio = ahci_port_base(ap); u32 port_fbs; /* @@ -499,7 +499,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class, struct ata_port *ap = link->ap; struct ahci_host_priv *hpriv = ap->host->private_data; struct xgene_ahci_context *ctx = hpriv->plat_data; - void *port_mmio = ahci_port_base(ap); + void __iomem *port_mmio = ahci_port_base(ap); u32 port_fbs; u32 port_fbs_save; u32 retry = 1; @@ -588,8 +588,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance) void __iomem *mmio; u32 irq_stat, irq_masked; - VPRINTK("ENTER\n"); - hpriv = host->private_data; mmio = hpriv->mmio; @@ -612,8 +610,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance) spin_unlock(&host->lock); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(rc); } diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 0b2fcf0d1d6c..27b0d903f91f 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -77,6 +77,7 @@ #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/dmi.h> +#include <trace/events/libata.h> #define DRV_NAME "ata_piix" #define DRV_VERSION "2.13" @@ -816,10 +817,15 @@ static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, static bool piix_irq_check(struct ata_port *ap) { + unsigned char host_stat; + if (unlikely(!ap->ioaddr.bmdma_addr)) return false; - return ap->ops->bmdma_status(ap) & ATA_DMA_INTR; + host_stat = ap->ops->bmdma_status(ap); + trace_ata_bmdma_status(ap, host_stat); + + return host_stat & ATA_DMA_INTR; } #ifdef CONFIG_PM_SLEEP @@ -1345,7 +1351,6 @@ static void piix_init_pcs(struct ata_host *host, new_pcs = pcs | map_db->port_enable; if (new_pcs != pcs) { - DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs); pci_write_config_word(pdev, ICH5_PCS, new_pcs); msleep(150); } @@ -1769,14 +1774,12 @@ static int __init piix_init(void) { int rc; - 
DPRINTK("pci_register_driver\n"); rc = pci_register_driver(&piix_pci_driver); if (rc) return rc; in_module_init = 0; - DPRINTK("done\n"); return 0; } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index f76b8418e6fb..0ed484e04fd6 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1234,12 +1234,12 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap, /* clear SError */ tmp = readl(port_mmio + PORT_SCR_ERR); - VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); + dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp); writel(tmp, port_mmio + PORT_SCR_ERR); /* clear port IRQ */ tmp = readl(port_mmio + PORT_IRQ_STAT); - VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); + dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp); if (tmp) writel(tmp, port_mmio + PORT_IRQ_STAT); @@ -1270,10 +1270,10 @@ void ahci_init_controller(struct ata_host *host) } tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); + dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp); writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); + dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp); } EXPORT_SYMBOL_GPL(ahci_init_controller); @@ -1300,7 +1300,7 @@ unsigned int ahci_dev_classify(struct ata_port *ap) tf.lbal = (tmp >> 8) & 0xff; tf.nsect = (tmp) & 0xff; - return ata_dev_classify(&tf); + return ata_port_classify(ap, &tf); } EXPORT_SYMBOL_GPL(ahci_dev_classify); @@ -1415,8 +1415,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, bool fbs_disabled = false; int rc; - DPRINTK("ENTER\n"); - /* prepare for SRST (AHCI-1.1 10.4.1) */ rc = ahci_kick_engine(ap); if (rc && rc != -EOPNOTSUPP) @@ -1476,7 +1474,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, if (fbs_disabled) ahci_enable_fbs(ap); - DPRINTK("EXIT, class=%u\n", *class); return 0; fail: @@ -1498,8 +1495,6 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class, { int pmp = sata_srst_pmp(link); - DPRINTK("ENTER\n"); - return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); } EXPORT_SYMBOL_GPL(ahci_do_softreset); @@ -1529,8 +1524,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class, int rc; u32 irq_sts; - DPRINTK("ENTER\n"); - rc = ahci_do_softreset(link, class, pmp, deadline, ahci_bad_pmp_check_ready); @@ -1564,8 +1557,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, struct ata_taskfile tf; int rc; - DPRINTK("ENTER\n"); - hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ @@ -1581,7 +1572,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, if (*online) *class = ahci_dev_classify(ap); - DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); return rc; } EXPORT_SYMBOL_GPL(ahci_do_hardreset); @@ -1620,8 +1610,6 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; unsigned int si; - VPRINTK("ENTER\n"); - /* * Next, the S/G list. 
*/ @@ -1695,7 +1683,6 @@ static void ahci_fbs_dec_intr(struct ata_port *ap) u32 fbs = readl(port_mmio + PORT_FBS); int retries = 3; - DPRINTK("ENTER\n"); BUG_ON(!pp->fbs_enabled); /* time to wait for DEC is not specified by AHCI spec, @@ -1924,8 +1911,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) void __iomem *port_mmio = ahci_port_base(ap); u32 status; - VPRINTK("ENTER\n"); - status = readl(port_mmio + PORT_IRQ_STAT); writel(status, port_mmio + PORT_IRQ_STAT); @@ -1933,8 +1918,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) ahci_handle_port_interrupt(ap, port_mmio, status); spin_unlock(ap->lock); - VPRINTK("EXIT\n"); - return IRQ_HANDLED; } @@ -1951,9 +1934,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) ap = host->ports[i]; if (ap) { ahci_port_intr(ap); - VPRINTK("port %u\n", i); } else { - VPRINTK("port %u (no irq)\n", i); if (ata_ratelimit()) dev_warn(host->dev, "interrupt on disabled port %u\n", i); @@ -1974,8 +1955,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) void __iomem *mmio; u32 irq_stat, irq_masked; - VPRINTK("ENTER\n"); - hpriv = host->private_data; mmio = hpriv->mmio; @@ -2003,8 +1982,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) spin_unlock(&host->lock); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(rc); } diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 0910441321f7..18296443ccba 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -579,11 +579,8 @@ int ahci_platform_init_host(struct platform_device *pdev, int i, irq, n_ports, rc; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - if (irq != -EPROBE_DEFER) - dev_err(dev, "no irq\n"); + if (irq < 0) return irq; - } if (!irq) return -EINVAL; @@ -642,13 +639,8 @@ int ahci_platform_init_host(struct platform_device *pdev, if (hpriv->cap & HOST_CAP_64) { rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (rc) { - rc = dma_coerce_mask_and_coherent(dev, - DMA_BIT_MASK(32)); - if (rc) { - dev_err(dev, "Failed to enable 64-bit DMA.\n"); - return rc; - } - dev_warn(dev, "Enable 32-bit DMA instead of 64-bit.\n"); + dev_err(dev, "Failed to enable 64-bit DMA.\n"); + return rc; } } diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 7a7d6642edcc..8cfa8c96bb13 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -402,7 +402,6 @@ EXPORT_SYMBOL_GPL(ata_acpi_stm); */ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) { - struct ata_port *ap = dev->link->ap; acpi_status status; struct acpi_buffer output; union acpi_object *out_obj; @@ -418,10 +417,6 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) output.length = ACPI_ALLOCATE_BUFFER; output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ - if (ata_msg_probe(ap)) - ata_dev_dbg(dev, "%s: ENTER: port#: %d\n", - __func__, ap->port_no); - /* _GTF has no input parameters */ status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL, &output); @@ -437,11 +432,9 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) } if (!output.length || !output.pointer) { - if (ata_msg_probe(ap)) - ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n", - __func__, - (unsigned long long)output.length, - output.pointer); + ata_dev_dbg(dev, "Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n", + (unsigned long long)output.length, + 
output.pointer); rc = -EINVAL; goto out_free; } @@ -464,9 +457,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) rc = out_obj->buffer.length / REGS_PER_GTF; if (gtf) { *gtf = (void *)out_obj->buffer.pointer; - if (ata_msg_probe(ap)) - ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n", - __func__, *gtf, rc); + ata_dev_dbg(dev, "returning gtf=%p, gtf_count=%d\n", + *gtf, rc); } return rc; @@ -650,9 +642,7 @@ static int ata_acpi_run_tf(struct ata_device *dev, struct ata_taskfile *pptf = NULL; struct ata_taskfile tf, ptf, rtf; unsigned int err_mask; - const char *level; const char *descr; - char msg[60]; int rc; if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0) @@ -666,6 +656,8 @@ static int ata_acpi_run_tf(struct ata_device *dev, pptf = &ptf; } + descr = ata_get_cmd_name(tf.command); + if (!ata_acpi_filter_tf(dev, &tf, pptf)) { rtf = tf; err_mask = ata_exec_internal(dev, &rtf, NULL, @@ -673,40 +665,42 @@ static int ata_acpi_run_tf(struct ata_device *dev, switch (err_mask) { case 0: - level = KERN_DEBUG; - snprintf(msg, sizeof(msg), "succeeded"); + ata_dev_dbg(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) succeeded\n", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr); rc = 1; break; case AC_ERR_DEV: - level = KERN_INFO; - snprintf(msg, sizeof(msg), - "rejected by device (Stat=0x%02x Err=0x%02x)", - rtf.command, rtf.feature); + ata_dev_info(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) rejected by device (Stat=0x%02x Err=0x%02x)", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr, + rtf.command, rtf.feature); rc = 0; break; default: - level = KERN_ERR; - snprintf(msg, sizeof(msg), - "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", - err_mask, rtf.command, rtf.feature); + ata_dev_err(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr, + err_mask, rtf.command, rtf.feature); rc = -EIO; break; } } else { - level = KERN_INFO; - snprintf(msg, sizeof(msg), "filtered out"); + ata_dev_info(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) filtered out\n", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr); rc = 0; } - descr = ata_get_cmd_descript(tf.command); - - ata_dev_printk(dev, level, - "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n", - tf.command, tf.feature, tf.nsect, tf.lbal, - tf.lbam, tf.lbah, tf.device, - (descr ? descr : "unknown"), msg); - return rc; } @@ -776,9 +770,8 @@ static int ata_acpi_push_id(struct ata_device *dev) struct acpi_object_list input; union acpi_object in_params[1]; - if (ata_msg_probe(ap)) - ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n", - __func__, dev->devno, ap->port_no); + ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n", + __func__, dev->devno, ap->port_no); /* Give the drive Identify data to the drive via the _SDD method */ /* _SDD: set up input parameters */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index aba0c67d1bd6..67f88027680a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -764,9 +764,6 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, head = track % dev->heads; sect = (u32)block % dev->sectors + 1; - DPRINTK("block %u track %u cyl %u head %u sect %u\n", - (u32)block, track, cyl, head, sect); - /* Check whether the converted CHS can fit. 
Cylinder: 0-65535 Head: 0-15 @@ -1010,32 +1007,21 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) * SEMB signature. This is worked around in * ata_dev_read_id(). */ - if ((tf->lbam == 0) && (tf->lbah == 0)) { - DPRINTK("found ATA device by sig\n"); + if (tf->lbam == 0 && tf->lbah == 0) return ATA_DEV_ATA; - } - if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { - DPRINTK("found ATAPI device by sig\n"); + if (tf->lbam == 0x14 && tf->lbah == 0xeb) return ATA_DEV_ATAPI; - } - if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { - DPRINTK("found PMP device by sig\n"); + if (tf->lbam == 0x69 && tf->lbah == 0x96) return ATA_DEV_PMP; - } - if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { - DPRINTK("found SEMB device by sig (could be ATA device)\n"); + if (tf->lbam == 0x3c && tf->lbah == 0xc3) return ATA_DEV_SEMB; - } - if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) { - DPRINTK("found ZAC device by sig\n"); + if (tf->lbam == 0xcd && tf->lbah == 0xab) return ATA_DEV_ZAC; - } - DPRINTK("unknown device\n"); return ATA_DEV_UNKNOWN; } EXPORT_SYMBOL_GPL(ata_dev_classify); @@ -1355,6 +1341,7 @@ static int ata_hpa_resize(struct ata_device *dev) /** * ata_dump_id - IDENTIFY DEVICE info debugging output + * @dev: device from which the information is fetched * @id: IDENTIFY DEVICE page to dump * * Dump selected 16-bit words from the given IDENTIFY DEVICE @@ -1364,32 +1351,14 @@ static int ata_hpa_resize(struct ata_device *dev) * caller. */ -static inline void ata_dump_id(const u16 *id) -{ - DPRINTK("49==0x%04x " - "53==0x%04x " - "63==0x%04x " - "64==0x%04x " - "75==0x%04x \n", - id[49], - id[53], - id[63], - id[64], - id[75]); - DPRINTK("80==0x%04x " - "81==0x%04x " - "82==0x%04x " - "83==0x%04x " - "84==0x%04x \n", - id[80], - id[81], - id[82], - id[83], - id[84]); - DPRINTK("88==0x%04x " - "93==0x%04x\n", - id[88], - id[93]); +static inline void ata_dump_id(struct ata_device *dev, const u16 *id) +{ + ata_dev_dbg(dev, + "49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n" + "80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n" + "88==0x%04x 93==0x%04x\n", + id[49], id[53], id[63], id[64], id[75], id[80], + id[81], id[82], id[83], id[84], id[88], id[93]); } /** @@ -1602,9 +1571,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, else ata_qc_complete(qc); - if (ata_msg_warn(ap)) - ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", - command); + ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", + command); } spin_unlock_irqrestore(ap->lock, flags); @@ -1754,7 +1722,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) * this function is wrapped or replaced by the driver */ unsigned int ata_do_dev_read_id(struct ata_device *dev, - struct ata_taskfile *tf, u16 *id) + struct ata_taskfile *tf, __le16 *id) { return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, id, sizeof(id[0]) * ATA_ID_WORDS, 0); @@ -1794,9 +1762,6 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, int may_fallback = 1, tried_spinup = 0; int rc; - if (ata_msg_ctl(ap)) - ata_dev_dbg(dev, "%s: ENTER\n", __func__); - retry: ata_tf_init(dev, &tf); @@ -1830,9 +1795,9 @@ retry: tf.flags |= ATA_TFLAG_POLLING; if (ap->ops->read_id) - err_mask = ap->ops->read_id(dev, &tf, id); + err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id); else - err_mask = ata_do_dev_read_id(dev, &tf, id); + err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id); if (err_mask) { if (err_mask & AC_ERR_NODEV_HINT) { @@ -1879,10 +1844,10 @@ retry: } if (dev->horkage & ATA_HORKAGE_DUMP_ID) { - ata_dev_dbg(dev, "dumping IDENTIFY data, 
" + ata_dev_info(dev, "dumping IDENTIFY data, " "class=%d may_fallback=%d tried_spinup=%d\n", class, may_fallback, tried_spinup); - print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); } @@ -1966,9 +1931,8 @@ retry: return 0; err_out: - if (ata_msg_warn(ap)) - ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", - reason, err_mask); + ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", + reason, err_mask); return rc; } @@ -1996,7 +1960,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, unsigned int err_mask; bool dma = false; - DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); + ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page); /* * Return error without actually issuing the command on controllers @@ -2390,7 +2354,6 @@ static void ata_dev_config_trusted(struct ata_device *dev) static int ata_dev_config_lba(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; const u16 *id = dev->id; const char *lba_desc; char ncq_desc[24]; @@ -2412,7 +2375,7 @@ static int ata_dev_config_lba(struct ata_device *dev) ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); /* print device info to dmesg */ - if (ata_msg_drv(ap) && ata_dev_print_info(dev)) + if (ata_dev_print_info(dev)) ata_dev_info(dev, "%llu sectors, multi %u: %s %s\n", (unsigned long long)dev->n_sectors, @@ -2423,7 +2386,6 @@ static int ata_dev_config_lba(struct ata_device *dev) static void ata_dev_config_chs(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; const u16 *id = dev->id; if (ata_id_current_chs_valid(id)) { @@ -2439,7 +2401,7 @@ static void ata_dev_config_chs(struct ata_device *dev) } /* print device info to dmesg */ - if (ata_msg_drv(ap) && ata_dev_print_info(dev)) + if (ata_dev_print_info(dev)) ata_dev_info(dev, "%llu sectors, multi %u, CHS %u/%u/%u\n", (unsigned long long)dev->n_sectors, @@ -2566,14 +2528,11 @@ int ata_dev_configure(struct ata_device *dev) char modelbuf[ATA_ID_PROD_LEN+1]; int rc; - if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { - ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); + if (!ata_dev_enabled(dev)) { + ata_dev_dbg(dev, "no device\n"); return 0; } - if (ata_msg_probe(ap)) - ata_dev_dbg(dev, "%s: ENTER\n", __func__); - /* set horkage */ dev->horkage |= ata_dev_blacklisted(dev); ata_force_horkage(dev); @@ -2621,13 +2580,12 @@ int ata_dev_configure(struct ata_device *dev) return rc; /* print device capabilities */ - if (ata_msg_probe(ap)) - ata_dev_dbg(dev, - "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " - "85:%04x 86:%04x 87:%04x 88:%04x\n", - __func__, - id[49], id[82], id[83], id[84], - id[85], id[86], id[87], id[88]); + ata_dev_dbg(dev, + "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " + "85:%04x 86:%04x 87:%04x 88:%04x\n", + __func__, + id[49], id[82], id[83], id[84], + id[85], id[86], id[87], id[88]); /* initialize to-be-configured parameters */ dev->flags &= ~ATA_DFLAG_CFG_MASK; @@ -2646,8 +2604,7 @@ int ata_dev_configure(struct ata_device *dev) /* find max transfer mode; for printk only */ xfer_mask = ata_id_xfermask(id); - if (ata_msg_probe(ap)) - ata_dump_id(id); + ata_dump_id(dev, id); /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, @@ -2685,7 +2642,7 @@ int ata_dev_configure(struct ata_device *dev) } /* print device info to dmesg */ - if (ata_msg_drv(ap) && print_info) + if (print_info) ata_dev_info(dev, "%s: %s, %s, max %s\n", revbuf, 
modelbuf, fwrevbuf, ata_mode_string(xfer_mask)); @@ -2705,7 +2662,7 @@ int ata_dev_configure(struct ata_device *dev) ata_dev_config_cpr(dev); dev->cdb_len = 32; - if (ata_msg_drv(ap) && print_info) + if (print_info) ata_dev_print_features(dev); } @@ -2718,8 +2675,7 @@ int ata_dev_configure(struct ata_device *dev) rc = atapi_cdb_len(id); if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { - if (ata_msg_warn(ap)) - ata_dev_warn(dev, "unsupported CDB len\n"); + ata_dev_warn(dev, "unsupported CDB len %d\n", rc); rc = -EINVAL; goto err_out_nosup; } @@ -2763,7 +2719,7 @@ int ata_dev_configure(struct ata_device *dev) } /* print device info to dmesg */ - if (ata_msg_drv(ap) && print_info) + if (print_info) ata_dev_info(dev, "ATAPI: %s, %s, max %s%s%s%s\n", modelbuf, fwrevbuf, @@ -2780,7 +2736,7 @@ int ata_dev_configure(struct ata_device *dev) /* Limit PATA drive on SATA cable bridge transfers to udma5, 200 sectors */ if (ata_dev_knobble(dev)) { - if (ata_msg_drv(ap) && print_info) + if (print_info) ata_dev_info(dev, "applying bridge limits\n"); dev->udma_mask &= ATA_UDMA5; dev->max_sectors = ATA_MAX_SECTORS; @@ -2829,8 +2785,6 @@ int ata_dev_configure(struct ata_device *dev) return 0; err_out_nosup: - if (ata_msg_probe(ap)) - ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); return rc; } @@ -3373,8 +3327,8 @@ static int ata_dev_set_mode(struct ata_device *dev) dev_err_whine = " (device error ignored)"; } - DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", - dev->xfer_shift, (int)dev->xfer_mode); + ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n", + dev->xfer_shift, (int)dev->xfer_mode); if (!(ehc->i.flags & ATA_EHI_QUIET) || ehc->i.flags & ATA_EHI_DID_HARDRESET) @@ -3688,16 +3642,12 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes) { u32 serror; - DPRINTK("ENTER\n"); - /* reset complete, clear SError */ if (!sata_scr_read(link, SCR_ERROR, &serror)) sata_scr_write(link, SCR_ERROR, serror); /* print link status */ sata_print_link_status(link); - - DPRINTK("EXIT\n"); } EXPORT_SYMBOL_GPL(ata_std_postreset); @@ -4324,7 +4274,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) unsigned int err_mask; /* set up set-features taskfile */ - DPRINTK("set features - xfer mode\n"); + ata_dev_dbg(dev, "set features - xfer mode\n"); /* Some controllers and ATAPI devices show flaky interrupt * behavior after setting xfer mode. Use polling instead. 
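The DPRINTK()/VPRINTK() removals running through these libata hunks replace compile-time debug macros (dead code unless ATA_DEBUG was defined at build time) with ata_dev_dbg() and friends, which route through dev_dbg() and dynamic debug, or with tracepoints (trace_ata_tf_load(), trace_ata_qc_prep(), ...) where the call flow itself is the interesting part. A minimal sketch of the conversion pattern, using an illustrative caller that is not part of the patch:

#include <linux/libata.h>

/* example_config() is hypothetical; ata_dev_dbg() is the real helper. */
static void example_config(struct ata_device *dev)
{
	/* before: DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", ...); */
	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
		    dev->xfer_shift, (int)dev->xfer_mode);
}

With CONFIG_DYNAMIC_DEBUG the message can then be enabled per call site at runtime, e.g. `echo 'func example_config +p' > /sys/kernel/debug/dynamic_debug/control`, instead of rebuilding the kernel with ATA_DEBUG.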
@@ -4346,7 +4296,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) /* On some disks, this command causes spin-up, so we need longer timeout */ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); - DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; } @@ -4372,7 +4321,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) unsigned long timeout = 0; /* set up set-features taskfile */ - DPRINTK("set features - SATA features\n"); + ata_dev_dbg(dev, "set features - SATA features\n"); ata_tf_init(dev, &tf); tf.command = ATA_CMD_SET_FEATURES; @@ -4386,7 +4335,6 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout); - DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; } EXPORT_SYMBOL_GPL(ata_dev_set_feature); @@ -4414,7 +4362,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, return AC_ERR_INVALID; /* set up init dev params taskfile */ - DPRINTK("init dev params \n"); + ata_dev_dbg(dev, "init dev params \n"); ata_tf_init(dev, &tf); tf.command = ATA_CMD_INIT_DEV_PARAMS; @@ -4430,7 +4378,6 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) err_mask = 0; - DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; } @@ -4542,8 +4489,6 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) WARN_ON_ONCE(sg == NULL); - VPRINTK("unmapping %u sg elements\n", qc->n_elem); - if (qc->n_elem) dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); @@ -4569,13 +4514,10 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; unsigned int n_elem; - VPRINTK("ENTER, ata%u\n", ap->print_id); - n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); if (n_elem < 1) return -1; - VPRINTK("%d sg elements mapped\n", n_elem); qc->orig_n_elem = qc->n_elem; qc->n_elem = n_elem; qc->flags |= ATA_QCFLAG_DMAMAP; @@ -4930,6 +4872,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) return; } + trace_ata_qc_prep(qc); qc->err_mask |= ap->ops->qc_prep(qc); if (unlikely(qc->err_mask)) goto err; @@ -5375,8 +5318,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host) { struct ata_port *ap; - DPRINTK("ENTER\n"); - ap = kzalloc(sizeof(*ap), GFP_KERNEL); if (!ap) return NULL; @@ -5388,15 +5329,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->host = host; ap->dev = host->dev; -#if defined(ATA_VERBOSE_DEBUG) - /* turn on all debugging levels */ - ap->msg_enable = 0x00FF; -#elif defined(ATA_DEBUG) - ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; -#else - ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; -#endif - mutex_init(&ap->scsi_scan_mutex); INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); @@ -5493,8 +5425,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) int i; void *dr; - DPRINTK("ENTER\n"); - /* alloc a container for our list of ATA ports (buses) */ sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); host = kzalloc(sz, GFP_KERNEL); @@ -5784,9 +5714,7 @@ int ata_port_probe(struct ata_port *ap) __ata_port_probe(ap); ata_port_wait_eh(ap); } else { - DPRINTK("ata%u: bus probe begin\n", ap->print_id); rc = ata_bus_probe(ap); - DPRINTK("ata%u: bus probe end\n", ap->print_id); } return rc; } @@ -6553,69 +6481,14 @@ const 
struct ata_port_info ata_dummy_port_info = { }; EXPORT_SYMBOL_GPL(ata_dummy_port_info); -/* - * Utility print functions - */ -void ata_port_printk(const struct ata_port *ap, const char *level, - const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - printk("%sata%u: %pV", level, ap->print_id, &vaf); - - va_end(args); -} -EXPORT_SYMBOL(ata_port_printk); - -void ata_link_printk(const struct ata_link *link, const char *level, - const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - if (sata_pmp_attached(link->ap) || link->ap->slave_link) - printk("%sata%u.%02u: %pV", - level, link->ap->print_id, link->pmp, &vaf); - else - printk("%sata%u: %pV", - level, link->ap->print_id, &vaf); - - va_end(args); -} -EXPORT_SYMBOL(ata_link_printk); - -void ata_dev_printk(const struct ata_device *dev, const char *level, - const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - printk("%sata%u.%02u: %pV", - level, dev->link->ap->print_id, dev->link->pmp + dev->devno, - &vaf); - - va_end(args); -} -EXPORT_SYMBOL(ata_dev_printk); - void ata_print_version(const struct device *dev, const char *version) { dev_printk(KERN_DEBUG, dev, "version %s\n", version); } EXPORT_SYMBOL(ata_print_version); + +EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load); +EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command); +EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup); +EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start); +EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 1d4a6f1e88cd..7951fd946bf9 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -533,8 +533,6 @@ void ata_scsi_error(struct Scsi_Host *host) unsigned long flags; LIST_HEAD(eh_work_q); - DPRINTK("ENTER\n"); - spin_lock_irqsave(host->host_lock, flags); list_splice_init(&host->eh_cmd_q, &eh_work_q); spin_unlock_irqrestore(host->host_lock, flags); @@ -548,7 +546,6 @@ void ata_scsi_error(struct Scsi_Host *host) /* finish or retry handled scmd's and clean up */ WARN_ON(!list_empty(&eh_work_q)); - DPRINTK("EXIT\n"); } /** @@ -940,7 +937,7 @@ void ata_std_sched_eh(struct ata_port *ap) ata_eh_set_pending(ap, 1); scsi_schedule_eh(ap->scsi_host); - DPRINTK("port EH scheduled\n"); + trace_ata_std_sched_eh(ap); } EXPORT_SYMBOL_GPL(ata_std_sched_eh); @@ -1070,7 +1067,7 @@ static void __ata_port_freeze(struct ata_port *ap) ap->pflags |= ATA_PFLAG_FROZEN; - DPRINTK("ata%u port frozen\n", ap->print_id); + trace_ata_port_freeze(ap); } /** @@ -1147,7 +1144,7 @@ void ata_eh_thaw_port(struct ata_port *ap) spin_unlock_irqrestore(ap->lock, flags); - DPRINTK("ata%u port thawed\n", ap->print_id); + trace_ata_port_thaw(ap); } static void ata_eh_scsidone(struct scsi_cmnd *scmd) @@ -1217,8 +1214,7 @@ void ata_dev_disable(struct ata_device *dev) if (!ata_dev_enabled(dev)) return; - if (ata_msg_drv(dev->link->ap)) - ata_dev_warn(dev, "disabled\n"); + ata_dev_warn(dev, "disable device\n"); ata_acpi_on_disable(dev); ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); dev->class++; @@ -1287,6 +1283,8 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; + trace_ata_eh_about_to_do(link, dev ? 
dev->devno : 0, action); + spin_lock_irqsave(ap->lock, flags); ata_eh_clear_action(link, dev, ehi, action); @@ -1317,6 +1315,8 @@ void ata_eh_done(struct ata_link *link, struct ata_device *dev, { struct ata_eh_context *ehc = &link->eh_context; + trace_ata_eh_done(link, dev ? dev->devno : 0, action); + ata_eh_clear_action(link, dev, &ehc->i, action); } @@ -1421,8 +1421,6 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc, return; } - DPRINTK("ATA request sense\n"); - ata_tf_init(dev, &tf); tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; @@ -1463,8 +1461,6 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev, struct ata_port *ap = dev->link->ap; struct ata_taskfile tf; - DPRINTK("ATAPI request sense\n"); - memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); /* initialize sense_buf with the error register, @@ -1928,8 +1924,6 @@ static void ata_eh_link_autopsy(struct ata_link *link) u32 serror; int rc; - DPRINTK("ENTER\n"); - if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) return; @@ -2036,7 +2030,6 @@ static void ata_eh_link_autopsy(struct ata_link *link) ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); } - DPRINTK("EXIT\n"); } /** @@ -2086,16 +2079,15 @@ void ata_eh_autopsy(struct ata_port *ap) } /** - * ata_get_cmd_descript - get description for ATA command - * @command: ATA command code to get description for + * ata_get_cmd_name - get name for ATA command + * @command: ATA command code to get name for * - * Return a textual description of the given command, or NULL if the - * command is not known. + * Return a textual name of the given command or "unknown" * * LOCKING: * None */ -const char *ata_get_cmd_descript(u8 command) +const char *ata_get_cmd_name(u8 command) { #ifdef CONFIG_ATA_VERBOSE_ERROR static const struct @@ -2203,9 +2195,9 @@ const char *ata_get_cmd_descript(u8 command) return cmd_descr[i].text; #endif - return NULL; + return "unknown"; } -EXPORT_SYMBOL_GPL(ata_get_cmd_descript); +EXPORT_SYMBOL_GPL(ata_get_cmd_name); /** * ata_eh_link_report - report error handling to user @@ -2354,12 +2346,9 @@ static void ata_eh_link_report(struct ata_link *link) } __scsi_format_command(cdb_buf, sizeof(cdb_buf), cdb, cdb_len); - } else { - const char *descr = ata_get_cmd_descript(cmd->command); - if (descr) - ata_dev_err(qc->dev, "failed command: %s\n", - descr); - } + } else + ata_dev_err(qc->dev, "failed command: %s\n", + ata_get_cmd_name(cmd->command)); ata_dev_err(qc->dev, "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " @@ -2596,12 +2585,19 @@ int ata_eh_reset(struct ata_link *link, int classify, /* mark that this EH session started with reset */ ehc->last_reset = jiffies; - if (reset == hardreset) + if (reset == hardreset) { ehc->i.flags |= ATA_EHI_DID_HARDRESET; - else + trace_ata_link_hardreset_begin(link, classes, deadline); + } else { ehc->i.flags |= ATA_EHI_DID_SOFTRESET; + trace_ata_link_softreset_begin(link, classes, deadline); + } rc = ata_do_reset(link, reset, classes, deadline, true); + if (reset == hardreset) + trace_ata_link_hardreset_end(link, classes, rc); + else + trace_ata_link_softreset_end(link, classes, rc); if (rc && rc != -EAGAIN) { failed_link = link; goto fail; @@ -2615,8 +2611,11 @@ int ata_eh_reset(struct ata_link *link, int classify, ata_link_info(slave, "hard resetting link\n"); ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); + trace_ata_slave_hardreset_begin(slave, classes, + deadline); tmp = ata_do_reset(slave, 
reset, classes, deadline, false); + trace_ata_slave_hardreset_end(slave, classes, tmp); switch (tmp) { case -EAGAIN: rc = -EAGAIN; @@ -2644,7 +2643,9 @@ int ata_eh_reset(struct ata_link *link, int classify, } ata_eh_about_to_do(link, NULL, ATA_EH_RESET); + trace_ata_link_softreset_begin(link, classes, deadline); rc = ata_do_reset(link, reset, classes, deadline, true); + trace_ata_link_softreset_end(link, classes, rc); if (rc) { failed_link = link; goto fail; @@ -2698,8 +2699,11 @@ int ata_eh_reset(struct ata_link *link, int classify, */ if (postreset) { postreset(link, classes); - if (slave) + trace_ata_link_postreset(link, classes, rc); + if (slave) { postreset(slave, classes); + trace_ata_slave_postreset(slave, classes, rc); + } } /* @@ -2921,8 +2925,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, unsigned long flags; int rc = 0; - DPRINTK("ENTER\n"); - /* For PATA drive side cable detection to work, IDENTIFY must * be done backwards such that PDIAG- is released by the slave * device before the master device is identified. @@ -3036,7 +3038,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, err: *r_failed_dev = dev; - DPRINTK("EXIT rc=%d\n", rc); return rc; } @@ -3551,8 +3552,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, int rc, nr_fails; unsigned long flags, deadline; - DPRINTK("ENTER\n"); - /* prep for recovery */ ata_for_each_link(link, ap, EDGE) { struct ata_eh_context *ehc = &link->eh_context; @@ -3760,7 +3759,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, if (rc && r_failed_link) *r_failed_link = link; - DPRINTK("EXIT, rc=%d\n", rc); return rc; } diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index ba7be3f38617..e2e9cbd405fa 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -652,8 +652,6 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class) u32 *gscr = (void *)ap->sector_buf; int rc; - DPRINTK("ENTER\n"); - ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE); if (!ata_dev_enabled(dev)) { @@ -686,12 +684,10 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class) ata_eh_done(link, NULL, ATA_EH_REVALIDATE); - DPRINTK("EXIT, rc=0\n"); return 0; fail: ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc); - DPRINTK("EXIT, rc=%d\n", rc); return rc; } @@ -759,8 +755,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, int detach = 0, rc = 0; int reval_failed = 0; - DPRINTK("ENTER\n"); - if (dev->flags & ATA_DFLAG_DETACH) { detach = 1; rc = -ENODEV; @@ -828,7 +822,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, /* okay, PMP resurrected */ ehc->i.flags = 0; - DPRINTK("EXIT, rc=0\n"); return 0; fail: @@ -838,7 +831,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, else ata_dev_disable(dev); - DPRINTK("EXIT, rc=%d\n", rc); return rc; } diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index b9c77885b872..071158c0c44c 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -317,7 +317,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, * immediately after resuming. Delay 200ms before * debouncing. */ - if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) + if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY)) ata_msleep(link->ap, 200); /* is SControl restored correctly? 
*/ @@ -533,8 +533,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, u32 scontrol; int rc; - DPRINTK("ENTER\n"); - if (online) *online = false; @@ -610,7 +608,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, *online = false; ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); } - DPRINTK("EXIT, rc=%d\n", rc); return rc; } EXPORT_SYMBOL_GPL(sata_link_hardreset); @@ -876,7 +873,7 @@ static ssize_t ata_ncq_prio_enable_show(struct device *device, ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE; spin_unlock_irq(ap->lock); - return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable); + return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable); } static ssize_t ata_ncq_prio_enable_store(struct device *device, @@ -972,7 +969,7 @@ ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct ata_port *ap = ata_shost_to_port(shost); - return snprintf(buf, 23, "%d\n", ap->em_message_type); + return sysfs_emit(buf, "%d\n", ap->em_message_type); } DEVICE_ATTR(em_message_type, S_IRUGO, ata_scsi_em_message_type_show, NULL); @@ -1261,8 +1258,6 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) { int rc = 0; - ata_scsi_dump_cdb(ap, cmd); - if (likely(ata_dev_enabled(ap->link.device))) rc = __ata_scsi_queuecmd(cmd, ap->link.device); else { diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 313e9475507b..ed8be585a98f 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -121,7 +121,7 @@ static ssize_t ata_scsi_park_show(struct device *device, unlock: spin_unlock_irq(ap->lock); - return rc ? rc : snprintf(buf, 20, "%u\n", msecs); + return rc ? rc : sysfs_emit(buf, "%u\n", msecs); } static ssize_t ata_scsi_park_store(struct device *device, @@ -668,7 +668,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) /** * ata_dump_status - user friendly display of error info - * @id: id of the port in question + * @ap: the port in question * @tf: ptr to filled out taskfile * * Decode and dump the ATA error/status registers for the user so @@ -678,37 +678,32 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) * LOCKING: * inherited from caller */ -static void ata_dump_status(unsigned id, struct ata_taskfile *tf) +static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf) { u8 stat = tf->command, err = tf->feature; - pr_warn("ata%u: status=0x%02x { ", id, stat); if (stat & ATA_BUSY) { - pr_cont("Busy }\n"); /* Data is not valid in this case */ + ata_port_warn(ap, "status=0x%02x {Busy} ", stat); } else { - if (stat & ATA_DRDY) pr_cont("DriveReady "); - if (stat & ATA_DF) pr_cont("DeviceFault "); - if (stat & ATA_DSC) pr_cont("SeekComplete "); - if (stat & ATA_DRQ) pr_cont("DataRequest "); - if (stat & ATA_CORR) pr_cont("CorrectedError "); - if (stat & ATA_SENSE) pr_cont("Sense "); - if (stat & ATA_ERR) pr_cont("Error "); - pr_cont("}\n"); - - if (err) { - pr_warn("ata%u: error=0x%02x { ", id, err); - if (err & ATA_ABORTED) pr_cont("DriveStatusError "); - if (err & ATA_ICRC) { - if (err & ATA_ABORTED) - pr_cont("BadCRC "); - else pr_cont("Sector "); - } - if (err & ATA_UNC) pr_cont("UncorrectableError "); - if (err & ATA_IDNF) pr_cont("SectorIdNotFound "); - if (err & ATA_TRK0NF) pr_cont("TrackZeroNotFound "); - if (err & ATA_AMNF) pr_cont("AddrMarkNotFound "); - pr_cont("}\n"); - } + ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat, + stat & ATA_DRDY ? 
"DriveReady " : "", + stat & ATA_DF ? "DeviceFault " : "", + stat & ATA_DSC ? "SeekComplete " : "", + stat & ATA_DRQ ? "DataRequest " : "", + stat & ATA_CORR ? "CorrectedError " : "", + stat & ATA_SENSE ? "Sense " : "", + stat & ATA_ERR ? "Error " : ""); + if (err) + ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err, + err & ATA_ABORTED ? + "DriveStatusError " : "", + err & ATA_ICRC ? + (err & ATA_ABORTED ? + "BadCRC " : "Sector ") : "", + err & ATA_UNC ? "UncorrectableError " : "", + err & ATA_IDNF ? "SectorIdNotFound " : "", + err & ATA_TRK0NF ? "TrackZeroNotFound " : "", + err & ATA_AMNF ? "AddrMarkNotFound " : ""); } } @@ -1299,8 +1294,6 @@ static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen) u64 lba = 0; u32 len; - VPRINTK("six-byte command\n"); - lba |= ((u64)(cdb[1] & 0x1f)) << 16; lba |= ((u64)cdb[2]) << 8; lba |= ((u64)cdb[3]); @@ -1326,8 +1319,6 @@ static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen) u64 lba = 0; u32 len = 0; - VPRINTK("ten-byte command\n"); - lba |= ((u64)cdb[2]) << 24; lba |= ((u64)cdb[3]) << 16; lba |= ((u64)cdb[4]) << 8; @@ -1355,8 +1346,6 @@ static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen) u64 lba = 0; u32 len = 0; - VPRINTK("sixteen-byte command\n"); - lba |= ((u64)cdb[2]) << 56; lba |= ((u64)cdb[3]) << 48; lba |= ((u64)cdb[4]) << 40; @@ -1469,9 +1458,6 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) head = track % dev->heads; sect = (u32)block % dev->sectors + 1; - DPRINTK("block %u track %u cyl %u head %u sect %u\n", - (u32)block, track, cyl, head, sect); - /* Check whether the converted CHS can fit. Cylinder: 0-65535 Head: 0-15 @@ -1594,7 +1580,6 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) goto invalid_fld; break; default: - DPRINTK("no-byte command\n"); fp = 0; goto invalid_fld; } @@ -1672,7 +1657,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) cmd->result = SAM_STAT_GOOD; if (need_sense && !ap->ops->error_handler) - ata_dump_status(ap->print_id, &qc->result_tf); + ata_dump_status(ap, &qc->result_tf); ata_qc_done(qc); } @@ -1710,8 +1695,6 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, struct ata_queued_cmd *qc; int rc; - VPRINTK("ENTER\n"); - qc = ata_scsi_qc_new(dev, cmd); if (!qc) goto err_mem; @@ -1742,13 +1725,11 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, /* select device, send command to hardware */ ata_qc_issue(qc); - VPRINTK("EXIT\n"); return 0; early_finish: ata_qc_free(qc); scsi_done(cmd); - DPRINTK("EXIT - early finish (good or error)\n"); return 0; err_did: @@ -1756,12 +1737,10 @@ err_did: cmd->result = (DID_ERROR << 16); scsi_done(cmd); err_mem: - DPRINTK("EXIT - internal\n"); return 0; defer: ata_qc_free(qc); - DPRINTK("EXIT - defer\n"); if (rc == ATA_DEFER_LINK) return SCSI_MLQUEUE_DEVICE_BUSY; else @@ -1858,8 +1837,6 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 2 }; - VPRINTK("ENTER\n"); - /* set scsi removable (RMB) bit per ata bit, or if the * AHCI port says it's external (Hotplug-capable, eSATA). 
*/ @@ -2294,8 +2271,6 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) u8 dpofua, bp = 0xff; u16 fp; - VPRINTK("ENTER\n"); - six_byte = (scsicmd[0] == MODE_SENSE); ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */ /* @@ -2413,8 +2388,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) log2_per_phys = ata_id_log2_per_physical_sector(dev->id); lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); - VPRINTK("ENTER\n"); - if (args->cmd->cmnd[0] == READ_CAPACITY) { if (last_lba >= 0xffffffffULL) last_lba = 0xffffffff; @@ -2481,7 +2454,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) */ static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf) { - VPRINTK("ENTER\n"); rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ return 0; @@ -2512,8 +2484,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct scsi_cmnd *cmd = qc->scsicmd; - DPRINTK("ATAPI request sense\n"); - memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); #ifdef CONFIG_ATA_SFF @@ -2552,8 +2522,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) qc->complete_fn = atapi_sense_complete; ata_qc_issue(qc); - - DPRINTK("EXIT\n"); } /* @@ -2581,8 +2549,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) struct scsi_cmnd *cmd = qc->scsicmd; unsigned int err_mask = qc->err_mask; - VPRINTK("ENTER, err_mask 0x%X\n", err_mask); - /* handle completion from new EH */ if (unlikely(qc->ap->ops->error_handler && (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) { @@ -2663,7 +2629,6 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; if (scmd->sc_data_direction == DMA_TO_DEVICE) { qc->tf.flags |= ATA_TFLAG_WRITE; - DPRINTK("direction: write\n"); } qc->tf.command = ATA_CMD_PACKET; @@ -3591,10 +3556,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc, */ if (len != CACHE_MPAGE_LEN - 2) { - if (len < CACHE_MPAGE_LEN - 2) - *fp = len; - else - *fp = CACHE_MPAGE_LEN - 2; + *fp = min(len, CACHE_MPAGE_LEN - 2); return -EINVAL; } @@ -3647,10 +3609,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc, */ if (len != CONTROL_MPAGE_LEN - 2) { - if (len < CONTROL_MPAGE_LEN - 2) - *fp = len; - else - *fp = CONTROL_MPAGE_LEN - 2; + *fp = min(len, CONTROL_MPAGE_LEN - 2); return -EINVAL; } @@ -3698,8 +3657,6 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) u8 buffer[64]; const u8 *p = buffer; - VPRINTK("ENTER\n"); - six_byte = (cdb[0] == MODE_SELECT); if (six_byte) { if (scmd->cmd_len < 5) { @@ -3997,70 +3954,45 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) return NULL; } -/** - * ata_scsi_dump_cdb - dump SCSI command contents to dmesg - * @ap: ATA port to which the command was being sent - * @cmd: SCSI command to dump - * - * Prints the contents of a SCSI command via printk(). 
- */ - -void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd) -{ -#ifdef ATA_VERBOSE_DEBUG - struct scsi_device *scsidev = cmd->device; - - VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n", - ap->print_id, - scsidev->channel, scsidev->id, scsidev->lun, - cmd->cmnd); -#endif -} - int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) { u8 scsi_op = scmd->cmnd[0]; ata_xlat_func_t xlat_func; - int rc = 0; + + if (unlikely(!scmd->cmd_len)) + goto bad_cdb_len; if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { - if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) + if (unlikely(scmd->cmd_len > dev->cdb_len)) goto bad_cdb_len; xlat_func = ata_get_xlat_func(dev, scsi_op); - } else { - if (unlikely(!scmd->cmd_len)) - goto bad_cdb_len; + } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { + /* relay SCSI command to ATAPI device */ + int len = COMMAND_SIZE(scsi_op); - xlat_func = NULL; - if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { - /* relay SCSI command to ATAPI device */ - int len = COMMAND_SIZE(scsi_op); - if (unlikely(len > scmd->cmd_len || - len > dev->cdb_len || - scmd->cmd_len > ATAPI_CDB_LEN)) - goto bad_cdb_len; + if (unlikely(len > scmd->cmd_len || + len > dev->cdb_len || + scmd->cmd_len > ATAPI_CDB_LEN)) + goto bad_cdb_len; - xlat_func = atapi_xlat; - } else { - /* ATA_16 passthru, treat as an ATA command */ - if (unlikely(scmd->cmd_len > 16)) - goto bad_cdb_len; + xlat_func = atapi_xlat; + } else { + /* ATA_16 passthru, treat as an ATA command */ + if (unlikely(scmd->cmd_len > 16)) + goto bad_cdb_len; - xlat_func = ata_get_xlat_func(dev, scsi_op); - } + xlat_func = ata_get_xlat_func(dev, scsi_op); } if (xlat_func) - rc = ata_scsi_translate(dev, scmd, xlat_func); - else - ata_scsi_simulate(dev, scmd); + return ata_scsi_translate(dev, scmd, xlat_func); - return rc; + ata_scsi_simulate(dev, scmd); + + return 0; bad_cdb_len: - DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", - scmd->cmd_len, scsi_op, dev->cdb_len); scmd->result = DID_ERROR << 16; scsi_done(scmd); return 0; @@ -4097,8 +4029,6 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) spin_lock_irqsave(ap->lock, irq_flags); - ata_scsi_dump_cdb(ap, cmd); - dev = ata_scsi_find_dev(ap, scsidev); if (likely(dev)) rc = __ata_scsi_queuecmd(cmd, dev); @@ -4531,12 +4461,9 @@ void ata_scsi_hotplug(struct work_struct *work) container_of(work, struct ata_port, hotplug_task.work); int i; - if (ap->pflags & ATA_PFLAG_UNLOADING) { - DPRINTK("ENTER/EXIT - unloading\n"); + if (ap->pflags & ATA_PFLAG_UNLOADING) return; - } - DPRINTK("ENTER\n"); mutex_lock(&ap->scsi_scan_mutex); /* Unplug detached devices. 
We cannot use link iterator here @@ -4552,7 +4479,6 @@ void ata_scsi_hotplug(struct work_struct *work) ata_scsi_scan_host(ap, 0); mutex_unlock(&ap->scsi_scan_mutex); - DPRINTK("EXIT\n"); } /** diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index b71ea4a680b0..75217828dfe3 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -18,7 +18,7 @@ #include <linux/module.h> #include <linux/libata.h> #include <linux/highmem.h> - +#include <trace/events/libata.h> #include "libata.h" static struct workqueue_struct *ata_sff_wq; @@ -330,10 +330,6 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select); static void ata_dev_select(struct ata_port *ap, unsigned int device, unsigned int wait, unsigned int can_sleep) { - if (ata_msg_probe(ap)) - ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n", - device, wait); - if (wait) ata_wait_idle(ap); @@ -409,12 +405,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->hob_lbal, ioaddr->lbal_addr); iowrite8(tf->hob_lbam, ioaddr->lbam_addr); iowrite8(tf->hob_lbah, ioaddr->lbah_addr); - VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", - tf->hob_feature, - tf->hob_nsect, - tf->hob_lbal, - tf->hob_lbam, - tf->hob_lbah); } if (is_addr) { @@ -423,18 +413,10 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->lbal, ioaddr->lbal_addr); iowrite8(tf->lbam, ioaddr->lbam_addr); iowrite8(tf->lbah, ioaddr->lbah_addr); - VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", - tf->feature, - tf->nsect, - tf->lbal, - tf->lbam, - tf->lbah); } - if (tf->flags & ATA_TFLAG_DEVICE) { + if (tf->flags & ATA_TFLAG_DEVICE) iowrite8(tf->device, ioaddr->device_addr); - VPRINTK("device 0x%X\n", tf->device); - } ata_wait_idle(ap); } @@ -494,8 +476,6 @@ EXPORT_SYMBOL_GPL(ata_sff_tf_read); */ void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) { - DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); - iowrite8(tf->command, ap->ioaddr.command_addr); ata_sff_pause(ap); } @@ -505,6 +485,7 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command); * ata_tf_to_host - issue ATA taskfile to host controller * @ap: port to which command is being issued * @tf: ATA taskfile register set + * @tag: tag of the associated command * * Issues ATA taskfile register set to ATA host controller, * with proper synchronization with interrupt handler and @@ -514,9 +495,12 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command); * spin_lock_irqsave(host lock) */ static inline void ata_tf_to_host(struct ata_port *ap, - const struct ata_taskfile *tf) + const struct ata_taskfile *tf, + unsigned int tag) { + trace_ata_tf_load(ap, tf); ap->ops->sff_tf_load(ap, tf); + trace_ata_exec_command(ap, tf, tag); ap->ops->sff_exec_command(ap, tf); } @@ -680,7 +664,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) page = nth_page(page, (offset >> PAGE_SHIFT)); offset %= PAGE_SIZE; - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size); /* * Split the transfer when it splits a page boundary. 
Note that the @@ -750,7 +734,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) { /* send SCSI cdb */ - DPRINTK("send cdb\n"); + trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len); WARN_ON_ONCE(qc->dev->cdb_len < 12); ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1); @@ -768,6 +752,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) case ATAPI_PROT_DMA: ap->hsm_task_state = HSM_ST_LAST; /* initiate bmdma */ + trace_ata_bmdma_start(ap, &qc->tf, qc->tag); ap->ops->bmdma_start(qc); break; #endif /* CONFIG_ATA_BMDMA */ @@ -820,7 +805,7 @@ next_sg: /* don't cross page boundaries */ count = min(count, (unsigned int)PAGE_SIZE - offset); - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + trace_atapi_pio_transfer_data(qc, offset, count); /* do the actual data transfer */ buf = kmap_atomic(page); @@ -888,8 +873,6 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) if (unlikely(!bytes)) goto atapi_check; - VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); - if (unlikely(__atapi_pio_bytes(qc, bytes))) goto err_out; ata_sff_sync(ap); /* flush */ @@ -1002,8 +985,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); fsm_start: - DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", - ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); + trace_ata_sff_hsm_state(qc, status); switch (ap->hsm_task_state) { case HSM_ST_FIRST: @@ -1204,8 +1186,7 @@ fsm_start: } /* no more data to transfer */ - DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", - ap->print_id, qc->dev->devno, status); + trace_ata_sff_hsm_command_complete(qc, status); WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); @@ -1262,7 +1243,7 @@ EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); void ata_sff_flush_pio_task(struct ata_port *ap) { - DPRINTK("ENTER\n"); + trace_ata_sff_flush_pio_task(ap); cancel_delayed_work_sync(&ap->sff_pio_task); @@ -1279,9 +1260,6 @@ void ata_sff_flush_pio_task(struct ata_port *ap) spin_unlock_irq(ap->lock); ap->sff_pio_task_link = NULL; - - if (ata_msg_ctl(ap)) - ata_port_dbg(ap, "%s: EXIT\n", __func__); } static void ata_sff_pio_task(struct work_struct *work) @@ -1376,7 +1354,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_POLLING) ata_qc_set_polling(qc); - ata_tf_to_host(ap, &qc->tf); + ata_tf_to_host(ap, &qc->tf, qc->tag); ap->hsm_task_state = HSM_ST_LAST; if (qc->tf.flags & ATA_TFLAG_POLLING) @@ -1388,7 +1366,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_POLLING) ata_qc_set_polling(qc); - ata_tf_to_host(ap, &qc->tf); + ata_tf_to_host(ap, &qc->tf, qc->tag); if (qc->tf.flags & ATA_TFLAG_WRITE) { /* PIO data out protocol */ @@ -1418,7 +1396,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_POLLING) ata_qc_set_polling(qc); - ata_tf_to_host(ap, &qc->tf); + ata_tf_to_host(ap, &qc->tf, qc->tag); ap->hsm_task_state = HSM_ST_FIRST; @@ -1478,8 +1456,7 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap, { u8 status; - VPRINTK("ata%u: protocol %d task_state %d\n", - ap->print_id, qc->tf.protocol, ap->hsm_task_state); + trace_ata_sff_port_intr(qc, hsmv_on_idle); /* Check whether we are expecting interrupt in this state */ switch (ap->hsm_task_state) { @@ -1853,7 +1830,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, 
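The ata_tf_to_host() hunk above pairs each low-level op with a tracepoint instead of an unconditional debug printk. A userspace analogue of that pairing; trace_tf_load() here is a printing stand-in, not the kernel's tracepoint machinery, which compiles down to a static branch when tracing is off:

#include <stdio.h>

struct taskfile { unsigned char command; };

/* stand-in for trace_ata_tf_load() */
static void trace_tf_load(const struct taskfile *tf)
{
        fprintf(stderr, "tf_load: cmd 0x%02X\n", tf->command);
}

static void tf_load(const struct taskfile *tf)
{
        (void)tf;        /* would write the taskfile registers here */
}

static void tf_to_host(const struct taskfile *tf)
{
        trace_tf_load(tf);        /* emit the event first... */
        tf_load(tf);              /* ...then touch the hardware */
}

int main(void)
{
        struct taskfile tf = { .command = 0xEC };        /* IDENTIFY DEVICE */

        tf_to_host(&tf);
        return 0;
}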
return ATA_DEV_NONE; /* determine if device is ATA or ATAPI */ - class = ata_dev_classify(&tf); + class = ata_port_classify(ap, &tf); if (class == ATA_DEV_UNKNOWN) { /* If the device failed diagnostic, it's likely to @@ -1956,8 +1933,6 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, { struct ata_ioports *ioaddr = &ap->ioaddr; - DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); - if (ap->ioaddr.ctl_addr) { /* software reset. causes dev0 to be selected */ iowrite8(ap->ctl, ioaddr->ctl_addr); @@ -1995,8 +1970,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes, int rc; u8 err; - DPRINTK("ENTER\n"); - /* determine if device 0/1 are present */ if (ata_devchk(ap, 0)) devmask |= (1 << 0); @@ -2007,7 +1980,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes, ap->ops->sff_dev_select(ap, 0); /* issue bus reset */ - DPRINTK("about to softreset, devmask=%x\n", devmask); rc = ata_bus_softreset(ap, devmask, deadline); /* if link is occupied, -ENODEV too is an error */ if (rc && (rc != -ENODEV || sata_scr_valid(link))) { @@ -2022,7 +1994,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes, classes[1] = ata_sff_dev_classify(&link->device[1], devmask & (1 << 1), &err); - DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); return 0; } EXPORT_SYMBOL_GPL(ata_sff_softreset); @@ -2055,7 +2026,6 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class, if (online) *class = ata_sff_dev_classify(link->device, 1, NULL); - DPRINTK("EXIT, class=%u\n", *class); return rc; } EXPORT_SYMBOL_GPL(sata_sff_hardreset); @@ -2085,10 +2055,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes) ap->ops->sff_dev_select(ap, 0); /* bail out if no device is present */ - if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { - DPRINTK("EXIT, no device\n"); + if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) return; - } /* set up device control */ if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) { @@ -2123,7 +2091,6 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc) && count < 65536; count += 2) ioread16(ap->ioaddr.data_addr); - /* Can become DEBUG later */ if (count) ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); @@ -2467,8 +2434,6 @@ static int ata_pci_init_one(struct pci_dev *pdev, struct ata_host *host = NULL; int rc; - DPRINTK("ENTER\n"); - pi = ata_sff_find_valid_pi(ppi); if (!pi) { dev_err(&pdev->dev, "no valid port_info specified\n"); @@ -2614,7 +2579,6 @@ static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) prd[pi].addr = cpu_to_le32(addr); prd[pi].flags_len = cpu_to_le32(len & 0xffff); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); pi++; sg_len -= len; @@ -2674,7 +2638,6 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) prd[++pi].addr = cpu_to_le32(addr + 0x8000); } prd[pi].flags_len = cpu_to_le32(blen); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); pi++; sg_len -= len; @@ -2756,8 +2719,11 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) case ATA_PROT_DMA: WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + trace_ata_tf_load(ap, &qc->tf); ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); ap->ops->bmdma_setup(qc); /* set up bmdma */ + trace_ata_bmdma_start(ap, &qc->tf, qc->tag); ap->ops->bmdma_start(qc); /* initiate bmdma */ ap->hsm_task_state = HSM_ST_LAST; break; @@ -2765,7 +2731,9 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) case 
ATAPI_PROT_DMA: WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + trace_ata_tf_load(ap, &qc->tf); ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); ap->ops->bmdma_setup(qc); /* set up bmdma */ ap->hsm_task_state = HSM_ST_FIRST; @@ -2806,13 +2774,14 @@ unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { /* check status of DMA engine */ host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); + trace_ata_bmdma_status(ap, host_stat); /* if it's not our irq... */ if (!(host_stat & ATA_DMA_INTR)) return ata_sff_idle_irq(ap); /* before we do anything else, clear DMA-Start bit */ + trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); ap->ops->bmdma_stop(qc); bmdma_stopped = true; @@ -2881,6 +2850,7 @@ void ata_bmdma_error_handler(struct ata_port *ap) u8 host_stat; host_stat = ap->ops->bmdma_status(ap); + trace_ata_bmdma_status(ap, host_stat); /* BMDMA controllers indicate host bus error by * setting DMA_ERR bit and timing out. As it wasn't @@ -2892,6 +2862,7 @@ void ata_bmdma_error_handler(struct ata_port *ap) thaw = true; } + trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); ap->ops->bmdma_stop(qc); /* if we're gonna thaw, make sure IRQ is clear */ @@ -2925,6 +2896,7 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) if (ata_is_dma(qc->tf.protocol)) { spin_lock_irqsave(ap->lock, flags); + trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); ap->ops->bmdma_stop(qc); spin_unlock_irqrestore(ap->lock, flags); } diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c index 08e001303a82..e0e4d0d5a100 100644 --- a/drivers/ata/libata-trace.c +++ b/drivers/ata/libata-trace.c @@ -39,6 +39,24 @@ libata_trace_parse_status(struct trace_seq *p, unsigned char status) } const char * +libata_trace_parse_host_stat(struct trace_seq *p, unsigned char host_stat) +{ + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "{ "); + if (host_stat & ATA_DMA_INTR) + trace_seq_printf(p, "INTR "); + if (host_stat & ATA_DMA_ERR) + trace_seq_printf(p, "ERR "); + if (host_stat & ATA_DMA_ACTIVE) + trace_seq_printf(p, "ACTIVE "); + trace_seq_putc(p, '}'); + trace_seq_putc(p, 0); + + return ret; +} + +const char * libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action) { const char *ret = trace_seq_buffer_ptr(p); @@ -138,6 +156,35 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags) } const char * +libata_trace_parse_tf_flags(struct trace_seq *p, unsigned int tf_flags) +{ + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "%x", tf_flags); + if (tf_flags) { + trace_seq_printf(p, "{ "); + if (tf_flags & ATA_TFLAG_LBA48) + trace_seq_printf(p, "LBA48 "); + if (tf_flags & ATA_TFLAG_ISADDR) + trace_seq_printf(p, "ISADDR "); + if (tf_flags & ATA_TFLAG_DEVICE) + trace_seq_printf(p, "DEV "); + if (tf_flags & ATA_TFLAG_WRITE) + trace_seq_printf(p, "WRITE "); + if (tf_flags & ATA_TFLAG_LBA) + trace_seq_printf(p, "LBA "); + if (tf_flags & ATA_TFLAG_FUA) + trace_seq_printf(p, "FUA "); + if (tf_flags & ATA_TFLAG_POLLING) + trace_seq_printf(p, "POLL "); + trace_seq_putc(p, '}'); + } + trace_seq_putc(p, 0); + + return ret; +} + +const char * libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd, unsigned char feature, unsigned char hob_nsect) { diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index 34bb4608bdc6..ca129854a88c 100644 --- 
a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -163,7 +163,7 @@ static struct { { AC_ERR_INVALID, "InvalidArg" }, { AC_ERR_OTHER, "Unknown" }, { AC_ERR_NODEV_HINT, "NoDeviceHint" }, - { AC_ERR_NCQ, "NCQError" } + { AC_ERR_NCQ, "NCQError" } }; ata_bitfield_name_match(err, ata_err_names) @@ -321,13 +321,43 @@ int ata_tport_add(struct device *parent, return error; } +/** + * ata_port_classify - determine device type based on ATA-spec signature + * @ap: ATA port device on which the classification should be run + * @tf: ATA taskfile register set for device to be identified + * + * A wrapper around ata_dev_classify() to provide additional logging + * + * RETURNS: + * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP, + * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure. + */ +unsigned int ata_port_classify(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + int i; + unsigned int class = ata_dev_classify(tf); + + /* Start with index '1' to skip the 'unknown' entry */ + for (i = 1; i < ARRAY_SIZE(ata_class_names); i++) { + if (ata_class_names[i].value == class) { + ata_port_dbg(ap, "found %s device by sig\n", + ata_class_names[i].name); + return class; + } + } + + ata_port_info(ap, "found unknown device (class %u)\n", class); + return class; +} +EXPORT_SYMBOL_GPL(ata_port_classify); /* * ATA link attributes */ static int noop(int x) { return x; } -#define ata_link_show_linkspeed(field, format) \ +#define ata_link_show_linkspeed(field, format) \ static ssize_t \ show_ata_link_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ @@ -416,7 +446,7 @@ int ata_tlink_add(struct ata_link *link) dev->release = ata_tlink_release; if (ata_is_host_link(link)) dev_set_name(dev, "link%d", ap->print_id); - else + else dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp); transport_setup_device(dev); @@ -472,7 +502,7 @@ ata_dev_attr(xfer, dma_mode); ata_dev_attr(xfer, xfer_mode); -#define ata_dev_show_simple(field, format_string, cast) \ +#define ata_dev_show_simple(field, format_string, cast) \ static ssize_t \ show_ata_dev_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ @@ -482,9 +512,9 @@ show_ata_dev_##field(struct device *dev, \ return scnprintf(buf, 20, format_string, cast ata_dev->field); \ } -#define ata_dev_simple_attr(field, format_string, type) \ +#define ata_dev_simple_attr(field, format_string, type) \ ata_dev_show_simple(field, format_string, (type)) \ -static DEVICE_ATTR(field, S_IRUGO, \ + static DEVICE_ATTR(field, S_IRUGO, \ show_ata_dev_##field, NULL) ata_dev_simple_attr(spdn_cnt, "%d\n", int); @@ -502,7 +532,7 @@ static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg) seconds = div_u64_rem(ent->timestamp, HZ, &rem); arg->written += sprintf(arg->buf + arg->written, - "[%5llu.%09lu]", seconds, + "[%5llu.%09lu]", seconds, rem * NSEC_PER_SEC / HZ); arg->written += get_ata_err_names(ent->err_mask, arg->buf + arg->written); @@ -667,7 +697,7 @@ static int ata_tdev_add(struct ata_device *ata_dev) dev->release = ata_tdev_release; if (ata_is_host_link(link)) dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); - else + else dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp); transport_setup_device(dev); @@ -689,7 +719,7 @@ static int ata_tdev_add(struct ata_device *ata_dev) */ #define SETUP_TEMPLATE(attrb, field, perm, test) \ - i->private_##attrb[count] = dev_attr_##field; \ + i->private_##attrb[count] = dev_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \
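The ata_port_classify() helper added above scans a value/name table starting at index 1 so the "unknown" sentinel can never match. A self-contained sketch of that lookup; the table entries here are illustrative, not the kernel's full ata_class_names list:

#include <stdio.h>

struct name_ent { unsigned int value; const char *name; };

static const struct name_ent class_names[] = {
        { 0, "unknown" },        /* sentinel, skipped below */
        { 1, "ata" },
        { 2, "atapi" },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *class_name(unsigned int class)
{
        unsigned int i;

        /* start at index 1 to skip the 'unknown' entry */
        for (i = 1; i < ARRAY_SIZE(class_names); i++)
                if (class_names[i].value == class)
                        return class_names[i].name;
        return NULL;        /* caller logs "found unknown device" */
}

int main(void)
{
        const char *n = class_name(2);

        printf("%s\n", n ? n : "unknown");        /* prints "atapi" */
        return 0;
}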
i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 68cdd81d747c..51e01acdd241 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -148,7 +148,6 @@ extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, unsigned int id, u64 lun); void ata_scsi_sdev_config(struct scsi_device *sdev); int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev); -void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd); int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev); /* libata-eh.c */ @@ -166,7 +165,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, extern void ata_eh_done(struct ata_link *link, struct ata_device *dev, unsigned int action); extern void ata_eh_autopsy(struct ata_port *ap); -const char *ata_get_cmd_descript(u8 command); +const char *ata_get_cmd_name(u8 command); extern void ata_eh_report(struct ata_port *ap); extern int ata_eh_reset(struct ata_link *link, int classify, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, @@ -179,7 +178,7 @@ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, extern void ata_eh_finish(struct ata_port *ap); extern int ata_ering_map(struct ata_ering *ering, int (*map_fn)(struct ata_ering_entry *, void *), - void *arg); + void *arg); extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key); extern unsigned int atapi_eh_request_sense(struct ata_device *dev, u8 *sense_buf, u8 dfl_sense_key); diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index b7ff63ed3bbb..1b90cda27246 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -37,7 +37,7 @@ #define DRV_NAME "pata_ali" #define DRV_VERSION "0.7.8" -static int ali_atapi_dma = 0; +static int ali_atapi_dma; module_param_named(atapi_dma, ali_atapi_dma, int, 0644); MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)"); @@ -123,7 +123,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask) mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (strstr(model_num, "WDC")) - return mask &= ~ATA_MASK_UDMA; + mask &= ~ATA_MASK_UDMA; return mask; } diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 63f39440a9b4..24c3d5e1fca3 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -39,6 +39,7 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <trace/events/libata.h> #define DRIVER_NAME "arasan_cf" #define TIMEOUT msecs_to_jiffies(3000) @@ -703,9 +704,11 @@ static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc) case ATA_PROT_DMA: WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + trace_ata_tf_load(ap, &qc->tf); ap->ops->sff_tf_load(ap, &qc->tf); acdev->dma_status = 0; acdev->qc = qc; + trace_ata_bmdma_start(ap, &qc->tf, qc->tag); arasan_cf_dma_start(acdev); ap->hsm_task_state = HSM_ST_LAST; break; diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index 2bc5fc81efe3..779d660415c8 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c @@ -155,7 +155,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap, case 1 ... 6: break; default: - printk(KERN_WARNING "ATP867X: active %dclk is invalid. " + ata_port_warn(ap, "ATP867X: active %dclk is invalid. " "Using 12clk.\n", clk); fallthrough; case 9 ... 
12: @@ -171,7 +171,8 @@ active_clock_shift_done: return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT; } -static int atp867x_get_recover_clocks_shifted(unsigned int clk) +static int atp867x_get_recover_clocks_shifted(struct ata_port *ap, + unsigned int clk) { unsigned char clocks = clk; @@ -188,7 +189,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk) case 15: break; default: - printk(KERN_WARNING "ATP867X: recover %dclk is invalid. " + ata_port_warn(ap, "ATP867X: recover %dclk is invalid. " "Using default 12clk.\n", clk); fallthrough; case 12: /* default 12 clk */ @@ -225,7 +226,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev) iowrite8(b, dp->dma_mode); b = atp867x_get_active_clocks_shifted(ap, t.active) | - atp867x_get_recover_clocks_shifted(t.recover); + atp867x_get_recover_clocks_shifted(ap, t.recover); if (adev->devno & 1) iowrite8(b, dp->slave_piospd); @@ -233,7 +234,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev) iowrite8(b, dp->mstr_piospd); b = atp867x_get_active_clocks_shifted(ap, t.act8b) | - atp867x_get_recover_clocks_shifted(t.rec8b); + atp867x_get_recover_clocks_shifted(ap, t.rec8b); iowrite8(b, dp->eightb_piospd); } @@ -270,7 +271,6 @@ static struct ata_port_operations atp867x_ops = { }; -#ifdef ATP867X_DEBUG static void atp867x_check_res(struct pci_dev *pdev) { int i; @@ -280,7 +280,7 @@ static void atp867x_check_res(struct pci_dev *pdev) for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { start = pci_resource_start(pdev, i); len = pci_resource_len(pdev, i); - printk(KERN_DEBUG "ATP867X: resource start:len=%lx:%lx\n", + dev_dbg(&pdev->dev, "ATP867X: resource start:len=%lx:%lx\n", start, len); } } @@ -290,49 +290,48 @@ static void atp867x_check_ports(struct ata_port *ap, int port) struct ata_ioports *ioaddr = &ap->ioaddr; struct atp867x_priv *dp = ap->private_data; - printk(KERN_DEBUG "ATP867X: port[%d] addresses\n" - " cmd_addr =0x%llx, 0x%llx\n" - " ctl_addr =0x%llx, 0x%llx\n" - " bmdma_addr =0x%llx, 0x%llx\n" - " data_addr =0x%llx\n" - " error_addr =0x%llx\n" - " feature_addr =0x%llx\n" - " nsect_addr =0x%llx\n" - " lbal_addr =0x%llx\n" - " lbam_addr =0x%llx\n" - " lbah_addr =0x%llx\n" - " device_addr =0x%llx\n" - " status_addr =0x%llx\n" - " command_addr =0x%llx\n" - " dp->dma_mode =0x%llx\n" - " dp->mstr_piospd =0x%llx\n" - " dp->slave_piospd =0x%llx\n" - " dp->eightb_piospd =0x%llx\n" + ata_port_dbg(ap, "ATP867X: port[%d] addresses\n" + " cmd_addr =0x%lx, 0x%lx\n" + " ctl_addr =0x%lx, 0x%lx\n" + " bmdma_addr =0x%lx, 0x%lx\n" + " data_addr =0x%lx\n" + " error_addr =0x%lx\n" + " feature_addr =0x%lx\n" + " nsect_addr =0x%lx\n" + " lbal_addr =0x%lx\n" + " lbam_addr =0x%lx\n" + " lbah_addr =0x%lx\n" + " device_addr =0x%lx\n" + " status_addr =0x%lx\n" + " command_addr =0x%lx\n" + " dp->dma_mode =0x%lx\n" + " dp->mstr_piospd =0x%lx\n" + " dp->slave_piospd =0x%lx\n" + " dp->eightb_piospd =0x%lx\n" " dp->pci66mhz =0x%lx\n", port, - (unsigned long long)ioaddr->cmd_addr, - (unsigned long long)ATP867X_IO_PORTBASE(ap, port), - (unsigned long long)ioaddr->ctl_addr, - (unsigned long long)ATP867X_IO_ALTSTATUS(ap, port), - (unsigned long long)ioaddr->bmdma_addr, - (unsigned long long)ATP867X_IO_DMABASE(ap, port), - (unsigned long long)ioaddr->data_addr, - (unsigned long long)ioaddr->error_addr, - (unsigned long long)ioaddr->feature_addr, - (unsigned long long)ioaddr->nsect_addr, - (unsigned long long)ioaddr->lbal_addr, - (unsigned long long)ioaddr->lbam_addr, - (unsigned long long)ioaddr->lbah_addr, - (unsigned 
long long)ioaddr->device_addr, - (unsigned long long)ioaddr->status_addr, - (unsigned long long)ioaddr->command_addr, - (unsigned long long)dp->dma_mode, - (unsigned long long)dp->mstr_piospd, - (unsigned long long)dp->slave_piospd, - (unsigned long long)dp->eightb_piospd, + (unsigned long)ioaddr->cmd_addr, + (unsigned long)ATP867X_IO_PORTBASE(ap, port), + (unsigned long)ioaddr->ctl_addr, + (unsigned long)ATP867X_IO_ALTSTATUS(ap, port), + (unsigned long)ioaddr->bmdma_addr, + (unsigned long)ATP867X_IO_DMABASE(ap, port), + (unsigned long)ioaddr->data_addr, + (unsigned long)ioaddr->error_addr, + (unsigned long)ioaddr->feature_addr, + (unsigned long)ioaddr->nsect_addr, + (unsigned long)ioaddr->lbal_addr, + (unsigned long)ioaddr->lbam_addr, + (unsigned long)ioaddr->lbah_addr, + (unsigned long)ioaddr->device_addr, + (unsigned long)ioaddr->status_addr, + (unsigned long)ioaddr->command_addr, + (unsigned long)dp->dma_mode, + (unsigned long)dp->mstr_piospd, + (unsigned long)dp->slave_piospd, + (unsigned long)dp->eightb_piospd, (unsigned long)dp->pci66mhz); } -#endif static int atp867x_set_priv(struct ata_port *ap) { @@ -370,8 +369,7 @@ static void atp867x_fixup(struct ata_host *host) if (v < 0x80) { v = 0x80; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v); - printk(KERN_DEBUG "ATP867X: set latency timer of device %s" - " to %d\n", pci_name(pdev), v); + dev_dbg(&pdev->dev, "ATP867X: set latency timer to %d\n", v); } /* @@ -419,13 +417,11 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host) return rc; host->iomap = pcim_iomap_table(pdev); -#ifdef ATP867X_DEBUG atp867x_check_res(pdev); for (i = 0; i < PCI_STD_NUM_BARS; i++) - printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i, - (unsigned long long)(host->iomap[i])); -#endif + dev_dbg(gdev, "ATP867X: iomap[%d]=0x%p\n", i, + host->iomap[i]); /* * request, iomap BARs and init port addresses accordingly @@ -444,9 +440,8 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host) if (rc) return rc; -#ifdef ATP867X_DEBUG atp867x_check_ports(ap, i); -#endif + ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", (unsigned long)ioaddr->cmd_addr, (unsigned long)ioaddr->ctl_addr); @@ -486,7 +481,7 @@ static int atp867x_init_one(struct pci_dev *pdev, if (rc) return rc; - printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)", + dev_info(&pdev->dev, "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)", pdev->device); host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS); diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c index d0bcabb58b44..1a3372a72213 100644 --- a/drivers/ata/pata_cmd640.c +++ b/drivers/ata/pata_cmd640.c @@ -61,7 +61,7 @@ static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev) struct ata_device *pair = ata_dev_pair(adev); if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) { - printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); + ata_dev_err(adev, DRV_NAME ": mode computation failed.\n"); return; } diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 1d74d89b5bed..5baa4a7819c1 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -116,7 +116,7 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m /* ata_timing_compute is smart and will produce timings for MWDMA that don't violate the drives PIO capabilities. 
*/ if (ata_timing_compute(adev, mode, &t, T, 0) < 0) { - printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); + ata_dev_err(adev, DRV_NAME ": mode computation failed.\n"); return; } if (ap->port_no) { @@ -130,7 +130,7 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m } } - printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n", + ata_dev_dbg(adev, DRV_NAME ": active %d recovery %d setup %d.\n", t.active, t.recover, t.setup); if (t.recover > 16) { t.active += t.recover - 16; diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 247c14702624..24ce8665b1f9 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -153,12 +153,12 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Perform set up for DMA */ if (pci_enable_device_io(pdev)) { - printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n"); + dev_err(&pdev->dev, "unable to configure BAR2.\n"); return -ENODEV; } if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { - printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); + dev_err(&pdev->dev, "unable to configure DMA mask.\n"); return -ENODEV; } diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index 760ac6e65216..ab47aeb5587f 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -263,12 +263,12 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) ppi[1] = &ata_dummy_port_info; if (use_msr) - printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n"); + dev_err(&dev->dev, DRV_NAME ": Using MSR regs instead of PCI\n"); cs5536_read(dev, CFG, &cfg); if ((cfg & IDE_CFG_CHANEN) == 0) { - printk(KERN_ERR DRV_NAME ": disabled by BIOS\n"); + dev_err(&dev->dev, DRV_NAME ": disabled by BIOS\n"); return -ENODEV; } diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index 5b3a7a8ebef6..3be5d52a777b 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c @@ -62,7 +62,7 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev) u32 addr; if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) { - printk(KERN_ERR DRV_NAME ": mome computation failed.\n"); + ata_dev_err(adev, DRV_NAME ": mode computation failed.\n"); return; } diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index 46208ececbb6..b78f71c70f27 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -855,7 +855,6 @@ static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc) && count < 65536; count += 2) ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA); - /* Can become DEBUG later */ if (count) ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count); diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 06b7c4a9ec95..778c893f276b 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -14,9 +14,6 @@ * TODO * Look into engine reset on timeout errors. Should not be required.
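Several hunks above swap printk(KERN_ERR DRV_NAME ": ...") for dev_err(), which lets the device core prefix the driver and device name automatically. A rough userspace stand-in for that prefixing; the struct, names, and the fixed-string signature are illustrative only, the real dev_err() is printf-style:

#include <stdio.h>

struct device { const char *driver; const char *name; };

/* simplified stand-in: real dev_err() takes a format plus varargs */
static void dev_err(const struct device *dev, const char *msg)
{
        fprintf(stderr, "%s %s: %s", dev->driver, dev->name, msg);
}

int main(void)
{
        struct device pdev = { "pata_cs5520", "0000:00:0b.0" };

        dev_err(&pdev, "unable to configure BAR2.\n");
        return 0;
}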
*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> @@ -183,7 +180,7 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, i = match_string(list, -1, model_num); if (i >= 0) { - pr_warn("%s is not supported for %s\n", modestr, list[i]); + ata_dev_warn(dev, "%s is not supported for %s\n", modestr, list[i]); return 1; } return 0; diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index f242157bc81b..7abc7e04f656 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -14,9 +14,6 @@ * TODO * Look into engine reset on timeout errors. Should not be required. */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> @@ -231,7 +228,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, i = match_string(list, -1, model_num); if (i >= 0) { - pr_warn("%s is not supported for %s\n", modestr, list[i]); + ata_dev_warn(dev, "%s is not supported for %s\n", + modestr, list[i]); return 1; } return 0; @@ -864,7 +862,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) chip_table = &hpt372; break; default: - pr_err("Unknown HPT366 subtype, please report (%d)\n", + dev_err(&dev->dev, + "Unknown HPT366 subtype, please report (%d)\n", rev); return -ENODEV; } @@ -905,7 +904,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) *ppi = &info_hpt374_fn1; break; default: - pr_err("PCI table is bogus, please report (%d)\n", dev->device); + dev_err(&dev->dev, "PCI table is bogus, please report (%d)\n", + dev->device); return -ENODEV; } /* Ok so this is a chip we support */ @@ -953,7 +953,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) u8 sr; u32 total = 0; - pr_warn("BIOS has not set timing clocks\n"); + dev_warn(&dev->dev, "BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { @@ -1009,7 +1009,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) (f_high << 16) | f_low | 0x100); } if (adjust == 8) { - pr_err("DPLL did not stabilize!\n"); + dev_err(&dev->dev, "DPLL did not stabilize!\n"); return -ENODEV; } if (dpll == 3) @@ -1017,7 +1017,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) else private_data = (void *)hpt37x_timings_50; - pr_info("bus clock %dMHz, using %dMHz DPLL\n", + dev_info(&dev->dev, "bus clock %dMHz, using %dMHz DPLL\n", MHz[clock_slot], MHz[dpll]); } else { private_data = (void *)chip_table->clocks[clock_slot]; @@ -1032,7 +1032,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (clock_slot < 2 && ppi[0] == &info_hpt370a) ppi[0] = &info_hpt370a_33; - pr_info("%s using %dMHz bus clock\n", + dev_info(&dev->dev, "%s using %dMHz bus clock\n", chip_table->name, MHz[clock_slot]); } diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 48eef338e050..1d9d4eec5b8a 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -15,9 +15,6 @@ * TODO * Work out best PLL policy */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> @@ -420,7 +417,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev) u16 sr; u32 total = 0; - pr_warn("BIOS clock data not set\n"); + dev_warn(&pdev->dev, "BIOS clock data not 
set\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { @@ -530,7 +527,8 @@ hpt372n: ppi[0] = &info_hpt372n; break; default: - pr_err("PCI table is bogus, please report (%d)\n", dev->device); + dev_err(&dev->dev,"PCI table is bogus, please report (%d)\n", + dev->device); return -ENODEV; } @@ -579,11 +577,11 @@ hpt372n: pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); } if (adjust == 8) { - pr_err("DPLL did not stabilize!\n"); + dev_err(&dev->dev, "DPLL did not stabilize!\n"); return -ENODEV; } - pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz); + dev_info(&dev->dev, "bus clock %dMHz, using 66MHz DPLL\n", pci_mhz); /* * Set our private data up. We only need a few flags diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 0e2265978a34..8a5b4e0079ab 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -431,7 +431,8 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) case ATA_CMD_SET_FEATURES: return ata_bmdma_qc_issue(qc); } - printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); + ata_dev_dbg(qc->dev, "it821x: can't process command 0x%02X\n", + qc->tf.command); return AC_ERR_DEV; } @@ -507,12 +508,14 @@ static void it821x_dev_config(struct ata_device *adev) if (strstr(model_num, "Integrated Technology Express")) { /* RAID mode */ - ata_dev_info(adev, "%sRAID%d volume", - adev->id[147] ? "Bootable " : "", - adev->id[129]); - if (adev->id[129] != 1) - pr_cont("(%dK stripe)", adev->id[146]); - pr_cont("\n"); + if (adev->id[129] == 1) + ata_dev_info(adev, "%sRAID%d volume\n", + adev->id[147] ? "Bootable " : "", + adev->id[129]); + else + ata_dev_info(adev, "%sRAID%d volume (%dK stripe)\n", + adev->id[147] ? "Bootable " : "", + adev->id[129], adev->id[146]); } /* This is a controller firmware triggered funny, don't report the drive faulty! 
*/ @@ -534,7 +537,7 @@ static void it821x_dev_config(struct ata_device *adev) */ static unsigned int it821x_read_id(struct ata_device *adev, - struct ata_taskfile *tf, u16 *id) + struct ata_taskfile *tf, __le16 *id) { unsigned int err_mask; unsigned char model_num[ATA_ID_PROD_LEN + 1]; @@ -542,21 +545,20 @@ static unsigned int it821x_read_id(struct ata_device *adev, err_mask = ata_do_dev_read_id(adev, tf, id); if (err_mask) return err_mask; - ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num)); + ata_id_c_string((u16 *)id, model_num, ATA_ID_PROD, sizeof(model_num)); - id[83] &= ~(1 << 12); /* Cache flush is firmware handled */ - id[83] &= ~(1 << 13); /* Ditto for LBA48 flushes */ - id[84] &= ~(1 << 6); /* No FUA */ - id[85] &= ~(1 << 10); /* No HPA */ - id[76] = 0; /* No NCQ/AN etc */ + id[83] &= cpu_to_le16(~(1 << 12)); /* Cache flush is firmware handled */ + id[84] &= cpu_to_le16(~(1 << 6)); /* No FUA */ + id[85] &= cpu_to_le16(~(1 << 10)); /* No HPA */ + id[76] = 0; /* No NCQ/AN etc */ if (strstr(model_num, "Integrated Technology Express")) { /* Set feature bits the firmware neglects */ - id[49] |= 0x0300; /* LBA, DMA */ - id[83] &= 0x7FFF; - id[83] |= 0x4400; /* Word 83 is valid and LBA48 */ - id[86] |= 0x0400; /* LBA48 on */ - id[ATA_ID_MAJOR_VER] |= 0x1F; + id[49] |= cpu_to_le16(0x0300); /* LBA, DMA */ + id[83] &= cpu_to_le16(0x7FFF); + id[83] |= cpu_to_le16(0x4400); /* Word 83 is valid and LBA48 */ + id[86] |= cpu_to_le16(0x0400); /* LBA48 on */ + id[ATA_ID_MAJOR_VER] |= cpu_to_le16(0x1F); /* Clear the serial number because it's different each boot which breaks validation on resume */ memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN); @@ -593,6 +595,7 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) /** * it821x_display_disk - display disk setup + * @ap: ATA port * @n: Device number * @buf: Buffer block from firmware * @@ -600,7 +603,7 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) * by the firmware. 
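The it821x_read_id() hunk above now treats the raw IDENTIFY buffer as __le16 words and masks bits through cpu_to_le16(), so the edits stay correct on big-endian hosts. A small userspace sketch of that masking; the helpers are minimal stand-ins for the kernel's byte-order macros:

#include <stdint.h>
#include <stdio.h>

/* stand-in for cpu_to_le16(): byte-swap only on big-endian hosts */
static uint16_t cpu_to_le16(uint16_t v)
{
        const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

        return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));
}

static uint16_t le16_to_cpu(uint16_t v)
{
        return cpu_to_le16(v);        /* same operation: swap only on BE */
}

int main(void)
{
        uint16_t id[256] = { 0 };

        id[83] = cpu_to_le16(0xffff);
        id[83] &= cpu_to_le16((uint16_t)~(1 << 12)); /* flush handled in fw */
        printf("word 83: 0x%04x\n", le16_to_cpu(id[83]));        /* 0xefff */
        return 0;
}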
*/ -static void it821x_display_disk(int n, u8 *buf) +static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf) { unsigned char id[41]; int mode = 0; @@ -633,13 +636,13 @@ static void it821x_display_disk(int n, u8 *buf) else strcpy(mbuf, "PIO"); if (buf[52] == 4) - printk(KERN_INFO "%d: %-6s %-8s %s %s\n", + ata_port_info(ap, "%d: %-6s %-8s %s %s\n", n, mbuf, types[buf[52]], id, cbl); else - printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n", + ata_port_info(ap, "%d: %-6s %-8s Volume: %1d %s %s\n", n, mbuf, types[buf[52]], buf[53], id, cbl); if (buf[125] < 100) - printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]); + ata_port_info(ap, "%d: Rebuilding: %d%%\n", n, buf[125]); } /** @@ -676,7 +679,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len) status = ioread8(ap->ioaddr.status_addr); if (status & ATA_ERR) { kfree(buf); - printk(KERN_ERR "it821x_firmware_command: rejected\n"); + ata_port_err(ap, "%s: rejected\n", __func__); return NULL; } if (status & ATA_DRQ) { @@ -686,7 +689,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len) usleep_range(500, 1000); } kfree(buf); - printk(KERN_ERR "it821x_firmware_command: timeout\n"); + ata_port_err(ap, "%s: timeout\n", __func__); return NULL; } @@ -709,13 +712,13 @@ static void it821x_probe_firmware(struct ata_port *ap) buf = it821x_firmware_command(ap, 0xFA, 512); if (buf != NULL) { - printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n", + ata_port_info(ap, "pata_it821x: Firmware %02X/%02X/%02X%02X\n", buf[505], buf[506], buf[507], buf[508]); for (i = 0; i < 4; i++) - it821x_display_disk(i, buf + 128 * i); + it821x_display_disk(ap, i, buf + 128 * i); kfree(buf); } } @@ -771,7 +774,8 @@ static int it821x_port_start(struct ata_port *ap) itdev->timing10 = 1; /* Need to disable ATAPI DMA for this case */ if (!itdev->smart) - printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n"); + dev_warn(&pdev->dev, + "Revision 0x10, workarounds activated.\n"); } return 0; @@ -919,14 +923,14 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } else { /* Force the card into bypass mode if so requested */ if (it8212_noraid) { - printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n"); + dev_info(&pdev->dev, "forcing bypass mode.\n"); it821x_disable_raid(pdev); } pci_read_config_byte(pdev, 0x50, &conf); conf &= 1; - printk(KERN_INFO DRV_NAME": controller in %s mode.\n", - mode[conf]); + dev_info(&pdev->dev, "controller in %s mode.\n", mode[conf]); + if (conf == 0) ppi[0] = &info_passthru; else diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index 99c63087c8ae..17b557c91e1c 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -114,7 +114,7 @@ static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct ixp4xx_pata *ixpp = ap->host->private_data; - ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n", + ata_dev_info(adev, "configured for PIO%d 8bit\n", adev->pio_mode - XFER_PIO_0); ixp4xx_set_8bit_timing(ixpp, adev->pio_mode); } @@ -132,8 +132,8 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc, struct ixp4xx_pata *ixpp = ap->host->private_data; unsigned long flags; - ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE", - buflen); + ata_dev_dbg(adev, "%s %d bytes\n", (rw == READ) ? 
"READ" : "WRITE", + buflen); spin_lock_irqsave(ap->lock, flags); /* set the expansion bus in 16bit mode and restore diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index 361597d14c56..0c5a51970fbf 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -32,7 +32,6 @@ static int marvell_pata_active(struct pci_dev *pdev) { - int i; u32 devices; void __iomem *barp; @@ -44,11 +43,6 @@ static int marvell_pata_active(struct pci_dev *pdev) if (barp == NULL) return -ENOMEM; - printk("BAR5:"); - for(i = 0; i <= 0x0F; i++) - printk("%02X:%02X ", i, ioread8(barp + i)); - printk("\n"); - devices = ioread32(barp + 0x0C); pci_iounmap(pdev, barp); @@ -149,7 +143,8 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i #if IS_ENABLED(CONFIG_SATA_AHCI) if (!marvell_pata_active(pdev)) { - printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n"); + dev_info(&pdev->dev, + "PATA port not active, deferring to AHCI driver.\n"); return -ENODEV; } #endif diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index a7ecc1a204b5..06929e77c491 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c @@ -21,12 +21,13 @@ /* No PIO or DMA methods needed for this device */ static unsigned int netcell_read_id(struct ata_device *adev, - struct ata_taskfile *tf, u16 *id) + struct ata_taskfile *tf, __le16 *id) { unsigned int err_mask = ata_do_dev_read_id(adev, tf, id); + /* Firmware forgets to mark words 85-87 valid */ if (err_mask == 0) - id[ATA_ID_CSF_DEFAULT] |= 0x4000; + id[ATA_ID_CSF_DEFAULT] |= cpu_to_le16(0x4000); return err_mask; } diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index b5a3f710d76d..0912846bc1b0 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -19,7 +19,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <scsi/scsi_host.h> - +#include <trace/events/libata.h> #include <asm/byteorder.h> #include <asm/octeon/octeon.h> @@ -73,16 +73,12 @@ MODULE_PARM_DESC(enable_dma, */ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs) { - unsigned int val; - /* * Compute # of eclock periods to get desired duration in * nanoseconds. 
*/ - val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000), + return DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000), 1000 * tim_mult); - - return val; } static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier) @@ -273,9 +269,9 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev) dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n); dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a); - pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60, + ata_dev_dbg(dev, "ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60, ns_to_tim_reg(tim_mult, 60)); - pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n", + ata_dev_dbg(dev, "oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n", dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause); @@ -440,7 +436,6 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes, int rc; u8 err; - DPRINTK("about to softreset\n"); __raw_writew(ap->ctl, base + 0xe); udelay(20); __raw_writew(ap->ctl | ATA_SRST, base + 0xe); @@ -455,7 +450,6 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes, /* determine by signature whether we have ATA or ATAPI devices */ classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err); - DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); return 0; } @@ -479,23 +473,11 @@ static void octeon_cf_tf_load16(struct ata_port *ap, __raw_writew(tf->hob_feature << 8, base + 0xc); __raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2); __raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4); - VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", - tf->hob_feature, - tf->hob_nsect, - tf->hob_lbal, - tf->hob_lbam, - tf->hob_lbah); } if (is_addr) { __raw_writew(tf->feature << 8, base + 0xc); __raw_writew(tf->nsect | tf->lbal << 8, base + 2); __raw_writew(tf->lbam | tf->lbah << 8, base + 4); - VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", - tf->feature, - tf->nsect, - tf->lbal, - tf->lbam, - tf->lbah); } ata_wait_idle(ap); } @@ -516,20 +498,14 @@ static void octeon_cf_exec_command16(struct ata_port *ap, { /* The base of the registers is at ioaddr.data_addr. */ void __iomem *base = ap->ioaddr.data_addr; - u16 blob; + u16 blob = 0; - if (tf->flags & ATA_TFLAG_DEVICE) { - VPRINTK("device 0x%X\n", tf->device); + if (tf->flags & ATA_TFLAG_DEVICE) blob = tf->device; - } else { - blob = 0; - } - DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); blob |= (tf->command << 8); __raw_writew(blob, base + 6); - ata_wait_idle(ap); } @@ -543,12 +519,10 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc) struct octeon_cf_port *cf_port; cf_port = ap->private_data; - DPRINTK("ENTER\n"); /* issue r/w command */ qc->cursg = qc->sg; cf_port->dma_finished = 0; ap->ops->sff_exec_command(ap, &qc->tf); - DPRINTK("EXIT\n"); } /** @@ -563,8 +537,6 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc) union cvmx_mio_boot_dma_intx mio_boot_dma_int; struct scatterlist *sg; - VPRINTK("%d scatterlists\n", qc->n_elem); - /* Get the scatter list entry we need to DMA into */ sg = qc->cursg; BUG_ON(!sg); @@ -605,10 +577,6 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc) mio_boot_dma_cfg.s.adr = sg_dma_address(sg); - VPRINTK("%s %d bytes address=%p\n", - (mio_boot_dma_cfg.s.rw) ? 
"write" : "read", sg->length, - (void *)(unsigned long)mio_boot_dma_cfg.s.adr); - cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64); } @@ -627,9 +595,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap, union cvmx_mio_boot_dma_intx dma_int; u8 status; - VPRINTK("ata%u: protocol %d task_state %d\n", - ap->print_id, qc->tf.protocol, ap->hsm_task_state); - + trace_ata_bmdma_stop(qc, &qc->tf, qc->tag); if (ap->hsm_task_state != HSM_ST_LAST) return 0; @@ -678,7 +644,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) spin_lock_irqsave(&host->lock, flags); - DPRINTK("ENTER\n"); for (i = 0; i < host->n_ports; i++) { u8 status; struct ata_port *ap; @@ -701,6 +666,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) if (!sg_is_last(qc->cursg)) { qc->cursg = sg_next(qc->cursg); handled = 1; + trace_ata_bmdma_start(ap, &qc->tf, qc->tag); octeon_cf_dma_start(qc); continue; } else { @@ -732,7 +698,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) } } spin_unlock_irqrestore(&host->lock, flags); - DPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } @@ -800,8 +765,11 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc) case ATA_PROT_DMA: WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); + trace_ata_tf_load(ap, &qc->tf); ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); octeon_cf_dma_setup(qc); /* set up dma */ + trace_ata_bmdma_start(ap, &qc->tf, qc->tag); octeon_cf_dma_start(qc); /* initiate dma */ ap->hsm_task_state = HSM_ST_LAST; break; diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c index 35aa158fc976..c3a40b717dcd 100644 --- a/drivers/ata/pata_of_platform.c +++ b/drivers/ata/pata_of_platform.c @@ -25,11 +25,12 @@ static int pata_of_platform_probe(struct platform_device *ofdev) struct device_node *dn = ofdev->dev.of_node; struct resource io_res; struct resource ctl_res; - struct resource *irq_res; + struct resource irq_res; unsigned int reg_shift = 0; int pio_mode = 0; int pio_mask; bool use16bit; + int irq; ret = of_address_to_resource(dn, 0, &io_res); if (ret) { @@ -45,7 +46,15 @@ static int pata_of_platform_probe(struct platform_device *ofdev) return -EINVAL; } - irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); + memset(&irq_res, 0, sizeof(irq_res)); + + irq = platform_get_irq_optional(ofdev, 0); + if (irq < 0 && irq != -ENXIO) + return irq; + if (irq > 0) { + irq_res.start = irq; + irq_res.end = irq; + } of_property_read_u32(dn, "reg-shift", ®_shift); @@ -63,7 +72,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev) pio_mask = 1 << pio_mode; pio_mask |= (1 << pio_mode) - 1; - return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res, + return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq > 0 ? &irq_res : NULL, reg_shift, pio_mask, &pata_platform_sht, use16bit); } diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index effc1a09444d..4fbb3eed8b0b 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -30,13 +30,6 @@ #define DRV_NAME "pata_pdc2027x" #define DRV_VERSION "1.0" -#undef PDC_DEBUG - -#ifdef PDC_DEBUG -#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) -#else -#define PDPRINTK(fmt, args...) 
-#endif enum { PDC_MMIO_BAR = 5, @@ -214,11 +207,11 @@ static int pdc2027x_cable_detect(struct ata_port *ap) if (cgcr & (1 << 26)) goto cbl40; - PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no); + ata_port_dbg(ap, "No cable or 80-conductor cable\n"); return ATA_CBL_PATA80; cbl40: - printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no); + ata_port_info(ap, DRV_NAME ":40-conductor cable detected\n"); return ATA_CBL_PATA40; } @@ -292,17 +285,17 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev) unsigned int pio = adev->pio_mode - XFER_PIO_0; u32 ctcr0, ctcr1; - PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode); + ata_port_dbg(ap, "adev->pio_mode[%X]\n", adev->pio_mode); /* Sanity check */ if (pio > 4) { - printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio); + ata_port_err(ap, "Unknown pio mode [%d] ignored\n", pio); return; } /* Set the PIO timing registers using value table for 133MHz */ - PDPRINTK("Set pio regs... \n"); + ata_port_dbg(ap, "Set pio regs... \n"); ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0)); ctcr0 &= 0xffff0000; @@ -315,9 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev) ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24); iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); - PDPRINTK("Set pio regs done\n"); - - PDPRINTK("Set to pio mode[%u] \n", pio); + ata_port_dbg(ap, "Set to pio mode[%u] \n", pio); } /** @@ -350,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1)); } - PDPRINTK("Set udma regs... \n"); + ata_port_dbg(ap, "Set udma regs... \n"); ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); ctcr1 &= 0xff000000; @@ -359,16 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16); iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); - PDPRINTK("Set udma regs done\n"); - - PDPRINTK("Set to udma mode[%u] \n", udma_mode); + ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode); } else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_2)) { /* Set the MDMA timing registers with value table for 133MHz */ unsigned int mdma_mode = dma_mode & 0x07; - PDPRINTK("Set mdma regs... \n"); + ata_port_dbg(ap, "Set mdma regs... 
\n"); ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0)); ctcr0 &= 0x0000ffff; @@ -376,11 +365,10 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24); iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); - PDPRINTK("Set mdma regs done\n"); - PDPRINTK("Set to mdma mode[%u] \n", mdma_mode); + ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode); } else { - printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode); + ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode); } } @@ -414,7 +402,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed ctcr1 |= (1 << 25); iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1)); - PDPRINTK("Turn on prefetch\n"); + ata_dev_dbg(dev, "Turn on prefetch\n"); } else { pdc2027x_set_dmamode(ap, dev); } @@ -485,8 +473,8 @@ retry: counter = (bccrh << 15) | bccrl; - PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh, bccrl); - PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv); + dev_dbg(host->dev, "bccrh [%X] bccrl [%X]\n", bccrh, bccrl); + dev_dbg(host->dev, "bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv); /* * The 30-bit decreasing counter are read by 2 pieces. @@ -495,7 +483,7 @@ retry: */ if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) { retry--; - PDPRINTK("rereading counter\n"); + dev_dbg(host->dev, "rereading counter\n"); goto retry; } @@ -520,20 +508,19 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b /* Sanity check */ if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) { - printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz); + dev_err(host->dev, "Invalid PLL input clock %ldkHz, give up!\n", + pll_clock_khz); return; } -#ifdef PDC_DEBUG - PDPRINTK("pout_required is %ld\n", pout_required); + dev_dbg(host->dev, "pout_required is %ld\n", pout_required); /* Show the current clock value of PLL control register * (maybe already configured by the firmware) */ pll_ctl = ioread16(mmio_base + PDC_PLL_CTL); - PDPRINTK("pll_ctl[%X]\n", pll_ctl); -#endif + dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl); /* * Calculate the ratio of F, R and OD @@ -552,7 +539,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b R = 0x00; } else { /* Invalid ratio */ - printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio); + dev_err(host->dev, "Invalid ratio %ld, give up!\n", ratio); return; } @@ -560,15 +547,15 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b if (unlikely(F < 0 || F > 127)) { /* Invalid F */ - printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F); + dev_err(host->dev, "F[%d] invalid!\n", F); return; } - PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio); + dev_dbg(host->dev, "F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio); pll_ctl = (R << 8) | F; - PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl); + dev_dbg(host->dev, "Writing pll_ctl[%X]\n", pll_ctl); iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL); ioread16(mmio_base + PDC_PLL_CTL); /* flush */ @@ -576,15 +563,13 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b /* Wait the PLL circuit to be stable */ msleep(30); -#ifdef PDC_DEBUG /* * Show the current clock value of PLL control register * (maybe configured by the firmware) */ pll_ctl = ioread16(mmio_base + PDC_PLL_CTL); - PDPRINTK("pll_ctl[%X]\n", pll_ctl); -#endif + dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl); return; } @@ -605,7 +590,7 @@ static long 
pdc_detect_pll_input_clock(struct ata_host *host) /* Start the test mode */ scr = ioread32(mmio_base + PDC_SYS_CTL); - PDPRINTK("scr[%X]\n", scr); + dev_dbg(host->dev, "scr[%X]\n", scr); iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL); ioread32(mmio_base + PDC_SYS_CTL); /* flush */ @@ -622,7 +607,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) /* Stop the test mode */ scr = ioread32(mmio_base + PDC_SYS_CTL); - PDPRINTK("scr[%X]\n", scr); + dev_dbg(host->dev, "scr[%X]\n", scr); iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL); ioread32(mmio_base + PDC_SYS_CTL); /* flush */ @@ -632,8 +617,8 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 * (100000000 / usec_elapsed); - PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count); - PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock); + dev_dbg(host->dev, "start[%ld] end[%ld] PLL input clock[%ld]HZ\n", + start_count, end_count, pll_clock); return pll_clock; } diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 0c5cbcd28d0d..b99849095853 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -38,8 +38,6 @@ static int pdc2026x_cable_detect(struct ata_port *ap) static void pdc202xx_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) { - DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); - iowrite8(tf->command, ap->ioaddr.command_addr); ndelay(400); } diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c index 3722a67083fd..fb00c3e5fd19 100644 --- a/drivers/ata/pata_rz1000.c +++ b/drivers/ata/pata_rz1000.c @@ -69,7 +69,7 @@ static int rz1000_fifo_disable(struct pci_dev *pdev) reg &= 0xDFFF; if (pci_write_config_word(pdev, 0x40, reg) != 0) return -1; - printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n"); + dev_info(&pdev->dev, "disabled chipset readahead.\n"); return 0; } @@ -97,7 +97,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en if (rz1000_fifo_disable(pdev) == 0) return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0); - printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n"); + dev_err(&pdev->dev, "failed to disable read-ahead on chipset.\n"); /* Not safe to use so skip */ return -ENODEV; } diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index b602e303fb54..e410fe44177f 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -286,13 +286,13 @@ static int serverworks_fixup_osb4(struct pci_dev *pdev) pci_read_config_dword(isa_dev, 0x64, &reg); reg &= ~0x00002000; /* disable 600ns interrupt mask */ if (!(reg & 0x00004000)) - printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n"); + dev_info(&pdev->dev, "UDMA not BIOS enabled.\n"); reg |= 0x00004000; /* enable UDMA/33 support */ pci_write_config_dword(isa_dev, 0x64, reg); pci_dev_put(isa_dev); return 0; } - printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n"); + dev_warn(&pdev->dev, "Unable to find bridge.\n"); return -ENODEV; } diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 43215a664b96..0da58ce20d82 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -212,7 +212,6 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) static void sil680_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) { - DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); iowrite8(tf->command,
ap->ioaddr.command_addr); ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); } @@ -309,17 +308,17 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio) switch (tmpbyte & 0x30) { case 0x00: - printk(KERN_INFO "sil680: 100MHz clock.\n"); + dev_info(&pdev->dev, "sil680: 100MHz clock.\n"); break; case 0x10: - printk(KERN_INFO "sil680: 133MHz clock.\n"); + dev_info(&pdev->dev, "sil680: 133MHz clock.\n"); break; case 0x20: - printk(KERN_INFO "sil680: Using PCI clock.\n"); + dev_info(&pdev->dev, "sil680: Using PCI clock.\n"); break; /* This last case is _NOT_ ok */ case 0x30: - printk(KERN_ERR "sil680: Clock disabled ?\n"); + dev_err(&pdev->dev, "sil680: Clock disabled ?\n"); } return tmpbyte & 0x30; } diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 475032048984..439ca882f73c 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -414,12 +414,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->hob_lbal, ioaddr->lbal_addr); iowrite8(tf->hob_lbam, ioaddr->lbam_addr); iowrite8(tf->hob_lbah, ioaddr->lbah_addr); - VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", - tf->hob_feature, - tf->hob_nsect, - tf->hob_lbal, - tf->hob_lbam, - tf->hob_lbah); } if (is_addr) { @@ -428,12 +422,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->lbal, ioaddr->lbal_addr); iowrite8(tf->lbam, ioaddr->lbam_addr); iowrite8(tf->lbah, ioaddr->lbah_addr); - VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", - tf->feature, - tf->nsect, - tf->lbal, - tf->lbam, - tf->lbah); } ata_wait_idle(ap); diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 5db55e1e2a61..35b823ac20c9 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -284,9 +284,6 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) *(__le32 *)(buf + i) = (pFLAGS & pEND) ? 
0 : cpu_to_le32(pp->pkt_dma + i + 4); i += 4; - - VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, - (unsigned long)addr, len); } if (likely(last_buf)) @@ -302,8 +299,6 @@ static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) u32 pkt_dma = (u32)pp->pkt_dma; int i = 0; - VPRINTK("ENTER\n"); - adma_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) return AC_ERR_OK; @@ -355,22 +350,6 @@ static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) i = adma_fill_sg(qc); wmb(); /* flush PRDs and pkt to memory */ -#if 0 - /* dump out CPB + PRDs for debug */ - { - int j, len = 0; - static char obuf[2048]; - for (j = 0; j < i; ++j) { - len += sprintf(obuf+len, "%02x ", buf[j]); - if ((j & 7) == 7) { - printk("%s\n", obuf); - len = 0; - } - } - if (len) - printk("%s\n", obuf); - } -#endif return AC_ERR_OK; } @@ -379,8 +358,6 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; void __iomem *chan = ADMA_PORT_REGS(ap); - VPRINTK("ENTER, ap %p\n", ap); - /* fire up the ADMA engine */ writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); } @@ -475,8 +452,6 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host) u8 status = ata_sff_check_status(ap); if ((status & ATA_BUSY)) continue; - DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", - ap->print_id, qc->tf.protocol, status); /* complete taskfile transaction */ pp->state = adma_state_idle; @@ -504,14 +479,10 @@ static irqreturn_t adma_intr(int irq, void *dev_instance) struct ata_host *host = dev_instance; unsigned int handled = 0; - VPRINTK("ENTER\n"); - spin_lock(&host->lock); handled = adma_intr_pkt(host) | adma_intr_mmio(host); spin_unlock(&host->lock); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(handled); } @@ -547,8 +518,8 @@ static int adma_port_start(struct ata_port *ap) return -ENOMEM; /* paranoia? */ if ((pp->pkt_dma & 7) != 0) { - printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n", - (u32)pp->pkt_dma); + ata_port_err(ap, "bad alignment for pp->pkt_dma: %08x\n", + (u32)pp->pkt_dma); return -ENOMEM; } ap->private_data = pp; diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 338c2e50f759..bec33d781ae0 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -14,15 +14,6 @@ * COPYRIGHT (C) 2005 SYNOPSYS, INC. 
ALL RIGHTS RESERVED */ -#ifdef CONFIG_SATA_DWC_DEBUG -#define DEBUG -#endif - -#ifdef CONFIG_SATA_DWC_VDEBUG -#define VERBOSE_DEBUG -#define DEBUG_NCQ -#endif - #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> @@ -34,6 +25,7 @@ #include <linux/phy/phy.h> #include <linux/libata.h> #include <linux/slab.h> +#include <trace/events/libata.h> #include "libata.h" @@ -182,10 +174,8 @@ enum { * Prototypes */ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); -static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, - u32 check_status); -static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); -static void sata_dwc_port_stop(struct ata_port *ap); +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc); +static void sata_dwc_dma_xfer_complete(struct ata_port *ap); static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); #ifdef CONFIG_SATA_DWC_OLD_DMA @@ -215,9 +205,10 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp) { struct sata_dwc_device *hsdev = hsdevp->hsdev; struct dw_dma_slave *dws = &sata_dwc_dma_dws; + struct device *dev = hsdev->dev; dma_cap_mask_t mask; - dws->dma_dev = hsdev->dev; + dws->dma_dev = dev; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -225,8 +216,7 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp) /* Acquire DMA channel */ hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp); if (!hsdevp->chan) { - dev_err(hsdev->dev, "%s: dma channel unavailable\n", - __func__); + dev_err(dev, "%s: dma channel unavailable\n", __func__); return -EAGAIN; } @@ -236,26 +226,25 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp) static int sata_dwc_dma_init_old(struct platform_device *pdev, struct sata_dwc_device *hsdev) { - struct device_node *np = pdev->dev.of_node; - struct resource *res; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; - hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL); + hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL); if (!hsdev->dma) return -ENOMEM; - hsdev->dma->dev = &pdev->dev; + hsdev->dma->dev = dev; hsdev->dma->id = pdev->id; /* Get SATA DMA interrupt number */ hsdev->dma->irq = irq_of_parse_and_map(np, 1); if (hsdev->dma->irq == NO_IRQ) { - dev_err(&pdev->dev, "no SATA DMA irq\n"); + dev_err(dev, "no SATA DMA irq\n"); return -ENODEV; } /* Get physical SATA DMA register base address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res); + hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(hsdev->dma->regs)) return PTR_ERR(hsdev->dma->regs); @@ -297,35 +286,6 @@ static const char *get_prot_descript(u8 protocol) } } -static const char *get_dma_dir_descript(int dma_dir) -{ - switch ((enum dma_data_direction)dma_dir) { - case DMA_BIDIRECTIONAL: - return "bidirectional"; - case DMA_TO_DEVICE: - return "to device"; - case DMA_FROM_DEVICE: - return "from device"; - default: - return "none"; - } -} - -static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf) -{ - dev_vdbg(ap->dev, - "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n", - tf->command, get_prot_descript(tf->protocol), tf->flags, - tf->device); - dev_vdbg(ap->dev, - "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n", - tf->feature, tf->nsect, tf->lbal, tf->lbam, 
tf->lbah); - dev_vdbg(ap->dev, - "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", - tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, - tf->hob_lbah); -} - static void dma_dwc_xfer_done(void *hsdev_instance) { unsigned long flags; @@ -355,7 +315,7 @@ static void dma_dwc_xfer_done(void *hsdev_instance) } if ((hsdevp->dma_interrupt_count % 2) == 0) - sata_dwc_dma_xfer_complete(ap, 1); + sata_dwc_dma_xfer_complete(ap); spin_unlock_irqrestore(&host->lock, flags); } @@ -553,6 +513,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) * active tag. It is the tag that matches the command about to * be completed. */ + trace_ata_bmdma_start(ap, &qc->tf, tag); qc->ap->link.active_tag = tag; sata_dwc_bmdma_start_by_tag(qc, tag); @@ -586,7 +547,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) if (status & ATA_ERR) { dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); - sata_dwc_qc_complete(ap, qc, 1); + sata_dwc_qc_complete(ap, qc); handled = 1; goto DONE; } @@ -611,13 +572,13 @@ DRVSTILLBUSY: } if ((hsdevp->dma_interrupt_count % 2) == 0) - sata_dwc_dma_xfer_complete(ap, 1); + sata_dwc_dma_xfer_complete(ap); } else if (ata_is_pio(qc->tf.protocol)) { ata_sff_hsm_move(ap, qc, status, 0); handled = 1; goto DONE; } else { - if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + if (unlikely(sata_dwc_qc_complete(ap, qc))) goto DRVSTILLBUSY; } @@ -677,7 +638,7 @@ DRVSTILLBUSY: if (status & ATA_ERR) { dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__, status); - sata_dwc_qc_complete(ap, qc, 1); + sata_dwc_qc_complete(ap, qc); handled = 1; goto DONE; } @@ -692,9 +653,9 @@ DRVSTILLBUSY: dev_warn(ap->dev, "%s: DMA not pending?\n", __func__); if ((hsdevp->dma_interrupt_count % 2) == 0) - sata_dwc_dma_xfer_complete(ap, 1); + sata_dwc_dma_xfer_complete(ap); } else { - if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + if (unlikely(sata_dwc_qc_complete(ap, qc))) goto STILLBUSY; } continue; @@ -749,7 +710,7 @@ static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) } } -static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) +static void sata_dwc_dma_xfer_complete(struct ata_port *ap) { struct ata_queued_cmd *qc; struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); @@ -763,17 +724,6 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) return; } -#ifdef DEBUG_NCQ - if (tag > 0) { - dev_info(ap->dev, - "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n", - __func__, qc->hw_tag, qc->tf.command, - get_dma_dir_descript(qc->dma_dir), - get_prot_descript(qc->tf.protocol), - sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); - } -#endif - if (ata_is_dma(qc->tf.protocol)) { if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { dev_err(ap->dev, @@ -783,15 +733,14 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) } hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; - sata_dwc_qc_complete(ap, qc, check_status); + sata_dwc_qc_complete(ap, qc); ap->link.active_tag = ATA_TAG_POISON; } else { - sata_dwc_qc_complete(ap, qc, check_status); + sata_dwc_qc_complete(ap, qc); } } -static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, - u32 check_status) +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc) { u8 status = 0; u32 mask = 0x0; @@ -799,7 +748,6 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); struct 
sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); hsdev->sactive_queued = 0; - dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status); if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) dev_err(ap->dev, "TX DMA PENDING\n"); @@ -980,9 +928,6 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap, { struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); - dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, - ata_get_cmd_descript(tf->command), tag); - hsdevp->cmd_issued[tag] = cmd_issued; /* @@ -1005,12 +950,9 @@ static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) { u8 tag = qc->hw_tag; - if (ata_is_ncq(qc->tf.protocol)) { - dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", - __func__, qc->ap->link.sactive, tag); - } else { + if (!ata_is_ncq(qc->tf.protocol)) tag = 0; - } + sata_dwc_bmdma_setup_by_tag(qc, tag); } @@ -1037,12 +979,6 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) start_dma = 0; } - dev_dbg(ap->dev, - "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n", - __func__, qc, tag, qc->tf.command, - get_dma_dir_descript(qc->dma_dir), start_dma); - sata_dwc_tf_dump(ap, &qc->tf); - if (start_dma) { sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg); if (reg & SATA_DWC_SERROR_ERR_BITS) { @@ -1067,13 +1003,9 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) { u8 tag = qc->hw_tag; - if (ata_is_ncq(qc->tf.protocol)) { - dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", - __func__, qc->ap->link.sactive, tag); - } else { + if (!ata_is_ncq(qc->tf.protocol)) tag = 0; - } - dev_dbg(qc->ap->dev, "%s\n", __func__); + sata_dwc_bmdma_start_by_tag(qc, tag); } @@ -1084,16 +1016,6 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); -#ifdef DEBUG_NCQ - if (qc->hw_tag > 0 || ap->link.sactive > 1) - dev_info(ap->dev, - "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", - __func__, ap->print_id, qc->tf.command, - ata_get_cmd_descript(qc->tf.command), - qc->hw_tag, get_prot_descript(qc->tf.protocol), - ap->link.active_tag, ap->link.sactive); -#endif - if (!ata_is_ncq(qc->tf.protocol)) tag = 0; @@ -1110,11 +1032,9 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) sactive |= (0x00000001 << tag); sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive); - dev_dbg(qc->ap->dev, - "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n", - __func__, tag, qc->ap->link.sactive, sactive); - + trace_ata_tf_load(ap, &qc->tf); ap->ops->sff_tf_load(ap, &qc->tf); + trace_ata_exec_command(ap, &qc->tf, tag); sata_dwc_exec_command_by_tag(ap, &qc->tf, tag, SATA_DWC_CMD_ISSUED_PEND); } else { @@ -1207,6 +1127,8 @@ static const struct ata_port_info sata_dwc_port_info[] = { static int sata_dwc_probe(struct platform_device *ofdev) { + struct device *dev = &ofdev->dev; + struct device_node *np = dev->of_node; struct sata_dwc_device *hsdev; u32 idr, versionr; char *ver = (char *)&versionr; @@ -1216,23 +1138,21 @@ static int sata_dwc_probe(struct platform_device *ofdev) struct ata_host *host; struct ata_port_info pi = sata_dwc_port_info[0]; const struct ata_port_info *ppi[] = { &pi, NULL }; - struct device_node *np = ofdev->dev.of_node; struct resource *res; /* Allocate DWC SATA device */ - host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); - hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL); + host = ata_host_alloc_pinfo(dev, ppi,
SATA_DWC_MAX_PORTS); + hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL); if (!host || !hsdev) return -ENOMEM; host->private_data = hsdev; /* Ioremap SATA registers */ - res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&ofdev->dev, res); + base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); - dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); + dev_dbg(dev, "ioremap done for SATA register address\n"); /* Synopsys DWC SATA specific Registers */ hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET; @@ -1246,11 +1166,10 @@ static int sata_dwc_probe(struct platform_device *ofdev) /* Read the ID and Version Registers */ idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr); versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr); - dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n", - idr, ver[0], ver[1], ver[2]); + dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]); /* Save dev for later use in dev_xxx() routines */ - hsdev->dev = &ofdev->dev; + hsdev->dev = dev; /* Enable SATA Interrupts */ sata_dwc_enable_interrupts(hsdev); @@ -1258,7 +1177,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) /* Get SATA interrupt number */ irq = irq_of_parse_and_map(np, 0); if (irq == NO_IRQ) { - dev_err(&ofdev->dev, "no SATA DMA irq\n"); + dev_err(dev, "no SATA DMA irq\n"); return -ENODEV; } @@ -1270,7 +1189,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) } #endif - hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy"); + hsdev->phy = devm_phy_optional_get(dev, "sata-phy"); if (IS_ERR(hsdev->phy)) return PTR_ERR(hsdev->phy); @@ -1285,7 +1204,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) */ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); if (err) - dev_err(&ofdev->dev, "failed to activate host"); + dev_err(dev, "failed to activate host"); return 0; @@ -1309,7 +1228,7 @@ static int sata_dwc_remove(struct platform_device *ofdev) sata_dwc_dma_exit_old(hsdev); #endif - dev_dbg(&ofdev->dev, "done\n"); + dev_dbg(dev, "done\n"); return 0; } diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 3b31a4f596d8..da0152116d9f 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -221,10 +221,10 @@ enum { * 4 Dwords per command slot, command header size == 64 Dwords. 
*/ struct cmdhdr_tbl_entry { - u32 cda; - u32 prde_fis_len; - u32 ttl; - u32 desc_info; + __le32 cda; + __le32 prde_fis_len; + __le32 ttl; + __le32 desc_info; }; /* @@ -246,8 +246,10 @@ enum { struct command_desc { u8 cfis[8 * 4]; u8 sfis[8 * 4]; - u8 acmd[4 * 4]; - u8 fill[4 * 4]; + struct_group(cdb, + u8 acmd[4 * 4]; + u8 fill[4 * 4]; + ); u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4]; u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4]; }; @@ -257,9 +259,9 @@ struct command_desc { */ struct prde { - u32 dba; + __le32 dba; u8 fill[2 * 4]; - u32 ddc_and_ext; + __le32 ddc_and_ext; }; /* @@ -311,16 +313,16 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, intr_coalescing_ticks = ticks; spin_unlock_irqrestore(&host->lock, flags); - DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", - intr_coalescing_count, intr_coalescing_ticks); - DPRINTK("ICC register status: (hcr base: %p) = 0x%x\n", - hcr_base, ioread32(hcr_base + ICC)); + dev_dbg(host->dev, "interrupt coalescing, count = 0x%x, ticks = %x\n", + intr_coalescing_count, intr_coalescing_ticks); + dev_dbg(host->dev, "ICC register status: (hcr base: 0x%p) = 0x%x\n", + hcr_base, ioread32(hcr_base + ICC)); } static ssize_t fsl_sata_intr_coalescing_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d %d\n", + return sysfs_emit(buf, "%d %d\n", intr_coalescing_count, intr_coalescing_ticks); } @@ -355,9 +357,9 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev, spin_lock_irqsave(&host->lock, flags); rx_watermark = ioread32(csr_base + TRANSCFG); rx_watermark &= 0x1f; - spin_unlock_irqrestore(&host->lock, flags); - return sprintf(buf, "%d\n", rx_watermark); + + return sysfs_emit(buf, "%d\n", rx_watermark); } static ssize_t fsl_sata_rx_watermark_store(struct device *dev, @@ -385,25 +387,27 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev, return strlen(buf); } -static inline unsigned int sata_fsl_tag(unsigned int tag, +static inline unsigned int sata_fsl_tag(struct ata_port *ap, + unsigned int tag, void __iomem *hcr_base) { /* We let libATA core do actual (queue) tag allocation */ if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) { - DPRINTK("tag %d invalid : out of range\n", tag); + ata_port_dbg(ap, "tag %d invalid : out of range\n", tag); return 0; } if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) { - DPRINTK("tag %d invalid : in use!!\n", tag); + ata_port_dbg(ap, "tag %d invalid : in use!!\n", tag); return 0; } return tag; } -static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp, +static void sata_fsl_setup_cmd_hdr_entry(struct ata_port *ap, + struct sata_fsl_port_priv *pp, unsigned int tag, u32 desc_info, u32 data_xfer_len, u8 num_prde, u8 fis_len) @@ -421,11 +425,11 @@ static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp, pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03); pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F)); - VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n", - pp->cmdslot[tag].cda, - pp->cmdslot[tag].prde_fis_len, - pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info); - + ata_port_dbg(ap, "cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n", + le32_to_cpu(pp->cmdslot[tag].cda), + le32_to_cpu(pp->cmdslot[tag].prde_fis_len), + le32_to_cpu(pp->cmdslot[tag].ttl), + le32_to_cpu(pp->cmdslot[tag].desc_info)); } static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, @@ -447,8 +451,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, 
void *cmd_desc, dma_addr_t indirect_ext_segment_paddr; unsigned int si; - VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd); - indirect_ext_segment_paddr = cmd_desc_paddr + SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16; @@ -456,9 +458,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, dma_addr_t sg_addr = sg_dma_address(sg); u32 sg_len = sg_dma_len(sg); - VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%llx, sg_len = %d\n", - (unsigned long long)sg_addr, sg_len); - /* warn if each s/g element is not dword aligned */ if (unlikely(sg_addr & 0x03)) ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n", @@ -469,7 +468,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) && sg_next(sg) != NULL) { - VPRINTK("setting indirect prde\n"); prd_ptr_to_indirect_ext = prd; prd->dba = cpu_to_le32(indirect_ext_segment_paddr); indirect_ext_segment_sz = 0; @@ -481,9 +479,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, prd->dba = cpu_to_le32(sg_addr); prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03)); - VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n", - ttl_dwords, prd->dba, prd->ddc_and_ext); - ++num_prde; ++prd; if (prd_ptr_to_indirect_ext) @@ -508,7 +503,7 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) struct sata_fsl_port_priv *pp = ap->private_data; struct sata_fsl_host_priv *host_priv = ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; - unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base); + unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base); struct command_desc *cd; u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE; u32 num_prde = 0; @@ -520,19 +515,11 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis); - VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n", - cd->cfis[0], cd->cfis[1], cd->cfis[2]); - - if (qc->tf.protocol == ATA_PROT_NCQ) { - VPRINTK("FPDMA xfer,Sctor cnt[0:7],[8:15] = %d,%d\n", - cd->cfis[3], cd->cfis[11]); - } - /* setup "ACMD - atapi command" in cmd. desc. 
if this is ATAPI cmd */ if (ata_is_atapi(qc->tf.protocol)) { desc_info |= ATAPI_CMD; - memset((void *)&cd->acmd, 0, 32); - memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len); + memset(&cd->cdb, 0, sizeof(cd->cdb)); + memcpy(&cd->cdb, qc->cdb, qc->dev->cdb_len); } if (qc->flags & ATA_QCFLAG_DMAMAP) @@ -543,10 +530,10 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) if (qc->tf.protocol == ATA_PROT_NCQ) desc_info |= FPDMA_QUEUED_CMD; - sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords, + sata_fsl_setup_cmd_hdr_entry(ap, pp, tag, desc_info, ttl_dwords, num_prde, 5); - VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", + ata_port_dbg(ap, "SATA FSL : di = 0x%x, ttl = %d, num_prde = %d\n", desc_info, ttl_dwords, num_prde); return AC_ERR_OK; @@ -557,9 +544,9 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct sata_fsl_host_priv *host_priv = ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; - unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base); + unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base); - VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n", + ata_port_dbg(ap, "CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n", ioread32(CQ + hcr_base), ioread32(CA + hcr_base), ioread32(CE + hcr_base), ioread32(CC + hcr_base)); @@ -569,10 +556,10 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) /* Simply queue command to the controller/device */ iowrite32(1 << tag, CQ + hcr_base); - VPRINTK("xx_qc_issue called, tag=%d, CQ=0x%x, CA=0x%x\n", + ata_port_dbg(ap, "tag=%d, CQ=0x%x, CA=0x%x\n", tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base)); - VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n", + ata_port_dbg(ap, "CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n", ioread32(CE + hcr_base), ioread32(DE + hcr_base), ioread32(CC + hcr_base), @@ -586,7 +573,7 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc) struct sata_fsl_port_priv *pp = qc->ap->private_data; struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; - unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base); + unsigned int tag = sata_fsl_tag(qc->ap, qc->hw_tag, hcr_base); struct command_desc *cd; cd = pp->cmdentry + tag; @@ -613,7 +600,7 @@ static int sata_fsl_scr_write(struct ata_link *link, return -EINVAL; } - VPRINTK("xx_scr_write, reg_in = %d\n", sc_reg); + ata_link_dbg(link, "reg_in = %d\n", sc_reg); iowrite32(val, ssr_base + (sc_reg * 4)); return 0; @@ -637,7 +624,7 @@ static int sata_fsl_scr_read(struct ata_link *link, return -EINVAL; } - VPRINTK("xx_scr_read, reg_in = %d\n", sc_reg); + ata_link_dbg(link, "reg_in = %d\n", sc_reg); *val = ioread32(ssr_base + (sc_reg * 4)); return 0; @@ -649,18 +636,18 @@ static void sata_fsl_freeze(struct ata_port *ap) void __iomem *hcr_base = host_priv->hcr_base; u32 temp; - VPRINTK("xx_freeze, CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n", + ata_port_dbg(ap, "CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n", ioread32(CQ + hcr_base), ioread32(CA + hcr_base), ioread32(CE + hcr_base), ioread32(DE + hcr_base)); - VPRINTK("CmdStat = 0x%x\n", + ata_port_dbg(ap, "CmdStat = 0x%x\n", ioread32(host_priv->csr_base + COMMANDSTAT)); /* disable interrupts on the controller/port */ temp = ioread32(hcr_base + HCONTROL); iowrite32((temp & ~0x3F), hcr_base + HCONTROL); - VPRINTK("in xx_freeze : HControl = 0x%x, HStatus = 0x%x\n", + ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n", ioread32(hcr_base + HCONTROL), 
ioread32(hcr_base + HSTATUS)); } @@ -673,7 +660,7 @@ static void sata_fsl_thaw(struct ata_port *ap) /* ack. any pending IRQs for this controller/port */ temp = ioread32(hcr_base + HSTATUS); - VPRINTK("xx_thaw, pending IRQs = 0x%x\n", (temp & 0x3F)); + ata_port_dbg(ap, "pending IRQs = 0x%x\n", (temp & 0x3F)); if (temp & 0x3F) iowrite32((temp & 0x3F), hcr_base + HSTATUS); @@ -682,7 +669,7 @@ static void sata_fsl_thaw(struct ata_port *ap) temp = ioread32(hcr_base + HCONTROL); iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL); - VPRINTK("xx_thaw : HControl = 0x%x, HStatus = 0x%x\n", + ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n", ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); } @@ -744,8 +731,9 @@ static int sata_fsl_port_start(struct ata_port *ap) ap->private_data = pp; - VPRINTK("CHBA = 0x%x, cmdentry_phys = 0x%x\n", - pp->cmdslot_paddr, pp->cmdentry_paddr); + ata_port_dbg(ap, "CHBA = 0x%lx, cmdentry_phys = 0x%lx\n", + (unsigned long)pp->cmdslot_paddr, + (unsigned long)pp->cmdentry_paddr); /* Now, update the CHBA register in host controller cmd register set */ iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA); @@ -761,9 +749,9 @@ static int sata_fsl_port_start(struct ata_port *ap) temp = ioread32(hcr_base + HCONTROL); iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL); - VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); - VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); - VPRINTK("CHBA = 0x%x\n", ioread32(hcr_base + CHBA)); + ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); + ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + ata_port_dbg(ap, "CHBA = 0x%x\n", ioread32(hcr_base + CHBA)); return 0; } @@ -803,16 +791,15 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap) temp = ioread32(hcr_base + SIGNATURE); - VPRINTK("raw sig = 0x%x\n", temp); - VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); - VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); + ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); tf.lbah = (temp >> 24) & 0xff; tf.lbam = (temp >> 16) & 0xff; tf.lbal = (temp >> 8) & 0xff; tf.nsect = temp & 0xff; - return ata_dev_classify(&tf); + return ata_port_classify(ap, &tf); } static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class, @@ -825,8 +812,6 @@ static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class, int i = 0; unsigned long start_jiffies; - DPRINTK("in xx_hardreset\n"); - try_offline_again: /* * Force host controller to go off-line, aborting current operations @@ -852,9 +837,10 @@ try_offline_again: goto try_offline_again; } - DPRINTK("hardreset, controller off-lined\n"); - VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); - VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + ata_port_dbg(ap, "hardreset, controller off-lined\n" + "HStatus = 0x%x HControl = 0x%x\n", + ioread32(hcr_base + HSTATUS), + ioread32(hcr_base + HCONTROL)); /* * PHY reset should remain asserted for atleast 1ms @@ -882,9 +868,10 @@ try_offline_again: goto err; } - DPRINTK("hardreset, controller off-lined & on-lined\n"); - VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); - VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + ata_port_dbg(ap, "controller off-lined & on-lined\n" + "HStatus = 0x%x HControl = 0x%x\n", + ioread32(hcr_base + HSTATUS), + ioread32(hcr_base + HCONTROL)); /* * First, 
wait for the PHYRDY change to occur before waiting for @@ -941,10 +928,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, u8 *cfis; u32 Serror; - DPRINTK("in xx_softreset\n"); - if (ata_link_offline(link)) { - DPRINTK("PHY reports no device\n"); *class = ATA_DEV_NONE; return 0; } @@ -957,19 +941,17 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, * reached here, we can send a command to the target device */ - DPRINTK("Sending SRST/device reset\n"); - ata_tf_init(link->device, &tf); cfis = (u8 *) &pp->cmdentry->cfis; /* device reset/SRST is a control register update FIS, uses tag0 */ - sata_fsl_setup_cmd_hdr_entry(pp, 0, + sata_fsl_setup_cmd_hdr_entry(ap, pp, 0, SRST_CMD | CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */ ata_tf_to_fis(&tf, pmp, 0, cfis); - DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n", + ata_port_dbg(ap, "Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n", cfis[0], cfis[1], cfis[2], cfis[3]); /* @@ -977,7 +959,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, * other commands are active on the controller/device */ - DPRINTK("@Softreset, CQ = 0x%x, CA = 0x%x, CC = 0x%x\n", + ata_port_dbg(ap, "CQ = 0x%x, CA = 0x%x, CC = 0x%x\n", ioread32(CQ + hcr_base), ioread32(CA + hcr_base), ioread32(CC + hcr_base)); @@ -990,15 +972,16 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, if (temp & 0x1) { ata_port_warn(ap, "ATA_SRST issue failed\n"); - DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n", + ata_port_dbg(ap, "Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n", ioread32(CQ + hcr_base), ioread32(CA + hcr_base), ioread32(CC + hcr_base)); sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror); - DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); - DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); - DPRINTK("Serror = 0x%x\n", Serror); + ata_port_dbg(ap, "HStatus = 0x%x HControl = 0x%x Serror = 0x%x\n", + ioread32(hcr_base + HSTATUS), + ioread32(hcr_base + HCONTROL), + Serror); goto err; } @@ -1012,8 +995,9 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, * using ATA signature D2H register FIS to the host controller. */ - sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, - 0, 0, 5); + sata_fsl_setup_cmd_hdr_entry(ap, pp, 0, + CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, + 0, 0, 5); tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. 
register FIS */ ata_tf_to_fis(&tf, pmp, 0, cfis); @@ -1030,8 +1014,6 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, */ iowrite32(0x01, CC + hcr_base); /* We know it will be cmd#0 always */ - DPRINTK("SATA FSL : Now checking device signature\n"); - *class = ATA_DEV_NONE; /* Verify if SStatus indicates device presence */ @@ -1045,9 +1027,8 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, *class = sata_fsl_dev_classify(ap); - DPRINTK("class = %d\n", *class); - VPRINTK("ccreg = 0x%x\n", ioread32(hcr_base + CC)); - VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE)); + ata_port_dbg(ap, "ccreg = 0x%x\n", ioread32(hcr_base + CC)); + ata_port_dbg(ap, "cereg = 0x%x\n", ioread32(hcr_base + CE)); } return 0; @@ -1058,10 +1039,7 @@ err: static void sata_fsl_error_handler(struct ata_port *ap) { - - DPRINTK("in xx_error_handler\n"); sata_pmp_error_handler(ap); - } static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) @@ -1102,7 +1080,7 @@ static void sata_fsl_error_intr(struct ata_port *ap) if (unlikely(SError & 0xFFFF0000)) sata_fsl_scr_write(&ap->link, SCR_ERROR, SError); - DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", + ata_port_dbg(ap, "hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", hstatus, cereg, ioread32(hcr_base + DE), SError); /* handle fatal errors */ @@ -1119,7 +1097,7 @@ static void sata_fsl_error_intr(struct ata_port *ap) /* Handle PHYRDY change notification */ if (hstatus & INT_ON_PHYRDY_CHG) { - DPRINTK("SATA FSL: PHYRDY change indication\n"); + ata_port_dbg(ap, "PHYRDY change indication\n"); /* Setup a soft-reset EH action */ ata_ehi_hotplugged(ehi); @@ -1140,7 +1118,7 @@ static void sata_fsl_error_intr(struct ata_port *ap) */ abort = 1; - DPRINTK("single device error, CE=0x%x, DE=0x%x\n", + ata_port_dbg(ap, "single device error, CE=0x%x, DE=0x%x\n", ioread32(hcr_base + CE), ioread32(hcr_base + DE)); /* find out the offending link and qc */ @@ -1245,18 +1223,18 @@ static void sata_fsl_host_intr(struct ata_port *ap) } if (unlikely(SError & 0xFFFF0000)) { - DPRINTK("serror @host_intr : 0x%x\n", SError); + ata_port_dbg(ap, "serror @host_intr : 0x%x\n", SError); sata_fsl_error_intr(ap); } if (unlikely(hstatus & status_mask)) { - DPRINTK("error interrupt!!\n"); + ata_port_dbg(ap, "error interrupt!!\n"); sata_fsl_error_intr(ap); return; } - VPRINTK("Status of all queues :\n"); - VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n", + ata_port_dbg(ap, "Status of all queues :\n"); + ata_port_dbg(ap, "done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n", done_mask, ioread32(hcr_base + CA), ioread32(hcr_base + CE), @@ -1268,15 +1246,13 @@ static void sata_fsl_host_intr(struct ata_port *ap) /* clear CC bit, this will also complete the interrupt */ iowrite32(done_mask, hcr_base + CC); - DPRINTK("Status of all queues :\n"); - DPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n", + ata_port_dbg(ap, "Status of all queues: done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n", done_mask, ioread32(hcr_base + CA), ioread32(hcr_base + CE)); for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { if (done_mask & (1 << i)) - DPRINTK - ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", + ata_port_dbg(ap, "completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", i, ioread32(hcr_base + CC), ioread32(hcr_base + CA)); } @@ -1287,7 +1263,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) iowrite32(1, hcr_base + CC); qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL); - DPRINTK("completing non-ncq cmd, CC=0x%x\n", + ata_port_dbg(ap, "completing 
non-ncq cmd, CC=0x%x\n", ioread32(hcr_base + CC)); if (qc) { @@ -1295,7 +1271,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) } } else { /* Spurious Interrupt!! */ - DPRINTK("spurious interrupt!!, CC = 0x%x\n", + ata_port_dbg(ap, "spurious interrupt!!, CC = 0x%x\n", ioread32(hcr_base + CC)); iowrite32(done_mask, hcr_base + CC); return; @@ -1315,8 +1291,6 @@ static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance) interrupt_enables = ioread32(hcr_base + HSTATUS); interrupt_enables &= 0x3F; - DPRINTK("interrupt status 0x%x\n", interrupt_enables); - if (!interrupt_enables) return IRQ_NONE; @@ -1369,7 +1343,7 @@ static int sata_fsl_init_controller(struct ata_host *host) iowrite32((temp & ~0x3F), hcr_base + HCONTROL); /* Disable interrupt coalescing control(icc), for the moment */ - DPRINTK("icc = 0x%x\n", ioread32(hcr_base + ICC)); + dev_dbg(host->dev, "icc = 0x%x\n", ioread32(hcr_base + ICC)); iowrite32(0x01000000, hcr_base + ICC); /* clear error registers, SError is cleared by libATA */ @@ -1388,8 +1362,8 @@ static int sata_fsl_init_controller(struct ata_host *host) * callback, that should also initiate the OOB, COMINIT sequence */ - DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); - DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + dev_dbg(host->dev, "HStatus = 0x%x HControl = 0x%x\n", + ioread32(hcr_base + HSTATUS), ioread32(hcr_base + HCONTROL)); return 0; } @@ -1406,8 +1380,7 @@ static void sata_fsl_host_stop(struct ata_host *host) * scsi mid-layer and libata interface structures */ static struct scsi_host_template sata_fsl_sht = { - ATA_NCQ_SHT("sata_fsl"), - .can_queue = SATA_FSL_QUEUE_DEPTH, + ATA_NCQ_SHT_QD("sata_fsl", SATA_FSL_QUEUE_DEPTH), .sg_tablesize = SATA_FSL_MAX_PRD_USABLE, .dma_boundary = ATA_DMA_BOUNDARY, }; @@ -1478,9 +1451,8 @@ static int sata_fsl_probe(struct platform_device *ofdev) iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG); } - DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG)); - DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc)); - DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE); + dev_dbg(&ofdev->dev, "@reset i/o = 0x%x\n", + ioread32(csr_base + TRANSCFG)); host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL); if (!host_priv) diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c index f793564f3d78..440a63de20d0 100644 --- a/drivers/ata/sata_gemini.c +++ b/drivers/ata/sata_gemini.c @@ -253,12 +253,12 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg) ret = clk_prepare_enable(sg->sata0_pclk); if (ret) { - pr_err("failed to enable SATA0 PCLK\n"); + dev_err(dev, "failed to enable SATA0 PCLK\n"); return ret; } ret = clk_prepare_enable(sg->sata1_pclk); if (ret) { - pr_err("failed to enable SATA1 PCLK\n"); + dev_err(dev, "failed to enable SATA1 PCLK\n"); clk_disable_unprepare(sg->sata0_pclk); return ret; } diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index e517bd8822a5..781901151d82 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -488,8 +488,6 @@ static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) bool is_data = ata_is_data(qc->tf.protocol); unsigned int cdb_len = 0; - VPRINTK("ENTER\n"); - if (is_atapi) cdb_len = qc->dev->cdb_len; @@ -657,7 +655,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, } inic_tf_read(ap, &tf); - *class = ata_dev_classify(&tf); + *class = ata_port_classify(ap, &tf); } return 0; diff --git 
a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index cae4c1eab102..53446b997740 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -579,7 +579,7 @@ struct mv_hw_ops { void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio); void (*read_preamp)(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); - int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, + int (*reset_hc)(struct ata_host *host, void __iomem *mmio, unsigned int n_hc); void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); void (*reset_bus)(struct ata_host *host, void __iomem *mmio); @@ -606,7 +606,7 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); -static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, +static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio, unsigned int n_hc); static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); @@ -616,14 +616,14 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); -static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, +static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio, unsigned int n_hc); static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_soc_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); -static int mv_soc_reset_hc(struct mv_host_priv *hpriv, +static int mv_soc_reset_hc(struct ata_host *host, void __iomem *mmio, unsigned int n_hc); static void mv_soc_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); @@ -1248,81 +1248,74 @@ static int mv_stop_edma(struct ata_port *ap) return err; } -#ifdef ATA_DEBUG -static void mv_dump_mem(void __iomem *start, unsigned bytes) +static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes) { - int b, w; + int b, w, o; + unsigned char linebuf[38]; + for (b = 0; b < bytes; ) { - DPRINTK("%p: ", start + b); - for (w = 0; b < bytes && w < 4; w++) { - printk("%08x ", readl(start + b)); + for (w = 0, o = 0; b < bytes && w < 4; w++) { + o += snprintf(linebuf + o, sizeof(linebuf) - o, + "%08x ", readl(start + b)); b += sizeof(u32); } - printk("\n"); + dev_dbg(dev, "%s: %p: %s\n", + __func__, start + b, linebuf); } } -#endif -#if defined(ATA_DEBUG) || defined(CONFIG_PCI) + static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) { -#ifdef ATA_DEBUG - int b, w; - u32 dw; + int b, w, o; + u32 dw = 0; + unsigned char linebuf[38]; + for (b = 0; b < bytes; ) { - DPRINTK("%02x: ", b); - for (w = 0; b < bytes && w < 4; w++) { + for (w = 0, o = 0; b < bytes && w < 4; w++) { (void) pci_read_config_dword(pdev, b, &dw); - printk("%08x ", dw); + o += snprintf(linebuf + o, sizeof(linebuf) - o, + "%08x ", dw); b += sizeof(u32); } - printk("\n"); + dev_dbg(&pdev->dev, "%s: %02x: %s\n", + __func__, b, linebuf); } -#endif } -#endif -static void mv_dump_all_regs(void __iomem *mmio_base, int port, + +static void mv_dump_all_regs(void __iomem *mmio_base, struct pci_dev *pdev) { -#ifdef ATA_DEBUG - void __iomem *hc_base = 
mv_hc_base(mmio_base, - port >> MV_PORT_HC_SHIFT); + void __iomem *hc_base; void __iomem *port_base; int start_port, num_ports, p, start_hc, num_hcs, hc; - if (0 > port) { - start_hc = start_port = 0; - num_ports = 8; /* shld be benign for 4 port devs */ - num_hcs = 2; - } else { - start_hc = port >> MV_PORT_HC_SHIFT; - start_port = port; - num_ports = num_hcs = 1; - } - DPRINTK("All registers for port(s) %u-%u:\n", start_port, - num_ports > 1 ? num_ports - 1 : start_port); + start_hc = start_port = 0; + num_ports = 8; /* should be benign for 4 port devs */ + num_hcs = 2; + dev_dbg(&pdev->dev, + "%s: All registers for port(s) %u-%u:\n", __func__, + start_port, num_ports > 1 ? num_ports - 1 : start_port); - if (NULL != pdev) { - DPRINTK("PCI config space regs:\n"); - mv_dump_pci_cfg(pdev, 0x68); - } - DPRINTK("PCI regs:\n"); - mv_dump_mem(mmio_base+0xc00, 0x3c); - mv_dump_mem(mmio_base+0xd00, 0x34); - mv_dump_mem(mmio_base+0xf00, 0x4); - mv_dump_mem(mmio_base+0x1d00, 0x6c); + dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__); + mv_dump_pci_cfg(pdev, 0x68); + + dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__); + mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c); + mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34); + mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4); + mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c); for (hc = start_hc; hc < start_hc + num_hcs; hc++) { hc_base = mv_hc_base(mmio_base, hc); - DPRINTK("HC regs (HC %i):\n", hc); - mv_dump_mem(hc_base, 0x1c); + dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc); + mv_dump_mem(&pdev->dev, hc_base, 0x1c); } for (p = start_port; p < start_port + num_ports; p++) { port_base = mv_port_base(mmio_base, p); - DPRINTK("EDMA regs (port %i):\n", p); - mv_dump_mem(port_base, 0x54); - DPRINTK("SATA regs (port %i):\n", p); - mv_dump_mem(port_base+0x300, 0x60); + dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p); + mv_dump_mem(&pdev->dev, port_base, 0x54); + dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p); + mv_dump_mem(&pdev->dev, port_base+0x300, 0x60); } -#endif } static unsigned int mv_scr_offset(unsigned int sc_reg_in) @@ -2962,8 +2955,8 @@ static int mv_pci_error(struct ata_host *host, void __iomem *mmio) dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); - DPRINTK("All regs @ PCI error\n"); - mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); + dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__); + mv_dump_all_regs(mmio, to_pci_dev(host->dev)); writelfl(0, mmio + hpriv->irq_cause_offset); @@ -3201,9 +3194,10 @@ static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, } #undef ZERO -static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, +static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio, unsigned int n_hc) { + struct mv_host_priv *hpriv = host->private_data; unsigned int hc, port; for (hc = 0; hc < n_hc; hc++) { @@ -3262,7 +3256,7 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) * LOCKING: * Inherited from caller. 
*/ -static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, +static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio, unsigned int n_hc) { void __iomem *reg = mmio + PCI_MAIN_CMD_STS; @@ -3282,7 +3276,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, break; } if (!(PCI_MASTER_EMPTY & t)) { - printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); + dev_err(host->dev, "PCI master won't flush\n"); rc = 1; goto done; } @@ -3296,7 +3290,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, } while (!(GLOB_SFT_RST & t) && (i-- > 0)); if (!(GLOB_SFT_RST & t)) { - printk(KERN_ERR DRV_NAME ": can't set global reset\n"); + dev_err(host->dev, "can't set global reset\n"); rc = 1; goto done; } @@ -3310,7 +3304,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, } while ((GLOB_SFT_RST & t) && (i-- > 0)); if (GLOB_SFT_RST & t) { - printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); + dev_err(host->dev, "can't clear global reset\n"); rc = 1; } done: @@ -3479,9 +3473,10 @@ static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, #undef ZERO -static int mv_soc_reset_hc(struct mv_host_priv *hpriv, +static int mv_soc_reset_hc(struct ata_host *host, void __iomem *mmio, unsigned int n_hc) { + struct mv_host_priv *hpriv = host->private_data; unsigned int port; for (port = 0; port < hpriv->n_ports; port++) @@ -3723,11 +3718,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) /* unmask all non-transient EDMA error interrupts */ writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); - - VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", - readl(port_mmio + EDMA_CFG), - readl(port_mmio + EDMA_ERR_IRQ_CAUSE), - readl(port_mmio + EDMA_ERR_IRQ_MASK)); } static unsigned int mv_in_pcix_mode(struct ata_host *host) @@ -3859,11 +3849,11 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) * * Warn the user, lest they think we're just buggy. */ - printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" + dev_warn(&pdev->dev, "Highpoint RocketRAID" " BIOS CORRUPTS DATA on all attached drives," " regardless of if/how they are configured." 
" BEWARE!\n"); - printk(KERN_WARNING DRV_NAME ": For data safety, do not" + dev_warn(&pdev->dev, "For data safety, do not" " use sectors 8-9 on \"Legacy\" drives," " and avoid the final two gigabytes on" " all RocketRAID BIOS initialized drives.\n"); @@ -3954,7 +3944,7 @@ static int mv_init_host(struct ata_host *host) if (hpriv->ops->read_preamp) hpriv->ops->read_preamp(hpriv, port, mmio); - rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); + rc = hpriv->ops->reset_hc(host, mmio, n_hc); if (rc) goto done; @@ -3972,7 +3962,7 @@ static int mv_init_host(struct ata_host *host) for (hc = 0; hc < n_hc; hc++) { void __iomem *hc_mmio = mv_hc_base(mmio, hc); - VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " + dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause " "(before clear)=0x%08x\n", hc, readl(hc_mmio + HC_CFG), readl(hc_mmio + HC_IRQ_CAUSE)); @@ -4270,7 +4260,7 @@ static int mv_platform_resume(struct platform_device *pdev) /* initialize adapter */ ret = mv_init_host(host); if (ret) { - printk(KERN_ERR DRV_NAME ": Error during HW init\n"); + dev_err(&pdev->dev, "Error during HW init\n"); return ret; } ata_host_resume(host); diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 16272c111208..7f14d0d31057 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -31,6 +31,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <linux/libata.h> +#include <trace/events/libata.h> #define DRV_NAME "sata_nv" #define DRV_VERSION "3.5" @@ -808,7 +809,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) struct nv_adma_port_priv *pp = ap->private_data; u8 flags = pp->cpb[cpb_num].resp_flags; - VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags); + ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags); if (unlikely((force_err || flags & (NV_CPB_RESP_ATA_ERR | @@ -1100,8 +1101,6 @@ static int nv_adma_port_start(struct ata_port *ap) struct pci_dev *pdev = to_pci_dev(dev); u16 tmp; - VPRINTK("ENTER\n"); - /* * Ensure DMA mask is set to 32-bit before allocating legacy PRD and * pad buffers. @@ -1190,7 +1189,6 @@ static void nv_adma_port_stop(struct ata_port *ap) struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; - VPRINTK("ENTER\n"); writew(0, mmio + NV_ADMA_CTL); } @@ -1252,8 +1250,6 @@ static void nv_adma_setup_port(struct ata_port *ap) void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; struct ata_ioports *ioport = &ap->ioaddr; - VPRINTK("ENTER\n"); - mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; ioport->cmd_addr = mmio; @@ -1277,8 +1273,6 @@ static int nv_adma_host_init(struct ata_host *host) unsigned int i; u32 tmp32; - VPRINTK("ENTER\n"); - /* enable ADMA on the ports */ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN | @@ -1320,8 +1314,6 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) struct scatterlist *sg; unsigned int si; - VPRINTK("ENTER\n"); - for_each_sg(qc->sg, sg, qc->n_elem, si) { aprd = (si < 5) ? 
&cpb->aprd[si] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; @@ -1378,8 +1370,6 @@ static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) if (qc->tf.protocol == ATA_PROT_NCQ) ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA; - VPRINTK("qc->flags = 0x%lx\n", qc->flags); - nv_adma_tf_to_cpb(&qc->tf, cpb->tf); if (qc->flags & ATA_QCFLAG_DMAMAP) { @@ -1404,8 +1394,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) void __iomem *mmio = pp->ctl_block; int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); - VPRINTK("ENTER\n"); - /* We can't handle result taskfile with NCQ commands, since retrieving the taskfile switches us out of ADMA mode and would abort existing commands. */ @@ -1417,7 +1405,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) if (nv_adma_use_reg_mode(qc)) { /* use ATA register mode */ - VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); @@ -1438,8 +1425,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) writew(qc->hw_tag, mmio + NV_ADMA_APPEND); - DPRINTK("Issued tag %u\n", qc->hw_tag); - return 0; } @@ -1871,12 +1856,12 @@ static void nv_swncq_host_init(struct ata_host *host) /* enable swncq */ tmp = readl(mmio + NV_CTL_MCP55); - VPRINTK("HOST_CTL:0x%X\n", tmp); + dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp); writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55); /* enable irq intr */ tmp = readl(mmio + NV_INT_ENABLE_MCP55); - VPRINTK("HOST_ENABLE:0x%X\n", tmp); + dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp); writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55); /* clear port irq */ @@ -2017,19 +2002,17 @@ static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, if (qc == NULL) return 0; - DPRINTK("Enter\n"); - writel((1 << qc->hw_tag), pp->sactive_block); pp->last_issue_tag = qc->hw_tag; pp->dhfis_bits &= ~(1 << qc->hw_tag); pp->dmafis_bits &= ~(1 << qc->hw_tag); pp->qc_active |= (0x1 << qc->hw_tag); + trace_ata_tf_load(ap, &qc->tf); ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + trace_ata_exec_command(ap, &qc->tf, qc->hw_tag); ap->ops->sff_exec_command(ap, &qc->tf); - DPRINTK("Issued tag %u\n", qc->hw_tag); - return 0; } @@ -2041,8 +2024,6 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.protocol != ATA_PROT_NCQ) return ata_bmdma_qc_issue(qc); - DPRINTK("Enter\n"); - if (!pp->qc_active) nv_swncq_issue_atacmd(ap, qc); else @@ -2087,6 +2068,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap) u8 lack_dhfis = 0; host_stat = ap->ops->bmdma_status(ap); + trace_ata_bmdma_status(ap, host_stat); if (unlikely(host_stat & ATA_DMA_ERR)) { /* error when transferring data to/from memory */ ata_ehi_clear_desc(ehi); @@ -2109,7 +2091,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap) ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); if (!ap->qc_active) { - DPRINTK("over\n"); + ata_port_dbg(ap, "over\n"); nv_swncq_pp_reinit(ap); return 0; } @@ -2124,12 +2106,12 @@ static int nv_swncq_sdbfis(struct ata_port *ap) */ lack_dhfis = 1; - DPRINTK("id 0x%x QC: qc_active 0x%llx," - "SWNCQ:qc_active 0x%X defer_bits %X " - "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n", - ap->print_id, ap->qc_active, pp->qc_active, - pp->defer_queue.defer_bits, pp->dhfis_bits, - pp->dmafis_bits, pp->last_issue_tag); + ata_port_dbg(ap, "QC: qc_active 0x%llx, " + "SWNCQ:qc_active 0x%X defer_bits %X " + "dhfis 0x%X dmafis 0x%X last_issue_tag
%x\n", + ap->qc_active, pp->qc_active, + pp->defer_queue.defer_bits, pp->dhfis_bits, + pp->dmafis_bits, pp->last_issue_tag); nv_swncq_fis_reinit(ap); @@ -2169,7 +2151,7 @@ static void nv_swncq_dmafis(struct ata_port *ap) __ata_bmdma_stop(ap); tag = nv_swncq_tag(ap); - DPRINTK("dma setup tag 0x%x\n", tag); + ata_port_dbg(ap, "dma setup tag 0x%x\n", tag); qc = ata_qc_from_tag(ap, tag); if (unlikely(!qc)) @@ -2237,9 +2219,9 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) if (fis & NV_SWNCQ_IRQ_SDBFIS) { pp->ncq_flags |= ncq_saw_sdb; - DPRINTK("id 0x%x SWNCQ: qc_active 0x%X " + ata_port_dbg(ap, "SWNCQ: qc_active 0x%X " "dhfis 0x%X dmafis 0x%X sactive 0x%X\n", - ap->print_id, pp->qc_active, pp->dhfis_bits, + pp->qc_active, pp->dhfis_bits, pp->dmafis_bits, readl(pp->sactive_block)); if (nv_swncq_sdbfis(ap) < 0) goto irq_error; @@ -2265,7 +2247,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) goto irq_exit; if (pp->defer_queue.defer_bits) { - DPRINTK("send next command\n"); + ata_port_dbg(ap, "send next command\n"); qc = nv_swncq_qc_from_dq(ap); nv_swncq_issue_atacmd(ap, qc); } diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 7815da8ef9e5..b8465fef2ed2 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -596,7 +596,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) prd[idx].addr = cpu_to_le32(addr); prd[idx].flags_len = cpu_to_le32(len & 0xffff); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); + ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n", + idx, addr, len); idx++; sg_len -= len; @@ -609,17 +610,16 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) if (len > SG_COUNT_ASIC_BUG) { u32 addr; - VPRINTK("Splitting last PRD.\n"); - addr = le32_to_cpu(prd[idx - 1].addr); prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG); + ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n", + idx - 1, addr, SG_COUNT_ASIC_BUG); addr = addr + len - SG_COUNT_ASIC_BUG; len = SG_COUNT_ASIC_BUG; prd[idx].addr = cpu_to_le32(addr); prd[idx].flags_len = cpu_to_le32(len); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); + ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); idx++; } @@ -632,8 +632,6 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) struct pdc_port_priv *pp = qc->ap->private_data; unsigned int i; - VPRINTK("ENTER\n"); - switch (qc->tf.protocol) { case ATA_PROT_DMA: pdc_fill_sg(qc); @@ -922,12 +920,8 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance) u32 hotplug_status; int is_sataii_tx4; - VPRINTK("ENTER\n"); - - if (!host || !host->iomap[PDC_MMIO_BAR]) { - VPRINTK("QUICK EXIT\n"); + if (!host || !host->iomap[PDC_MMIO_BAR]) return IRQ_NONE; - } host_mmio = host->iomap[PDC_MMIO_BAR]; @@ -946,23 +940,18 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance) /* reading should also clear interrupts */ mask = readl(host_mmio + PDC_INT_SEQMASK); - if (mask == 0xffffffff && hotplug_status == 0) { - VPRINTK("QUICK EXIT 2\n"); + if (mask == 0xffffffff && hotplug_status == 0) goto done_irq; - } mask &= 0xffff; /* only 16 SEQIDs possible */ - if (mask == 0 && hotplug_status == 0) { - VPRINTK("QUICK EXIT 3\n"); + if (mask == 0 && hotplug_status == 0) goto done_irq; - } writel(mask, host_mmio + PDC_INT_SEQMASK); is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags); for (i = 0; i < host->n_ports; i++) { - VPRINTK("port %u\n", i); ap = host->ports[i]; /* check for a plug or 
unplug event */ @@ -989,8 +978,6 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance) } } - VPRINTK("EXIT\n"); - done_irq: spin_unlock(&host->lock); return IRQ_RETVAL(handled); @@ -1005,8 +992,6 @@ static void pdc_packet_start(struct ata_queued_cmd *qc) unsigned int port_no = ap->port_no; u8 seq = (u8) (port_no + 1); - VPRINTK("ENTER, ap %p\n", ap); - writel(0x00000001, host_mmio + (seq * 4)); readl(host_mmio + (seq * 4)); /* flush */ diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index ef00ab644afb..8ca0810aad26 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -252,9 +252,6 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) len = sg_dma_len(sg); *(__le32 *)prd = cpu_to_le32(len); prd += sizeof(u64); - - VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si, - (unsigned long long)addr, len); } return si; @@ -268,8 +265,6 @@ static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) u64 addr; unsigned int nelem; - VPRINTK("ENTER\n"); - qs_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) return AC_ERR_OK; @@ -304,8 +299,6 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); - VPRINTK("ENTER, ap %p\n", ap); - writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0); wmb(); /* flush PRDs and pkt to memory */ writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF); @@ -374,8 +367,8 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host) struct qs_port_priv *pp = ap->private_data; struct ata_queued_cmd *qc; - DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", - sff1, sff0, port_no, sHST, sDST); + dev_dbg(host->dev, "SFF=%08x%08x: sHST=%d sDST=%02x\n", + sff1, sff0, sHST, sDST); handled = 1; if (!pp || pp->state != qs_state_pkt) continue; @@ -435,14 +428,10 @@ static irqreturn_t qs_intr(int irq, void *dev_instance) unsigned int handled = 0; unsigned long flags; - VPRINTK("ENTER\n"); - spin_lock_irqsave(&host->lock, flags); handled = qs_intr_pkt(host) | qs_intr_mmio(host); spin_unlock_irqrestore(&host->lock, flags); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(handled); } diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 44b0ed8f6bb8..3d96b6faa3f0 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -323,8 +323,6 @@ static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; - DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); - /* software reset. 
causes dev0 to be selected */ iowrite32(ap->ctl, ioaddr->ctl_addr); udelay(20); @@ -350,7 +348,6 @@ static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes, devmask |= 1 << 0; /* issue bus reset */ - DPRINTK("about to softreset, devmask=%x\n", devmask); rc = sata_rcar_bus_softreset(ap, deadline); /* if link is occupied, -ENODEV too is an error */ if (rc && (rc != -ENODEV || sata_scr_valid(link))) { @@ -361,7 +358,6 @@ static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes, /* determine by signature whether we have ATA or ATAPI devices */ classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err); - DPRINTK("classes[0]=%u\n", classes[0]); return 0; } @@ -383,12 +379,6 @@ static void sata_rcar_tf_load(struct ata_port *ap, iowrite32(tf->hob_lbal, ioaddr->lbal_addr); iowrite32(tf->hob_lbam, ioaddr->lbam_addr); iowrite32(tf->hob_lbah, ioaddr->lbah_addr); - VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", - tf->hob_feature, - tf->hob_nsect, - tf->hob_lbal, - tf->hob_lbam, - tf->hob_lbah); } if (is_addr) { @@ -397,18 +387,10 @@ static void sata_rcar_tf_load(struct ata_port *ap, iowrite32(tf->lbal, ioaddr->lbal_addr); iowrite32(tf->lbam, ioaddr->lbam_addr); iowrite32(tf->lbah, ioaddr->lbah_addr); - VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", - tf->feature, - tf->nsect, - tf->lbal, - tf->lbam, - tf->lbah); } - if (tf->flags & ATA_TFLAG_DEVICE) { + if (tf->flags & ATA_TFLAG_DEVICE) iowrite32(tf->device, ioaddr->device_addr); - VPRINTK("device 0x%X\n", tf->device); - } ata_wait_idle(ap); } @@ -440,8 +422,6 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf) static void sata_rcar_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) { - DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); - iowrite32(tf->command, ap->ioaddr.command_addr); ata_sff_pause(ap); } @@ -499,7 +479,6 @@ static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc) count < 65536; count += 2) ioread32(ap->ioaddr.data_addr); - /* Can become DEBUG later */ if (count) ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); } @@ -543,7 +522,6 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) prd[si].addr = cpu_to_le32(addr); prd[si].flags_len = cpu_to_le32(sg_len); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len); } /* end-of-table flag */ @@ -685,7 +663,7 @@ static void sata_rcar_serr_interrupt(struct ata_port *ap) if (!serror) return; - DPRINTK("SError @host_intr: 0x%x\n", serror); + ata_port_dbg(ap, "SError @host_intr: 0x%x\n", serror); /* first, analyze and record host port events */ ata_ehi_clear_desc(ehi); diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 75321f1ceba5..3b989a52879d 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -307,7 +307,6 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) prd->addr = cpu_to_le32(addr); prd->flags_len = cpu_to_le32(sg_len); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len); last_prd = prd; prd++; diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index f99ec6f7d7c0..2fef6ce93f07 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -656,8 +656,6 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class, const char *reason; int rc; - DPRINTK("ENTER\n"); - /* put the port into known state */ if (sil24_init_port(ap)) { reason = "port not ready"; @@ -680,9 +678,8 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class, } sil24_read_tf(ap, 0, &tf); - 
*class = ata_dev_classify(&tf); + *class = ata_port_classify(ap, &tf); - DPRINTK("EXIT, class=%u\n", *class); return 0; err: diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 4c01190a5e37..6ceec59cb291 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -78,6 +78,9 @@ #define DRV_NAME "sata_sx4" #define DRV_VERSION "0.12" +static int dimm_test; +module_param(dimm_test, int, 0644); +MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)"); enum { PDC_MMIO_BAR = 3, @@ -211,10 +214,8 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, u32 subaddr, u32 *pdata); static int pdc20621_prog_dimm0(struct ata_host *host); static unsigned int pdc20621_prog_dimm_global(struct ata_host *host); -#ifdef ATA_VERBOSE_DEBUG static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); -#endif static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); static void pdc20621_irq_clear(struct ata_port *ap); @@ -308,15 +309,9 @@ static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno, /* output ATA packet S/G table */ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + (PDC_DIMM_DATA_STEP * portno); - VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr); + buf32[dw] = cpu_to_le32(addr); buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); - - VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n", - PDC_20621_DIMM_BASE + - (PDC_DIMM_WINDOW_STEP * portno) + - PDC_DIMM_APKT_PRD, - buf32[dw], buf32[dw + 1]); } static inline void pdc20621_host_sg(u8 *buf, unsigned int portno, @@ -332,12 +327,6 @@ static inline void pdc20621_host_sg(u8 *buf, unsigned int portno, buf32[dw] = cpu_to_le32(addr); buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); - - VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n", - PDC_20621_DIMM_BASE + - (PDC_DIMM_WINDOW_STEP * portno) + - PDC_DIMM_HPKT_PRD, - buf32[dw], buf32[dw + 1]); } static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf, @@ -351,7 +340,6 @@ static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf, unsigned int dimm_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_APKT_PRD; - VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); i = PDC_DIMM_ATA_PKT; @@ -406,8 +394,6 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, unsigned int dimm_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HPKT_PRD; - VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); - VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg); dw = PDC_DIMM_HOST_PKT >> 2; @@ -424,14 +410,6 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, buf32[dw + 1] = cpu_to_le32(host_sg); buf32[dw + 2] = cpu_to_le32(dimm_sg); buf32[dw + 3] = 0; - - VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n", - PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + - PDC_DIMM_HOST_PKT, - buf32[dw + 0], - buf32[dw + 1], - buf32[dw + 2], - buf32[dw + 3]); } static void pdc20621_dma_prep(struct ata_queued_cmd *qc) @@ -447,8 +425,6 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); - VPRINTK("ata%u: ENTER\n", ap->print_id); - /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; @@ -492,7 +468,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) readl(dimm_mmio); /* MMIO PCI posting flush */ - VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len); + ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n", + i, sgt_len); } 
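All of the libata hunks in this run follow the pattern sketched below. VPRINTK()/DPRINTK() produced output only when ATA_VERBOSE_DEBUG/ATA_DEBUG were defined at build time; ata_port_dbg() and dev_dbg() sit on pr_debug()/dev_dbg(), so each call site can be switched on at runtime through dynamic debug, and the port or device name is prefixed automatically, which is why the "ata%u"/ap->print_id arguments vanish from the converted format strings. A minimal before/after sketch (the function and message are invented for illustration):

static void example_qc_issue(struct ata_port *ap, unsigned int tag)
{
	/* old: compiled out unless ATA_DEBUG was defined at build time */
	/* DPRINTK("ata%u: issued tag %u\n", ap->print_id, tag); */

	/* new: runtime-switchable via dynamic debug; "ataN:" prefix is added */
	ata_port_dbg(ap, "issued tag %u\n", tag);
}

Pure ENTER/EXIT chatter in hot paths is deleted outright rather than converted, and the sata_nv NCQ issue path gains trace_ata_tf_load()/trace_ata_exec_command() tracepoints in its place.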
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) @@ -504,8 +481,6 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) unsigned int portno = ap->port_no; unsigned int i; - VPRINTK("ata%u: ENTER\n", ap->print_id); - /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; @@ -527,7 +502,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) readl(dimm_mmio); /* MMIO PCI posting flush */ - VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); + ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i); } static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) @@ -601,7 +576,6 @@ static void pdc20621_pop_hdma(struct ata_queued_cmd *qc) pp->hdma_cons++; } -#ifdef ATA_VERBOSE_DEBUG static void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; @@ -611,14 +585,10 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc) dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP); dimm_mmio += PDC_DIMM_HOST_PKT; - printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio)); - printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4)); - printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8)); - printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12)); + ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n", + readl(dimm_mmio), readl(dimm_mmio + 4), + readl(dimm_mmio + 8), readl(dimm_mmio + 12)); } -#else -static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { } -#endif /* ATA_VERBOSE_DEBUG */ static void pdc20621_packet_start(struct ata_queued_cmd *qc) { @@ -633,8 +603,6 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; - VPRINTK("ata%u: ENTER\n", ap->print_id); - wmb(); /* flush PRD, pkt writes */ port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); @@ -645,7 +613,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) pdc20621_dump_hdma(qc); pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT); - VPRINTK("queued ofs 0x%x (%u), seq %u\n", + ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n", port_ofs + PDC_DIMM_HOST_PKT, port_ofs + PDC_DIMM_HOST_PKT, seq); @@ -656,7 +624,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) writel(port_ofs + PDC_DIMM_ATA_PKT, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); - VPRINTK("submitted ofs 0x%x (%u), seq %u\n", + ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n", port_ofs + PDC_DIMM_ATA_PKT, port_ofs + PDC_DIMM_ATA_PKT, seq); @@ -696,14 +664,12 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap, u8 status; unsigned int handled = 0; - VPRINTK("ENTER\n"); - if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */ (!(qc->tf.flags & ATA_TFLAG_WRITE))) { /* step two - DMA from DIMM to host */ if (doing_hdma) { - VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id, + ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n", readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); @@ -714,7 +680,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap, /* step one - exec ATA command */ else { u8 seq = (u8) (port_no + 1 + 4); - VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id, + ata_port_dbg(ap, "read ata, 0x%x 0x%x\n", readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit hdma pkt */ @@ -729,7 +695,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap, /* step one - DMA from host to DIMM */ if (doing_hdma) { u8 seq 
= (u8) (port_no + 1); - VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id, + ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n", readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit ata pkt */ @@ -742,7 +708,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap, /* step two - execute ATA command */ else { - VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id, + ata_port_dbg(ap, "write ata, 0x%x 0x%x\n", readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); @@ -755,7 +721,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap, } else if (qc->tf.protocol == ATA_PROT_NODATA) { status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); - DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); + ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status); qc->err_mask |= ac_err_mask(status); ata_qc_complete(qc); handled = 1; @@ -781,29 +747,21 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) unsigned int handled = 0; void __iomem *mmio_base; - VPRINTK("ENTER\n"); - - if (!host || !host->iomap[PDC_MMIO_BAR]) { - VPRINTK("QUICK EXIT\n"); + if (!host || !host->iomap[PDC_MMIO_BAR]) return IRQ_NONE; - } mmio_base = host->iomap[PDC_MMIO_BAR]; /* reading should also clear interrupts */ mmio_base += PDC_CHIP0_OFS; mask = readl(mmio_base + PDC_20621_SEQMASK); - VPRINTK("mask == 0x%x\n", mask); - if (mask == 0xffffffff) { - VPRINTK("QUICK EXIT 2\n"); + if (mask == 0xffffffff) return IRQ_NONE; - } + mask &= 0xffff; /* only 16 tags possible */ - if (!mask) { - VPRINTK("QUICK EXIT 3\n"); + if (!mask) return IRQ_NONE; - } spin_lock(&host->lock); @@ -816,7 +774,8 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) else ap = host->ports[port_no]; tmp = mask & (1 << i); - VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); + if (ap) + ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp); if (tmp && ap) { struct ata_queued_cmd *qc; @@ -829,10 +788,6 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) spin_unlock(&host->lock); - VPRINTK("mask == 0x%x\n", mask); - - VPRINTK("EXIT\n"); - return IRQ_RETVAL(handled); } @@ -979,7 +934,6 @@ static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base) } -#ifdef ATA_VERBOSE_DEBUG static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size) { @@ -1029,7 +983,6 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, memcpy_fromio(psource, dimm_mmio, size / 4); } } -#endif static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, @@ -1226,15 +1179,16 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) /* Turn on for ECC */ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0)) { - pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n", - PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE); + dev_err(host->dev, + "Failed in i2c read: device=%#x, subaddr=%#x\n", + PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE); return 1; } if (spd0 == 0x02) { data |= (0x01 << 16); writel(data, mmio + PDC_SDRAM_CONTROL); readl(mmio + PDC_SDRAM_CONTROL); - printk(KERN_ERR "Local DIMM ECC Enabled\n"); + dev_err(host->dev, "Local DIMM ECC Enabled\n"); } /* DIMM Initialization Select/Enable (bit 18/19) */ @@ -1274,7 +1228,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) /* Initialize Time Period Register */ writel(0xffffffff, mmio + PDC_TIME_PERIOD); time_period = 
readl(mmio + PDC_TIME_PERIOD); - VPRINTK("Time Period Register (0x40): 0x%x\n", time_period); + dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period); /* Enable timer */ writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL); @@ -1289,7 +1243,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) */ tcount = readl(mmio + PDC_TIME_COUNTER); - VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount); + dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount); /* If SX4 is on PCI-X bus, after 3 seconds, the timer counter @@ -1297,17 +1251,19 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) */ if (tcount >= PCI_X_TCOUNT) { ticks = (time_period - tcount); - VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); + dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks); clock = (ticks / 300000); - VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock); + dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n", + clock, clock); clock = (clock * 33); - VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock); + dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n", + clock, clock); /* PLL F Param (bit 22:16) */ fparam = (1400000 / clock) - 2; - VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam); + dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam); /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */ pci_status = (0x8a001824 | (fparam << 16)); @@ -1315,7 +1271,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) pci_status = PCI_PLL_INIT; /* Initialize PLL. */ - VPRINTK("pci_status: 0x%x\n", pci_status); + dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status); writel(pci_status, mmio + PDC_CTL_STATUS); readl(mmio + PDC_CTL_STATUS); @@ -1324,23 +1280,23 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) and program the DIMM Module Controller. 
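The DIMM read/write self-test a little further on, previously compiled in only under ATA_VERBOSE_DEBUG, is now gated by the dimm_test module parameter declared at the top of this file, so it can be requested on a production build (for example, modprobe sata_sx4 dimm_test=1) without a special debug build.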
*/ if (!(speed = pdc20621_detect_dimm(host))) { - printk(KERN_ERR "Detect Local DIMM Fail\n"); + dev_err(host->dev, "Detect Local DIMM Fail\n"); return 1; /* DIMM error */ } - VPRINTK("Local DIMM Speed = %d\n", speed); + dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed); /* Programming DIMM0 Module Control Register (index_CID0:80h) */ size = pdc20621_prog_dimm0(host); - VPRINTK("Local DIMM Size = %dMB\n", size); + dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size); /* Programming DIMM Module Global Control Register (index_CID0:88h) */ if (pdc20621_prog_dimm_global(host)) { - printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); + dev_err(host->dev, + "Programming DIMM Module Global Control Register Fail\n"); return 1; } -#ifdef ATA_VERBOSE_DEBUG - { + if (dimm_test) { u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ', 'N','o','t',' ','Y','e','t',' ', @@ -1354,31 +1310,33 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40); pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); - printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], + dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); pdc20621_get_from_dimm(host, test_parttern2, 0x10040, 40); - printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], - test_parttern2[1], &(test_parttern2[2])); + dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n", + test_parttern2[0], + test_parttern2[1], &(test_parttern2[2])); pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40); pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); - printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], - test_parttern2[1], &(test_parttern2[2])); + dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n", + test_parttern2[0], + test_parttern2[1], &(test_parttern2[2])); } -#endif /* ECC initiliazation. 
*/ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0)) { - pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n", + dev_err(host->dev, + "Failed in i2c read: device=%#x, subaddr=%#x\n", PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE); return 1; } if (spd0 == 0x02) { void *buf; - VPRINTK("Start ECC initialization\n"); + dev_dbg(host->dev, "Start ECC initialization\n"); addr = 0; length = size * 1024 * 1024; buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL); @@ -1390,7 +1348,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) addr += ECC_ERASE_BUF_SZ; } kfree(buf); - VPRINTK("Finish ECC initialization\n"); + dev_dbg(host->dev, "Finish ECC initialization\n"); } return 0; } diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index bc8e8d9f176b..3e726ee91fdc 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -178,7 +178,6 @@ static void ia_hack_tcq(IADEV *dev) { static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) { u_short desc_num, i; - struct sk_buff *skb; struct ia_vcc *iavcc_r = NULL; unsigned long delta; static unsigned long timer = 0; @@ -202,8 +201,7 @@ static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) { else dev->ffL.tcq_rd -= 2; *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1; - if (!(skb = dev->desc_tbl[i].txskb) || - !(iavcc_r = dev->desc_tbl[i].iavcc)) + if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc)) printk("Fatal err, desc table vcc or skb is NULL\n"); else iavcc_r->vc_desc_cnt--; diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c index bc1876915457..eaa31e567d1e 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -14,7 +14,6 @@ #include <linux/of.h> #include <asm/sections.h> -#include <asm/pgalloc.h> struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); @@ -155,66 +154,6 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) return node_distance(early_cpu_to_node(from), early_cpu_to_node(to)); } -static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, - size_t align) -{ - int nid = early_cpu_to_node(cpu); - - return memblock_alloc_try_nid(size, align, - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); -} - -static void __init pcpu_fc_free(void *ptr, size_t size) -{ - memblock_free(ptr, size); -} - -#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK -static void __init pcpu_populate_pte(unsigned long addr) -{ - pgd_t *pgd = pgd_offset_k(addr); - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - - p4d = p4d_offset(pgd, addr); - if (p4d_none(*p4d)) { - pud_t *new; - - new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!new) - goto err_alloc; - p4d_populate(&init_mm, p4d, new); - } - - pud = pud_offset(p4d, addr); - if (pud_none(*pud)) { - pmd_t *new; - - new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!new) - goto err_alloc; - pud_populate(&init_mm, pud, new); - } - - pmd = pmd_offset(pud, addr); - if (!pmd_present(*pmd)) { - pte_t *new; - - new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!new) - goto err_alloc; - pmd_populate_kernel(&init_mm, pmd, new); - } - - return; - -err_alloc: - panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); -} -#endif - void __init setup_per_cpu_areas(void) { unsigned long delta; @@ -229,7 +168,7 @@ void __init setup_per_cpu_areas(void) rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, pcpu_cpu_distance, - pcpu_fc_alloc, pcpu_fc_free); + early_cpu_to_node); #ifdef 
CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK if (rc < 0) pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n", @@ -239,10 +178,7 @@ void __init setup_per_cpu_areas(void) #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK if (rc < 0) - rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, - pcpu_fc_alloc, - pcpu_fc_free, - pcpu_populate_pte); + rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node); #endif if (rc < 0) panic("Failed to initialize percpu areas (err=%d).", rc); diff --git a/drivers/base/core.c b/drivers/base/core.c index 8b17cb80420c..7bb957b11861 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2872,10 +2872,6 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->devres_head); device_pm_init(dev); set_dev_node(dev, NUMA_NO_NODE); -#ifdef CONFIG_GENERIC_MSI_IRQ - raw_spin_lock_init(&dev->msi_lock); - INIT_LIST_HEAD(&dev->msi_list); -#endif INIT_LIST_HEAD(&dev->links.consumers); INIT_LIST_HEAD(&dev->links.suppliers); INIT_LIST_HEAD(&dev->links.defer_sync); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 1e2c2d3882e2..f41063ac1aee 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -65,8 +65,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla const char *dev_name, void *data) { struct super_block *s = mnt->mnt_sb; + int err; + atomic_inc(&s->s_active); down_write(&s->s_umount); + err = reconfigure_single(s, flags, data); + if (err < 0) { + deactivate_locked_super(s); + return ERR_PTR(err); + } return dget(s->s_root); } diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile index eb4be452062a..6c067dedc01e 100644 --- a/drivers/base/firmware_loader/builtin/Makefile +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -3,10 +3,10 @@ obj-y += main.o # Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a # leading /, it's relative to $(srctree). 
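# The two assignments below drop their $(subst $(quote),,...) unwrapping,
# presumably because Kconfig string values now land in auto.conf without
# surrounding quotes, letting $(CONFIG_EXTRA_FIRMWARE_DIR) and
# $(CONFIG_EXTRA_FIRMWARE) be consumed as-is.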
-fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR)) +fwdir := $(CONFIG_EXTRA_FIRMWARE_DIR) fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) -firmware := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE))) +firmware := $(addsuffix .gen.o, $(CONFIG_EXTRA_FIRMWARE)) obj-y += $(firmware) FWNAME = $(patsubst $(obj)/%.gen.S,%,$@) diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 3d6c8f9caf43..296ea673d661 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -23,7 +23,6 @@ struct platform_msi_priv_data { struct device *dev; void *host_data; - const struct attribute_group **msi_irq_groups; msi_alloc_info_t arg; irq_write_msi_msg_t write_msg; int devid; @@ -39,11 +38,9 @@ static DEFINE_IDA(platform_msi_devid_ida); */ static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) { - u32 devid; + u32 devid = desc->dev->msi.data->platform_data->devid; - devid = desc->platform.msi_priv_data->devid; - - return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index; + return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index; } static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) @@ -86,11 +83,8 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info) static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) { struct msi_desc *desc = irq_data_get_msi_desc(data); - struct platform_msi_priv_data *priv_data; - - priv_data = desc->platform.msi_priv_data; - priv_data->write_msg(desc, msg); + desc->dev->msi.data->platform_data->write_msg(desc, msg); } static void platform_msi_update_chip_ops(struct msi_domain_info *info) @@ -113,62 +107,6 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info) info->flags &= ~MSI_FLAG_LEVEL_CAPABLE; } -static void platform_msi_free_descs(struct device *dev, int base, int nvec) -{ - struct msi_desc *desc, *tmp; - - list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { - if (desc->platform.msi_index >= base && - desc->platform.msi_index < (base + nvec)) { - list_del(&desc->list); - free_msi_entry(desc); - } - } -} - -static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq, - int nvec, - struct platform_msi_priv_data *data) - -{ - struct msi_desc *desc; - int i, base = 0; - - if (!list_empty(dev_to_msi_list(dev))) { - desc = list_last_entry(dev_to_msi_list(dev), - struct msi_desc, list); - base = desc->platform.msi_index + 1; - } - - for (i = 0; i < nvec; i++) { - desc = alloc_msi_entry(dev, 1, NULL); - if (!desc) - break; - - desc->platform.msi_priv_data = data; - desc->platform.msi_index = base + i; - desc->irq = virq ? 
virq + i : 0; - - list_add_tail(&desc->list, dev_to_msi_list(dev)); - } - - if (i != nvec) { - /* Clean up the mess */ - platform_msi_free_descs(dev, base, nvec); - - return -ENOMEM; - } - - return 0; -} - -static int platform_msi_alloc_descs(struct device *dev, int nvec, - struct platform_msi_priv_data *data) - -{ - return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data); -} - /** * platform_msi_create_irq_domain - Create a platform MSI interrupt domain * @fwnode: Optional fwnode of the interrupt controller @@ -191,6 +129,8 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, platform_msi_update_dom_ops(info); if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) platform_msi_update_chip_ops(info); + info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | + MSI_FLAG_FREE_MSI_DESCS; domain = msi_create_irq_domain(fwnode, info, parent); if (domain) @@ -199,49 +139,57 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, return domain; } -static struct platform_msi_priv_data * -platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec, - irq_write_msi_msg_t write_msi_msg) +static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) { struct platform_msi_priv_data *datap; + int err; + /* * Limit the number of interrupts to 2048 per device. Should we * need to bump this up, DEV_ID_SHIFT should be adjusted * accordingly (which would impact the max number of MSI * capable devices). */ - if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS) - return ERR_PTR(-EINVAL); + if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS) + return -EINVAL; - if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { + if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { dev_err(dev, "Incompatible msi_domain, giving up\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } - /* Already had a helping of MSI? Greed... */ - if (!list_empty(dev_to_msi_list(dev))) - return ERR_PTR(-EBUSY); + err = msi_setup_device_data(dev); + if (err) + return err; + + /* Already initialized? 
*/ + if (dev->msi.data->platform_data) + return -EBUSY; datap = kzalloc(sizeof(*datap), GFP_KERNEL); if (!datap) - return ERR_PTR(-ENOMEM); + return -ENOMEM; datap->devid = ida_simple_get(&platform_msi_devid_ida, 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); if (datap->devid < 0) { - int err = datap->devid; + err = datap->devid; kfree(datap); - return ERR_PTR(err); + return err; } datap->write_msg = write_msi_msg; datap->dev = dev; - - return datap; + dev->msi.data->platform_data = datap; + return 0; } -static void platform_msi_free_priv_data(struct platform_msi_priv_data *data) +static void platform_msi_free_priv_data(struct device *dev) { + struct platform_msi_priv_data *data = dev->msi.data->platform_data; + + dev->msi.data->platform_data = NULL; ida_simple_remove(&platform_msi_devid_ida, data->devid); kfree(data); } @@ -258,35 +206,15 @@ static void platform_msi_free_priv_data(struct platform_msi_priv_data *data) int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, irq_write_msi_msg_t write_msi_msg) { - struct platform_msi_priv_data *priv_data; int err; - priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); - if (IS_ERR(priv_data)) - return PTR_ERR(priv_data); - - err = platform_msi_alloc_descs(dev, nvec, priv_data); + err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); if (err) - goto out_free_priv_data; + return err; - err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec); + err = msi_domain_alloc_irqs(dev->msi.domain, dev, nvec); if (err) - goto out_free_desc; - - priv_data->msi_irq_groups = msi_populate_sysfs(dev); - if (IS_ERR(priv_data->msi_irq_groups)) { - err = PTR_ERR(priv_data->msi_irq_groups); - goto out_free_irqs; - } - - return 0; - -out_free_irqs: - msi_domain_free_irqs(dev->msi_domain, dev); -out_free_desc: - platform_msi_free_descs(dev, 0, nvec); -out_free_priv_data: - platform_msi_free_priv_data(priv_data); + platform_msi_free_priv_data(dev); return err; } @@ -298,16 +226,8 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs); */ void platform_msi_domain_free_irqs(struct device *dev) { - if (!list_empty(dev_to_msi_list(dev))) { - struct msi_desc *desc; - - desc = first_msi_entry(dev); - msi_destroy_sysfs(dev, desc->platform.msi_priv_data->msi_irq_groups); - platform_msi_free_priv_data(desc->platform.msi_priv_data); - } - - msi_domain_free_irqs(dev->msi_domain, dev); - platform_msi_free_descs(dev, 0, MAX_DEV_MSIS); + msi_domain_free_irqs(dev->msi.domain, dev); + platform_msi_free_priv_data(dev); } EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); @@ -316,17 +236,20 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); * a platform-msi domain * @domain: The platform-msi domain * - * Returns the private data provided when calling - * platform_msi_create_device_domain. + * Return: The private data provided when calling + * platform_msi_create_device_domain(). 
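* Illustrative use, with a hypothetical driver structure: a driver that
* stacked a device domain on its platform-msi parent recovers the pointer
* from inside its irq_domain ops, e.g.
* struct my_chip *chip = platform_msi_get_host_data(domain);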
*/ void *platform_msi_get_host_data(struct irq_domain *domain) { struct platform_msi_priv_data *data = domain->host_data; + return data->host_data; } +static struct lock_class_key platform_device_msi_lock_class; + /** - * __platform_msi_create_device_domain - Create a platform-msi domain + * __platform_msi_create_device_domain - Create a platform-msi device domain * * @dev: The device generating the MSIs * @nvec: The number of MSIs that need to be allocated @@ -335,7 +258,11 @@ void *platform_msi_get_host_data(struct irq_domain *domain) * @ops: The hierarchy domain operations to use * @host_data: Private data associated to this domain * - * Returns an irqdomain for @nvec interrupts + * Return: An irqdomain for @nvec interrupts on success, NULL in case of error. + * + * This is for interrupt domains which stack on a platform-msi domain + * created by platform_msi_create_irq_domain(). @dev->msi.domain points to + * that platform-msi domain which is the parent for the new domain. */ struct irq_domain * __platform_msi_create_device_domain(struct device *dev, @@ -349,12 +276,20 @@ __platform_msi_create_device_domain(struct device *dev, struct irq_domain *domain; int err; - data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); - if (IS_ERR(data)) + err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); + if (err) return NULL; + /* + * Use a separate lock class for the MSI descriptor mutex on + * platform MSI device domains because the descriptor mutex nests + * into the domain mutex. See alloc/free below. + */ + lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class); + + data = dev->msi.data->platform_data; data->host_data = host_data; - domain = irq_domain_create_hierarchy(dev->msi_domain, 0, + domain = irq_domain_create_hierarchy(dev->msi.domain, 0, is_tree ? 
0 : nvec, dev->fwnode, ops, data); if (!domain) @@ -370,61 +305,46 @@ __platform_msi_create_device_domain(struct device *dev, free_domain: irq_domain_remove(domain); free_priv: - platform_msi_free_priv_data(data); + platform_msi_free_priv_data(dev); return NULL; } /** - * platform_msi_domain_free - Free interrupts associated with a platform-msi - * domain + * platform_msi_device_domain_free - Free interrupts associated with a platform-msi + * device domain * - * @domain: The platform-msi domain + * @domain: The platform-msi device domain * @virq: The base irq from which to perform the free operation - * @nvec: How many interrupts to free from @virq + * @nr_irqs: How many interrupts to free from @virq */ -void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, - unsigned int nvec) +void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) { struct platform_msi_priv_data *data = domain->host_data; - struct msi_desc *desc, *tmp; - for_each_msi_entry_safe(desc, tmp, data->dev) { - if (WARN_ON(!desc->irq || desc->nvec_used != 1)) - return; - if (!(desc->irq >= virq && desc->irq < (virq + nvec))) - continue; - - irq_domain_free_irqs_common(domain, desc->irq, 1); - list_del(&desc->list); - free_msi_entry(desc); - } + + msi_lock_descs(data->dev); + irq_domain_free_irqs_common(domain, virq, nr_irqs); + msi_free_msi_descs_range(data->dev, MSI_DESC_ALL, virq, virq + nr_irqs - 1); + msi_unlock_descs(data->dev); } /** - * platform_msi_domain_alloc - Allocate interrupts associated with - * a platform-msi domain + * platform_msi_device_domain_alloc - Allocate interrupts associated with + * a platform-msi device domain * - * @domain: The platform-msi domain + * @domain: The platform-msi device domain * @virq: The base irq from which to perform the allocate operation - * @nr_irqs: How many interrupts to free from @virq + * @nr_irqs: How many interrupts to allocate from @virq * * Return 0 on success, or an error code on failure. Must be called * with irq_domain_mutex held (which can only be done as part of a * top-level interrupt allocation). 
*/ -int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs) +int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) { struct platform_msi_priv_data *data = domain->host_data; - int err; - - err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data); - if (err) - return err; - - err = msi_domain_populate_irqs(domain->parent, data->dev, - virq, nr_irqs, &data->arg); - if (err) - platform_msi_domain_free(domain, virq, nr_irqs); + struct device *dev = data->dev; - return err; + return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg); } diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 94665037f4a3..72b7a92337b1 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -120,7 +120,11 @@ static unsigned int read_magic_time(void) struct rtc_time time; unsigned int val; - mc146818_get_time(&time); + if (mc146818_get_time(&time) < 0) { + pr_err("Unable to read current time from RTC\n"); + return 0; + } + pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time); val = time.tm_year; /* 100 years */ if (val > 100) diff --git a/drivers/base/property.c b/drivers/base/property.c index a74c21af97c1..e6497f6877ee 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -920,6 +920,22 @@ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index) EXPORT_SYMBOL(fwnode_irq_get); /** + * fwnode_iomap - Maps the memory mapped IO for a given fwnode + * @fwnode: Pointer to the firmware node + * @index: Index of the IO range + * + * Returns a pointer to the mapped memory. + */ +void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index) +{ + if (IS_ENABLED(CONFIG_OF_ADDRESS) && is_of_node(fwnode)) + return of_iomap(to_of_node(fwnode), index); + + return NULL; +} +EXPORT_SYMBOL(fwnode_iomap); + +/** * fwnode_graph_get_next_endpoint - Get next endpoint firmware node * @fwnode: Pointer to the parent firmware node * @prev: Previous endpoint node or %NULL to get the first diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 588889bea7c3..6af111f568e4 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -122,7 +122,7 @@ newtag(struct aoedev *d) register ulong n; n = jiffies & 0xffff; - return n |= (++d->lasttag & 0x7fff) << 16; + return n | (++d->lasttag & 0x7fff) << 16; } static u32 diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 8fe2e4289dae..6e3f2f0d2352 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -362,7 +362,6 @@ __setup("ramdisk_size=", ramdisk_size); * (should share code eventually). 
*/ static LIST_HEAD(brd_devices); -static DEFINE_MUTEX(brd_devices_mutex); static struct dentry *brd_debugfs_dir; static int brd_alloc(int i) @@ -372,21 +371,14 @@ static int brd_alloc(int i) char buf[DISK_NAME_LEN]; int err = -ENOMEM; - mutex_lock(&brd_devices_mutex); - list_for_each_entry(brd, &brd_devices, brd_list) { - if (brd->brd_number == i) { - mutex_unlock(&brd_devices_mutex); + list_for_each_entry(brd, &brd_devices, brd_list) + if (brd->brd_number == i) return -EEXIST; - } - } brd = kzalloc(sizeof(*brd), GFP_KERNEL); - if (!brd) { - mutex_unlock(&brd_devices_mutex); + if (!brd) return -ENOMEM; - } brd->brd_number = i; list_add_tail(&brd->brd_list, &brd_devices); - mutex_unlock(&brd_devices_mutex); spin_lock_init(&brd->brd_lock); INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); @@ -429,9 +421,7 @@ static int brd_alloc(int i) out_cleanup_disk: blk_cleanup_disk(disk); out_free_dev: - mutex_lock(&brd_devices_mutex); list_del(&brd->brd_list); - mutex_unlock(&brd_devices_mutex); kfree(brd); return err; } @@ -441,15 +431,19 @@ static void brd_probe(dev_t dev) brd_alloc(MINOR(dev) / max_part); } -static void brd_del_one(struct brd_device *brd) +static void brd_cleanup(void) { - del_gendisk(brd->brd_disk); - blk_cleanup_disk(brd->brd_disk); - brd_free_pages(brd); - mutex_lock(&brd_devices_mutex); - list_del(&brd->brd_list); - mutex_unlock(&brd_devices_mutex); - kfree(brd); + struct brd_device *brd, *next; + + debugfs_remove_recursive(brd_debugfs_dir); + + list_for_each_entry_safe(brd, next, &brd_devices, brd_list) { + del_gendisk(brd->brd_disk); + blk_cleanup_disk(brd->brd_disk); + brd_free_pages(brd); + list_del(&brd->brd_list); + kfree(brd); + } } static inline void brd_check_and_reset_par(void) @@ -473,9 +467,18 @@ static inline void brd_check_and_reset_par(void) static int __init brd_init(void) { - struct brd_device *brd, *next; int err, i; + brd_check_and_reset_par(); + + brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); + + for (i = 0; i < rd_nr; i++) { + err = brd_alloc(i); + if (err) + goto out_free; + } + /* * brd module now has a feature to instantiate underlying device * structure on-demand, provided that there is an access dev node. @@ -491,28 +494,16 @@ static int __init brd_init(void) * dynamically. 
*/ - if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) - return -EIO; - - brd_check_and_reset_par(); - - brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); - - for (i = 0; i < rd_nr; i++) { - err = brd_alloc(i); - if (err) - goto out_free; + if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) { + err = -EIO; + goto out_free; } pr_info("brd: module loaded\n"); return 0; out_free: - unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); - debugfs_remove_recursive(brd_debugfs_dir); - - list_for_each_entry_safe(brd, next, &brd_devices, brd_list) - brd_del_one(brd); + brd_cleanup(); pr_info("brd: module NOT loaded !!!\n"); return err; @@ -520,13 +511,9 @@ out_free: static void __exit brd_exit(void) { - struct brd_device *brd, *next; unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); - debugfs_remove_recursive(brd_debugfs_dir); - - list_for_each_entry_safe(brd, next, &brd_devices, brd_list) - brd_del_one(brd); + brd_cleanup(); pr_info("brd: module unloaded\n"); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index b1b05c45c07c..01cbbfc4e9e2 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -820,7 +820,7 @@ static inline int queue_on_root_worker(struct cgroup_subsys_state *css) static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd) { - struct rb_node **node = &(lo->worker_tree.rb_node), *parent = NULL; + struct rb_node **node, *parent = NULL; struct loop_worker *cur_worker, *worker = NULL; struct work_struct *work; struct list_head *cmd_list; diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c index f5f63ca2889d..d880a9465e9b 100644 --- a/drivers/block/paride/bpck.c +++ b/drivers/block/paride/bpck.c @@ -28,6 +28,7 @@ #undef r2 #undef w2 +#undef PC #define PC pi->private #define r2() (PC=(in_p(2) & 0xff)) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8f140da1efe3..4203cdab8abf 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -6189,7 +6189,7 @@ static inline size_t next_token(const char **buf) * These are the characters that produce nonzero for * isspace() in the "C" and "POSIX" locales. 
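* The switch just below from "const char *spaces" to "static const char
* spaces[]" is a storage tweak only: the lookup string lives once in
* .rodata and no local pointer has to be initialized on every call;
* next_token() behaves exactly as before.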
*/ - const char *spaces = " \f\n\r\t\v"; + static const char spaces[] = " \f\n\r\t\v"; *buf += strspn(*buf, spaces); /* Find start of token */ @@ -6495,7 +6495,8 @@ static int rbd_add_parse_args(const char *buf, pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT; pctx.opts->trim = RBD_TRIM_DEFAULT; - ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL); + ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL, + ','); if (ret) goto out_err; diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 67a8edbaa1fd..8f8443ee6fe4 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -433,7 +433,7 @@ static void msg_conf(void *priv, int errno) schedule_work(&iu->work); } -static int send_usr_msg(struct rtrs_clt *rtrs, int dir, +static int send_usr_msg(struct rtrs_clt_sess *rtrs, int dir, struct rnbd_iu *iu, struct kvec *vec, size_t len, struct scatterlist *sg, unsigned int sg_len, void (*conf)(struct work_struct *work), @@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev, struct request *rq, struct rnbd_iu *iu) { - struct rtrs_clt *rtrs = dev->sess->rtrs; + struct rtrs_clt_sess *rtrs = dev->sess->rtrs; struct rtrs_permit *permit = iu->permit; struct rnbd_msg_io msg; struct rtrs_clt_req_ops req_ops; diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h index 9ef8c4f306f2..0c2cae7f39b9 100644 --- a/drivers/block/rnbd/rnbd-clt.h +++ b/drivers/block/rnbd/rnbd-clt.h @@ -75,7 +75,7 @@ struct rnbd_cpu_qlist { struct rnbd_clt_session { struct list_head list; - struct rtrs_clt *rtrs; + struct rtrs_clt_sess *rtrs; wait_queue_head_t rtrs_waitq; bool rtrs_ready; struct rnbd_cpu_qlist __percpu diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index aafecfe97055..1ee808fc600c 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -263,15 +263,15 @@ out: kfree(srv_sess); } -static int create_sess(struct rtrs_srv *rtrs) +static int create_sess(struct rtrs_srv_sess *rtrs) { struct rnbd_srv_session *srv_sess; - char sessname[NAME_MAX]; + char pathname[NAME_MAX]; int err; - err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname)); + err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname)); if (err) { - pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err); + pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err); return err; } @@ -284,8 +284,8 @@ static int create_sess(struct rtrs_srv *rtrs) offsetof(struct rnbd_dev_blk_io, bio), BIOSET_NEED_BVECS); if (err) { - pr_err("Allocating srv_session for session %s failed\n", - sessname); + pr_err("Allocating srv_session for path %s failed\n", + pathname); kfree(srv_sess); return err; } @@ -298,14 +298,14 @@ static int create_sess(struct rtrs_srv *rtrs) mutex_unlock(&sess_lock); srv_sess->rtrs = rtrs; - strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname)); + strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname)); rtrs_srv_set_sess_priv(rtrs, srv_sess); return 0; } -static int rnbd_srv_link_ev(struct rtrs_srv *rtrs, +static int rnbd_srv_link_ev(struct rtrs_srv_sess *rtrs, enum rtrs_srv_link_ev ev, void *priv) { struct rnbd_srv_session *srv_sess = priv; diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h index 98ddc31eb408..e5604bce123a 100644 --- a/drivers/block/rnbd/rnbd-srv.h +++ b/drivers/block/rnbd/rnbd-srv.h @@ -20,7 +20,7 @@ struct rnbd_srv_session { /* Entry inside global sess_list */ struct list_head list; - struct rtrs_srv 
*rtrs; + struct rtrs_srv_sess *rtrs; char sessname[NAME_MAX]; int queue_depth; struct bio_set sess_bio_set; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c3dc3cd7a779..c443cd64fc9b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -976,7 +976,7 @@ static void virtblk_remove(struct virtio_device *vdev) mutex_lock(&vblk->vdev_mutex); /* Stop all the virtqueues. */ - vdev->config->reset(vdev); + virtio_reset_device(vdev); /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */ vblk->vdev = NULL; @@ -995,7 +995,7 @@ static int virtblk_freeze(struct virtio_device *vdev) struct virtio_blk *vblk = vdev->priv; /* Ensure we don't receive any more interrupts */ - vdev->config->reset(vdev); + virtio_reset_device(vdev); /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f6da5293b913..cb253d80d72b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1903,14 +1903,7 @@ static struct attribute *zram_disk_attrs[] = { NULL, }; -static const struct attribute_group zram_disk_attr_group = { - .attrs = zram_disk_attrs, -}; - -static const struct attribute_group *zram_disk_attr_groups[] = { - &zram_disk_attr_group, - NULL, -}; +ATTRIBUTE_GROUPS(zram_disk); /* * Allocate and initialize new zram device. the function returns @@ -1983,7 +1976,7 @@ static int zram_add(void) blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX); blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue); - ret = device_add_disk(NULL, zram->disk, zram_disk_attr_groups); + ret = device_add_disk(NULL, zram->disk, zram_disk_groups); if (ret) goto out_cleanup_disk; diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c index 076e4942a3f0..67c21263f9e0 100644 --- a/drivers/bluetooth/virtio_bt.c +++ b/drivers/bluetooth/virtio_bt.c @@ -367,7 +367,7 @@ static void virtbt_remove(struct virtio_device *vdev) struct hci_dev *hdev = vbt->hdev; hci_unregister_dev(hdev); - vdev->config->reset(vdev); + virtio_reset_device(vdev); hci_free_dev(hdev); vbt->hdev = NULL; diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c index 315e830b6ecd..5e70f9775a0e 100644 --- a/drivers/bus/fsl-mc/dprc-driver.c +++ b/drivers/bus/fsl-mc/dprc-driver.c @@ -400,7 +400,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); struct fsl_mc_io *mc_io = mc_dev->mc_io; - struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc; + int irq = mc_dev->irqs[0]->virq; dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", irq_num, smp_processor_id()); @@ -409,7 +409,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) return IRQ_HANDLED; mutex_lock(&mc_bus->scan_mutex); - if (!msi_desc || msi_desc->irq != (u32)irq_num) + if (irq != (u32)irq_num) goto out; status = 0; @@ -521,7 +521,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev) * function that programs the MSI physically in the device */ error = devm_request_threaded_irq(&mc_dev->dev, - irq->msi_desc->irq, + irq->virq, dprc_irq0_handler, dprc_irq0_handler_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, @@ -771,7 +771,7 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev) (void)disable_dprc_irq(mc_dev); - devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev); + devm_free_irq(&mc_dev->dev, 
irq->virq, &mc_dev->dev); fsl_mc_free_irqs(mc_dev); } diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c index 6c513556911e..dced427ca8ba 100644 --- a/drivers/bus/fsl-mc/fsl-mc-allocator.c +++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c @@ -350,7 +350,6 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev, unsigned int irq_count) { unsigned int i; - struct msi_desc *msi_desc; struct fsl_mc_device_irq *irq_resources; struct fsl_mc_device_irq *mc_dev_irq; int error; @@ -388,16 +387,12 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev, mc_dev_irq->resource.type = res_pool->type; mc_dev_irq->resource.data = mc_dev_irq; mc_dev_irq->resource.parent_pool = res_pool; + mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i); + mc_dev_irq->resource.id = mc_dev_irq->virq; INIT_LIST_HEAD(&mc_dev_irq->resource.node); list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list); } - for_each_msi_entry(msi_desc, &mc_bus_dev->dev) { - mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index]; - mc_dev_irq->msi_desc = msi_desc; - mc_dev_irq->resource.id = msi_desc->irq; - } - res_pool->max_count = irq_count; res_pool->free_count = irq_count; mc_bus->irq_resources = irq_resources; diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c index cf974870ba55..5e0e4393ce4d 100644 --- a/drivers/bus/fsl-mc/fsl-mc-msi.c +++ b/drivers/bus/fsl-mc/fsl-mc-msi.c @@ -29,7 +29,7 @@ static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev, * Make the base hwirq value for ICID*10000 so it is readable * as a decimal value in /proc/interrupts. */ - return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000)); + return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000)); } static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg, @@ -58,11 +58,11 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info) } static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev, - struct fsl_mc_device_irq *mc_dev_irq) + struct fsl_mc_device_irq *mc_dev_irq, + struct msi_desc *msi_desc) { int error; struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev; - struct msi_desc *msi_desc = mc_dev_irq->msi_desc; struct dprc_irq_cfg irq_cfg; /* @@ -122,14 +122,14 @@ static void fsl_mc_msi_write_msg(struct irq_data *irq_data, struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev); struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); struct fsl_mc_device_irq *mc_dev_irq = - &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index]; + &mc_bus->irq_resources[msi_desc->msi_index]; msi_desc->msg = *msg; /* * Program the MSI (paddr, value) pair in the device: */ - __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq); + __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc); } static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info) @@ -170,6 +170,7 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, fsl_mc_msi_update_dom_ops(info); if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) fsl_mc_msi_update_chip_ops(info); + info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS; domain = msi_create_irq_domain(fwnode, info, parent); if (domain) @@ -210,61 +211,21 @@ struct irq_domain *fsl_mc_find_msi_domain(struct device *dev) return msi_domain; } -static void fsl_mc_msi_free_descs(struct device *dev) -{ - struct msi_desc *desc, *tmp; - - list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { - list_del(&desc->list); - free_msi_entry(desc); - } -} - -static int 
fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count) - -{ - unsigned int i; - int error; - struct msi_desc *msi_desc; - - for (i = 0; i < irq_count; i++) { - msi_desc = alloc_msi_entry(dev, 1, NULL); - if (!msi_desc) { - dev_err(dev, "Failed to allocate msi entry\n"); - error = -ENOMEM; - goto cleanup_msi_descs; - } - - msi_desc->fsl_mc.msi_index = i; - INIT_LIST_HEAD(&msi_desc->list); - list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); - } - - return 0; - -cleanup_msi_descs: - fsl_mc_msi_free_descs(dev); - return error; -} - -int fsl_mc_msi_domain_alloc_irqs(struct device *dev, - unsigned int irq_count) +int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count) { struct irq_domain *msi_domain; int error; - if (!list_empty(dev_to_msi_list(dev))) + msi_domain = dev_get_msi_domain(dev); + if (!msi_domain) return -EINVAL; - error = fsl_mc_msi_alloc_descs(dev, irq_count); - if (error < 0) + error = msi_setup_device_data(dev); + if (error) return error; - msi_domain = dev_get_msi_domain(dev); - if (!msi_domain) { - error = -EINVAL; - goto cleanup_msi_descs; - } + if (msi_first_desc(dev, MSI_DESC_ALL)) + return -EINVAL; /* * NOTE: Calling this function will trigger the invocation of the @@ -272,15 +233,8 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev, */ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count); - if (error) { + if (error) dev_err(dev, "Failed to allocate IRQs\n"); - goto cleanup_msi_descs; - } - - return 0; - -cleanup_msi_descs: - fsl_mc_msi_free_descs(dev); return error; } @@ -293,9 +247,4 @@ void fsl_mc_msi_domain_free_irqs(struct device *dev) return; msi_domain_free_irqs(msi_domain, dev); - - if (list_empty(dev_to_msi_list(dev))) - return; - - fsl_mc_msi_free_descs(dev); } diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 0a972620a403..74295d3cc662 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -417,7 +417,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) } /* wait for ready on pass through or any other execution environment */ - if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL) + if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee)) goto fw_load_ready_state; fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 5aaca6d0f52b..046f407dc5d6 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -79,7 +79,8 @@ static const char * const mhi_pm_state_str[] = { const char *to_mhi_pm_state_str(enum mhi_pm_state state) { - int index = find_last_bit((unsigned long *)&state, 32); + unsigned long pm_state = state; + int index = find_last_bit(&pm_state, 32); if (index >= ARRAY_SIZE(mhi_pm_state_str)) return "Invalid State"; @@ -788,6 +789,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, mhi_chan->offload_ch = ch_cfg->offload_channel; mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; mhi_chan->pre_alloc = ch_cfg->auto_queue; + mhi_chan->wake_capable = ch_cfg->wake_capable; /* * If MHI host allocates buffers, then the channel direction diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 3a732afaf73e..e2e10474a9d9 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -390,7 +390,8 @@ extern const char * const mhi_ee_str[MHI_EE_MAX]; #define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ ee == MHI_EE_EDL) - +#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS) +#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL) #define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ ee == MHI_EE_FP) @@ -681,8 +682,12 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); + +/* Automatically allocate and queue inbound buffers */ +#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); + struct mhi_chan *mhi_chan, unsigned int flags); + int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index b15c5bc37dd4..ffde617f93a3 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -1065,7 +1065,7 @@ void mhi_ctrl_ev_task(unsigned long data) return; } - /* Process ctrl events events */ + /* Process ctrl events */ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); /* @@ -1430,7 +1430,7 @@ exit_unprepare_channel: } int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan) + struct mhi_chan *mhi_chan, unsigned int flags) { int ret = 0; struct device *dev = &mhi_chan->mhi_dev->dev; @@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, if (ret) goto error_pm_state; + if (mhi_chan->dir == DMA_FROM_DEVICE) + mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); + /* Pre-allocate buffer for xfer ring */ if (mhi_chan->pre_alloc) { int nr_el = get_nr_avail_ring_elements(mhi_cntrl, @@ -1464,6 +1467,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, while (nr_el--) { void *buf; struct mhi_buf_info info = { }; + buf = kmalloc(len, GFP_KERNEL); if (!buf) { ret = -ENOMEM; @@ -1609,8 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) read_unlock_bh(&mhi_cntrl->pm_lock); } -/* Move channel to start state */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) { int ret, dir; struct 
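One subtle fix in init.c above deserves a comment: find_last_bit() operates on arrays of unsigned long, so casting the address of a 32-bit enum value to (unsigned long *) makes it read a full 64-bit word on LP64 targets, scanning whatever happens to sit next to the value on the stack. The patched helper, restated with comments (same code as the hunk):

const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
	unsigned long pm_state = state;		/* widen into a machine word */
	int index = find_last_bit(&pm_state, 32);	/* highest set bit only */

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}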
mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; @@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) if (!mhi_chan) continue; - ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); if (ret) goto error_open_chan; } @@ -1639,8 +1642,19 @@ error_open_chan: return ret; } + +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + return __mhi_prepare_for_transfer(mhi_dev, 0); +} EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); +int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev) +{ + return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue); + void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) { struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 547e6e769546..4aae0baea008 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -42,7 +42,7 @@ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT * LD_ERR_FATAL_DETECT -> DISABLE */ -static struct mhi_pm_transitions const dev_state_transitions[] = { +static const struct mhi_pm_transitions dev_state_transitions[] = { /* L0 States */ { MHI_PM_DISABLE, @@ -1053,7 +1053,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) enum mhi_ee_type current_ee; enum dev_st_transition next_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 val; + u32 interval_us = 25000; /* poll register field every 25 milliseconds */ int ret; dev_info(dev, "Requested to power ON\n"); @@ -1070,10 +1070,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) mutex_lock(&mhi_cntrl->pm_mutex); mhi_cntrl->pm_state = MHI_PM_DISABLE; - ret = mhi_init_irq_setup(mhi_cntrl); - if (ret) - goto error_setup_irq; - /* Setup BHI INTVEC */ write_lock_irq(&mhi_cntrl->pm_lock); mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); @@ -1083,11 +1079,11 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) write_unlock_irq(&mhi_cntrl->pm_lock); /* Confirm that the device is in valid exec env */ - if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { + if (!MHI_POWER_UP_CAPABLE(current_ee)) { dev_err(dev, "%s is not a valid EE for power on\n", TO_MHI_EXEC_STR(current_ee)); ret = -EIO; - goto error_async_power_up; + goto error_exit; } state = mhi_get_mhi_state(mhi_cntrl); @@ -1096,20 +1092,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) if (state == MHI_STATE_SYS_ERR) { mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); - ret = wait_event_timeout(mhi_cntrl->state_event, - MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || - mhi_read_reg_field(mhi_cntrl, - mhi_cntrl->regs, - MHICTRL, - MHICTRL_RESET_MASK, - MHICTRL_RESET_SHIFT, - &val) || - !val, - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - if (!ret) { - ret = -EIO; + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, + interval_us); + if (ret) { dev_info(dev, "Failed to reset MHI due to syserr state\n"); - goto error_async_power_up; + goto error_exit; } /* @@ -1119,6 +1107,10 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); } + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) + goto error_exit; + /* Transition to next state */ next_state = MHI_IN_PBL(current_ee) ? 
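The new mhi_prepare_for_transfer_autoqueue() export above turns inbound buffer pre-allocation into an explicit opt-in: plain mhi_prepare_for_transfer() now passes flags == 0, so pre_alloc is cleared for every DMA_FROM_DEVICE channel unless the client asks otherwise. A hypothetical client probe using the new call (the function name below is illustrative, not from the patch):

#include <linux/mhi.h>

static int example_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	/*
	 * Have the core allocate and queue inbound buffers for us
	 * (MHI_CH_INBOUND_ALLOC_BUFS); clients that manage their own
	 * RX buffers keep calling mhi_prepare_for_transfer().
	 */
	return mhi_prepare_for_transfer_autoqueue(mhi_dev);
}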
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; @@ -1131,10 +1123,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) return 0; -error_async_power_up: - mhi_deinit_free_irq(mhi_cntrl); - -error_setup_irq: +error_exit: mhi_cntrl->pm_state = MHI_PM_DISABLE; mutex_unlock(&mhi_cntrl->pm_mutex); diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index 4c577a731709..3a258a677df8 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -403,7 +403,50 @@ static const struct mhi_pci_dev_info mhi_mv31_info = { .dma_data_width = 32, }; +static const struct mhi_channel_config mhi_sierra_em919x_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2), +}; + +static struct mhi_event_config modem_sierra_em919x_mhi_events[] = { + /* first ring is control+data and DIAG ring */ + MHI_EVENT_CONFIG_CTRL(0, 2048), + /* Hardware channels request dedicated hardware event rings */ + MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static const struct mhi_controller_config modem_sierra_em919x_config = { + .max_channels = 128, + .timeout_ms = 24000, + .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels), + .ch_cfg = mhi_sierra_em919x_channels, + .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events), + .event_cfg = modem_sierra_em919x_mhi_events, +}; + +static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { + .name = "sierra-em919x", + .config = &modem_sierra_em919x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + static const struct pci_device_id mhi_pci_id_table[] = { + /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), + .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), @@ -423,6 +466,9 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* T99W175 (sdx55), Based on Qualcomm new baseline */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, /* MV31-W (Cinterion) */ { PCI_DEVICE(0x1269, 0x00b3), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, @@ -529,18 +575,12 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num); - err = pci_set_dma_mask(pdev, dma_mask); + err = dma_set_mask_and_coherent(&pdev->dev, dma_mask); if (err) { dev_err(&pdev->dev, "Cannot set proper DMA mask\n"); return err; } - err = pci_set_consistent_dma_mask(pdev, dma_mask); - if (err) { - dev_err(&pdev->dev, "set consistent dma mask failed\n"); - return err; - } - pci_set_master(pdev); return 0; @@ -1018,7 +1058,7 @@ static int __maybe_unused 
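Two smaller points in pci_generic.c above: the Sierra EM919x entry uses PCI_DEVICE_SUB and must sit before the bare qcom VID:PID entry, since the id table is matched in order and the first hit wins; and the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair collapses into a single call. A sketch of the latter idiom:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* One call now sets the streaming and coherent DMA masks together and
 * fails if either width is unsupported by the platform. */
static int example_set_dma_mask(struct pci_dev *pdev, unsigned int width)
{
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(width));
}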
mhi_pci_freeze(struct device *dev) * context. */ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { - mhi_power_down(mhi_cntrl, false); + mhi_power_down(mhi_cntrl, true); mhi_unprepare_after_power_down(mhi_cntrl); } diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index ea0424922de7..db612045616f 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -914,6 +914,7 @@ int mvebu_mbus_add_window_remap_by_id(unsigned int target, return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute); } +EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_remap_by_id); int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute, phys_addr_t base, size_t size) @@ -921,6 +922,7 @@ int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute, return mvebu_mbus_add_window_remap_by_id(target, attribute, base, size, MVEBU_MBUS_NO_REMAP); } +EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_by_id); int mvebu_mbus_del_window(phys_addr_t base, size_t size) { @@ -933,6 +935,7 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size) mvebu_mbus_disable_window(&mbus_state, win); return 0; } +EXPORT_SYMBOL_GPL(mvebu_mbus_del_window); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res) { @@ -940,6 +943,7 @@ void mvebu_mbus_get_pcie_mem_aperture(struct resource *res) return; *res = mbus_state.pcie_mem_aperture; } +EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_mem_aperture); void mvebu_mbus_get_pcie_io_aperture(struct resource *res) { @@ -947,6 +951,7 @@ void mvebu_mbus_get_pcie_io_aperture(struct resource *res) return; *res = mbus_state.pcie_io_aperture; } +EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_io_aperture); int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr) { diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index b40edae32817..dc78a4fb879e 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -588,20 +588,11 @@ static void agp_amd64_remove(struct pci_dev *pdev) agp_bridges_found--; } -#ifdef CONFIG_PM +#define agp_amd64_suspend NULL -static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused agp_amd64_resume(struct device *dev) { - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int agp_amd64_resume(struct pci_dev *pdev) -{ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); + struct pci_dev *pdev = to_pci_dev(dev); if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) nforce3_agp_init(pdev); @@ -609,8 +600,6 @@ static int agp_amd64_resume(struct pci_dev *pdev) return amd_8151_configure(); } -#endif /* CONFIG_PM */ - static const struct pci_device_id agp_amd64_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), @@ -738,15 +727,14 @@ static const struct pci_device_id agp_amd64_pci_promisc_table[] = { { } }; +static SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, agp_amd64_suspend, agp_amd64_resume); + static struct pci_driver agp_amd64_pci_driver = { .name = "agpgart-amd64", .id_table = agp_amd64_pci_table, .probe = agp_amd64_probe, .remove = agp_amd64_remove, -#ifdef CONFIG_PM - .suspend = agp_amd64_suspend, - .resume = agp_amd64_resume, -#endif + .driver.pm = &agp_amd64_pm_ops, }; diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index 14909fc5d767..f8a02f4bef1b 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c @@ -217,26 +217,14 @@ static void agp_sis_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#ifdef CONFIG_PM +#define 
agp_sis_suspend NULL -static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused agp_sis_resume( + __attribute__((unused)) struct device *dev) { - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int agp_sis_resume(struct pci_dev *pdev) -{ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - return sis_driver.configure(); } -#endif /* CONFIG_PM */ - static const struct pci_device_id agp_sis_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), @@ -419,15 +407,14 @@ static const struct pci_device_id agp_sis_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_sis_pci_table); +static SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, agp_sis_suspend, agp_sis_resume); + static struct pci_driver agp_sis_pci_driver = { .name = "agpgart-sis", .id_table = agp_sis_pci_table, .probe = agp_sis_probe, .remove = agp_sis_remove, -#ifdef CONFIG_PM - .suspend = agp_sis_suspend, - .resume = agp_sis_resume, -#endif + .driver.pm = &agp_sis_pm_ops, }; static int __init agp_sis_init(void) diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index 87a92a044570..a460ae352772 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c @@ -492,22 +492,11 @@ static void agp_via_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#ifdef CONFIG_PM +#define agp_via_suspend NULL -static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused agp_via_resume(struct device *dev) { - pci_save_state (pdev); - pci_set_power_state (pdev, PCI_D3hot); - - return 0; -} - -static int agp_via_resume(struct pci_dev *pdev) -{ - struct agp_bridge_data *bridge = pci_get_drvdata(pdev); - - pci_set_power_state (pdev, PCI_D0); - pci_restore_state(pdev); + struct agp_bridge_data *bridge = dev_get_drvdata(dev); if (bridge->driver == &via_agp3_driver) return via_configure_agp3(); @@ -517,8 +506,6 @@ static int agp_via_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ - /* must be the same order as name table above */ static const struct pci_device_id agp_via_pci_table[] = { #define ID(x) \ @@ -567,16 +554,14 @@ static const struct pci_device_id agp_via_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_via_pci_table); +static SIMPLE_DEV_PM_OPS(agp_via_pm_ops, agp_via_suspend, agp_via_resume); static struct pci_driver agp_via_pci_driver = { .name = "agpgart-via", .id_table = agp_via_pci_table, .probe = agp_via_probe, .remove = agp_via_remove, -#ifdef CONFIG_PM - .suspend = agp_via_suspend, - .resume = agp_via_resume, -#endif + .driver.pm = &agp_via_pm_ops, }; diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index deb85a334c93..36203d3fa6ea 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -89,8 +89,8 @@ static struct applicom_board { spinlock_t mutex; } apbs[MAX_BOARD]; -static unsigned int irq = 0; /* interrupt number IRQ */ -static unsigned long mem = 0; /* physical segment of board */ +static unsigned int irq; /* interrupt number IRQ */ +static unsigned long mem; /* physical segment of board */ module_param_hw(irq, uint, irq, 0); MODULE_PARM_DESC(irq, "IRQ of the Applicom board"); diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 001b819f5298..9704963f9d50 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -226,19 +226,6 @@ config HW_RANDOM_VIRTIO To compile this driver as a module, choose M here: the module will be called virtio-rng. If unsure, say N. 
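The three agp conversions above (amd64, sis, via) share one recipe: the legacy PCI .suspend/.resume methods go away, the PCI core takes over config-space save/restore and power-state transitions, and only device-specific reprogramming survives as a dev_pm_ops resume hook, with the suspend slot left NULL. The shape, with a hypothetical reconfigure helper standing in for each driver's own:

#include <linux/pci.h>
#include <linux/pm.h>

static int example_reconfigure(struct pci_dev *pdev);	/* driver specific */

static int __maybe_unused example_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Config space and power state are already restored by the core. */
	return example_reconfigure(pdev);
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, NULL, example_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	.driver.pm	= &example_pm_ops,
};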
-config HW_RANDOM_TX4939 - tristate "TX4939 Random Number Generator support" - depends on SOC_TX4939 - default HW_RANDOM - help - This driver provides kernel-side support for the Random Number - Generator hardware found on TX4939 SoC. - - To compile this driver as a module, choose M here: the - module will be called tx4939-rng. - - If unsure, say Y. - config HW_RANDOM_MXC_RNGA tristate "Freescale i.MX RNGA Random Number Generator" depends on SOC_IMX31 diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index a2f1ce0790d1..584d47ba32f7 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -20,7 +20,6 @@ obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o -obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c deleted file mode 100644 index c8bd34e740fd..000000000000 --- a/drivers/char/hw_random/tx4939-rng.c +++ /dev/null @@ -1,157 +0,0 @@ -/* - * RNG driver for TX4939 Random Number Generators (RNG) - * - * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include <linux/err.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/platform_device.h> -#include <linux/hw_random.h> -#include <linux/gfp.h> - -#define TX4939_RNG_RCSR 0x00000000 -#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8) - -#define TX4939_RNG_RCSR_INTE 0x00000008 -#define TX4939_RNG_RCSR_RST 0x00000004 -#define TX4939_RNG_RCSR_FIN 0x00000002 -#define TX4939_RNG_RCSR_ST 0x00000001 - -struct tx4939_rng { - struct hwrng rng; - void __iomem *base; - u64 databuf[3]; - unsigned int data_avail; -}; - -static void rng_io_start(void) -{ -#ifndef CONFIG_64BIT - /* - * readq is reading a 64-bit register using a 64-bit load. On - * a 32-bit kernel however interrupts or any other processor - * exception would clobber the upper 32-bit of the processor - * register so interrupts need to be disabled. 
- */ - local_irq_disable(); -#endif -} - -static void rng_io_end(void) -{ -#ifndef CONFIG_64BIT - local_irq_enable(); -#endif -} - -static u64 read_rng(void __iomem *base, unsigned int offset) -{ - return ____raw_readq(base + offset); -} - -static void write_rng(u64 val, void __iomem *base, unsigned int offset) -{ - return ____raw_writeq(val, base + offset); -} - -static int tx4939_rng_data_present(struct hwrng *rng, int wait) -{ - struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng); - int i; - - if (rngdev->data_avail) - return rngdev->data_avail; - for (i = 0; i < 20; i++) { - rng_io_start(); - if (!(read_rng(rngdev->base, TX4939_RNG_RCSR) - & TX4939_RNG_RCSR_ST)) { - rngdev->databuf[0] = - read_rng(rngdev->base, TX4939_RNG_ROR(0)); - rngdev->databuf[1] = - read_rng(rngdev->base, TX4939_RNG_ROR(1)); - rngdev->databuf[2] = - read_rng(rngdev->base, TX4939_RNG_ROR(2)); - rngdev->data_avail = - sizeof(rngdev->databuf) / sizeof(u32); - /* Start RNG */ - write_rng(TX4939_RNG_RCSR_ST, - rngdev->base, TX4939_RNG_RCSR); - wait = 0; - } - rng_io_end(); - if (!wait) - break; - /* 90 bus clock cycles by default for generation */ - ndelay(90 * 5); - } - return rngdev->data_avail; -} - -static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer) -{ - struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng); - - rngdev->data_avail--; - *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail); - return sizeof(u32); -} - -static int __init tx4939_rng_probe(struct platform_device *dev) -{ - struct tx4939_rng *rngdev; - int i; - - rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL); - if (!rngdev) - return -ENOMEM; - rngdev->base = devm_platform_ioremap_resource(dev, 0); - if (IS_ERR(rngdev->base)) - return PTR_ERR(rngdev->base); - - rngdev->rng.name = dev_name(&dev->dev); - rngdev->rng.data_present = tx4939_rng_data_present; - rngdev->rng.data_read = tx4939_rng_data_read; - - rng_io_start(); - /* Reset RNG */ - write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR); - write_rng(0, rngdev->base, TX4939_RNG_RCSR); - /* Start RNG */ - write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR); - rng_io_end(); - /* - * Drop first two results. From the datasheet: - * The quality of the random numbers generated immediately - * after reset can be insufficient. Therefore, do not use - * random numbers obtained from the first and second - * generations; use the ones from the third or subsequent - * generation. 
- */ - for (i = 0; i < 2; i++) { - rngdev->data_avail = 0; - if (!tx4939_rng_data_present(&rngdev->rng, 1)) - return -EIO; - } - - platform_set_drvdata(dev, rngdev); - return devm_hwrng_register(&dev->dev, &rngdev->rng); -} - -static struct platform_driver tx4939_rng_driver = { - .driver = { - .name = "tx4939-rng", - }, -}; - -module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe); - -MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939"); -MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 0a7dde135db1..e856df7e285c 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -179,9 +179,9 @@ static void remove_common(struct virtio_device *vdev) vi->data_avail = 0; vi->data_idx = 0; complete(&vi->have_data); - vdev->config->reset(vdev); if (vi->hwrng_register_done) hwrng_unregister(&vi->hwrng); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); ida_simple_remove(&rng_index_ida, vi->index); kfree(vi); diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h index 9ccb6b270b07..95164246afd1 100644 --- a/drivers/char/mwave/3780i.h +++ b/drivers/char/mwave/3780i.h @@ -68,7 +68,7 @@ typedef struct { unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */ unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */ unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */ - unsigned char Reserved:5; /* 0: Reserved */ + unsigned short Reserved:13; /* 0: Reserved */ } DSP_ISA_SLAVE_CONTROL; diff --git a/drivers/char/random.c b/drivers/char/random.c index 227fb7802738..b411182df6f6 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -101,7 +101,7 @@ * =============================== * * There are four exported interfaces; two for use within the kernel, - * and two or use from userspace. + * and two for use from userspace. * * Exported interfaces ---- userspace output * ----------------------------------------- @@ -124,7 +124,7 @@ * * The primary kernel interface is * - * void get_random_bytes(void *buf, int nbytes); + * void get_random_bytes(void *buf, int nbytes); * * This interface will return the requested number of random bytes, * and place it in the requested buffer. This is equivalent to a @@ -132,10 +132,10 @@ * * For less critical applications, there are the functions: * - * u32 get_random_u32() - * u64 get_random_u64() - * unsigned int get_random_int() - * unsigned long get_random_long() + * u32 get_random_u32() + * u64 get_random_u64() + * unsigned int get_random_int() + * unsigned long get_random_long() * * These are produced by a cryptographic RNG seeded from get_random_bytes, * and so do not deplete the entropy pool as much. 
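Back on the one-line mwave fix above: a bit-field's width may not exceed its base type, so a 13-bit reserved field needs at least an unsigned short, and the visible widths (1+1+1+13) then cover the full 16-bit DSP_ISA_SLAVE_CONTROL register instead of packing 1+1+1+5 bits into a single byte and leaving half the register unaccounted for. A standalone model (not the kernel header, which mixes base types):

#include <stdio.h>

struct slave_control {
	unsigned short ClockControl:1;
	unsigned short SoftReset:1;
	unsigned short ConfigMode:1;
	unsigned short Reserved:13;	/* pads to the full 16-bit register */
};

int main(void)
{
	/* 2 on the usual ABIs: the four fields pack into one 16-bit unit. */
	printf("sizeof = %zu\n", sizeof(struct slave_control));
	return 0;
}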
These are recommended @@ -197,10 +197,10 @@ * from the devices are: * * void add_device_randomness(const void *buf, unsigned int size); - * void add_input_randomness(unsigned int type, unsigned int code, + * void add_input_randomness(unsigned int type, unsigned int code, * unsigned int value); * void add_interrupt_randomness(int irq); - * void add_disk_randomness(struct gendisk *disk); + * void add_disk_randomness(struct gendisk *disk); * void add_hwgenerator_randomness(const char *buffer, size_t count, * size_t entropy); * void add_bootloader_randomness(const void *buf, unsigned int size); @@ -296,8 +296,8 @@ * /dev/random and /dev/urandom created already, they can be created * by using the commands: * - * mknod /dev/random c 1 8 - * mknod /dev/urandom c 1 9 + * mknod /dev/random c 1 8 + * mknod /dev/urandom c 1 9 * * Acknowledgements: * ================= @@ -337,7 +337,6 @@ #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/percpu.h> -#include <linux/fips.h> #include <linux/ptrace.h> #include <linux/workqueue.h> #include <linux/irq.h> @@ -360,30 +359,11 @@ /* #define ADD_INTERRUPT_BENCH */ /* - * Configuration information - */ -#define INPUT_POOL_SHIFT 12 -#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5)) -#define OUTPUT_POOL_SHIFT 10 -#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5)) -#define EXTRACT_SIZE (BLAKE2S_HASH_SIZE / 2) - -/* - * To allow fractional bits to be tracked, the entropy_count field is - * denominated in units of 1/8th bits. - * - * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in - * credit_entropy_bits() needs to be 64 bits wide. - */ -#define ENTROPY_SHIFT 3 -#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT) - -/* * If the entropy count falls under this number of bits, then we * should wake up processes which are selecting or polling on write * access to /dev/random. */ -static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; +static int random_write_wakeup_bits = 28 * (1 << 5); /* * Originally, we used a primitive polynomial of degree .poolwords @@ -430,14 +410,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; * polynomial which improves the resulting TGFSR polynomial to be * irreducible, which we have made here. */ -static const struct poolinfo { - int poolbitshift, poolwords, poolbytes, poolfracbits; -#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5) - int tap1, tap2, tap3, tap4, tap5; -} poolinfo_table[] = { - /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */ +enum poolinfo { + POOL_WORDS = 128, + POOL_WORDMASK = POOL_WORDS - 1, + POOL_BYTES = POOL_WORDS * sizeof(u32), + POOL_BITS = POOL_BYTES * 8, + POOL_BITSHIFT = ilog2(POOL_BITS), + + /* To allow fractional bits to be tracked, the entropy_count field is + * denominated in units of 1/8th bits. 
*/ + POOL_ENTROPY_SHIFT = 3, +#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT) + POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT, + /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */ - { S(128), 104, 76, 51, 25, 1 }, + POOL_TAP1 = 104, + POOL_TAP2 = 76, + POOL_TAP3 = 51, + POOL_TAP4 = 25, + POOL_TAP5 = 1, + + EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2 }; /* @@ -450,9 +443,9 @@ static DEFINE_SPINLOCK(random_ready_list_lock); static LIST_HEAD(random_ready_list); struct crng_state { - __u32 state[16]; - unsigned long init_time; - spinlock_t lock; + u32 state[16]; + unsigned long init_time; + spinlock_t lock; }; static struct crng_state primary_crng = { @@ -476,10 +469,10 @@ static bool crng_need_final_init = false; #define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; static unsigned long crng_global_init_time = 0; -#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE) -static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]); +#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE) +static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]); static void _crng_backtrack_protect(struct crng_state *crng, - __u8 tmp[CHACHA_BLOCK_SIZE], int used); + u8 tmp[CHACHA_BLOCK_SIZE], int used); static void process_random_ready_list(void); static void _get_random_bytes(void *buf, int nbytes); @@ -500,38 +493,23 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); * **********************************************************************/ -struct entropy_store; -struct entropy_store { - /* read-only data: */ - const struct poolinfo *poolinfo; - __u32 *pool; - const char *name; +static u32 input_pool_data[POOL_WORDS] __latent_entropy; - /* read-write data: */ +static struct { spinlock_t lock; - unsigned short add_ptr; - unsigned short input_rotate; + u16 add_ptr; + u16 input_rotate; int entropy_count; - unsigned int last_data_init:1; - __u8 last_data[EXTRACT_SIZE]; +} input_pool = { + .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), }; -static ssize_t extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int min, int rsvd); -static ssize_t _extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int fips); +static ssize_t extract_entropy(void *buf, size_t nbytes, int min); +static ssize_t _extract_entropy(void *buf, size_t nbytes); -static void crng_reseed(struct crng_state *crng, struct entropy_store *r); -static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy; - -static struct entropy_store input_pool = { - .poolinfo = &poolinfo_table[0], - .name = "input", - .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), - .pool = input_pool_data -}; +static void crng_reseed(struct crng_state *crng, bool use_input_pool); -static __u32 const twist_table[8] = { +static const u32 twist_table[8] = { 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; @@ -545,39 +523,31 @@ static __u32 const twist_table[8] = { * it's cheap to do so and helps slightly in the expected case where * the entropy is concentrated in the low-order bits. 
*/ -static void _mix_pool_bytes(struct entropy_store *r, const void *in, - int nbytes) +static void _mix_pool_bytes(const void *in, int nbytes) { - unsigned long i, tap1, tap2, tap3, tap4, tap5; + unsigned long i; int input_rotate; - int wordmask = r->poolinfo->poolwords - 1; - const unsigned char *bytes = in; - __u32 w; + const u8 *bytes = in; + u32 w; - tap1 = r->poolinfo->tap1; - tap2 = r->poolinfo->tap2; - tap3 = r->poolinfo->tap3; - tap4 = r->poolinfo->tap4; - tap5 = r->poolinfo->tap5; - - input_rotate = r->input_rotate; - i = r->add_ptr; + input_rotate = input_pool.input_rotate; + i = input_pool.add_ptr; /* mix one byte at a time to simplify size handling and churn faster */ while (nbytes--) { w = rol32(*bytes++, input_rotate); - i = (i - 1) & wordmask; + i = (i - 1) & POOL_WORDMASK; /* XOR in the various taps */ - w ^= r->pool[i]; - w ^= r->pool[(i + tap1) & wordmask]; - w ^= r->pool[(i + tap2) & wordmask]; - w ^= r->pool[(i + tap3) & wordmask]; - w ^= r->pool[(i + tap4) & wordmask]; - w ^= r->pool[(i + tap5) & wordmask]; + w ^= input_pool_data[i]; + w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK]; + w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK]; + w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK]; + w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK]; + w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK]; /* Mix the result back in with a twist */ - r->pool[i] = (w >> 3) ^ twist_table[w & 7]; + input_pool_data[i] = (w >> 3) ^ twist_table[w & 7]; /* * Normally, we add 7 bits of rotation to the pool. @@ -588,33 +558,31 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in, input_rotate = (input_rotate + (i ? 7 : 14)) & 31; } - r->input_rotate = input_rotate; - r->add_ptr = i; + input_pool.input_rotate = input_rotate; + input_pool.add_ptr = i; } -static void __mix_pool_bytes(struct entropy_store *r, const void *in, - int nbytes) +static void __mix_pool_bytes(const void *in, int nbytes) { - trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_); - _mix_pool_bytes(r, in, nbytes); + trace_mix_pool_bytes_nolock(nbytes, _RET_IP_); + _mix_pool_bytes(in, nbytes); } -static void mix_pool_bytes(struct entropy_store *r, const void *in, - int nbytes) +static void mix_pool_bytes(const void *in, int nbytes) { unsigned long flags; - trace_mix_pool_bytes(r->name, nbytes, _RET_IP_); - spin_lock_irqsave(&r->lock, flags); - _mix_pool_bytes(r, in, nbytes); - spin_unlock_irqrestore(&r->lock, flags); + trace_mix_pool_bytes(nbytes, _RET_IP_); + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(in, nbytes); + spin_unlock_irqrestore(&input_pool.lock, flags); } struct fast_pool { - __u32 pool[4]; - unsigned long last; - unsigned short reg_idx; - unsigned char count; + u32 pool[4]; + unsigned long last; + u16 reg_idx; + u8 count; }; /* @@ -624,8 +592,8 @@ struct fast_pool { */ static void fast_mix(struct fast_pool *f) { - __u32 a = f->pool[0], b = f->pool[1]; - __u32 c = f->pool[2], d = f->pool[3]; + u32 a = f->pool[0], b = f->pool[1]; + u32 c = f->pool[2], d = f->pool[3]; a += b; c += d; b = rol32(b, 6); d = rol32(d, 27); @@ -669,17 +637,19 @@ static void process_random_ready_list(void) * Use credit_entropy_bits_safe() if the value comes from userspace * or otherwise should be checked for extreme values. 
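The pool mixing above is easy to experiment with outside the kernel. fast_mix() is a small add-rotate-xor permutation over four 32-bit words; the model below reproduces its structure (the first round's rotation constants appear in the hunk; the remaining rounds are taken from the kernel source and should be treated as an assumption of this sketch):

#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << s) | (w >> (32 - s));
}

static void fast_mix_model(uint32_t p[4])
{
	static const unsigned int rot[4][2] = {
		{ 6, 27 }, { 16, 14 }, { 6, 27 }, { 16, 14 }
	};
	uint32_t a = p[0], b = p[1], c = p[2], d = p[3];
	int i;

	for (i = 0; i < 4; i++) {
		a += b; c += d;
		b = rol32(b, rot[i][0]);
		d = rol32(d, rot[i][1]);
		d ^= a; b ^= c;
	}
	p[0] = a; p[1] = b; p[2] = c; p[3] = d;
}

int main(void)
{
	uint32_t pool[4] = { 1, 0, 0, 0 };	/* one input bit set */

	fast_mix_model(pool);	/* watch the bit diffuse across all words */
	printf("%08x %08x %08x %08x\n", pool[0], pool[1], pool[2], pool[3]);
	return 0;
}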
*/ -static void credit_entropy_bits(struct entropy_store *r, int nbits) +static void credit_entropy_bits(int nbits) { - int entropy_count, orig; - const int pool_size = r->poolinfo->poolfracbits; - int nfrac = nbits << ENTROPY_SHIFT; + int entropy_count, entropy_bits, orig; + int nfrac = nbits << POOL_ENTROPY_SHIFT; + + /* Ensure that the multiplication can avoid being 64 bits wide. */ + BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31); if (!nbits) return; retry: - entropy_count = orig = READ_ONCE(r->entropy_count); + entropy_count = orig = READ_ONCE(input_pool.entropy_count); if (nfrac < 0) { /* Debit */ entropy_count += nfrac; @@ -706,50 +676,43 @@ retry: * turns no matter how large nbits is. */ int pnfrac = nfrac; - const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2; + const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2; /* The +2 corresponds to the /4 in the denominator */ do { - unsigned int anfrac = min(pnfrac, pool_size/2); + unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2); unsigned int add = - ((pool_size - entropy_count)*anfrac*3) >> s; + ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s; entropy_count += add; pnfrac -= anfrac; - } while (unlikely(entropy_count < pool_size-2 && pnfrac)); + } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac)); } if (WARN_ON(entropy_count < 0)) { - pr_warn("negative entropy/overflow: pool %s count %d\n", - r->name, entropy_count); + pr_warn("negative entropy/overflow: count %d\n", entropy_count); entropy_count = 0; - } else if (entropy_count > pool_size) - entropy_count = pool_size; - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + } else if (entropy_count > POOL_FRACBITS) + entropy_count = POOL_FRACBITS; + if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig) goto retry; - trace_credit_entropy_bits(r->name, nbits, - entropy_count >> ENTROPY_SHIFT, _RET_IP_); - - if (r == &input_pool) { - int entropy_bits = entropy_count >> ENTROPY_SHIFT; + trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_); - if (crng_init < 2 && entropy_bits >= 128) - crng_reseed(&primary_crng, r); - } + entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT; + if (crng_init < 2 && entropy_bits >= 128) + crng_reseed(&primary_crng, true); } -static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) +static int credit_entropy_bits_safe(int nbits) { - const int nbits_max = r->poolinfo->poolwords * 32; - if (nbits < 0) return -EINVAL; /* Cap the value to avoid overflows */ - nbits = min(nbits, nbits_max); + nbits = min(nbits, POOL_BITS); - credit_entropy_bits(r, nbits); + credit_entropy_bits(nbits); return 0; } @@ -759,7 +722,7 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) * *********************************************************************/ -#define CRNG_RESEED_INTERVAL (300*HZ) +#define CRNG_RESEED_INTERVAL (300 * HZ) static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); @@ -783,9 +746,9 @@ early_param("random.trust_cpu", parse_trust_cpu); static bool crng_init_try_arch(struct crng_state *crng) { - int i; - bool arch_init = true; - unsigned long rv; + int i; + bool arch_init = true; + unsigned long rv; for (i = 4; i < 16; i++) { if (!arch_get_random_seed_long(&rv) && @@ -801,9 +764,9 @@ static bool crng_init_try_arch(struct crng_state *crng) static bool __init crng_init_try_arch_early(struct crng_state *crng) { - int i; - bool arch_init = true; - unsigned long rv; + int i; + bool arch_init = true; + unsigned long rv; for (i = 4; i < 16; i++) { if 
(!arch_get_random_seed_long_early(&rv) && @@ -820,14 +783,14 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng) static void crng_initialize_secondary(struct crng_state *crng) { chacha_init_consts(crng->state); - _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); + _get_random_bytes(&crng->state[4], sizeof(u32) * 12); crng_init_try_arch(crng); crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } static void __init crng_initialize_primary(struct crng_state *crng) { - _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); + _extract_entropy(&crng->state[4], sizeof(u32) * 12); if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) { invalidate_batched_entropy(); numa_crng_init(); @@ -873,7 +836,7 @@ static void do_numa_crng_init(struct work_struct *work) struct crng_state *crng; struct crng_state **pool; - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL); for_each_online_node(i) { crng = kmalloc_node(sizeof(struct crng_state), GFP_KERNEL | __GFP_NOFAIL, i); @@ -917,10 +880,10 @@ static struct crng_state *select_crng(void) * path. So we can't afford to dilly-dally. Returns the number of * bytes processed from cp. */ -static size_t crng_fast_load(const char *cp, size_t len) +static size_t crng_fast_load(const u8 *cp, size_t len) { unsigned long flags; - char *p; + u8 *p; size_t ret = 0; if (!spin_trylock_irqsave(&primary_crng.lock, flags)) @@ -929,7 +892,7 @@ static size_t crng_fast_load(const char *cp, size_t len) spin_unlock_irqrestore(&primary_crng.lock, flags); return 0; } - p = (unsigned char *) &primary_crng.state[4]; + p = (u8 *)&primary_crng.state[4]; while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) { p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp; cp++; crng_init_cnt++; len--; ret++; @@ -957,14 +920,14 @@ static size_t crng_fast_load(const char *cp, size_t len) * like a fixed DMI table (for example), which might very well be * unique to the machine, but is otherwise unvarying. 
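A note on the credit arithmetic in credit_entropy_bits() above: with s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2, each chunk adds (pool - count) * (chunk / pool) * 3/4, so the estimate approaches POOL_FRACBITS asymptotically and finite input never claims a full pool. A standalone model using the enum poolinfo constants (4096-bit pool, 1/8th-bit units):

#include <stdio.h>

int main(void)
{
	const int pool = 4096 << 3;	/* POOL_FRACBITS */
	int count = 0;
	int i;

	/* Feed maximal chunks (half the pool, the do/while loop's cap);
	 * the shift by 17 is POOL_BITSHIFT(12) + POOL_ENTROPY_SHIFT(3) + 2. */
	for (i = 1; i <= 10; i++) {
		int chunk = pool / 2;

		count += (int)(((long long)(pool - count) * chunk * 3) >> 17);
		printf("chunk %2d -> %5d of %d\n", i, count, pool);
	}
	return 0;
}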
*/ -static int crng_slow_load(const char *cp, size_t len) +static int crng_slow_load(const u8 *cp, size_t len) { - unsigned long flags; - static unsigned char lfsr = 1; - unsigned char tmp; - unsigned i, max = CHACHA_KEY_SIZE; - const char * src_buf = cp; - char * dest_buf = (char *) &primary_crng.state[4]; + unsigned long flags; + static u8 lfsr = 1; + u8 tmp; + unsigned int i, max = CHACHA_KEY_SIZE; + const u8 *src_buf = cp; + u8 *dest_buf = (u8 *)&primary_crng.state[4]; if (!spin_trylock_irqsave(&primary_crng.lock, flags)) return 0; @@ -975,7 +938,7 @@ static int crng_slow_load(const char *cp, size_t len) if (len > max) max = len; - for (i = 0; i < max ; i++) { + for (i = 0; i < max; i++) { tmp = lfsr; lfsr >>= 1; if (tmp & 1) @@ -988,17 +951,17 @@ static int crng_slow_load(const char *cp, size_t len) return 1; } -static void crng_reseed(struct crng_state *crng, struct entropy_store *r) +static void crng_reseed(struct crng_state *crng, bool use_input_pool) { - unsigned long flags; - int i, num; + unsigned long flags; + int i, num; union { - __u8 block[CHACHA_BLOCK_SIZE]; - __u32 key[8]; + u8 block[CHACHA_BLOCK_SIZE]; + u32 key[8]; } buf; - if (r) { - num = extract_entropy(r, &buf, 32, 16, 0); + if (use_input_pool) { + num = extract_entropy(&buf, 32, 16); if (num == 0) return; } else { @@ -1008,11 +971,11 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } spin_lock_irqsave(&crng->lock, flags); for (i = 0; i < 8; i++) { - unsigned long rv; + unsigned long rv; if (!arch_get_random_seed_long(&rv) && !arch_get_random_long(&rv)) rv = random_get_entropy(); - crng->state[i+4] ^= buf.key[i] ^ rv; + crng->state[i + 4] ^= buf.key[i] ^ rv; } memzero_explicit(&buf, sizeof(buf)); WRITE_ONCE(crng->init_time, jiffies); @@ -1020,8 +983,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) crng_finalize_init(crng); } -static void _extract_crng(struct crng_state *crng, - __u8 out[CHACHA_BLOCK_SIZE]) +static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]) { unsigned long flags, init_time; @@ -1029,8 +991,7 @@ static void _extract_crng(struct crng_state *crng, init_time = READ_ONCE(crng->init_time); if (time_after(READ_ONCE(crng_global_init_time), init_time) || time_after(jiffies, init_time + CRNG_RESEED_INTERVAL)) - crng_reseed(crng, crng == &primary_crng ? - &input_pool : NULL); + crng_reseed(crng, crng == &primary_crng); } spin_lock_irqsave(&crng->lock, flags); chacha20_block(&crng->state[0], out); @@ -1039,7 +1000,7 @@ static void _extract_crng(struct crng_state *crng, spin_unlock_irqrestore(&crng->lock, flags); } -static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE]) +static void extract_crng(u8 out[CHACHA_BLOCK_SIZE]) { _extract_crng(select_crng(), out); } @@ -1049,26 +1010,26 @@ static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE]) * enough) to mutate the CRNG key to provide backtracking protection. 
*/ static void _crng_backtrack_protect(struct crng_state *crng, - __u8 tmp[CHACHA_BLOCK_SIZE], int used) + u8 tmp[CHACHA_BLOCK_SIZE], int used) { - unsigned long flags; - __u32 *s, *d; - int i; + unsigned long flags; + u32 *s, *d; + int i; - used = round_up(used, sizeof(__u32)); + used = round_up(used, sizeof(u32)); if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) { extract_crng(tmp); used = 0; } spin_lock_irqsave(&crng->lock, flags); - s = (__u32 *) &tmp[used]; + s = (u32 *)&tmp[used]; d = &crng->state[4]; - for (i=0; i < 8; i++) + for (i = 0; i < 8; i++) *d++ ^= *s++; spin_unlock_irqrestore(&crng->lock, flags); } -static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used) +static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used) { _crng_backtrack_protect(select_crng(), tmp, used); } @@ -1076,7 +1037,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used) static ssize_t extract_crng_user(void __user *buf, size_t nbytes) { ssize_t ret = 0, i = CHACHA_BLOCK_SIZE; - __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); + u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); int large_request = (nbytes > 256); while (nbytes) { @@ -1108,7 +1069,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes) return ret; } - /********************************************************************* * * Entropy input management @@ -1141,8 +1101,8 @@ void add_device_randomness(const void *buf, unsigned int size) trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&input_pool, buf, size); - _mix_pool_bytes(&input_pool, &time, sizeof(time)); + _mix_pool_bytes(buf, size); + _mix_pool_bytes(&time, sizeof(time)); spin_unlock_irqrestore(&input_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); @@ -1161,19 +1121,17 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; */ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) { - struct entropy_store *r; struct { long jiffies; - unsigned cycles; - unsigned num; + unsigned int cycles; + unsigned int num; } sample; long delta, delta2, delta3; sample.jiffies = jiffies; sample.cycles = random_get_entropy(); sample.num = num; - r = &input_pool; - mix_pool_bytes(r, &sample, sizeof(sample)); + mix_pool_bytes(&sample, sizeof(sample)); /* * Calculate number of bits of randomness we probably added. @@ -1205,11 +1163,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * Round down by 1 bit on general principles, * and limit entropy estimate to 12 bits. 
*/ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); + credit_entropy_bits(min_t(int, fls(delta >> 1), 11)); } void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) + unsigned int value) { static unsigned char last_value; @@ -1220,7 +1178,7 @@ void add_input_randomness(unsigned int type, unsigned int code, last_value = value; add_timer_randomness(&input_timer_state, (type << 4) ^ code ^ (code >> 4) ^ value); - trace_add_input_randomness(ENTROPY_BITS(&input_pool)); + trace_add_input_randomness(POOL_ENTROPY_BITS()); } EXPORT_SYMBOL_GPL(add_input_randomness); @@ -1229,33 +1187,33 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness); #ifdef ADD_INTERRUPT_BENCH static unsigned long avg_cycles, avg_deviation; -#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */ -#define FIXED_1_2 (1 << (AVG_SHIFT-1)) +#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */ +#define FIXED_1_2 (1 << (AVG_SHIFT - 1)) static void add_interrupt_bench(cycles_t start) { - long delta = random_get_entropy() - start; + long delta = random_get_entropy() - start; - /* Use a weighted moving average */ - delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT); - avg_cycles += delta; - /* And average deviation */ - delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT); - avg_deviation += delta; + /* Use a weighted moving average */ + delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT); + avg_cycles += delta; + /* And average deviation */ + delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT); + avg_deviation += delta; } #else #define add_interrupt_bench(x) #endif -static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) +static u32 get_reg(struct fast_pool *f, struct pt_regs *regs) { - __u32 *ptr = (__u32 *) regs; + u32 *ptr = (u32 *)regs; unsigned int idx; if (regs == NULL) return 0; idx = READ_ONCE(f->reg_idx); - if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) + if (idx >= sizeof(struct pt_regs) / sizeof(u32)) idx = 0; ptr += idx++; WRITE_ONCE(f->reg_idx, idx); @@ -1264,13 +1222,12 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) void add_interrupt_randomness(int irq) { - struct entropy_store *r; - struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); - struct pt_regs *regs = get_irq_regs(); - unsigned long now = jiffies; - cycles_t cycles = random_get_entropy(); - __u32 c_high, j_high; - __u64 ip; + struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); + struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + cycles_t cycles = random_get_entropy(); + u32 c_high, j_high; + u64 ip; if (cycles == 0) cycles = get_reg(fast_pool, regs); @@ -1280,38 +1237,35 @@ void add_interrupt_randomness(int irq) fast_pool->pool[1] ^= now ^ c_high; ip = regs ? instruction_pointer(regs) : _RET_IP_; fast_pool->pool[2] ^= ip; - fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : - get_reg(fast_pool, regs); + fast_pool->pool[3] ^= + (sizeof(ip) > 4) ? 
ip >> 32 : get_reg(fast_pool, regs); fast_mix(fast_pool); add_interrupt_bench(cycles); if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && - crng_fast_load((char *) fast_pool->pool, - sizeof(fast_pool->pool)) > 0) { + crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) { fast_pool->count = 0; fast_pool->last = now; } return; } - if ((fast_pool->count < 64) && - !time_after(now, fast_pool->last + HZ)) + if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ)) return; - r = &input_pool; - if (!spin_trylock(&r->lock)) + if (!spin_trylock(&input_pool.lock)) return; fast_pool->last = now; - __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool)); - spin_unlock(&r->lock); + __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool)); + spin_unlock(&input_pool.lock); fast_pool->count = 0; /* award one bit for the contents of the fast pool */ - credit_entropy_bits(r, 1); + credit_entropy_bits(1); } EXPORT_SYMBOL_GPL(add_interrupt_randomness); @@ -1322,7 +1276,7 @@ void add_disk_randomness(struct gendisk *disk) return; /* first major is 1, so we get >= 0x200 here */ add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); - trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); + trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS()); } EXPORT_SYMBOL_GPL(add_disk_randomness); #endif @@ -1337,43 +1291,36 @@ EXPORT_SYMBOL_GPL(add_disk_randomness); * This function decides how many bytes to actually take from the * given pool, and also debits the entropy count accordingly. */ -static size_t account(struct entropy_store *r, size_t nbytes, int min, - int reserved) +static size_t account(size_t nbytes, int min) { - int entropy_count, orig, have_bytes; + int entropy_count, orig; size_t ibytes, nfrac; - BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); + BUG_ON(input_pool.entropy_count > POOL_FRACBITS); /* Can we pull enough? */ retry: - entropy_count = orig = READ_ONCE(r->entropy_count); - ibytes = nbytes; - /* never pull more than available */ - have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); - - if ((have_bytes -= reserved) < 0) - have_bytes = 0; - ibytes = min_t(size_t, ibytes, have_bytes); - if (ibytes < min) - ibytes = 0; - + entropy_count = orig = READ_ONCE(input_pool.entropy_count); if (WARN_ON(entropy_count < 0)) { - pr_warn("negative entropy count: pool %s count %d\n", - r->name, entropy_count); + pr_warn("negative entropy count: count %d\n", entropy_count); entropy_count = 0; } - nfrac = ibytes << (ENTROPY_SHIFT + 3); - if ((size_t) entropy_count > nfrac) + + /* never pull more than available */ + ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3)); + if (ibytes < min) + ibytes = 0; + nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3); + if ((size_t)entropy_count > nfrac) entropy_count -= nfrac; else entropy_count = 0; - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig) goto retry; - trace_debit_entropy(r->name, 8 * ibytes); - if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) { + trace_debit_entropy(8 * ibytes); + if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) { wake_up_interruptible(&random_write_wait); kill_fasync(&fasync, SIGIO, POLL_OUT); } @@ -1386,7 +1333,7 @@ retry: * * Note: we assume that .poolwords is a multiple of 16 words. 
*/ -static void extract_buf(struct entropy_store *r, __u8 *out) +static void extract_buf(u8 *out) { struct blake2s_state state __aligned(__alignof__(unsigned long)); u8 hash[BLAKE2S_HASH_SIZE]; @@ -1408,9 +1355,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out) } /* Generate a hash across the pool */ - spin_lock_irqsave(&r->lock, flags); - blake2s_update(&state, (const u8 *)r->pool, - r->poolinfo->poolwords * sizeof(*r->pool)); + spin_lock_irqsave(&input_pool.lock, flags); + blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES); blake2s_final(&state, hash); /* final zeros out state */ /* @@ -1422,8 +1368,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out) * brute-forcing the feedback as hard as brute-forcing the * hash. */ - __mix_pool_bytes(r, hash, sizeof(hash)); - spin_unlock_irqrestore(&r->lock, flags); + __mix_pool_bytes(hash, sizeof(hash)); + spin_unlock_irqrestore(&input_pool.lock, flags); /* Note that EXTRACT_SIZE is half of hash size here, because above * we've dumped the full length back into mixer. By reducing the @@ -1433,23 +1379,13 @@ static void extract_buf(struct entropy_store *r, __u8 *out) memzero_explicit(hash, sizeof(hash)); } -static ssize_t _extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int fips) +static ssize_t _extract_entropy(void *buf, size_t nbytes) { ssize_t ret = 0, i; - __u8 tmp[EXTRACT_SIZE]; - unsigned long flags; + u8 tmp[EXTRACT_SIZE]; while (nbytes) { - extract_buf(r, tmp); - - if (fips) { - spin_lock_irqsave(&r->lock, flags); - if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) - panic("Hardware RNG duplicated output!\n"); - memcpy(r->last_data, tmp, EXTRACT_SIZE); - spin_unlock_irqrestore(&r->lock, flags); - } + extract_buf(tmp); i = min_t(int, nbytes, EXTRACT_SIZE); memcpy(buf, tmp, i); nbytes -= i; @@ -1468,42 +1404,19 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf, * returns it in a buffer. * * The min parameter specifies the minimum amount we can pull before - * failing to avoid races that defeat catastrophic reseeding while the - * reserved parameter indicates how much entropy we must leave in the - * pool after each pull to avoid starving other readers. + * failing to avoid races that defeat catastrophic reseeding. 
*/ -static ssize_t extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int min, int reserved) +static ssize_t extract_entropy(void *buf, size_t nbytes, int min) { - __u8 tmp[EXTRACT_SIZE]; - unsigned long flags; - - /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ - if (fips_enabled) { - spin_lock_irqsave(&r->lock, flags); - if (!r->last_data_init) { - r->last_data_init = 1; - spin_unlock_irqrestore(&r->lock, flags); - trace_extract_entropy(r->name, EXTRACT_SIZE, - ENTROPY_BITS(r), _RET_IP_); - extract_buf(r, tmp); - spin_lock_irqsave(&r->lock, flags); - memcpy(r->last_data, tmp, EXTRACT_SIZE); - } - spin_unlock_irqrestore(&r->lock, flags); - } - - trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); - nbytes = account(r, nbytes, min, reserved); - - return _extract_entropy(r, buf, nbytes, fips_enabled); + trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_); + nbytes = account(nbytes, min); + return _extract_entropy(buf, nbytes); } #define warn_unseeded_randomness(previous) \ - _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous)) + _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous)) -static void _warn_unseeded_randomness(const char *func_name, void *caller, - void **previous) +static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous) { #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM const bool print_once = false; @@ -1511,8 +1424,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, static bool print_once __read_mostly; #endif - if (print_once || - crng_ready() || + if (print_once || crng_ready() || (previous && (caller == READ_ONCE(*previous)))) return; WRITE_ONCE(*previous, caller); @@ -1520,9 +1432,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, print_once = true; #endif if (__ratelimit(&unseeded_warning)) - printk_deferred(KERN_NOTICE "random: %s called from %pS " - "with crng_init=%d\n", func_name, caller, - crng_init); + printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", + func_name, caller, crng_init); } /* @@ -1537,7 +1448,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, */ static void _get_random_bytes(void *buf, int nbytes) { - __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); + u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); trace_get_random_bytes(nbytes, _RET_IP_); @@ -1565,7 +1476,6 @@ void get_random_bytes(void *buf, int nbytes) } EXPORT_SYMBOL(get_random_bytes); - /* * Each time the timer fires, we expect that we got an unpredictable * jump in the cycle counter. 
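The comment above describes the bootstrap of last resort that entropy_timer() and try_to_generate_entropy() (continued below) implement: a one-jiffy timer racing the CPU's cycle counter harvests scheduling and pipeline jitter, crediting one bit per firing. A userspace sketch of the underlying observation, with CLOCK_MONOTONIC_RAW standing in for random_get_entropy():

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec a, b;
	int i;

	/* Back-to-back timestamp deltas wobble with cache, pipeline and
	 * scheduler state; that wobble is the raw material the kernel
	 * loop keeps mixing into the input pool. */
	for (i = 0; i < 8; i++) {
		clock_gettime(CLOCK_MONOTONIC_RAW, &a);
		clock_gettime(CLOCK_MONOTONIC_RAW, &b);
		printf("delta %d: %ld ns\n", i,
		       (b.tv_sec - a.tv_sec) * 1000000000L +
		       (b.tv_nsec - a.tv_nsec));
	}
	return 0;
}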
Even if the timer is running on another @@ -1581,7 +1491,7 @@ EXPORT_SYMBOL(get_random_bytes); */ static void entropy_timer(struct timer_list *t) { - credit_entropy_bits(&input_pool, 1); + credit_entropy_bits(1); } /* @@ -1604,15 +1514,15 @@ static void try_to_generate_entropy(void) timer_setup_on_stack(&stack.timer, entropy_timer, 0); while (!crng_ready()) { if (!timer_pending(&stack.timer)) - mod_timer(&stack.timer, jiffies+1); - mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); + mod_timer(&stack.timer, jiffies + 1); + mix_pool_bytes(&stack.now, sizeof(stack.now)); schedule(); stack.now = random_get_entropy(); } del_timer_sync(&stack.timer); destroy_timer_on_stack(&stack.timer); - mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); + mix_pool_bytes(&stack.now, sizeof(stack.now)); } /* @@ -1731,7 +1641,7 @@ EXPORT_SYMBOL(del_random_ready_callback); int __must_check get_random_bytes_arch(void *buf, int nbytes) { int left = nbytes; - char *p = buf; + u8 *p = buf; trace_get_random_bytes_arch(left, _RET_IP_); while (left) { @@ -1753,26 +1663,24 @@ EXPORT_SYMBOL(get_random_bytes_arch); /* * init_std_data - initialize pool with system data * - * @r: pool to initialize - * * This function clears the pool's entropy count and mixes some system * data into the pool to prepare it for use. The pool is not cleared * as that can only decrease the entropy in the pool. */ -static void __init init_std_data(struct entropy_store *r) +static void __init init_std_data(void) { int i; ktime_t now = ktime_get_real(); unsigned long rv; - mix_pool_bytes(r, &now, sizeof(now)); - for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) { + mix_pool_bytes(&now, sizeof(now)); + for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) { if (!arch_get_random_seed_long(&rv) && !arch_get_random_long(&rv)) rv = random_get_entropy(); - mix_pool_bytes(r, &rv, sizeof(rv)); + mix_pool_bytes(&rv, sizeof(rv)); } - mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); + mix_pool_bytes(utsname(), sizeof(*(utsname()))); } /* @@ -1787,7 +1695,7 @@ static void __init init_std_data(struct entropy_store *r) */ int __init rand_initialize(void) { - init_std_data(&input_pool); + init_std_data(); if (crng_need_final_init) crng_finalize_init(&primary_crng); crng_initialize_primary(&primary_crng); @@ -1816,20 +1724,19 @@ void rand_initialize_disk(struct gendisk *disk) } #endif -static ssize_t -urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) +static ssize_t urandom_read_nowarn(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) { int ret; - nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); + nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3)); ret = extract_crng_user(buf, nbytes); - trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool)); + trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS()); return ret; } -static ssize_t -urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) +static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) { static int maxwarn = 10; @@ -1843,8 +1750,8 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) return urandom_read_nowarn(file, buf, nbytes, ppos); } -static ssize_t -random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) +static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) { int ret; @@ -1854,8 +1761,7 @@ random_read(struct file *file, char __user *buf, 
size_t nbytes, loff_t *ppos) return urandom_read_nowarn(file, buf, nbytes, ppos); } -static __poll_t -random_poll(struct file *file, poll_table * wait) +static __poll_t random_poll(struct file *file, poll_table *wait) { __poll_t mask; @@ -1864,16 +1770,15 @@ random_poll(struct file *file, poll_table * wait) mask = 0; if (crng_ready()) mask |= EPOLLIN | EPOLLRDNORM; - if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) + if (POOL_ENTROPY_BITS() < random_write_wakeup_bits) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } -static int -write_pool(struct entropy_store *r, const char __user *buffer, size_t count) +static int write_pool(const char __user *buffer, size_t count) { size_t bytes; - __u32 t, buf[16]; + u32 t, buf[16]; const char __user *p = buffer; while (count > 0) { @@ -1883,7 +1788,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) if (copy_from_user(&buf, p, bytes)) return -EFAULT; - for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { + for (b = bytes; b > 0; b -= sizeof(u32), i++) { if (!arch_get_random_int(&t)) break; buf[i] ^= t; @@ -1892,7 +1797,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) count -= bytes; p += bytes; - mix_pool_bytes(r, buf, bytes); + mix_pool_bytes(buf, bytes); cond_resched(); } @@ -1904,7 +1809,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer, { size_t ret; - ret = write_pool(&input_pool, buffer, count); + ret = write_pool(buffer, count); if (ret) return ret; @@ -1920,7 +1825,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) switch (cmd) { case RNDGETENTCNT: /* inherently racy, no point locking */ - ent_count = ENTROPY_BITS(&input_pool); + ent_count = POOL_ENTROPY_BITS(); if (put_user(ent_count, p)) return -EFAULT; return 0; @@ -1929,7 +1834,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (get_user(ent_count, p)) return -EFAULT; - return credit_entropy_bits_safe(&input_pool, ent_count); + return credit_entropy_bits_safe(ent_count); case RNDADDENTROPY: if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1939,11 +1844,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EINVAL; if (get_user(size, p++)) return -EFAULT; - retval = write_pool(&input_pool, (const char __user *)p, - size); + retval = write_pool((const char __user *)p, size); if (retval < 0) return retval; - return credit_entropy_bits_safe(&input_pool, ent_count); + return credit_entropy_bits_safe(ent_count); case RNDZAPENTCNT: case RNDCLEARPOOL: /* @@ -1959,7 +1863,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (crng_init < 2) return -ENODATA; - crng_reseed(&primary_crng, &input_pool); + crng_reseed(&primary_crng, true); WRITE_ONCE(crng_global_init_time, jiffies - 1); return 0; default: @@ -1973,9 +1877,9 @@ static int random_fasync(int fd, struct file *filp, int on) } const struct file_operations random_fops = { - .read = random_read, + .read = random_read, .write = random_write, - .poll = random_poll, + .poll = random_poll, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, @@ -1983,7 +1887,7 @@ const struct file_operations random_fops = { }; const struct file_operations urandom_fops = { - .read = urandom_read, + .read = urandom_read, .write = random_write, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, @@ -1991,19 +1895,19 @@ const struct file_operations urandom_fops 
= { .llseek = noop_llseek, }; -SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, - unsigned int, flags) +SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, + flags) { int ret; - if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE)) + if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) return -EINVAL; /* * Requesting insecure and blocking randomness at the same time makes * no sense. */ - if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM)) + if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) return -EINVAL; if (count > INT_MAX) @@ -2030,7 +1934,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, #include <linux/sysctl.h> static int min_write_thresh; -static int max_write_thresh = INPUT_POOL_WORDS * 32; +static int max_write_thresh = POOL_BITS; static int random_min_urandom_seed = 60; static char sysctl_bootid[16]; @@ -2043,8 +1947,8 @@ static char sysctl_bootid[16]; * returned as an ASCII string in the standard UUID format; if via the * sysctl system call, as 16 bytes of binary data. */ -static int proc_do_uuid(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) +static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos) { struct ctl_table fake_table; unsigned char buf[64], tmp_uuid[16], *uuid; @@ -2073,13 +1977,13 @@ static int proc_do_uuid(struct ctl_table *table, int write, /* * Return entropy available scaled to integral bits */ -static int proc_do_entropy(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) +static int proc_do_entropy(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos) { struct ctl_table fake_table; int entropy_count; - entropy_count = *(int *)table->data >> ENTROPY_SHIFT; + entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT; fake_table.data = &entropy_count; fake_table.maxlen = sizeof(entropy_count); @@ -2087,7 +1991,7 @@ static int proc_do_entropy(struct ctl_table *table, int write, return proc_dointvec(&fake_table, write, buffer, lenp, ppos); } -static int sysctl_poolsize = INPUT_POOL_WORDS * 32; +static int sysctl_poolsize = POOL_BITS; extern struct ctl_table random_table[]; struct ctl_table random_table[] = { { @@ -2151,7 +2055,7 @@ struct ctl_table random_table[] = { #endif { } }; -#endif /* CONFIG_SYSCTL */ +#endif /* CONFIG_SYSCTL */ struct batched_entropy { union { @@ -2171,7 +2075,7 @@ struct batched_entropy { * point prior. */ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { - .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), }; u64 get_random_u64(void) @@ -2196,7 +2100,7 @@ u64 get_random_u64(void) EXPORT_SYMBOL(get_random_u64); static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { - .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), }; u32 get_random_u32(void) { @@ -2228,7 +2132,7 @@ static void invalidate_batched_entropy(void) int cpu; unsigned long flags; - for_each_possible_cpu (cpu) { + for_each_possible_cpu(cpu) { struct batched_entropy *batched_entropy; batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); @@ -2257,8 +2161,7 @@ static void invalidate_batched_entropy(void) * Return: A page aligned address within [start, start + range). On error, * @start is returned. 
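The kernel-doc just above belongs to randomize_page(), whose body follows: it must return a page-aligned address inside [start, start + range) even when the caller passes an unaligned start. A sketch of the same computation, assuming 4 KiB pages and a hypothetical rand_ul() in place of get_random_long():

#include <limits.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

/* hypothetical entropy source standing in for get_random_long() */
extern unsigned long rand_ul(void);

unsigned long randomize_page_sketch(unsigned long start, unsigned long range)
{
        /* align an unaligned start up and shrink the window to match */
        if (start & (PAGE_SIZE - 1)) {
                unsigned long aligned = (start + PAGE_SIZE - 1) &
                                        ~(PAGE_SIZE - 1);

                range -= aligned - start;
                start = aligned;
        }

        if (start > ULONG_MAX - range)  /* clamp to avoid wraparound */
                range = ULONG_MAX - start;

        range >>= PAGE_SHIFT;           /* choose among whole pages */
        if (range == 0)
                return start;

        /* "% range" has a tiny modulo bias, tolerated for this use */
        return start + ((rand_ul() % range) << PAGE_SHIFT);
}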
*/ -unsigned long -randomize_page(unsigned long start, unsigned long range) +unsigned long randomize_page(unsigned long start, unsigned long range) { if (!PAGE_ALIGNED(start)) { range -= PAGE_ALIGN(start) - start; @@ -2283,11 +2186,9 @@ randomize_page(unsigned long start, unsigned long range) void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy) { - struct entropy_store *poolp = &input_pool; - if (unlikely(crng_init == 0)) { size_t ret = crng_fast_load(buffer, count); - mix_pool_bytes(poolp, buffer, ret); + mix_pool_bytes(buffer, ret); count -= ret; buffer += ret; if (!count || crng_init == 0) @@ -2300,9 +2201,9 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, */ wait_event_interruptible(random_write_wait, !system_wq || kthread_should_stop() || - ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); - mix_pool_bytes(poolp, buffer, count); - credit_entropy_bits(poolp, entropy); + POOL_ENTROPY_BITS() <= random_write_wakeup_bits); + mix_pool_bytes(buffer, count); + credit_entropy_bits(entropy); } EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 660c5c388c29..2359889a35a0 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1958,7 +1958,7 @@ static void virtcons_remove(struct virtio_device *vdev) spin_unlock_irq(&pdrvdata_lock); /* Disable interrupts for vqs */ - vdev->config->reset(vdev); + virtio_reset_device(vdev); /* Finish up work that's lined up */ if (use_multiport(portdev)) cancel_work_sync(&portdev->control_work); @@ -2148,7 +2148,7 @@ static int virtcons_freeze(struct virtio_device *vdev) portdev = vdev->priv; - vdev->config->reset(vdev); + virtio_reset_device(vdev); if (use_multiport(portdev)) virtqueue_disable_cb(portdev->c_ivq); diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index 57ae183982d8..f7b41366666e 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -1740,7 +1740,7 @@ static int si5341_probe(struct i2c_client *client, clk_prepare(data->clk[i].hw.clk); } - err = of_clk_add_hw_provider(client->dev.of_node, of_clk_si5341_get, + err = devm_of_clk_add_hw_provider(&client->dev, of_clk_si5341_get, data); if (err) { dev_err(&client->dev, "unable to add clk provider\n"); diff --git a/drivers/clk/mediatek/clk-mt7986-apmixed.c b/drivers/clk/mediatek/clk-mt7986-apmixed.c index 76c8ebdeae96..98ec3887585f 100644 --- a/drivers/clk/mediatek/clk-mt7986-apmixed.c +++ b/drivers/clk/mediatek/clk-mt7986-apmixed.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-1.0 +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2021 MediaTek Inc. * Author: Sam Shih <sam.shih@mediatek.com> diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c index 3be168c34fc0..f209c559fbc3 100644 --- a/drivers/clk/mediatek/clk-mt7986-infracfg.c +++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-1.0 +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2021 MediaTek Inc. * Author: Sam Shih <sam.shih@mediatek.com> diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c index 8550e2be7773..8f6f79b6e31e 100644 --- a/drivers/clk/mediatek/clk-mt7986-topckgen.c +++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-1.0 +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2021 MediaTek Inc. 
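Two hunks a little further up share one lifetime idea: virtio_console calls the virtio_reset_device() wrapper instead of reaching into vdev->config->reset directly, and clk-si5341 switches to devm_of_clk_add_hw_provider(), tying the provider's lifetime to the struct device so no matching of_clk_del_provider() is needed on remove or error paths. A condensed probe() in that devm style (driver and type names hypothetical):

#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct foo_clk {
        struct clk_hw_onecell_data hw_data;  /* hws[] filled in probe */
};

static int foo_clk_probe(struct i2c_client *client)
{
        struct foo_clk *data;
        int err;

        data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* ... register clk_hw instances into data->hw_data.hws ... */

        /* devm variant: the provider is removed automatically on
         * unbind, so no of_clk_del_provider() in remove() or on
         * the error paths */
        err = devm_of_clk_add_hw_provider(&client->dev,
                                          of_clk_hw_onecell_get,
                                          &data->hw_data);
        if (err)
                dev_err(&client->dev, "unable to add clk provider\n");
        return err;
}

The same trade-off applies generally: devm_ registration is worth it whenever teardown order does not have to be controlled by hand.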
* Author: Sam Shih <sam.shih@mediatek.com> diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c index a2398bc6c6e4..a484cb945d67 100644 --- a/drivers/clk/visconti/pll.c +++ b/drivers/clk/visconti/pll.c @@ -246,7 +246,6 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx, { struct clk_init_data init; struct visconti_pll *pll; - struct clk *pll_clk; struct clk_hw *pll_hw_clk; size_t len; int ret; @@ -277,7 +276,7 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx, pll_hw_clk = &pll->hw; ret = clk_hw_register(NULL, &pll->hw); if (ret) { - pr_err("failed to register pll clock %s : %ld\n", name, PTR_ERR(pll_clk)); + pr_err("failed to register pll clock %s : %d\n", name, ret); kfree(pll); pll_hw_clk = ERR_PTR(ret); } diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index f65e31bab9ae..cfb8ea0df3b1 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -510,7 +510,8 @@ config SH_TIMER_MTU2 This hardware comes with 16-bit timer registers. config RENESAS_OSTM - bool "Renesas OSTM timer driver" if COMPILE_TEST + bool "Renesas OSTM timer driver" + depends on ARCH_RENESAS || COMPILE_TEST select CLKSRC_MMIO select TIMER_OF help @@ -671,6 +672,15 @@ config MILBEAUT_TIMER help Enables the support for Milbeaut timer driver. +config MSC313E_TIMER + bool "MSC313E timer driver" if COMPILE_TEST + select TIMER_OF + select CLKSRC_MMIO + help + Enables support for the MStar MSC313E timer driver. + This provides access to multiple interrupt generating + programmable 32-bit free running incrementing counters. + config INGENIC_TIMER bool "Clocksource/timer using the TCU in Ingenic JZ SoCs" default MACH_INGENIC diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index c17ee32a7151..fa5f624eadb6 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -88,3 +88,4 @@ obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o +obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 5e3e96d3d1b9..6db3d5511b0f 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -467,7 +467,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu) evt->tick_resume = set_state_shutdown; evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERCPU; - evt->rating = MCT_CLKEVENTS_RATING, + evt->rating = MCT_CLKEVENTS_RATING; exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); @@ -504,11 +504,14 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) return 0; } -static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) +static int __init exynos4_timer_resources(struct device_node *np) { - int err, cpu; struct clk *mct_clk, *tick_clk; + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: unable to ioremap mct address space\n", __func__); + tick_clk = of_clk_get_by_name(np, "fin_pll"); if (IS_ERR(tick_clk)) panic("%s: unable to determine tick clock rate\n", __func__); @@ -519,9 +522,27 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * panic("%s: unable to retrieve mct clock instance\n", __func__); clk_prepare_enable(mct_clk); - reg_base = base; - if (!reg_base) - panic("%s: unable to ioremap mct address space\n", __func__); + 
return 0; +} + +static int __init exynos4_timer_interrupts(struct device_node *np, + unsigned int int_type) +{ + int nr_irqs, i, err, cpu; + + mct_int_type = int_type; + + /* This driver uses only one global timer interrupt */ + mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); + + /* + * Find out the number of local irqs specified. The local + * timer irqs are specified after the four global timer + * irqs are specified. + */ + nr_irqs = of_irq_count(np); + for (i = MCT_L0_IRQ; i < nr_irqs; i++) + mct_irqs[i] = irq_of_parse_and_map(np, i); if (mct_int_type == MCT_INT_PPI) { @@ -581,24 +602,13 @@ out_irq: static int __init mct_init_dt(struct device_node *np, unsigned int int_type) { - u32 nr_irqs, i; int ret; - mct_int_type = int_type; - - /* This driver uses only one global timer interrupt */ - mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); - - /* - * Find out the number of local irqs specified. The local - * timer irqs are specified after the four global timer - * irqs are specified. - */ - nr_irqs = of_irq_count(np); - for (i = MCT_L0_IRQ; i < nr_irqs; i++) - mct_irqs[i] = irq_of_parse_and_map(np, i); + ret = exynos4_timer_resources(np); + if (ret) + return ret; - ret = exynos4_timer_resources(np, of_iomap(np, 0)); + ret = exynos4_timer_interrupts(np, int_type); if (ret) return ret; diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c index 3d06ba66008c..21d1392637b8 100644 --- a/drivers/clocksource/renesas-ostm.c +++ b/drivers/clocksource/renesas-ostm.c @@ -9,6 +9,8 @@ #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/reset.h> #include <linux/sched_clock.h> #include <linux/slab.h> @@ -159,6 +161,7 @@ static int __init ostm_init_clkevt(struct timer_of *to) static int __init ostm_init(struct device_node *np) { + struct reset_control *rstc; struct timer_of *to; int ret; @@ -166,6 +169,14 @@ static int __init ostm_init(struct device_node *np) if (!to) return -ENOMEM; + rstc = of_reset_control_get_optional_exclusive(np, NULL); + if (IS_ERR(rstc)) { + ret = PTR_ERR(rstc); + goto err_free; + } + + reset_control_deassert(rstc); + to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK; if (system_clock) { /* @@ -178,7 +189,7 @@ static int __init ostm_init(struct device_node *np) ret = timer_of_init(np, to); if (ret) - goto err_free; + goto err_reset; /* * First probed device will be used as system clocksource. 
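The renesas-ostm hunks above wire in an optional reset line: of_reset_control_get_optional_exclusive() returns NULL rather than an error when the device-tree node has no "resets" property, so the subsequent reset_control_deassert() is a safe no-op on SoCs without one, while parts that do hold the timer in reset get released before the first register access. The canonical shape of that pattern, reduced to a sketch:

#include <linux/of.h>
#include <linux/reset.h>

static int foo_timer_init(struct device_node *np)
{
        struct reset_control *rstc;

        /* NULL when "resets" is absent; only real failures are errors */
        rstc = of_reset_control_get_optional_exclusive(np, NULL);
        if (IS_ERR(rstc))
                return PTR_ERR(rstc);

        /* no-op for a NULL (absent) reset control */
        reset_control_deassert(rstc);

        /* ... map registers, request irq, register clockevent ... */

        return 0;
}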
Any @@ -203,9 +214,35 @@ static int __init ostm_init(struct device_node *np) err_cleanup: timer_of_cleanup(to); +err_reset: + reset_control_assert(rstc); + reset_control_put(rstc); err_free: kfree(to); return ret; } TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init); + +#ifdef CONFIG_ARCH_R9A07G044 +static int __init ostm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + return ostm_init(dev->of_node); +} + +static const struct of_device_id ostm_of_table[] = { + { .compatible = "renesas,ostm", }, + { /* sentinel */ } +}; + +static struct platform_driver ostm_device_driver = { + .driver = { + .name = "renesas_ostm", + .of_match_table = of_match_ptr(ostm_of_table), + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver_probe(ostm_device_driver, ostm_probe); +#endif diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c index 18b90fc56bfc..55a8e198d2a1 100644 --- a/drivers/clocksource/timer-imx-sysctr.c +++ b/drivers/clocksource/timer-imx-sysctr.c @@ -20,8 +20,8 @@ #define SYS_CTR_CLK_DIV 0x3 -static void __iomem *sys_ctr_base; -static u32 cmpcr; +static void __iomem *sys_ctr_base __ro_after_init; +static u32 cmpcr __ro_after_init; static void sysctr_timer_enable(bool enable) { @@ -119,7 +119,7 @@ static struct timer_of to_sysctr = { static void __init sysctr_clockevent_init(void) { - to_sysctr.clkevt.cpumask = cpumask_of(0); + to_sysctr.clkevt.cpumask = cpu_possible_mask; clockevents_config_and_register(&to_sysctr.clkevt, timer_of_rate(&to_sysctr), diff --git a/drivers/clocksource/timer-msc313e.c b/drivers/clocksource/timer-msc313e.c new file mode 100644 index 000000000000..54c54ca7c786 --- /dev/null +++ b/drivers/clocksource/timer-msc313e.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MStar timer driver + * + * Copyright (C) 2021 Daniel Palmer + * Copyright (C) 2021 Romain Perier + * + */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/sched_clock.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#ifdef CONFIG_ARM +#include <linux/delay.h> +#endif + +#include "timer-of.h" + +#define TIMER_NAME "msc313e_timer" + +#define MSC313E_REG_CTRL 0x00 +#define MSC313E_REG_CTRL_TIMER_EN BIT(0) +#define MSC313E_REG_CTRL_TIMER_TRIG BIT(1) +#define MSC313E_REG_CTRL_TIMER_INT_EN BIT(8) +#define MSC313E_REG_TIMER_MAX_LOW 0x08 +#define MSC313E_REG_TIMER_MAX_HIGH 0x0c +#define MSC313E_REG_COUNTER_LOW 0x10 +#define MSC313E_REG_COUNTER_HIGH 0x14 +#define MSC313E_REG_TIMER_DIVIDE 0x18 + +#define MSC313E_CLK_DIVIDER 9 +#define TIMER_SYNC_TICKS 3 + +#ifdef CONFIG_ARM +struct msc313e_delay { + void __iomem *base; + struct delay_timer delay; +}; +static struct msc313e_delay msc313e_delay; +#endif + +static void __iomem *msc313e_clksrc; + +static void msc313e_timer_stop(void __iomem *base) +{ + writew(0, base + MSC313E_REG_CTRL); +} + +static void msc313e_timer_start(void __iomem *base, bool periodic) +{ + u16 reg; + + reg = readw(base + MSC313E_REG_CTRL); + if (periodic) + reg |= MSC313E_REG_CTRL_TIMER_EN; + else + reg |= MSC313E_REG_CTRL_TIMER_TRIG; + writew(reg | MSC313E_REG_CTRL_TIMER_INT_EN, base + MSC313E_REG_CTRL); +} + +static void msc313e_timer_setup(void __iomem *base, unsigned long delay) +{ + unsigned long flags; + + local_irq_save(flags); + writew(delay >> 16, base + MSC313E_REG_TIMER_MAX_HIGH); + writew(delay & 0xffff, base + MSC313E_REG_TIMER_MAX_LOW); + 
local_irq_restore(flags); +} + +static unsigned long msc313e_timer_current_value(void __iomem *base) +{ + unsigned long flags; + u16 l, h; + + local_irq_save(flags); + l = readw(base + MSC313E_REG_COUNTER_LOW); + h = readw(base + MSC313E_REG_COUNTER_HIGH); + local_irq_restore(flags); + + return (((u32)h) << 16 | l); +} + +static int msc313e_timer_clkevt_shutdown(struct clock_event_device *evt) +{ + struct timer_of *timer = to_timer_of(evt); + + msc313e_timer_stop(timer_of_base(timer)); + + return 0; +} + +static int msc313e_timer_clkevt_set_oneshot(struct clock_event_device *evt) +{ + struct timer_of *timer = to_timer_of(evt); + + msc313e_timer_stop(timer_of_base(timer)); + msc313e_timer_start(timer_of_base(timer), false); + + return 0; +} + +static int msc313e_timer_clkevt_set_periodic(struct clock_event_device *evt) +{ + struct timer_of *timer = to_timer_of(evt); + + msc313e_timer_stop(timer_of_base(timer)); + msc313e_timer_setup(timer_of_base(timer), timer_of_period(timer)); + msc313e_timer_start(timer_of_base(timer), true); + + return 0; +} + +static int msc313e_timer_clkevt_next_event(unsigned long evt, struct clock_event_device *clkevt) +{ + struct timer_of *timer = to_timer_of(clkevt); + + msc313e_timer_stop(timer_of_base(timer)); + msc313e_timer_setup(timer_of_base(timer), evt); + msc313e_timer_start(timer_of_base(timer), false); + + return 0; +} + +static irqreturn_t msc313e_timer_clkevt_irq(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static u64 msc313e_timer_clksrc_read(struct clocksource *cs) +{ + return msc313e_timer_current_value(msc313e_clksrc) & cs->mask; +} + +#ifdef CONFIG_ARM +static unsigned long msc313e_read_delay_timer_read(void) +{ + return msc313e_timer_current_value(msc313e_delay.base); +} +#endif + +static u64 msc313e_timer_sched_clock_read(void) +{ + return msc313e_timer_current_value(msc313e_clksrc); +} + +static struct clock_event_device msc313e_clkevt = { + .name = TIMER_NAME, + .rating = 300, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = msc313e_timer_clkevt_shutdown, + .set_state_periodic = msc313e_timer_clkevt_set_periodic, + .set_state_oneshot = msc313e_timer_clkevt_set_oneshot, + .tick_resume = msc313e_timer_clkevt_shutdown, + .set_next_event = msc313e_timer_clkevt_next_event, +}; + +static int __init msc313e_clkevt_init(struct device_node *np) +{ + int ret; + struct timer_of *to; + + to = kzalloc(sizeof(*to), GFP_KERNEL); + if (!to) + return -ENOMEM; + + to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE; + to->of_irq.handler = msc313e_timer_clkevt_irq; + ret = timer_of_init(np, to); + if (ret) + return ret; + + if (of_device_is_compatible(np, "sstar,ssd20xd-timer")) { + to->of_clk.rate = clk_get_rate(to->of_clk.clk) / MSC313E_CLK_DIVIDER; + to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ); + writew(MSC313E_CLK_DIVIDER - 1, timer_of_base(to) + MSC313E_REG_TIMER_DIVIDE); + } + + msc313e_clkevt.cpumask = cpu_possible_mask; + msc313e_clkevt.irq = to->of_irq.irq; + to->clkevt = msc313e_clkevt; + + clockevents_config_and_register(&to->clkevt, timer_of_rate(to), + TIMER_SYNC_TICKS, 0xffffffff); + return 0; +} + +static int __init msc313e_clksrc_init(struct device_node *np) +{ + struct timer_of to = { 0 }; + int ret; + u16 reg; + + to.flags = TIMER_OF_BASE | TIMER_OF_CLOCK; + ret = timer_of_init(np, &to); + if (ret) + return ret; + + msc313e_clksrc = timer_of_base(&to); + reg = readw(msc313e_clksrc + MSC313E_REG_CTRL); + reg 
|= MSC313E_REG_CTRL_TIMER_EN; + writew(reg, msc313e_clksrc + MSC313E_REG_CTRL); + +#ifdef CONFIG_ARM + msc313e_delay.base = timer_of_base(&to); + msc313e_delay.delay.read_current_timer = msc313e_read_delay_timer_read; + msc313e_delay.delay.freq = timer_of_rate(&to); + + register_current_timer_delay(&msc313e_delay.delay); +#endif + + sched_clock_register(msc313e_timer_sched_clock_read, 32, timer_of_rate(&to)); + return clocksource_mmio_init(timer_of_base(&to), TIMER_NAME, timer_of_rate(&to), 300, 32, + msc313e_timer_clksrc_read); +} + +static int __init msc313e_timer_init(struct device_node *np) +{ + int ret = 0; + static int num_called; + + switch (num_called) { + case 0: + ret = msc313e_clksrc_init(np); + if (ret) + return ret; + break; + + default: + ret = msc313e_clkevt_init(np); + if (ret) + return ret; + break; + } + + num_called++; + + return 0; +} + +TIMER_OF_DECLARE(msc313, "mstar,msc313e-timer", msc313e_timer_init); +TIMER_OF_DECLARE(ssd20xd, "sstar,ssd20xd-timer", msc313e_timer_init); diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c index 6f37181a8c63..69c069e6f0a2 100644 --- a/drivers/clocksource/timer-pistachio.c +++ b/drivers/clocksource/timer-pistachio.c @@ -71,7 +71,8 @@ static u64 notrace pistachio_clocksource_read_cycles(struct clocksource *cs) { struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); - u32 counter, overflow; + __maybe_unused u32 overflow; + u32 counter; unsigned long flags; /* diff --git a/drivers/comedi/comedi.h b/drivers/comedi/comedi.h deleted file mode 100644 index b5d00a006dbb..000000000000 --- a/drivers/comedi/comedi.h +++ /dev/null @@ -1,1528 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.0+ */ -/* - * comedi.h - * header file for COMEDI user API - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org> - */ - -#ifndef _COMEDI_H -#define _COMEDI_H - -#define COMEDI_MAJORVERSION 0 -#define COMEDI_MINORVERSION 7 -#define COMEDI_MICROVERSION 76 -#define VERSION "0.7.76" - -/* comedi's major device number */ -#define COMEDI_MAJOR 98 - -/* - * maximum number of minor devices. This can be increased, although - * kernel structures are currently statically allocated, thus you - * don't want this to be much more than you actually use. - */ -#define COMEDI_NDEVICES 16 - -/* number of config options in the config structure */ -#define COMEDI_NDEVCONFOPTS 32 - -/* - * NOTE: 'comedi_config --init-data' is deprecated - * - * The following indexes in the config options were used by - * comedi_config to pass firmware blobs from user space to the - * comedi drivers. The request_firmware() hotplug interface is - * now used by all comedi drivers instead. 
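The deleted comment above points at why the aux-data config options died: firmware blobs now reach comedi drivers through the request_firmware() hotplug interface instead of being smuggled in via comedi_config. For reference, the consumer side of that interface is small (device and file name hypothetical):

#include <linux/device.h>
#include <linux/firmware.h>

static int foo_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        /* looks up "foo.bin" via the firmware loader (/lib/firmware) */
        ret = request_firmware(&fw, "foo.bin", dev);
        if (ret)
                return ret;

        /* fw->data / fw->size stay valid until release_firmware() */
        /* ... push fw->data to the device ... */

        release_firmware(fw);
        return 0;
}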
- */ - -/* length of nth chunk of firmware data -*/ -#define COMEDI_DEVCONF_AUX_DATA3_LENGTH 25 -#define COMEDI_DEVCONF_AUX_DATA2_LENGTH 26 -#define COMEDI_DEVCONF_AUX_DATA1_LENGTH 27 -#define COMEDI_DEVCONF_AUX_DATA0_LENGTH 28 -/* most significant 32 bits of pointer address (if needed) */ -#define COMEDI_DEVCONF_AUX_DATA_HI 29 -/* least significant 32 bits of pointer address */ -#define COMEDI_DEVCONF_AUX_DATA_LO 30 -#define COMEDI_DEVCONF_AUX_DATA_LENGTH 31 /* total data length */ - -/* max length of device and driver names */ -#define COMEDI_NAMELEN 20 - -/* packs and unpacks a channel/range number */ - -#define CR_PACK(chan, rng, aref) \ - ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan)) -#define CR_PACK_FLAGS(chan, range, aref, flags) \ - (CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK)) - -#define CR_CHAN(a) ((a) & 0xffff) -#define CR_RANGE(a) (((a) >> 16) & 0xff) -#define CR_AREF(a) (((a) >> 24) & 0x03) - -#define CR_FLAGS_MASK 0xfc000000 -#define CR_ALT_FILTER 0x04000000 -#define CR_DITHER CR_ALT_FILTER -#define CR_DEGLITCH CR_ALT_FILTER -#define CR_ALT_SOURCE 0x08000000 -#define CR_EDGE 0x40000000 -#define CR_INVERT 0x80000000 - -#define AREF_GROUND 0x00 /* analog ref = analog ground */ -#define AREF_COMMON 0x01 /* analog ref = analog common */ -#define AREF_DIFF 0x02 /* analog ref = differential */ -#define AREF_OTHER 0x03 /* analog ref = other (undefined) */ - -/* counters -- these are arbitrary values */ -#define GPCT_RESET 0x0001 -#define GPCT_SET_SOURCE 0x0002 -#define GPCT_SET_GATE 0x0004 -#define GPCT_SET_DIRECTION 0x0008 -#define GPCT_SET_OPERATION 0x0010 -#define GPCT_ARM 0x0020 -#define GPCT_DISARM 0x0040 -#define GPCT_GET_INT_CLK_FRQ 0x0080 - -#define GPCT_INT_CLOCK 0x0001 -#define GPCT_EXT_PIN 0x0002 -#define GPCT_NO_GATE 0x0004 -#define GPCT_UP 0x0008 -#define GPCT_DOWN 0x0010 -#define GPCT_HWUD 0x0020 -#define GPCT_SIMPLE_EVENT 0x0040 -#define GPCT_SINGLE_PERIOD 0x0080 -#define GPCT_SINGLE_PW 0x0100 -#define GPCT_CONT_PULSE_OUT 0x0200 -#define GPCT_SINGLE_PULSE_OUT 0x0400 - -/* instructions */ - -#define INSN_MASK_WRITE 0x8000000 -#define INSN_MASK_READ 0x4000000 -#define INSN_MASK_SPECIAL 0x2000000 - -#define INSN_READ (0 | INSN_MASK_READ) -#define INSN_WRITE (1 | INSN_MASK_WRITE) -#define INSN_BITS (2 | INSN_MASK_READ | INSN_MASK_WRITE) -#define INSN_CONFIG (3 | INSN_MASK_READ | INSN_MASK_WRITE) -#define INSN_DEVICE_CONFIG (INSN_CONFIG | INSN_MASK_SPECIAL) -#define INSN_GTOD (4 | INSN_MASK_READ | INSN_MASK_SPECIAL) -#define INSN_WAIT (5 | INSN_MASK_WRITE | INSN_MASK_SPECIAL) -#define INSN_INTTRIG (6 | INSN_MASK_WRITE | INSN_MASK_SPECIAL) - -/* command flags */ -/* These flags are used in comedi_cmd structures */ - -#define CMDF_BOGUS 0x00000001 /* do the motions */ - -/* try to use a real-time interrupt while performing command */ -#define CMDF_PRIORITY 0x00000008 - -/* wake up on end-of-scan events */ -#define CMDF_WAKE_EOS 0x00000020 - -#define CMDF_WRITE 0x00000040 - -#define CMDF_RAWDATA 0x00000080 - -/* timer rounding definitions */ -#define CMDF_ROUND_MASK 0x00030000 -#define CMDF_ROUND_NEAREST 0x00000000 -#define CMDF_ROUND_DOWN 0x00010000 -#define CMDF_ROUND_UP 0x00020000 -#define CMDF_ROUND_UP_NEXT 0x00030000 - -#define COMEDI_EV_START 0x00040000 -#define COMEDI_EV_SCAN_BEGIN 0x00080000 -#define COMEDI_EV_CONVERT 0x00100000 -#define COMEDI_EV_SCAN_END 0x00200000 -#define COMEDI_EV_STOP 0x00400000 - -/* compatibility definitions */ -#define TRIG_BOGUS CMDF_BOGUS -#define TRIG_RT CMDF_PRIORITY -#define TRIG_WAKE_EOS CMDF_WAKE_EOS -#define 
TRIG_WRITE CMDF_WRITE -#define TRIG_ROUND_MASK CMDF_ROUND_MASK -#define TRIG_ROUND_NEAREST CMDF_ROUND_NEAREST -#define TRIG_ROUND_DOWN CMDF_ROUND_DOWN -#define TRIG_ROUND_UP CMDF_ROUND_UP -#define TRIG_ROUND_UP_NEXT CMDF_ROUND_UP_NEXT - -/* trigger sources */ - -#define TRIG_ANY 0xffffffff -#define TRIG_INVALID 0x00000000 - -#define TRIG_NONE 0x00000001 /* never trigger */ -#define TRIG_NOW 0x00000002 /* trigger now + N ns */ -#define TRIG_FOLLOW 0x00000004 /* trigger on next lower level trig */ -#define TRIG_TIME 0x00000008 /* trigger at time N ns */ -#define TRIG_TIMER 0x00000010 /* trigger at rate N ns */ -#define TRIG_COUNT 0x00000020 /* trigger when count reaches N */ -#define TRIG_EXT 0x00000040 /* trigger on external signal N */ -#define TRIG_INT 0x00000080 /* trigger on comedi-internal signal N */ -#define TRIG_OTHER 0x00000100 /* driver defined */ - -/* subdevice flags */ - -#define SDF_BUSY 0x0001 /* device is busy */ -#define SDF_BUSY_OWNER 0x0002 /* device is busy with your job */ -#define SDF_LOCKED 0x0004 /* subdevice is locked */ -#define SDF_LOCK_OWNER 0x0008 /* you own lock */ -#define SDF_MAXDATA 0x0010 /* maxdata depends on channel */ -#define SDF_FLAGS 0x0020 /* flags depend on channel */ -#define SDF_RANGETYPE 0x0040 /* range type depends on channel */ -#define SDF_PWM_COUNTER 0x0080 /* PWM can automatically switch off */ -#define SDF_PWM_HBRIDGE 0x0100 /* PWM is signed (H-bridge) */ -#define SDF_CMD 0x1000 /* can do commands (deprecated) */ -#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */ -#define SDF_CMD_WRITE 0x4000 /* can do output commands */ -#define SDF_CMD_READ 0x8000 /* can do input commands */ - -/* subdevice can be read (e.g. analog input) */ -#define SDF_READABLE 0x00010000 -/* subdevice can be written (e.g. analog output) */ -#define SDF_WRITABLE 0x00020000 -#define SDF_WRITEABLE SDF_WRITABLE /* spelling error in API */ -/* subdevice does not have externally visible lines */ -#define SDF_INTERNAL 0x00040000 -#define SDF_GROUND 0x00100000 /* can do aref=ground */ -#define SDF_COMMON 0x00200000 /* can do aref=common */ -#define SDF_DIFF 0x00400000 /* can do aref=diff */ -#define SDF_OTHER 0x00800000 /* can do aref=other */ -#define SDF_DITHER 0x01000000 /* can do dithering */ -#define SDF_DEGLITCH 0x02000000 /* can do deglitching */ -#define SDF_MMAP 0x04000000 /* can do mmap() */ -#define SDF_RUNNING 0x08000000 /* subdevice is acquiring data */ -#define SDF_LSAMPL 0x10000000 /* subdevice uses 32-bit samples */ -#define SDF_PACKED 0x20000000 /* subdevice can do packed DIO */ - -/* subdevice types */ - -/** - * enum comedi_subdevice_type - COMEDI subdevice types - * @COMEDI_SUBD_UNUSED: Unused subdevice. - * @COMEDI_SUBD_AI: Analog input. - * @COMEDI_SUBD_AO: Analog output. - * @COMEDI_SUBD_DI: Digital input. - * @COMEDI_SUBD_DO: Digital output. - * @COMEDI_SUBD_DIO: Digital input/output. - * @COMEDI_SUBD_COUNTER: Counter. - * @COMEDI_SUBD_TIMER: Timer. - * @COMEDI_SUBD_MEMORY: Memory, EEPROM, DPRAM. - * @COMEDI_SUBD_CALIB: Calibration DACs. - * @COMEDI_SUBD_PROC: Processor, DSP. - * @COMEDI_SUBD_SERIAL: Serial I/O. - * @COMEDI_SUBD_PWM: Pulse-Width Modulation output. 
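The CR_PACK()/CR_CHAN()/CR_RANGE()/CR_AREF() macros a few lines up pack a channel number (bits 15:0), range index (bits 23:16) and analog reference (bits 25:24) into one 32-bit "chanspec". A worked example, reusing the definitions verbatim:

#include <stdio.h>

#define CR_PACK(chan, rng, aref) \
        ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
#define CR_CHAN(a)   ((a) & 0xffff)
#define CR_RANGE(a)  (((a) >> 16) & 0xff)
#define CR_AREF(a)   (((a) >> 24) & 0x03)
#define AREF_DIFF    0x02

int main(void)
{
        unsigned int spec = CR_PACK(3, 1, AREF_DIFF);

        /* prints: 0x02010003 chan=3 range=1 aref=2 */
        printf("0x%08x chan=%u range=%u aref=%u\n", spec,
               CR_CHAN(spec), CR_RANGE(spec), CR_AREF(spec));
        return 0;
}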
- */ -enum comedi_subdevice_type { - COMEDI_SUBD_UNUSED, - COMEDI_SUBD_AI, - COMEDI_SUBD_AO, - COMEDI_SUBD_DI, - COMEDI_SUBD_DO, - COMEDI_SUBD_DIO, - COMEDI_SUBD_COUNTER, - COMEDI_SUBD_TIMER, - COMEDI_SUBD_MEMORY, - COMEDI_SUBD_CALIB, - COMEDI_SUBD_PROC, - COMEDI_SUBD_SERIAL, - COMEDI_SUBD_PWM -}; - -/* configuration instructions */ - -/** - * enum comedi_io_direction - COMEDI I/O directions - * @COMEDI_INPUT: Input. - * @COMEDI_OUTPUT: Output. - * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output. - * - * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to - * report a direction. They may also be used in other places where a direction - * needs to be specified. - */ -enum comedi_io_direction { - COMEDI_INPUT = 0, - COMEDI_OUTPUT = 1, - COMEDI_OPENDRAIN = 2 -}; - -/** - * enum configuration_ids - COMEDI configuration instruction codes - * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input. - * @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output. - * @INSN_CONFIG_DIO_OPENDRAIN: Configure digital I/O as open-drain (or open - * collector) output. - * @INSN_CONFIG_ANALOG_TRIG: Configure analog trigger. - * @INSN_CONFIG_ALT_SOURCE: Configure alternate input source. - * @INSN_CONFIG_DIGITAL_TRIG: Configure digital trigger. - * @INSN_CONFIG_BLOCK_SIZE: Configure block size for DMA transfers. - * @INSN_CONFIG_TIMER_1: Configure divisor for external clock. - * @INSN_CONFIG_FILTER: Configure a filter. - * @INSN_CONFIG_CHANGE_NOTIFY: Configure change notification for digital - * inputs. (New drivers should use - * %INSN_CONFIG_DIGITAL_TRIG instead.) - * @INSN_CONFIG_SERIAL_CLOCK: Configure clock for serial I/O. - * @INSN_CONFIG_BIDIRECTIONAL_DATA: Send and receive byte over serial I/O. - * @INSN_CONFIG_DIO_QUERY: Query direction of digital I/O channel. - * @INSN_CONFIG_PWM_OUTPUT: Configure pulse-width modulator output. - * @INSN_CONFIG_GET_PWM_OUTPUT: Get pulse-width modulator output configuration. - * @INSN_CONFIG_ARM: Arm a subdevice or channel. - * @INSN_CONFIG_DISARM: Disarm a subdevice or channel. - * @INSN_CONFIG_GET_COUNTER_STATUS: Get counter status. - * @INSN_CONFIG_RESET: Reset a subdevice or channel. - * @INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR: Configure counter/timer as - * single pulse generator. - * @INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR: Configure counter/timer as - * pulse train generator. - * @INSN_CONFIG_GPCT_QUADRATURE_ENCODER: Configure counter as a quadrature - * encoder. - * @INSN_CONFIG_SET_GATE_SRC: Set counter/timer gate source. - * @INSN_CONFIG_GET_GATE_SRC: Get counter/timer gate source. - * @INSN_CONFIG_SET_CLOCK_SRC: Set counter/timer master clock source. - * @INSN_CONFIG_GET_CLOCK_SRC: Get counter/timer master clock source. - * @INSN_CONFIG_SET_OTHER_SRC: Set counter/timer "other" source. - * @INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: Get size (in bytes) of subdevice's - * on-board FIFOs used during streaming - * input/output. - * @INSN_CONFIG_SET_COUNTER_MODE: Set counter/timer mode. - * @INSN_CONFIG_8254_SET_MODE: (Deprecated) Same as - * %INSN_CONFIG_SET_COUNTER_MODE. - * @INSN_CONFIG_8254_READ_STATUS: Read status of 8254 counter channel. - * @INSN_CONFIG_SET_ROUTING: Set routing for a channel. - * @INSN_CONFIG_GET_ROUTING: Get routing for a channel. - * @INSN_CONFIG_PWM_SET_PERIOD: Set PWM period in nanoseconds. - * @INSN_CONFIG_PWM_GET_PERIOD: Get PWM period in nanoseconds. - * @INSN_CONFIG_GET_PWM_STATUS: Get PWM status. - * @INSN_CONFIG_PWM_SET_H_BRIDGE: Set PWM H bridge duty cycle and polarity for - * a relay simultaneously. 
- * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity. - * @INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS: Get the hardware timing restraints, - * regardless of trigger sources. - */ -enum configuration_ids { - INSN_CONFIG_DIO_INPUT = COMEDI_INPUT, - INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT, - INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN, - INSN_CONFIG_ANALOG_TRIG = 16, -/* INSN_CONFIG_WAVEFORM = 17, */ -/* INSN_CONFIG_TRIG = 18, */ -/* INSN_CONFIG_COUNTER = 19, */ - INSN_CONFIG_ALT_SOURCE = 20, - INSN_CONFIG_DIGITAL_TRIG = 21, - INSN_CONFIG_BLOCK_SIZE = 22, - INSN_CONFIG_TIMER_1 = 23, - INSN_CONFIG_FILTER = 24, - INSN_CONFIG_CHANGE_NOTIFY = 25, - - INSN_CONFIG_SERIAL_CLOCK = 26, /*ALPHA*/ - INSN_CONFIG_BIDIRECTIONAL_DATA = 27, - INSN_CONFIG_DIO_QUERY = 28, - INSN_CONFIG_PWM_OUTPUT = 29, - INSN_CONFIG_GET_PWM_OUTPUT = 30, - INSN_CONFIG_ARM = 31, - INSN_CONFIG_DISARM = 32, - INSN_CONFIG_GET_COUNTER_STATUS = 33, - INSN_CONFIG_RESET = 34, - INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001, - INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002, - INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003, - INSN_CONFIG_SET_GATE_SRC = 2001, - INSN_CONFIG_GET_GATE_SRC = 2002, - INSN_CONFIG_SET_CLOCK_SRC = 2003, - INSN_CONFIG_GET_CLOCK_SRC = 2004, - INSN_CONFIG_SET_OTHER_SRC = 2005, - INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006, - INSN_CONFIG_SET_COUNTER_MODE = 4097, - INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE, - INSN_CONFIG_8254_READ_STATUS = 4098, - INSN_CONFIG_SET_ROUTING = 4099, - INSN_CONFIG_GET_ROUTING = 4109, - INSN_CONFIG_PWM_SET_PERIOD = 5000, - INSN_CONFIG_PWM_GET_PERIOD = 5001, - INSN_CONFIG_GET_PWM_STATUS = 5002, - INSN_CONFIG_PWM_SET_H_BRIDGE = 5003, - INSN_CONFIG_PWM_GET_H_BRIDGE = 5004, - INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS = 5005, -}; - -/** - * enum device_configuration_ids - COMEDI configuration instruction codes global - * to an entire device. - * @INSN_DEVICE_CONFIG_TEST_ROUTE: Validate the possibility of a - * globally-named route - * @INSN_DEVICE_CONFIG_CONNECT_ROUTE: Connect a globally-named route - * @INSN_DEVICE_CONFIG_DISCONNECT_ROUTE:Disconnect a globally-named route - * @INSN_DEVICE_CONFIG_GET_ROUTES: Get a list of all globally-named routes - * that are valid for a particular device. - */ -enum device_config_route_ids { - INSN_DEVICE_CONFIG_TEST_ROUTE = 0, - INSN_DEVICE_CONFIG_CONNECT_ROUTE = 1, - INSN_DEVICE_CONFIG_DISCONNECT_ROUTE = 2, - INSN_DEVICE_CONFIG_GET_ROUTES = 3, -}; - -/** - * enum comedi_digital_trig_op - operations for configuring a digital trigger - * @COMEDI_DIGITAL_TRIG_DISABLE: Return digital trigger to its default, - * inactive, unconfigured state. - * @COMEDI_DIGITAL_TRIG_ENABLE_EDGES: Set rising and/or falling edge inputs - * that each can fire the trigger. - * @COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: Set a combination of high and/or low - * level inputs that can fire the trigger. - * - * These are used with the %INSN_CONFIG_DIGITAL_TRIG configuration instruction. - * The data for the configuration instruction is as follows... - * - * data[%0] = %INSN_CONFIG_DIGITAL_TRIG - * - * data[%1] = trigger ID - * - * data[%2] = configuration operation - * - * data[%3] = configuration parameter 1 - * - * data[%4] = configuration parameter 2 - * - * data[%5] = configuration parameter 3 - * - * The trigger ID (data[%1]) is used to differentiate multiple digital triggers - * belonging to the same subdevice. The configuration operation (data[%2]) is - * one of the enum comedi_digital_trig_op values. 
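The kernel-doc continuing below fixes the data[] layout for %INSN_CONFIG_DIGITAL_TRIG. Filled in for an edge trigger that fires on a rising edge of input 0 or a falling edge of input 5, with no left-shift, it would look like this (constant values taken from the enums in this header):

/* per this header: INSN_CONFIG_DIGITAL_TRIG = 21,
 * COMEDI_DIGITAL_TRIG_ENABLE_EDGES = 1 */
#define INSN_CONFIG_DIGITAL_TRIG         21
#define COMEDI_DIGITAL_TRIG_ENABLE_EDGES 1

static void setup_edge_trigger(unsigned int data[6])
{
        data[0] = INSN_CONFIG_DIGITAL_TRIG;
        data[1] = 0;                                /* trigger ID 0     */
        data[2] = COMEDI_DIGITAL_TRIG_ENABLE_EDGES; /* configure edges  */
        data[3] = 0;                                /* no left-shift    */
        data[4] = 1u << 0;                          /* input 0: rising  */
        data[5] = 1u << 5;                          /* input 5: falling */
}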
The configuration - * parameters (data[%3], data[%4], and data[%5]) depend on the operation; they - * are not used with %COMEDI_DIGITAL_TRIG_DISABLE. - * - * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES and %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, - * configuration parameter 1 (data[%3]) contains a "left-shift" value that - * specifies the input corresponding to bit 0 of configuration parameters 2 - * and 3. This is useful if the trigger has more than 32 inputs. - * - * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES, configuration parameter 2 (data[%4]) - * specifies which of up to 32 inputs have rising-edge sensitivity, and - * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs - * have falling-edge sensitivity that can fire the trigger. - * - * For %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, configuration parameter 2 (data[%4]) - * specifies which of up to 32 inputs must be at a high level, and - * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs - * must be at a low level for the trigger to fire. - * - * Some sequences of %INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly) - * accumulative effect, depending on the low-level driver. This is useful - * when setting up a trigger that has more than 32 inputs, or has a combination - * of edge- and level-triggered inputs. - */ -enum comedi_digital_trig_op { - COMEDI_DIGITAL_TRIG_DISABLE = 0, - COMEDI_DIGITAL_TRIG_ENABLE_EDGES = 1, - COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2 -}; - -/** - * enum comedi_support_level - support level for a COMEDI feature - * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature. - * @COMEDI_SUPPORTED: Feature is supported. - * @COMEDI_UNSUPPORTED: Feature is unsupported. - */ -enum comedi_support_level { - COMEDI_UNKNOWN_SUPPORT = 0, - COMEDI_SUPPORTED, - COMEDI_UNSUPPORTED -}; - -/** - * enum comedi_counter_status_flags - counter status bits - * @COMEDI_COUNTER_ARMED: Counter is armed. - * @COMEDI_COUNTER_COUNTING: Counter is counting. - * @COMEDI_COUNTER_TERMINAL_COUNT: Counter reached terminal count. - * - * These bitwise values are used by the %INSN_CONFIG_GET_COUNTER_STATUS - * configuration instruction to report the status of a counter. - */ -enum comedi_counter_status_flags { - COMEDI_COUNTER_ARMED = 0x1, - COMEDI_COUNTER_COUNTING = 0x2, - COMEDI_COUNTER_TERMINAL_COUNT = 0x4, -}; - -/* ioctls */ - -#define CIO 'd' -#define COMEDI_DEVCONFIG _IOW(CIO, 0, struct comedi_devconfig) -#define COMEDI_DEVINFO _IOR(CIO, 1, struct comedi_devinfo) -#define COMEDI_SUBDINFO _IOR(CIO, 2, struct comedi_subdinfo) -#define COMEDI_CHANINFO _IOR(CIO, 3, struct comedi_chaninfo) -/* _IOWR(CIO, 4, ...) is reserved */ -#define COMEDI_LOCK _IO(CIO, 5) -#define COMEDI_UNLOCK _IO(CIO, 6) -#define COMEDI_CANCEL _IO(CIO, 7) -#define COMEDI_RANGEINFO _IOR(CIO, 8, struct comedi_rangeinfo) -#define COMEDI_CMD _IOR(CIO, 9, struct comedi_cmd) -#define COMEDI_CMDTEST _IOR(CIO, 10, struct comedi_cmd) -#define COMEDI_INSNLIST _IOR(CIO, 11, struct comedi_insnlist) -#define COMEDI_INSN _IOR(CIO, 12, struct comedi_insn) -#define COMEDI_BUFCONFIG _IOR(CIO, 13, struct comedi_bufconfig) -#define COMEDI_BUFINFO _IOWR(CIO, 14, struct comedi_bufinfo) -#define COMEDI_POLL _IO(CIO, 15) -#define COMEDI_SETRSUBD _IO(CIO, 16) -#define COMEDI_SETWSUBD _IO(CIO, 17) - -/* structures */ - -/** - * struct comedi_insn - COMEDI instruction - * @insn: COMEDI instruction type (%INSN_xxx). - * @n: Length of @data[]. - * @data: Pointer to data array operated on by the instruction. - * @subdev: Subdevice index. 
- * @chanspec: A packed "chanspec" value consisting of channel number, - * analog range index, analog reference type, and flags. - * @unused: Reserved for future use. - * - * This is used with the %COMEDI_INSN ioctl, and indirectly with the - * %COMEDI_INSNLIST ioctl. - */ -struct comedi_insn { - unsigned int insn; - unsigned int n; - unsigned int __user *data; - unsigned int subdev; - unsigned int chanspec; - unsigned int unused[3]; -}; - -/** - * struct comedi_insnlist - list of COMEDI instructions - * @n_insns: Number of COMEDI instructions. - * @insns: Pointer to array COMEDI instructions. - * - * This is used with the %COMEDI_INSNLIST ioctl. - */ -struct comedi_insnlist { - unsigned int n_insns; - struct comedi_insn __user *insns; -}; - -/** - * struct comedi_cmd - COMEDI asynchronous acquisition command details - * @subdev: Subdevice index. - * @flags: Command flags (%CMDF_xxx). - * @start_src: "Start acquisition" trigger source (%TRIG_xxx). - * @start_arg: "Start acquisition" trigger argument. - * @scan_begin_src: "Scan begin" trigger source. - * @scan_begin_arg: "Scan begin" trigger argument. - * @convert_src: "Convert" trigger source. - * @convert_arg: "Convert" trigger argument. - * @scan_end_src: "Scan end" trigger source. - * @scan_end_arg: "Scan end" trigger argument. - * @stop_src: "Stop acquisition" trigger source. - * @stop_arg: "Stop acquisition" trigger argument. - * @chanlist: Pointer to array of "chanspec" values, containing a - * sequence of channel numbers packed with analog range - * index, etc. - * @chanlist_len: Number of channels in sequence. - * @data: Pointer to miscellaneous set-up data (not used). - * @data_len: Length of miscellaneous set-up data. - * - * This is used with the %COMEDI_CMD or %COMEDI_CMDTEST ioctl to set-up - * or validate an asynchronous acquisition command. The ioctl may modify - * the &struct comedi_cmd and copy it back to the caller. - * - * Optional command @flags values that can be ORed together... - * - * %CMDF_BOGUS - makes %COMEDI_CMD ioctl return error %EAGAIN instead of - * starting the command. - * - * %CMDF_PRIORITY - requests "hard real-time" processing (which is not - * supported in this version of COMEDI). - * - * %CMDF_WAKE_EOS - requests the command makes data available for reading - * after every "scan" period. - * - * %CMDF_WRITE - marks the command as being in the "write" (to device) - * direction. This does not need to be specified by the caller unless the - * subdevice supports commands in either direction. - * - * %CMDF_RAWDATA - prevents the command from "munging" the data between the - * COMEDI sample format and the raw hardware sample format. - * - * %CMDF_ROUND_NEAREST - requests timing periods to be rounded to nearest - * supported values. - * - * %CMDF_ROUND_DOWN - requests timing periods to be rounded down to supported - * values (frequencies rounded up). - * - * %CMDF_ROUND_UP - requests timing periods to be rounded up to supported - * values (frequencies rounded down). - * - * Trigger source values for @start_src, @scan_begin_src, @convert_src, - * @scan_end_src, and @stop_src... - * - * %TRIG_ANY - "all ones" value used to test which trigger sources are - * supported. - * - * %TRIG_INVALID - "all zeroes" value used to indicate that all requested - * trigger sources are invalid. - * - * %TRIG_NONE - never trigger (often used as a @stop_src value). - * - * %TRIG_NOW - trigger after '_arg' nanoseconds. - * - * %TRIG_FOLLOW - trigger follows another event. - * - * %TRIG_TIMER - trigger every '_arg' nanoseconds. 
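Put together, a typical streaming-input command built from the &struct comedi_cmd fields documented above starts immediately, paces scans with the on-board timer, converts its channels back to back, and stops after a fixed scan count. A sketch against the constants from this header (subdevice number and timings illustrative; a real caller would validate with %COMEDI_CMDTEST first):

/* two channels, ground-referenced, range 0, via CR_PACK() above */
unsigned int chanlist[2] = {
        CR_PACK(0, 0, AREF_GROUND),
        CR_PACK(1, 0, AREF_GROUND),
};

struct comedi_cmd cmd = {
        .subdev         = 0,          /* AI subdevice            */
        .start_src      = TRIG_NOW,
        .start_arg      = 0,          /* now + 0 ns              */
        .scan_begin_src = TRIG_TIMER,
        .scan_begin_arg = 1000000,    /* 1 ms per scan           */
        .convert_src    = TRIG_TIMER,
        .convert_arg    = 10000,      /* 10 us between channels  */
        .scan_end_src   = TRIG_COUNT,
        .scan_end_arg   = 2,          /* both channels per scan  */
        .stop_src       = TRIG_COUNT,
        .stop_arg       = 1000,       /* 1000 scans, then stop   */
        .chanlist       = chanlist,
        .chanlist_len   = 2,
};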
- * - * %TRIG_COUNT - trigger when count '_arg' is reached. - * - * %TRIG_EXT - trigger on external signal specified by '_arg'. - * - * %TRIG_INT - trigger on internal, software trigger specified by '_arg'. - * - * %TRIG_OTHER - trigger on other, driver-defined signal specified by '_arg'. - */ -struct comedi_cmd { - unsigned int subdev; - unsigned int flags; - - unsigned int start_src; - unsigned int start_arg; - - unsigned int scan_begin_src; - unsigned int scan_begin_arg; - - unsigned int convert_src; - unsigned int convert_arg; - - unsigned int scan_end_src; - unsigned int scan_end_arg; - - unsigned int stop_src; - unsigned int stop_arg; - - unsigned int *chanlist; - unsigned int chanlist_len; - - short __user *data; - unsigned int data_len; -}; - -/** - * struct comedi_chaninfo - used to retrieve per-channel information - * @subdev: Subdevice index. - * @maxdata_list: Optional pointer to per-channel maximum data values. - * @flaglist: Optional pointer to per-channel flags. - * @rangelist: Optional pointer to per-channel range types. - * @unused: Reserved for future use. - * - * This is used with the %COMEDI_CHANINFO ioctl to get per-channel information - * for the subdevice. Use of this requires knowledge of the number of channels - * and subdevice flags obtained using the %COMEDI_SUBDINFO ioctl. - * - * The @maxdata_list member must be %NULL unless the %SDF_MAXDATA subdevice - * flag is set. The @flaglist member must be %NULL unless the %SDF_FLAGS - * subdevice flag is set. The @rangelist member must be %NULL unless the - * %SDF_RANGETYPE subdevice flag is set. Otherwise, the arrays they point to - * must be at least as long as the number of channels. - */ -struct comedi_chaninfo { - unsigned int subdev; - unsigned int __user *maxdata_list; - unsigned int __user *flaglist; - unsigned int __user *rangelist; - unsigned int unused[4]; -}; - -/** - * struct comedi_rangeinfo - used to retrieve the range table for a channel - * @range_type: Encodes subdevice index (bits 27:24), channel index - * (bits 23:16) and range table length (bits 15:0). - * @range_ptr: Pointer to array of @struct comedi_krange to be filled - * in with the range table for the channel or subdevice. - * - * This is used with the %COMEDI_RANGEINFO ioctl to retrieve the range table - * for a specific channel (if the subdevice has the %SDF_RANGETYPE flag set to - * indicate that the range table depends on the channel), or for the subdevice - * as a whole (if the %SDF_RANGETYPE flag is clear, indicating the range table - * is shared by all channels). - * - * The @range_type value is an input to the ioctl and comes from a previous - * use of the %COMEDI_SUBDINFO ioctl (if the %SDF_RANGETYPE flag is clear), - * or the %COMEDI_CHANINFO ioctl (if the %SDF_RANGETYPE flag is set). - */ -struct comedi_rangeinfo { - unsigned int range_type; - void __user *range_ptr; -}; - -/** - * struct comedi_krange - describes a range in a range table - * @min: Minimum value in millionths (1e-6) of a unit. - * @max: Maximum value in millionths (1e-6) of a unit. - * @flags: Indicates the units (in bits 7:0) OR'ed with optional flags. - * - * A range table is associated with a single channel, or with all channels in a - * subdevice, and a list of one or more ranges. A %struct comedi_krange - * describes the physical range of units for one of those ranges. Sample - * values in COMEDI are unsigned from %0 up to some 'maxdata' value. 
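The linear mapping this comment describes converts a raw sample to physical units as value = min + sample * (max - min) / maxdata, with @min and @max expressed in millionths of a unit. Numerically, for a 16-bit converter on a +/-10 V range:

#include <stdio.h>

struct comedi_krange { int min; int max; unsigned int flags; };

/* sample -> physical units, per the mapping described above;
 * min/max are millionths of a unit, hence the final 1e6 divide */
static double to_phys(unsigned int sample, unsigned int maxdata,
                      const struct comedi_krange *r)
{
        double span = (double)r->max - (double)r->min;

        return (r->min + sample * span / maxdata) / 1e6;
}

int main(void)
{
        struct comedi_krange pm10v = { -10000000, 10000000, 0 };

        /* mid-scale of a 16-bit converter is ~0 V (about 153 uV) */
        printf("%f V\n", to_phys(32768, 65535, &pm10v));
        return 0;
}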
The - * mapping from sample values to physical units is assumed to be nomimally - * linear (for the purpose of describing the range), with sample value %0 - * mapping to @min, and the 'maxdata' sample value mapping to @max. - * - * The currently defined units are %UNIT_volt (%0), %UNIT_mA (%1), and - * %UNIT_none (%2). The @min and @max values are the physical range multiplied - * by 1e6, so a @max value of %1000000 (with %UNIT_volt) represents a maximal - * value of 1 volt. - * - * The only defined flag value is %RF_EXTERNAL (%0x100), indicating that the - * range needs to be multiplied by an external reference. - */ -struct comedi_krange { - int min; - int max; - unsigned int flags; -}; - -/** - * struct comedi_subdinfo - used to retrieve information about a subdevice - * @type: Type of subdevice from &enum comedi_subdevice_type. - * @n_chan: Number of channels the subdevice supports. - * @subd_flags: A mixture of static and dynamic flags describing - * aspects of the subdevice and its current state. - * @timer_type: Timer type. Always set to %5 ("nanosecond timer"). - * @len_chanlist: Maximum length of a channel list if the subdevice - * supports asynchronous acquisition commands. - * @maxdata: Maximum sample value for all channels if the - * %SDF_MAXDATA subdevice flag is clear. - * @flags: Channel flags for all channels if the %SDF_FLAGS - * subdevice flag is clear. - * @range_type: The range type for all channels if the %SDF_RANGETYPE - * subdevice flag is clear. Encodes the subdevice index - * (bits 27:24), a dummy channel index %0 (bits 23:16), - * and the range table length (bits 15:0). - * @settling_time_0: Not used. - * @insn_bits_support: Set to %COMEDI_SUPPORTED if the subdevice supports the - * %INSN_BITS instruction, or to %COMEDI_UNSUPPORTED if it - * does not. - * @unused: Reserved for future use. - * - * This is used with the %COMEDI_SUBDINFO ioctl which copies an array of - * &struct comedi_subdinfo back to user space, with one element per subdevice. - * Use of this requires knowledge of the number of subdevices obtained from - * the %COMEDI_DEVINFO ioctl. - * - * These are the @subd_flags values that may be ORed together... - * - * %SDF_BUSY - the subdevice is busy processing an asynchronous command or a - * synchronous instruction. - * - * %SDF_BUSY_OWNER - the subdevice is busy processing an asynchronous - * acquisition command started on the current file object (the file object - * issuing the %COMEDI_SUBDINFO ioctl). - * - * %SDF_LOCKED - the subdevice is locked by a %COMEDI_LOCK ioctl. - * - * %SDF_LOCK_OWNER - the subdevice is locked by a %COMEDI_LOCK ioctl from the - * current file object. - * - * %SDF_MAXDATA - maximum sample values are channel-specific. - * - * %SDF_FLAGS - channel flags are channel-specific. - * - * %SDF_RANGETYPE - range types are channel-specific. - * - * %SDF_PWM_COUNTER - PWM can switch off automatically. - * - * %SDF_PWM_HBRIDGE - or PWM is signed (H-bridge). - * - * %SDF_CMD - the subdevice supports asynchronous commands. - * - * %SDF_SOFT_CALIBRATED - the subdevice uses software calibration. - * - * %SDF_CMD_WRITE - the subdevice supports asynchronous commands in the output - * ("write") direction. - * - * %SDF_CMD_READ - the subdevice supports asynchronous commands in the input - * ("read") direction. - * - * %SDF_READABLE - the subdevice is readable (e.g. analog input). - * - * %SDF_WRITABLE (aliased as %SDF_WRITEABLE) - the subdevice is writable (e.g. - * analog output). 
analog output).
- * - * %SDF_INTERNAL - the subdevice has no externally visible lines. - * - * %SDF_GROUND - the subdevice can use ground as an analog reference. - * - * %SDF_COMMON - the subdevice can use a common analog reference. - * - * %SDF_DIFF - the subdevice can use differential inputs (or outputs). - * - * %SDF_OTHER - the subdevice can use some other analog reference. - * - * %SDF_DITHER - the subdevice can do dithering. - * - * %SDF_DEGLITCH - the subdevice can do deglitching. - * - * %SDF_MMAP - this is never set. - * - * %SDF_RUNNING - an asynchronous command is still running. - * - * %SDF_LSAMPL - the subdevice uses "long" (32-bit) samples (for asynchronous - * command data). - * - * %SDF_PACKED - the subdevice packs several DIO samples into a single sample - * (for asynchronous command data). - * - * No "channel flags" (@flags) values are currently defined. - */ -struct comedi_subdinfo { - unsigned int type; - unsigned int n_chan; - unsigned int subd_flags; - unsigned int timer_type; - unsigned int len_chanlist; - unsigned int maxdata; - unsigned int flags; - unsigned int range_type; - unsigned int settling_time_0; - unsigned int insn_bits_support; - unsigned int unused[8]; -}; - -/** - * struct comedi_devinfo - used to retrieve information about a COMEDI device - * @version_code: COMEDI version code. - * @n_subdevs: Number of subdevices the device has. - * @driver_name: Null-terminated COMEDI driver name. - * @board_name: Null-terminated COMEDI board name. - * @read_subdevice: Index of the current "read" subdevice (%-1 if none). - * @write_subdevice: Index of the current "write" subdevice (%-1 if none). - * @unused: Reserved for future use. - * - * This is used with the %COMEDI_DEVINFO ioctl to get basic information about - * the device. - */ -struct comedi_devinfo { - unsigned int version_code; - unsigned int n_subdevs; - char driver_name[COMEDI_NAMELEN]; - char board_name[COMEDI_NAMELEN]; - int read_subdevice; - int write_subdevice; - int unused[30]; -}; - -/** - * struct comedi_devconfig - used to configure a legacy COMEDI device - * @board_name: Null-terminated string specifying the type of board - * to configure. - * @options: An array of integer configuration options. - * - * This is used with the %COMEDI_DEVCONFIG ioctl to configure a "legacy" COMEDI - * device, such as an ISA card. Not all COMEDI drivers support this. Those - * that do either expect the specified board name to match one of a list of - * names registered with the COMEDI core, or expect the specified board name - * to match the COMEDI driver name itself. The configuration options are - * handled in a driver-specific manner. - */ -struct comedi_devconfig { - char board_name[COMEDI_NAMELEN]; - int options[COMEDI_NDEVCONFOPTS]; -}; - -/** - * struct comedi_bufconfig - used to set or get buffer size for a subdevice - * @subdevice: Subdevice index. - * @flags: Not used. - * @maximum_size: Maximum allowed buffer size. - * @size: Buffer size. - * @unused: Reserved for future use. - * - * This is used with the %COMEDI_BUFCONFIG ioctl to get or configure the - * maximum buffer size and current buffer size for a COMEDI subdevice that - * supports asynchronous commands. If the subdevice does not support - * asynchronous commands, @maximum_size and @size are ignored and set to 0. - * - * On ioctl input, non-zero values of @maximum_size and @size specify a - * new maximum size and new current size (in bytes), respectively. These - * will be rounded up to a multiple of %PAGE_SIZE.
Specifying a new maximum - * size requires admin capabilities. - * - * On ioctl output, @maximum_size and @size are set to the current maximum - * buffer size and current buffer size, respectively. - */ -struct comedi_bufconfig { - unsigned int subdevice; - unsigned int flags; - - unsigned int maximum_size; - unsigned int size; - - unsigned int unused[4]; -}; - -/** - * struct comedi_bufinfo - used to manipulate buffer position for a subdevice - * @subdevice: Subdevice index. - * @bytes_read: Specify amount to advance read position for an - * asynchronous command in the input ("read") direction. - * @buf_write_ptr: Current write position (index) within the buffer. - * @buf_read_ptr: Current read position (index) within the buffer. - * @buf_write_count: Total amount written, modulo 2^32. - * @buf_read_count: Total amount read, modulo 2^32. - * @bytes_written: Specify amount to advance write position for an - * asynchronous command in the output ("write") direction. - * @unused: Reserved for future use. - * - * This is used with the %COMEDI_BUFINFO ioctl to optionally advance the - * current read or write position in an asynchronous acquisition data buffer, - * and to get the current read and write positions in the buffer. - */ -struct comedi_bufinfo { - unsigned int subdevice; - unsigned int bytes_read; - - unsigned int buf_write_ptr; - unsigned int buf_read_ptr; - unsigned int buf_write_count; - unsigned int buf_read_count; - - unsigned int bytes_written; - - unsigned int unused[4]; -}; - -/* range stuff */ - -#define __RANGE(a, b) ((((a) & 0xffff) << 16) | ((b) & 0xffff)) - -#define RANGE_OFFSET(a) (((a) >> 16) & 0xffff) -#define RANGE_LENGTH(b) ((b) & 0xffff) - -#define RF_UNIT(flags) ((flags) & 0xff) -#define RF_EXTERNAL 0x100 - -#define UNIT_volt 0 -#define UNIT_mA 1 -#define UNIT_none 2 - -#define COMEDI_MIN_SPEED 0xffffffffu - -/**********************************************************/ -/* everything after this line is ALPHA */ -/**********************************************************/ - -/* - * 8254 specific configuration. - * - * It supports two config commands: - * - * 0 ID: INSN_CONFIG_SET_COUNTER_MODE - * 1 8254 Mode - * I8254_MODE0, I8254_MODE1, ..., I8254_MODE5 - * OR'ed with: - * I8254_BCD, I8254_BINARY - * - * 0 ID: INSN_CONFIG_8254_READ_STATUS - * 1 <-- Status byte returned here. - * B7 = Output - * B6 = NULL Count - * B5 - B0 Current mode. - */ - -enum i8254_mode { - I8254_MODE0 = (0 << 1), /* Interrupt on terminal count */ - I8254_MODE1 = (1 << 1), /* Hardware retriggerable one-shot */ - I8254_MODE2 = (2 << 1), /* Rate generator */ - I8254_MODE3 = (3 << 1), /* Square wave mode */ - I8254_MODE4 = (4 << 1), /* Software triggered strobe */ - /* Hardware triggered strobe (retriggerable) */ - I8254_MODE5 = (5 << 1), - /* Use binary-coded decimal instead of binary (pretty useless) */ - I8254_BCD = 1, - I8254_BINARY = 0 -}; - -/* *** BEGIN GLOBALLY-NAMED NI TERMINALS/SIGNALS *** */ - -/* - * Common National Instruments Terminal/Signal names. - * Some of these have no NI_ prefix as they are useful for non-NI hardware, such - * as those that utilize the PXI/RTSI trigger lines. - * - * NOTE ABOUT THE CHOICE OF NAMES HERE AND THE CAMELSCRIPT: - * The choice to use CamelScript and the exact names below is for - * maintainability, clarity, similarity to manufacturer's documentation, - * _and_ a mitigation for confusion that has plagued the use of these drivers - * for years!
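The %struct comedi_krange encoding and the range macros above combine straightforwardly; a minimal sketch (the helper name is local to this example, not part of the API):

	/*
	 * Sketch: convert a raw sample to a physical value using the
	 * millionths-of-a-unit encoding of struct comedi_krange.
	 */
	static double krange_sample_to_phys(unsigned int sample,
					    unsigned int maxdata,
					    const struct comedi_krange *kr)
	{
		double min = kr->min * 1e-6; /* volts if RF_UNIT(kr->flags) == UNIT_volt */
		double max = kr->max * 1e-6;

		/* Nominally linear: sample 0 maps to min, maxdata maps to max. */
		return min + (max - min) * (double)sample / (double)maxdata;
	}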
- * - * More detail: - * There has been significant confusion over many years for users - * when trying to understand how to connect to/from signals and terminals on - * NI hardware using comedi. The major reason for this is that the actual - * register values were exposed and required to be used by users. Several - * reasons exist why this caused major confusion for users: - * 1) The register values are _NOT_ in user documentation, but rather in - * arcane locations, such as a few register programming manuals that are - * increasingly hard to find and the NI MHDDK (comments in example code). - * There is no one place to find the various valid values of the registers. - * 2) The register values are _NOT_ completely consistent. There is no way to - * gain any sense of intuition of which values, or even enums, one should use - * for various registers. There was some attempt in prior use of comedi to - * name enums such that a user might know which enums should be used for - * varying purposes, but the end-user had to gain knowledge of register - * values to correctly wield this approach. - * 3) The names for signals and registers found in the various register level - * programming manuals and vendor-provided documentation are _not_ even - * close to the same names that are in the end-user documentation. - * - * Similar, albeit less, confusion plagued NI's previous version of their own - * drivers. Before 2003, NI greatly simplified the situation for users - * by releasing a new API that abstracted the names of signals/terminals to a - * common and intuitive set of names. - * - * The names below mirror the names chosen and well documented by NI. These - * names are exposed to the user via the comedilib user library. By keeping - * the names below, in spite of the use of CamelScript, maintenance will be - * greatly eased and confusion for users _and_ comedi developers will be - * greatly reduced. - */ - -/* - * Base of abstracted NI names. - * The first 16 bits of *_arg are reserved for channel selection. - * Since we only actually need the first 4 or 5 bits for all register values on - * NI select registers anyways, we'll identify all values >= (1<<15) as being an - * abstracted NI signal/terminal name. - * These values are also used/returned by INSN_DEVICE_CONFIG_TEST_ROUTE, - * INSN_DEVICE_CONFIG_CONNECT_ROUTE, INSN_DEVICE_CONFIG_DISCONNECT_ROUTE, - * and INSN_DEVICE_CONFIG_GET_ROUTES. - */ -#define NI_NAMES_BASE 0x8000u - -#define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1))) - -/* - * not necessarily all allowed 64 PFIs are valid--certainly not for all devices - */ -#define NI_PFI(x) _TERM_N(NI_NAMES_BASE, 64, x) -/* 8 trigger lines by standard; some devices cannot talk to all eight.
*/ -#define TRIGGER_LINE(x) _TERM_N(NI_PFI(-1) + 1, 8, x) -/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */ -#define NI_RTSI_BRD(x) _TERM_N(TRIGGER_LINE(-1) + 1, 4, x) - -/* *** Counter/timer names : 8 counters max *** */ -#define NI_MAX_COUNTERS 8 -#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1) -#define NI_CtrSource(x) _TERM_N(NI_COUNTER_NAMES_BASE, NI_MAX_COUNTERS, x) -/* Gate, Aux, A, B, Z are all treated, at times, as gates */ -#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1) -#define NI_CtrGate(x) _TERM_N(NI_GATES_NAMES_BASE, NI_MAX_COUNTERS, x) -#define NI_CtrAux(x) _TERM_N(NI_CtrGate(-1) + 1, NI_MAX_COUNTERS, x) -#define NI_CtrA(x) _TERM_N(NI_CtrAux(-1) + 1, NI_MAX_COUNTERS, x) -#define NI_CtrB(x) _TERM_N(NI_CtrA(-1) + 1, NI_MAX_COUNTERS, x) -#define NI_CtrZ(x) _TERM_N(NI_CtrB(-1) + 1, NI_MAX_COUNTERS, x) -#define NI_GATES_NAMES_MAX NI_CtrZ(-1) -#define NI_CtrArmStartTrigger(x) _TERM_N(NI_CtrZ(-1) + 1, NI_MAX_COUNTERS, x) -#define NI_CtrInternalOutput(x) \ - _TERM_N(NI_CtrArmStartTrigger(-1) + 1, NI_MAX_COUNTERS, x) -/** external pin(s) labeled conveniently as Ctr<i>Out. */ -#define NI_CtrOut(x) _TERM_N(NI_CtrInternalOutput(-1) + 1, NI_MAX_COUNTERS, x) -/** For buffered sampling of ctr -- x series capability. */ -#define NI_CtrSampleClock(x) _TERM_N(NI_CtrOut(-1) + 1, NI_MAX_COUNTERS, x) -#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1) - -enum ni_common_signal_names { - /* PXI_Star: this is a non-NI-specific signal */ - PXI_Star = NI_COUNTER_NAMES_MAX + 1, - PXI_Clk10, - PXIe_Clk100, - NI_AI_SampleClock, - NI_AI_SampleClockTimebase, - NI_AI_StartTrigger, - NI_AI_ReferenceTrigger, - NI_AI_ConvertClock, - NI_AI_ConvertClockTimebase, - NI_AI_PauseTrigger, - NI_AI_HoldCompleteEvent, - NI_AI_HoldComplete, - NI_AI_ExternalMUXClock, - NI_AI_STOP, /* pulse signal that occurs when an update is finished(?) */ - NI_AO_SampleClock, - NI_AO_SampleClockTimebase, - NI_AO_StartTrigger, - NI_AO_PauseTrigger, - NI_DI_SampleClock, - NI_DI_SampleClockTimebase, - NI_DI_StartTrigger, - NI_DI_ReferenceTrigger, - NI_DI_PauseTrigger, - NI_DI_InputBufferFull, - NI_DI_ReadyForStartEvent, - NI_DI_ReadyForTransferEventBurst, - NI_DI_ReadyForTransferEventPipelined, - NI_DO_SampleClock, - NI_DO_SampleClockTimebase, - NI_DO_StartTrigger, - NI_DO_PauseTrigger, - NI_DO_OutputBufferFull, - NI_DO_DataActiveEvent, - NI_DO_ReadyForStartEvent, - NI_DO_ReadyForTransferEvent, - NI_MasterTimebase, - NI_20MHzTimebase, - NI_80MHzTimebase, - NI_100MHzTimebase, - NI_200MHzTimebase, - NI_100kHzTimebase, - NI_10MHzRefClock, - NI_FrequencyOutput, - NI_ChangeDetectionEvent, - NI_AnalogComparisonEvent, - NI_WatchdogExpiredEvent, - NI_WatchdogExpirationTrigger, - NI_SCXI_Trig1, - NI_LogicLow, - NI_LogicHigh, - NI_ExternalStrobe, - NI_PFI_DO, - NI_CaseGround, - /* special internal signal used as variable source for RTSI bus: */ - NI_RGOUT0, - - /* just a name to make the next more convenient, regardless of above */ - _NI_NAMES_MAX_PLUS_1, - NI_NUM_NAMES = _NI_NAMES_MAX_PLUS_1 - NI_NAMES_BASE, -}; - -/* *** END GLOBALLY-NAMED NI TERMINALS/SIGNALS *** */ - -#define NI_USUAL_PFI_SELECT(x) (((x) < 10) ? (0x1 + (x)) : (0xb + (x))) -#define NI_USUAL_RTSI_SELECT(x) (((x) < 7) ?
(0xb + (x)) : 0x1b) - -/* - * mode bits for NI general-purpose counters, set with - * INSN_CONFIG_SET_COUNTER_MODE - */ -#define NI_GPCT_COUNTING_MODE_SHIFT 16 -#define NI_GPCT_INDEX_PHASE_BITSHIFT 20 -#define NI_GPCT_COUNTING_DIRECTION_SHIFT 24 -enum ni_gpct_mode_bits { - NI_GPCT_GATE_ON_BOTH_EDGES_BIT = 0x4, - NI_GPCT_EDGE_GATE_MODE_MASK = 0x18, - NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS = 0x0, - NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS = 0x8, - NI_GPCT_EDGE_GATE_STARTS_BITS = 0x10, - NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS = 0x18, - NI_GPCT_STOP_MODE_MASK = 0x60, - NI_GPCT_STOP_ON_GATE_BITS = 0x00, - NI_GPCT_STOP_ON_GATE_OR_TC_BITS = 0x20, - NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS = 0x40, - NI_GPCT_LOAD_B_SELECT_BIT = 0x80, - NI_GPCT_OUTPUT_MODE_MASK = 0x300, - NI_GPCT_OUTPUT_TC_PULSE_BITS = 0x100, - NI_GPCT_OUTPUT_TC_TOGGLE_BITS = 0x200, - NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS = 0x300, - NI_GPCT_HARDWARE_DISARM_MASK = 0xc00, - NI_GPCT_NO_HARDWARE_DISARM_BITS = 0x000, - NI_GPCT_DISARM_AT_TC_BITS = 0x400, - NI_GPCT_DISARM_AT_GATE_BITS = 0x800, - NI_GPCT_DISARM_AT_TC_OR_GATE_BITS = 0xc00, - NI_GPCT_LOADING_ON_TC_BIT = 0x1000, - NI_GPCT_LOADING_ON_GATE_BIT = 0x4000, - NI_GPCT_COUNTING_MODE_MASK = 0x7 << NI_GPCT_COUNTING_MODE_SHIFT, - NI_GPCT_COUNTING_MODE_NORMAL_BITS = - 0x0 << NI_GPCT_COUNTING_MODE_SHIFT, - NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS = - 0x1 << NI_GPCT_COUNTING_MODE_SHIFT, - NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS = - 0x2 << NI_GPCT_COUNTING_MODE_SHIFT, - NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS = - 0x3 << NI_GPCT_COUNTING_MODE_SHIFT, - NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS = - 0x4 << NI_GPCT_COUNTING_MODE_SHIFT, - NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS = - 0x6 << NI_GPCT_COUNTING_MODE_SHIFT, - NI_GPCT_INDEX_PHASE_MASK = 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT, - NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS = - 0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT, - NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS = - 0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT, - NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS = - 0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT, - NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS = - 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT, - NI_GPCT_INDEX_ENABLE_BIT = 0x400000, - NI_GPCT_COUNTING_DIRECTION_MASK = - 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT, - NI_GPCT_COUNTING_DIRECTION_DOWN_BITS = - 0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT, - NI_GPCT_COUNTING_DIRECTION_UP_BITS = - 0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT, - NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS = - 0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT, - NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS = - 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT, - NI_GPCT_RELOAD_SOURCE_MASK = 0xc000000, - NI_GPCT_RELOAD_SOURCE_FIXED_BITS = 0x0, - NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS = 0x4000000, - NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS = 0x8000000, - NI_GPCT_OR_GATE_BIT = 0x10000000, - NI_GPCT_INVERT_OUTPUT_BIT = 0x20000000 -}; - -/* - * Bits for setting a clock source with - * INSN_CONFIG_SET_CLOCK_SRC when using NI general-purpose counters. 
- */ -enum ni_gpct_clock_source_bits { - NI_GPCT_CLOCK_SRC_SELECT_MASK = 0x3f, - NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS = 0x0, - NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS = 0x1, - NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS = 0x2, - NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS = 0x3, - NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS = 0x4, - NI_GPCT_NEXT_TC_CLOCK_SRC_BITS = 0x5, - /* NI 660x-specific */ - NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6, - NI_GPCT_PXI10_CLOCK_SRC_BITS = 0x7, - NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS = 0x8, - NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS = 0x9, - NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK = 0x30000000, - NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS = 0x0, - /* divide source by 2 */ - NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000, - /* divide source by 8 */ - NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000, - NI_GPCT_INVERT_CLOCK_SRC_BIT = 0x80000000 -}; - -/* NI 660x-specific */ -#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x) (0x10 + (x)) - -#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x) (0x18 + (x)) - -/* no pfi on NI 660x */ -#define NI_GPCT_PFI_CLOCK_SRC_BITS(x) (0x20 + (x)) - -/* - * Possibilities for setting a gate source with - * INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters. - * May be bitwise-or'd with CR_EDGE or CR_INVERT. - */ -enum ni_gpct_gate_select { - /* m-series gates */ - NI_GPCT_TIMESTAMP_MUX_GATE_SELECT = 0x0, - NI_GPCT_AI_START2_GATE_SELECT = 0x12, - NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT = 0x13, - NI_GPCT_NEXT_OUT_GATE_SELECT = 0x14, - NI_GPCT_AI_START1_GATE_SELECT = 0x1c, - NI_GPCT_NEXT_SOURCE_GATE_SELECT = 0x1d, - NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT = 0x1e, - NI_GPCT_LOGIC_LOW_GATE_SELECT = 0x1f, - /* more gates for 660x */ - NI_GPCT_SOURCE_PIN_i_GATE_SELECT = 0x100, - NI_GPCT_GATE_PIN_i_GATE_SELECT = 0x101, - /* more gates for 660x "second gate" */ - NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT = 0x201, - NI_GPCT_SELECTED_GATE_GATE_SELECT = 0x21e, - /* - * m-series "second gate" sources are unknown, - * we should add them here with an offset of 0x300 when - * known. - */ - NI_GPCT_DISABLED_GATE_SELECT = 0x8000, -}; - -#define NI_GPCT_GATE_PIN_GATE_SELECT(x) (0x102 + (x)) -#define NI_GPCT_RTSI_GATE_SELECT(x) NI_USUAL_RTSI_SELECT(x) -#define NI_GPCT_PFI_GATE_SELECT(x) NI_USUAL_PFI_SELECT(x) -#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x) (0x202 + (x)) - -/* - * Possibilities for setting a source with - * INSN_CONFIG_SET_OTHER_SRC when using NI general-purpose counters. - */ -enum ni_gpct_other_index { - NI_GPCT_SOURCE_ENCODER_A, - NI_GPCT_SOURCE_ENCODER_B, - NI_GPCT_SOURCE_ENCODER_Z -}; - -enum ni_gpct_other_select { - /* m-series gates */ - /* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */ - NI_GPCT_DISABLED_OTHER_SELECT = 0x8000, -}; - -#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x) - -/* - * start sources for ni general-purpose counters for use with - * INSN_CONFIG_ARM - */ -enum ni_gpct_arm_source { - NI_GPCT_ARM_IMMEDIATE = 0x0, - /* - * Start both the counter and the adjacent paired counter simultaneously - */ - NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1, - /* - * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant - * bits (3 bits for 660x or 5 bits for m-series) through to the - * hardware. To select a hardware trigger, pass the appropriate select - * bit, e.g., - * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or - * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number) - */ - NI_GPCT_HW_ARM = 0x1000, - NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM, /* for backward compatibility */ -}; - -/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. 
*/ -enum ni_gpct_filter_select { - NI_GPCT_FILTER_OFF = 0x0, - NI_GPCT_FILTER_TIMEBASE_3_SYNC = 0x1, - NI_GPCT_FILTER_100x_TIMEBASE_1 = 0x2, - NI_GPCT_FILTER_20x_TIMEBASE_1 = 0x3, - NI_GPCT_FILTER_10x_TIMEBASE_1 = 0x4, - NI_GPCT_FILTER_2x_TIMEBASE_1 = 0x5, - NI_GPCT_FILTER_2x_TIMEBASE_3 = 0x6 -}; - -/* - * PFI digital filtering options for ni m-series for use with - * INSN_CONFIG_FILTER. - */ -enum ni_pfi_filter_select { - NI_PFI_FILTER_OFF = 0x0, - NI_PFI_FILTER_125ns = 0x1, - NI_PFI_FILTER_6425ns = 0x2, - NI_PFI_FILTER_2550us = 0x3 -}; - -/* master clock sources for ni mio boards and INSN_CONFIG_SET_CLOCK_SRC */ -enum ni_mio_clock_source { - NI_MIO_INTERNAL_CLOCK = 0, - /* - * Doesn't work for m-series, use NI_MIO_PLL_RTSI_CLOCK() - * the NI_MIO_PLL_* sources are m-series only - */ - NI_MIO_RTSI_CLOCK = 1, - NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK = 2, - NI_MIO_PLL_PXI10_CLOCK = 3, - NI_MIO_PLL_RTSI0_CLOCK = 4 -}; - -#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x)) - -/* - * Signals which can be routed to an NI RTSI pin with INSN_CONFIG_SET_ROUTING. - * The numbers assigned are not arbitrary, they correspond to the bits required - * to program the board. - */ -enum ni_rtsi_routing { - NI_RTSI_OUTPUT_ADR_START1 = 0, - NI_RTSI_OUTPUT_ADR_START2 = 1, - NI_RTSI_OUTPUT_SCLKG = 2, - NI_RTSI_OUTPUT_DACUPDN = 3, - NI_RTSI_OUTPUT_DA_START1 = 4, - NI_RTSI_OUTPUT_G_SRC0 = 5, - NI_RTSI_OUTPUT_G_GATE0 = 6, - NI_RTSI_OUTPUT_RGOUT0 = 7, - NI_RTSI_OUTPUT_RTSI_BRD_0 = 8, - /* Pre-m-series always have RTSI clock on line 7 */ - NI_RTSI_OUTPUT_RTSI_OSC = 12 -}; - -#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x)) - -/* - * Signals which can be routed to an NI PFI pin on an m-series board with - * INSN_CONFIG_SET_ROUTING. These numbers are also returned by - * INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their routing - * cannot be changed. The numbers assigned are not arbitrary, they correspond - * to the bits required to program the board. - */ -enum ni_pfi_routing { - NI_PFI_OUTPUT_PFI_DEFAULT = 0, - NI_PFI_OUTPUT_AI_START1 = 1, - NI_PFI_OUTPUT_AI_START2 = 2, - NI_PFI_OUTPUT_AI_CONVERT = 3, - NI_PFI_OUTPUT_G_SRC1 = 4, - NI_PFI_OUTPUT_G_GATE1 = 5, - NI_PFI_OUTPUT_AO_UPDATE_N = 6, - NI_PFI_OUTPUT_AO_START1 = 7, - NI_PFI_OUTPUT_AI_START_PULSE = 8, - NI_PFI_OUTPUT_G_SRC0 = 9, - NI_PFI_OUTPUT_G_GATE0 = 10, - NI_PFI_OUTPUT_EXT_STROBE = 11, - NI_PFI_OUTPUT_AI_EXT_MUX_CLK = 12, - NI_PFI_OUTPUT_GOUT0 = 13, - NI_PFI_OUTPUT_GOUT1 = 14, - NI_PFI_OUTPUT_FREQ_OUT = 15, - NI_PFI_OUTPUT_PFI_DO = 16, - NI_PFI_OUTPUT_I_ATRIG = 17, - NI_PFI_OUTPUT_RTSI0 = 18, - NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN = 26, - NI_PFI_OUTPUT_SCXI_TRIG1 = 27, - NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI = 28, - NI_PFI_OUTPUT_CDI_SAMPLE = 29, - NI_PFI_OUTPUT_CDO_UPDATE = 30 -}; - -#define NI_PFI_OUTPUT_RTSI(x) (NI_PFI_OUTPUT_RTSI0 + (x)) - -/* - * Signals which can be routed to output on a NI PFI pin on a 660x board - * with INSN_CONFIG_SET_ROUTING. The numbers assigned are - * not arbitrary, they correspond to the bits required - * to program the board. Lines 0 to 7 can only be set to - * NI_660X_PFI_OUTPUT_DIO. Lines 32 to 39 can only be set to - * NI_660X_PFI_OUTPUT_COUNTER. - */ -enum ni_660x_pfi_routing { - NI_660X_PFI_OUTPUT_COUNTER = 1, /* counter */ - NI_660X_PFI_OUTPUT_DIO = 2, /* static digital output */ -}; - -/* - * NI External Trigger lines. These values are not arbitrary, but are related - * to the bits required to program the board (offset by 1 for historical - * reasons). 
- */ -#define NI_EXT_PFI(x) (NI_USUAL_PFI_SELECT(x) - 1) -#define NI_EXT_RTSI(x) (NI_USUAL_RTSI_SELECT(x) - 1) - -/* - * Clock sources for CDIO subdevice on NI m-series boards. Used as the - * scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd - * with CR_INVERT to change polarity. - */ -enum ni_m_series_cdio_scan_begin_src { - NI_CDIO_SCAN_BEGIN_SRC_GROUND = 0, - NI_CDIO_SCAN_BEGIN_SRC_AI_START = 18, - NI_CDIO_SCAN_BEGIN_SRC_AI_CONVERT = 19, - NI_CDIO_SCAN_BEGIN_SRC_PXI_STAR_TRIGGER = 20, - NI_CDIO_SCAN_BEGIN_SRC_G0_OUT = 28, - NI_CDIO_SCAN_BEGIN_SRC_G1_OUT = 29, - NI_CDIO_SCAN_BEGIN_SRC_ANALOG_TRIGGER = 30, - NI_CDIO_SCAN_BEGIN_SRC_AO_UPDATE = 31, - NI_CDIO_SCAN_BEGIN_SRC_FREQ_OUT = 32, - NI_CDIO_SCAN_BEGIN_SRC_DIO_CHANGE_DETECT_IRQ = 33 -}; - -#define NI_CDIO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x) -#define NI_CDIO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x) - -/* - * scan_begin_src for scan_begin_arg==TRIG_EXT with analog output command on NI - * boards. These scan begin sources can also be bitwise-or'd with CR_INVERT to - * change polarity. - */ -#define NI_AO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x) -#define NI_AO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x) - -/* - * Bits for setting a clock source with - * INSN_CONFIG_SET_CLOCK_SRC when using NI frequency output subdevice. - */ -enum ni_freq_out_clock_source_bits { - NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC, /* 10 MHz */ - NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC /* 100 KHz */ -}; - -/* - * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for - * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). - */ -enum amplc_dio_clock_source { - /* - * Per channel external clock - * input/output pin (pin is only an - * input when clock source set to this value, - * otherwise it is an output) - */ - AMPLC_DIO_CLK_CLKN, - AMPLC_DIO_CLK_10MHZ, /* 10 MHz internal clock */ - AMPLC_DIO_CLK_1MHZ, /* 1 MHz internal clock */ - AMPLC_DIO_CLK_100KHZ, /* 100 kHz internal clock */ - AMPLC_DIO_CLK_10KHZ, /* 10 kHz internal clock */ - AMPLC_DIO_CLK_1KHZ, /* 1 kHz internal clock */ - /* - * Output of preceding counter channel - * (for channel 0, preceding counter - * channel is channel 2 on preceding - * counter subdevice, for first counter - * subdevice, preceding counter - * subdevice is the last counter - * subdevice) - */ - AMPLC_DIO_CLK_OUTNM1, - AMPLC_DIO_CLK_EXT, /* per chip external input pin */ - /* the following are "enhanced" clock sources for PCIe models */ - AMPLC_DIO_CLK_VCC, /* clock input HIGH */ - AMPLC_DIO_CLK_GND, /* clock input LOW */ - AMPLC_DIO_CLK_PAT_PRESENT, /* "pattern present" signal */ - AMPLC_DIO_CLK_20MHZ /* 20 MHz internal clock */ -}; - -/* - * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for - * timer subdevice on some Amplicon DIO PCIe boards (amplc_dio200 driver). - */ -enum amplc_dio_ts_clock_src { - AMPLC_DIO_TS_CLK_1GHZ, /* 1 ns period with 20 ns granularity */ - AMPLC_DIO_TS_CLK_1MHZ, /* 1 us period */ - AMPLC_DIO_TS_CLK_1KHZ /* 1 ms period */ -}; - -/* - * Values for setting a gate source with INSN_CONFIG_SET_GATE_SRC for - * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). 
- */ -enum amplc_dio_gate_source { - AMPLC_DIO_GAT_VCC, /* internal high logic level */ - AMPLC_DIO_GAT_GND, /* internal low logic level */ - AMPLC_DIO_GAT_GATN, /* per channel external gate input */ - /* - * negated output of counter channel minus 2 - * (for channels 0 or 1, channel minus 2 is channel 1 or 2 on - * the preceding counter subdevice, for the first counter subdevice - * the preceding counter subdevice is the last counter subdevice) - */ - AMPLC_DIO_GAT_NOUTNM2, - AMPLC_DIO_GAT_RESERVED4, - AMPLC_DIO_GAT_RESERVED5, - AMPLC_DIO_GAT_RESERVED6, - AMPLC_DIO_GAT_RESERVED7, - /* the following are "enhanced" gate sources for PCIe models */ - AMPLC_DIO_GAT_NGATN = 6, /* negated per channel gate input */ - /* non-negated output of counter channel minus 2 */ - AMPLC_DIO_GAT_OUTNM2, - AMPLC_DIO_GAT_PAT_PRESENT, /* "pattern present" signal */ - AMPLC_DIO_GAT_PAT_OCCURRED, /* "pattern occurred" latched */ - AMPLC_DIO_GAT_PAT_GONE, /* "pattern gone away" latched */ - AMPLC_DIO_GAT_NPAT_PRESENT, /* negated "pattern present" */ - AMPLC_DIO_GAT_NPAT_OCCURRED, /* negated "pattern occurred" */ - AMPLC_DIO_GAT_NPAT_GONE /* negated "pattern gone away" */ -}; - -/* - * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for - * the counter subdevice on the Kolter Electronic PCI-Counter board - * (ke_counter driver). - */ -enum ke_counter_clock_source { - KE_CLK_20MHZ, /* internal 20MHz (default) */ - KE_CLK_4MHZ, /* internal 4MHz (option) */ - KE_CLK_EXT /* external clock on pin 21 of D-Sub */ -}; - -#endif /* _COMEDI_H */ diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c index 06bfc859ab31..393966c09740 100644 --- a/drivers/comedi/comedi_buf.c +++ b/drivers/comedi/comedi_buf.c @@ -9,8 +9,7 @@ #include <linux/vmalloc.h> #include <linux/slab.h> - -#include "comedidev.h" +#include <linux/comedi/comedidev.h> #include "comedi_internal.h" #ifdef PAGE_KERNEL_NOCACHE diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index 763cea8418f8..55a0cae04b8d 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -23,7 +23,7 @@ #include <linux/poll.h> #include <linux/device.h> #include <linux/fs.h> -#include "comedidev.h" +#include <linux/comedi/comedidev.h> #include <linux/cdev.h> #include <linux/io.h> diff --git a/drivers/comedi/comedi_pci.c b/drivers/comedi/comedi_pci.c index 54739af7eb71..cc2581902195 100644 --- a/drivers/comedi/comedi_pci.c +++ b/drivers/comedi/comedi_pci.c @@ -9,8 +9,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /** * comedi_to_pci_dev() - Return PCI device attached to COMEDI device diff --git a/drivers/comedi/comedi_pci.h b/drivers/comedi/comedi_pci.h deleted file mode 100644 index 4e069440cbdc..000000000000 --- a/drivers/comedi/comedi_pci.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * comedi_pci.h - * header file for Comedi PCI drivers - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 1997-2000 David A. 
Schleef <ds@schleef.org> - */ - -#ifndef _COMEDI_PCI_H -#define _COMEDI_PCI_H - -#include <linux/pci.h> - -#include "comedidev.h" - -/* - * PCI Vendor IDs not in <linux/pci_ids.h> - */ -#define PCI_VENDOR_ID_KOLTER 0x1001 -#define PCI_VENDOR_ID_ICP 0x104c -#define PCI_VENDOR_ID_DT 0x1116 -#define PCI_VENDOR_ID_IOTECH 0x1616 -#define PCI_VENDOR_ID_CONTEC 0x1221 -#define PCI_VENDOR_ID_RTD 0x1435 -#define PCI_VENDOR_ID_HUMUSOFT 0x186c - -struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev); - -int comedi_pci_enable(struct comedi_device *dev); -void comedi_pci_disable(struct comedi_device *dev); -void comedi_pci_detach(struct comedi_device *dev); - -int comedi_pci_auto_config(struct pci_dev *pcidev, struct comedi_driver *driver, - unsigned long context); -void comedi_pci_auto_unconfig(struct pci_dev *pcidev); - -int comedi_pci_driver_register(struct comedi_driver *comedi_driver, - struct pci_driver *pci_driver); -void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver, - struct pci_driver *pci_driver); - -/** - * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver - * @__comedi_driver: comedi_driver struct - * @__pci_driver: pci_driver struct - * - * Helper macro for comedi PCI drivers which do not do anything special - * in module init/exit. This eliminates a lot of boilerplate. Each - * module may only use this macro once, and calling it replaces - * module_init() and module_exit() - */ -#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \ - module_driver(__comedi_driver, comedi_pci_driver_register, \ - comedi_pci_driver_unregister, &(__pci_driver)) - -#endif /* _COMEDI_PCI_H */ diff --git a/drivers/comedi/comedi_pcmcia.c b/drivers/comedi/comedi_pcmcia.c index bb273bb202e6..c53aad0fc2ce 100644 --- a/drivers/comedi/comedi_pcmcia.c +++ b/drivers/comedi/comedi_pcmcia.c @@ -9,8 +9,7 @@ #include <linux/module.h> #include <linux/kernel.h> - -#include "comedi_pcmcia.h" +#include <linux/comedi/comedi_pcmcia.h> /** * comedi_to_pcmcia_dev() - Return PCMCIA device attached to COMEDI device diff --git a/drivers/comedi/comedi_pcmcia.h b/drivers/comedi/comedi_pcmcia.h deleted file mode 100644 index f2f6e779645b..000000000000 --- a/drivers/comedi/comedi_pcmcia.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * comedi_pcmcia.h - * header file for Comedi PCMCIA drivers - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 1997-2000 David A. 
Schleef <ds@schleef.org> - */ - -#ifndef _COMEDI_PCMCIA_H -#define _COMEDI_PCMCIA_H - -#include <pcmcia/cistpl.h> -#include <pcmcia/ds.h> - -#include "comedidev.h" - -struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev); - -int comedi_pcmcia_enable(struct comedi_device *dev, - int (*conf_check)(struct pcmcia_device *p_dev, - void *priv_data)); -void comedi_pcmcia_disable(struct comedi_device *dev); - -int comedi_pcmcia_auto_config(struct pcmcia_device *link, - struct comedi_driver *driver); -void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link); - -int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver, - struct pcmcia_driver *pcmcia_driver); -void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver, - struct pcmcia_driver *pcmcia_driver); - -/** - * module_comedi_pcmcia_driver() - Helper macro for registering a comedi - * PCMCIA driver - * @__comedi_driver: comedi_driver struct - * @__pcmcia_driver: pcmcia_driver struct - * - * Helper macro for comedi PCMCIA drivers which do not do anything special - * in module init/exit. This eliminates a lot of boilerplate. Each - * module may only use this macro once, and calling it replaces - * module_init() and module_exit() - */ -#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \ - module_driver(__comedi_driver, comedi_pcmcia_driver_register, \ - comedi_pcmcia_driver_unregister, &(__pcmcia_driver)) - -#endif /* _COMEDI_PCMCIA_H */ diff --git a/drivers/comedi/comedi_usb.c b/drivers/comedi/comedi_usb.c index eea8ebf32ed0..d11ea148ebf8 100644 --- a/drivers/comedi/comedi_usb.c +++ b/drivers/comedi/comedi_usb.c @@ -8,8 +8,7 @@ */ #include <linux/module.h> - -#include "comedi_usb.h" +#include <linux/comedi/comedi_usb.h> /** * comedi_to_usb_interface() - Return USB interface attached to COMEDI device diff --git a/drivers/comedi/comedi_usb.h b/drivers/comedi/comedi_usb.h deleted file mode 100644 index 601e29d3891c..000000000000 --- a/drivers/comedi/comedi_usb.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* comedi_usb.h - * header file for USB Comedi drivers - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> - */ - -#ifndef _COMEDI_USB_H -#define _COMEDI_USB_H - -#include <linux/usb.h> - -#include "comedidev.h" - -struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev); -struct usb_device *comedi_to_usb_dev(struct comedi_device *dev); - -int comedi_usb_auto_config(struct usb_interface *intf, - struct comedi_driver *driver, unsigned long context); -void comedi_usb_auto_unconfig(struct usb_interface *intf); - -int comedi_usb_driver_register(struct comedi_driver *comedi_driver, - struct usb_driver *usb_driver); -void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver, - struct usb_driver *usb_driver); - -/** - * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver - * @__comedi_driver: comedi_driver struct - * @__usb_driver: usb_driver struct - * - * Helper macro for comedi USB drivers which do not do anything special - * in module init/exit. This eliminates a lot of boilerplate. 
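These module_comedi_*_driver() helpers read the same way for each bus type. A minimal sketch of how the PCI variant above is typically wired up, with a hypothetical driver name, device ID, and auto_attach handler:

	static int example_auto_attach(struct comedi_device *dev,
				       unsigned long context)
	{
		/* A real handler would also allocate and set up subdevices. */
		return comedi_pci_enable(dev);
	}

	static struct comedi_driver example_driver = {
		.driver_name	= "example",
		.module		= THIS_MODULE,
		.auto_attach	= example_auto_attach,
		.detach		= comedi_pci_detach,
	};

	static int example_pci_probe(struct pci_dev *pcidev,
				     const struct pci_device_id *id)
	{
		return comedi_pci_auto_config(pcidev, &example_driver,
					      id->driver_data);
	}

	static const struct pci_device_id example_pci_table[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_RTD, 0x1234) }, /* hypothetical ID */
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_table);

	static struct pci_driver example_pci_driver = {
		.name		= "example",
		.id_table	= example_pci_table,
		.probe		= example_pci_probe,
		.remove		= comedi_pci_auto_unconfig,
	};
	module_comedi_pci_driver(example_driver, example_pci_driver);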
Each - * module may only use this macro once, and calling it replaces - * module_init() and module_exit() - */ -#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \ - module_driver(__comedi_driver, comedi_usb_driver_register, \ - comedi_usb_driver_unregister, &(__usb_driver)) - -#endif /* _COMEDI_USB_H */ diff --git a/drivers/comedi/comedidev.h b/drivers/comedi/comedidev.h deleted file mode 100644 index 0e1b95ef9a4d..000000000000 --- a/drivers/comedi/comedidev.h +++ /dev/null @@ -1,1054 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * comedidev.h - * header file for kernel-only structures, variables, and constants - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> - */ - -#ifndef _COMEDIDEV_H -#define _COMEDIDEV_H - -#include <linux/dma-mapping.h> -#include <linux/mutex.h> -#include <linux/spinlock_types.h> -#include <linux/rwsem.h> -#include <linux/kref.h> - -#include "comedi.h" - -#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) -#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \ - COMEDI_MINORVERSION, COMEDI_MICROVERSION) -#define COMEDI_RELEASE VERSION - -#define COMEDI_NUM_BOARD_MINORS 0x30 - -/** - * struct comedi_subdevice - Working data for a COMEDI subdevice - * @device: COMEDI device to which this subdevice belongs. (Initialized by - * comedi_alloc_subdevices().) - * @index: Index of this subdevice within device's array of subdevices. - * (Initialized by comedi_alloc_subdevices().) - * @type: Type of subdevice from &enum comedi_subdevice_type. (Initialized by - * the low-level driver.) - * @n_chan: Number of channels the subdevice supports. (Initialized by the - * low-level driver.) - * @subdev_flags: Various "SDF" flags indicating aspects of the subdevice to - * the COMEDI core and user application. (Initialized by the low-level - * driver.) - * @len_chanlist: Maximum length of a channel list if the subdevice supports - * asynchronous acquisition commands. (Optionally initialized by the - * low-level driver, or changed from 0 to 1 during post-configuration.) - * @private: Private data pointer which is either set by the low-level driver - * itself, or by a call to comedi_alloc_spriv() which allocates storage. - * In the latter case, the storage is automatically freed after the - * low-level driver's "detach" handler is called for the device. - * (Initialized by the low-level driver.) - * @async: Pointer to &struct comedi_async if the subdevice supports - * asynchronous acquisition commands. (Allocated and initialized during - * post-configuration if needed.) - * @lock: Pointer to a file object that performed a %COMEDI_LOCK ioctl on the - * subdevice. (Initially NULL.) - * @busy: Pointer to a file object that is performing an asynchronous - * acquisition command on the subdevice. (Initially NULL.) - * @runflags: Internal flags for use by COMEDI core, mostly indicating whether - * an asynchronous acquisition command is running. - * @spin_lock: Generic spin-lock for use by the COMEDI core and the low-level - * driver. (Initialized by comedi_alloc_subdevices().) - * @io_bits: Bit-mask indicating the channel directions for a DIO subdevice - * with no more than 32 channels. A '1' at a bit position indicates the - * corresponding channel is configured as an output. (Initialized by the - * low-level driver for a DIO subdevice. Forced to all-outputs during - * post-configuration for a digital output subdevice.)
- * @maxdata: If non-zero, this is the maximum raw data value of each channel. - * If zero, the maximum data value is channel-specific. (Initialized by - * the low-level driver.) - * @maxdata_list: If the maximum data value is channel-specific, this points - * to an array of maximum data values indexed by channel index. - * (Initialized by the low-level driver.) - * @range_table: If non-NULL, this points to a COMEDI range table for the - * subdevice. If NULL, the range table is channel-specific. (Initialized - * by the low-level driver, will be set to an "invalid" range table during - * post-configuration if @range_table and @range_table_list are both - * NULL.) - * @range_table_list: If the COMEDI range table is channel-specific, this - * points to an array of pointers to COMEDI range tables indexed by - * channel number. (Initialized by the low-level driver.) - * @chanlist: Not used. - * @insn_read: Optional pointer to a handler for the %INSN_READ instruction. - * (Initialized by the low-level driver, or set to a default handler - * during post-configuration.) - * @insn_write: Optional pointer to a handler for the %INSN_WRITE instruction. - * (Initialized by the low-level driver, or set to a default handler - * during post-configuration.) - * @insn_bits: Optional pointer to a handler for the %INSN_BITS instruction - * for a digital input, digital output or digital input/output subdevice. - * (Initialized by the low-level driver, or set to a default handler - * during post-configuration.) - * @insn_config: Optional pointer to a handler for the %INSN_CONFIG - * instruction. (Initialized by the low-level driver, or set to a default - * handler during post-configuration.) - * @do_cmd: If the subdevice supports asynchronous acquisition commands, this - * points to a handler to set it up in hardware. (Initialized by the - * low-level driver.) - * @do_cmdtest: If the subdevice supports asynchronous acquisition commands, - * this points to a handler used to check and possibly tweak a prospective - * acquisition command without setting it up in hardware. (Initialized by - * the low-level driver.) - * @poll: If the subdevice supports asynchronous acquisition commands, this - * is an optional pointer to a handler for the %COMEDI_POLL ioctl which - * instructs the low-level driver to synchronize buffers. (Initialized by - * the low-level driver if needed.) - * @cancel: If the subdevice supports asynchronous acquisition commands, this - * points to a handler used to terminate a running command. (Initialized - * by the low-level driver.) - * @buf_change: If the subdevice supports asynchronous acquisition commands, - * this is an optional pointer to a handler that is called when the data - * buffer for handling asynchronous commands is allocated or reallocated. - * (Initialized by the low-level driver if needed.) - * @munge: If the subdevice supports asynchronous acquisition commands and - * uses DMA to transfer data from the hardware to the acquisition buffer, - * this points to a function used to "munge" the data values from the - * hardware into the format expected by COMEDI. (Initialized by the - * low-level driver if needed.) - * @async_dma_dir: If the subdevice supports asynchronous acquisition commands - * and uses DMA to transfer data from the hardware to the acquisition - * buffer, this sets the DMA direction for the buffer. (initialized to - * %DMA_NONE by comedi_alloc_subdevices() and changed by the low-level - * driver if necessary.) 
- * @state: Handy bit-mask indicating the output states for a DIO or digital - * output subdevice with no more than 32 channels. (Initialized by the - * low-level driver.) - * @class_dev: If the subdevice supports asynchronous acquisition commands, - * this points to a sysfs comediX_subdY device where X is the minor device - * number of the COMEDI device and Y is the subdevice number. The minor - * device number for the sysfs device is allocated dynamically in the - * range 48 to 255. This is used to allow the COMEDI device to be opened - * with a different default read or write subdevice. (Allocated during - * post-configuration if needed.) - * @minor: If @class_dev is set, this is its dynamically allocated minor - * device number. (Set during post-configuration if necessary.) - * @readback: Optional pointer to memory allocated by - * comedi_alloc_subdev_readback() used to hold the values written to - * analog output channels so they can be read back. The storage is - * automatically freed after the low-level driver's "detach" handler is - * called for the device. (Initialized by the low-level driver.) - * - * This is the main control structure for a COMEDI subdevice. If the subdevice - * supports asynchronous acquisition commands, additional information is stored - * in the &struct comedi_async pointed to by @async. - * - * Most of the subdevice is initialized by the low-level driver's "attach" or - * "auto_attach" handlers but parts of it are initialized by - * comedi_alloc_subdevices(), and other parts are initialized during - * post-configuration on return from that handler. - * - * A low-level driver that sets @insn_bits for a digital input, digital output, - * or DIO subdevice may leave @insn_read and @insn_write uninitialized, in - * which case they will be set to a default handler during post-configuration - * that uses @insn_bits to emulate the %INSN_READ and %INSN_WRITE instructions. 
- */ -struct comedi_subdevice { - struct comedi_device *device; - int index; - int type; - int n_chan; - int subdev_flags; - int len_chanlist; /* maximum length of channel/gain list */ - - void *private; - - struct comedi_async *async; - - void *lock; - void *busy; - unsigned int runflags; - spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */ - - unsigned int io_bits; - - unsigned int maxdata; /* if maxdata==0, use list */ - const unsigned int *maxdata_list; /* list is channel specific */ - - const struct comedi_lrange *range_table; - const struct comedi_lrange *const *range_table_list; - - unsigned int *chanlist; /* driver-owned chanlist (not used) */ - - int (*insn_read)(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); - int (*insn_write)(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); - int (*insn_bits)(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); - int (*insn_config)(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, - unsigned int *data); - - int (*do_cmd)(struct comedi_device *dev, struct comedi_subdevice *s); - int (*do_cmdtest)(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_cmd *cmd); - int (*poll)(struct comedi_device *dev, struct comedi_subdevice *s); - int (*cancel)(struct comedi_device *dev, struct comedi_subdevice *s); - - /* called when the buffer changes */ - int (*buf_change)(struct comedi_device *dev, - struct comedi_subdevice *s); - - void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s, - void *data, unsigned int num_bytes, - unsigned int start_chan_index); - enum dma_data_direction async_dma_dir; - - unsigned int state; - - struct device *class_dev; - int minor; - - unsigned int *readback; -}; - -/** - * struct comedi_buf_page - Describe a page of a COMEDI buffer - * @virt_addr: Kernel address of page. - * @dma_addr: DMA address of page if in DMA coherent memory. - */ -struct comedi_buf_page { - void *virt_addr; - dma_addr_t dma_addr; -}; - -/** - * struct comedi_buf_map - Describe pages in a COMEDI buffer - * @dma_hw_dev: Low-level hardware &struct device pointer copied from the - * COMEDI device's hw_dev member. - * @page_list: Pointer to array of &struct comedi_buf_page, one for each - * page in the buffer. - * @n_pages: Number of pages in the buffer. - * @dma_dir: DMA direction used to allocate pages of DMA coherent memory, - * or %DMA_NONE if pages allocated from regular memory. - * @refcount: &struct kref reference counter used to free the buffer. - * - * A COMEDI data buffer is allocated as individual pages, either in - * conventional memory or DMA coherent memory, depending on the attached, - * low-level hardware device. (The buffer pages also get mapped into the - * kernel's contiguous virtual address space pointed to by the 'prealloc_buf' - * member of &struct comedi_async.) - * - * The buffer is normally freed when the COMEDI device is detached from the - * low-level driver (which may happen due to device removal), but if it happens - * to be mmapped at the time, the pages cannot be freed until the buffer has - * been munmapped. That is what the reference counter is for. (The virtual - * address space pointed by 'prealloc_buf' is freed when the COMEDI device is - * detached.) 
- */ -struct comedi_buf_map { - struct device *dma_hw_dev; - struct comedi_buf_page *page_list; - unsigned int n_pages; - enum dma_data_direction dma_dir; - struct kref refcount; -}; - -/** - * struct comedi_async - Control data for asynchronous COMEDI commands - * @prealloc_buf: Kernel virtual address of allocated acquisition buffer. - * @prealloc_bufsz: Buffer size (in bytes). - * @buf_map: Map of buffer pages. - * @max_bufsize: Maximum allowed buffer size (in bytes). - * @buf_write_count: "Write completed" count (in bytes, modulo 2**32). - * @buf_write_alloc_count: "Allocated for writing" count (in bytes, - * modulo 2**32). - * @buf_read_count: "Read completed" count (in bytes, modulo 2**32). - * @buf_read_alloc_count: "Allocated for reading" count (in bytes, - * modulo 2**32). - * @buf_write_ptr: Buffer position for writer. - * @buf_read_ptr: Buffer position for reader. - * @cur_chan: Current position in chanlist for scan (for those drivers that - * use it). - * @scans_done: The number of scans completed. - * @scan_progress: Amount received or sent for current scan (in bytes). - * @munge_chan: Current position in chanlist for "munging". - * @munge_count: "Munge" count (in bytes, modulo 2**32). - * @munge_ptr: Buffer position for "munging". - * @events: Bit-vector of events that have occurred. - * @cmd: Details of comedi command in progress. - * @wait_head: Task wait queue for file reader or writer. - * @cb_mask: Bit-vector of events that should wake waiting tasks. - * @inttrig: Software trigger function for command, or NULL. - * - * Note about the ..._count and ..._ptr members: - * - * Think of the _Count values as being integers of unlimited size, indexing - * into a buffer of infinite length (though only an advancing portion - * of the buffer of fixed length prealloc_bufsz is accessible at any - * time). Then: - * - * Buf_Read_Count <= Buf_Read_Alloc_Count <= Munge_Count <= - * Buf_Write_Count <= Buf_Write_Alloc_Count <= - * (Buf_Read_Count + prealloc_bufsz) - * - * (Those aren't the actual members, apart from prealloc_bufsz.) When the - * buffer is reset, those _Count values start at 0 and only increase in value, - * maintaining the above inequalities until the next time the buffer is - * reset. The buffer is divided into the following regions by the inequalities: - * - * [0, Buf_Read_Count): - * old region no longer accessible - * - * [Buf_Read_Count, Buf_Read_Alloc_Count): - * filled and munged region allocated for reading but not yet read - * - * [Buf_Read_Alloc_Count, Munge_Count): - * filled and munged region not yet allocated for reading - * - * [Munge_Count, Buf_Write_Count): - * filled region not yet munged - * - * [Buf_Write_Count, Buf_Write_Alloc_Count): - * unfilled region allocated for writing but not yet written - * - * [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz): - * unfilled region not yet allocated for writing - * - * [Buf_Read_Count + prealloc_bufsz, infinity): - * unfilled region not yet accessible - * - * Data needs to be written into the buffer before it can be read out, - * and may need to be converted (or "munged") between the two - * operations. Extra unfilled buffer space may need to be allocated for - * writing (advancing Buf_Write_Alloc_Count) before new data is written. - * After writing new data, the newly filled space needs to be released - * (advancing Buf_Write_Count). This also results in the new data being - * "munged" (advancing Munge_Count).
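In code, these running counts are used with plain unsigned subtraction; a minimal sketch of the arithmetic (both helper names are local to this example):

	/*
	 * Sketch: with unsigned 32-bit counts, subtracting a "lower"
	 * count from a "higher" one yields the region size even after
	 * wrap-around, because the difference never exceeds
	 * prealloc_bufsz.
	 */
	static unsigned int example_bytes_ready_to_read(const struct comedi_async *a)
	{
		/* [Buf_Read_Count, Munge_Count): filled and munged data */
		return a->munge_count - a->buf_read_count;
	}

	static unsigned int example_bytes_free_to_write(const struct comedi_async *a)
	{
		/* [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz) */
		return a->buf_read_count + a->prealloc_bufsz -
		       a->buf_write_alloc_count;
	}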
Before data is read out of the - * buffer, extra space may need to be allocated for reading (advancing - * Buf_Read_Alloc_Count). After the data has been read out, the space - * needs to be released (advancing Buf_Read_Count). - * - * The actual members, buf_read_count, buf_read_alloc_count, - * munge_count, buf_write_count, and buf_write_alloc_count take the - * value of the corresponding capitalized _Count values modulo 2^32 - * (UINT_MAX+1). Subtracting a "lower" _count value from a "higher" - * _count value gives the same answer as subtracting a "lower" _Count - * value from a "higher" _Count value because prealloc_bufsz < UINT_MAX+1. - * The modulo operation is done implicitly. - * - * The buf_read_ptr, munge_ptr, and buf_write_ptr members take the value - * of the corresponding capitalized _Count values modulo prealloc_bufsz. - * These correspond to byte indices in the physical buffer. The modulo - * operation is done by subtracting prealloc_bufsz when the value - * exceeds prealloc_bufsz (assuming prealloc_bufsz plus the increment is - * less than or equal to UINT_MAX). - */ -struct comedi_async { - void *prealloc_buf; - unsigned int prealloc_bufsz; - struct comedi_buf_map *buf_map; - unsigned int max_bufsize; - unsigned int buf_write_count; - unsigned int buf_write_alloc_count; - unsigned int buf_read_count; - unsigned int buf_read_alloc_count; - unsigned int buf_write_ptr; - unsigned int buf_read_ptr; - unsigned int cur_chan; - unsigned int scans_done; - unsigned int scan_progress; - unsigned int munge_chan; - unsigned int munge_count; - unsigned int munge_ptr; - unsigned int events; - struct comedi_cmd cmd; - wait_queue_head_t wait_head; - unsigned int cb_mask; - int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s, - unsigned int x); -}; - -/** - * enum comedi_cb - &struct comedi_async callback "events" - * @COMEDI_CB_EOS: end-of-scan - * @COMEDI_CB_EOA: end-of-acquisition/output - * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write() - * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer - * @COMEDI_CB_ERROR: card error during acquisition - * @COMEDI_CB_OVERFLOW: buffer overflow/underflow - * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred - * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command - */ -enum comedi_cb { - COMEDI_CB_EOS = BIT(0), - COMEDI_CB_EOA = BIT(1), - COMEDI_CB_BLOCK = BIT(2), - COMEDI_CB_EOBUF = BIT(3), - COMEDI_CB_ERROR = BIT(4), - COMEDI_CB_OVERFLOW = BIT(5), - /* masks */ - COMEDI_CB_ERROR_MASK = (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW), - COMEDI_CB_CANCEL_MASK = (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK) -}; - -/** - * struct comedi_driver - COMEDI driver registration - * @driver_name: Name of driver. - * @module: Owning module. - * @attach: The optional "attach" handler for manually configured COMEDI - * devices. - * @detach: The "detach" handler for deconfiguring COMEDI devices. - * @auto_attach: The optional "auto_attach" handler for automatically - * configured COMEDI devices. - * @num_names: Optional number of "board names" supported. - * @board_name: Optional pointer to a pointer to a board name. The pointer - * to a board name is embedded in an element of a driver-defined array - * of static, read-only board type information. - * @offset: Optional size of each element of the driver-defined array of - * static, read-only board type information, i.e. the offset between each - * pointer to a board name.
- * - * This is used with comedi_driver_register() and comedi_driver_unregister() to - * register and unregister a low-level COMEDI driver with the COMEDI core. - * - * If @num_names is non-zero, @board_name should be non-NULL, and @offset - * should be at least sizeof(*board_name). These are used by the handler for - * the %COMEDI_DEVCONFIG ioctl to match a hardware device and its driver by - * board name. If @num_names is zero, the %COMEDI_DEVCONFIG ioctl matches a - * hardware device and its driver by driver name. This is only useful if the - * @attach handler is set. If @num_names is non-zero, the driver's @attach - * handler will be called with the COMEDI device structure's board_ptr member - * pointing to the matched pointer to a board name within the driver's private - * array of static, read-only board type information. - * - * The @detach handler has two roles. If a COMEDI device was successfully - * configured by the @attach or @auto_attach handler, it is called when the - * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to - * unloading of the driver, or due to device removal). It is also called when - * the @attach or @auto_attach handler returns an error. Therefore, the - * @attach or @auto_attach handlers can defer clean-up on error until the - * @detach handler is called. If the @attach or @auto_attach handlers free - * any resources themselves, they must prevent the @detach handler from - * freeing the same resources. The @detach handler must not assume that all - * resources requested by the @attach or @auto_attach handler were - * successfully allocated. - */ -struct comedi_driver { - /* private: */ - struct comedi_driver *next; /* Next in list of COMEDI drivers. */ - /* public: */ - const char *driver_name; - struct module *module; - int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it); - void (*detach)(struct comedi_device *dev); - int (*auto_attach)(struct comedi_device *dev, unsigned long context); - unsigned int num_names; - const char *const *board_name; - int offset; -}; - -/** - * struct comedi_device - Working data for a COMEDI device - * @use_count: Number of open file objects. - * @driver: Low-level COMEDI driver attached to this COMEDI device. - * @pacer: Optional pointer to a dynamically allocated acquisition pacer - * control. It is freed automatically after the COMEDI device is - * detached from the low-level driver. - * @private: Optional pointer to private data allocated by the low-level - * driver. It is freed automatically after the COMEDI device is - * detached from the low-level driver. - * @class_dev: Sysfs comediX device. - * @minor: Minor device number of COMEDI char device (0-47). - * @detach_count: Counter incremented every time the COMEDI device is detached. - * Used for checking a previous attachment is still valid. - * @hw_dev: Optional pointer to the low-level hardware &struct device. It is - * required for automatically configured COMEDI devices and optional for - * COMEDI devices configured by the %COMEDI_DEVCONFIG ioctl, although - * the bus-specific COMEDI functions only work if it is set correctly. - * It is also passed to dma_alloc_coherent() for COMEDI subdevices that - * have their 'async_dma_dir' member set to something other than - * %DMA_NONE. - * @board_name: Pointer to a COMEDI board name or a COMEDI driver name. 
When - * the low-level driver's "attach" handler is called by the handler for - * the %COMEDI_DEVCONFIG ioctl, it points to a matched board name - * string if the 'num_names' member of the &struct comedi_driver is - * non-zero; otherwise it points to the low-level driver name string. - * When the low-level driver's "auto_attach" handler is called for an - * automatically configured COMEDI device, it points to the low-level - * driver name string. The low-level driver is free to change it in its - * "attach" or "auto_attach" handler if it wishes. - * @board_ptr: Optional pointer to private, read-only board type information in - * the low-level driver. If the 'num_names' member of the &struct - * comedi_driver is non-zero, the handler for the %COMEDI_DEVCONFIG ioctl - * will point it to a pointer to a matched board name string within the - * driver's private array of static, read-only board type information when - * calling the driver's "attach" handler. The low-level driver is free to - * change it. - * @attached: Flag indicating that the COMEDI device is attached to a low-level - * driver. - * @ioenabled: Flag used to indicate that a PCI device has been enabled and - * its regions requested. - * @spinlock: Generic spin-lock for use by the low-level driver. - * @mutex: Generic mutex for use by the COMEDI core module. - * @attach_lock: &struct rw_semaphore used to guard against the COMEDI device - * being detached while an operation is in progress. The down_write() - * operation is only allowed while @mutex is held and is used when - * changing @attached and @detach_count and calling the low-level driver's - * "detach" handler. The down_read() operation is generally used without - * holding @mutex. - * @refcount: &struct kref reference counter for freeing COMEDI device. - * @n_subdevices: Number of COMEDI subdevices allocated by the low-level - * driver for this device. - * @subdevices: Dynamically allocated array of COMEDI subdevices. - * @mmio: Optional pointer to a remapped MMIO region set by the low-level - * driver. - * @iobase: Optional base of an I/O port region requested by the low-level - * driver. - * @iolen: Length of I/O port region requested at @iobase. - * @irq: Optional IRQ number requested by the low-level driver. - * @read_subdev: Optional pointer to a default COMEDI subdevice operated on by - * the read() file operation. Set by the low-level driver. - * @write_subdev: Optional pointer to a default COMEDI subdevice operated on by - * the write() file operation. Set by the low-level driver. - * @async_queue: Storage for fasync_helper(). - * @open: Optional pointer to a function set by the low-level driver to be - * called when @use_count changes from 0 to 1. - * @close: Optional pointer to a function set by the low-level driver to be - * called when @use_count changes from 1 to 0. - * @insn_device_config: Optional pointer to a handler for all sub-instructions - * except %INSN_DEVICE_CONFIG_GET_ROUTES of the %INSN_DEVICE_CONFIG - * instruction. If this is not initialized by the low-level driver, a - * default handler will be set during post-configuration. - * @get_valid_routes: Optional pointer to a handler for the - * %INSN_DEVICE_CONFIG_GET_ROUTES sub-instruction of the - * %INSN_DEVICE_CONFIG instruction set. If this is not initialized by the - * low-level driver, a default handler that copies zero routes back to the - * user will be used. - * - * This is the main control data structure for a COMEDI device (as far as the - * COMEDI core is concerned).
There are two groups of COMEDI devices - - * "legacy" devices that are configured by the handler for the - * %COMEDI_DEVCONFIG ioctl, and automatically configured devices resulting - * from a call to comedi_auto_config() as a result of a bus driver probe in - * a low-level COMEDI driver. The "legacy" COMEDI devices are allocated - * during module initialization if the "comedi_num_legacy_minors" module - * parameter is non-zero and use minor device numbers from 0 to - * comedi_num_legacy_minors minus one. The automatically configured COMEDI - * devices are allocated on demand and use minor device numbers from - * comedi_num_legacy_minors to 47. - */ -struct comedi_device { - int use_count; - struct comedi_driver *driver; - struct comedi_8254 *pacer; - void *private; - - struct device *class_dev; - int minor; - unsigned int detach_count; - struct device *hw_dev; - - const char *board_name; - const void *board_ptr; - unsigned int attached:1; - unsigned int ioenabled:1; - spinlock_t spinlock; /* generic spin-lock for low-level driver */ - struct mutex mutex; /* generic mutex for COMEDI core */ - struct rw_semaphore attach_lock; - struct kref refcount; - - int n_subdevices; - struct comedi_subdevice *subdevices; - - /* dumb */ - void __iomem *mmio; - unsigned long iobase; - unsigned long iolen; - unsigned int irq; - - struct comedi_subdevice *read_subdev; - struct comedi_subdevice *write_subdev; - - struct fasync_struct *async_queue; - - int (*open)(struct comedi_device *dev); - void (*close)(struct comedi_device *dev); - int (*insn_device_config)(struct comedi_device *dev, - struct comedi_insn *insn, unsigned int *data); - unsigned int (*get_valid_routes)(struct comedi_device *dev, - unsigned int n_pairs, - unsigned int *pair_data); -}; - -/* - * function prototypes - */ - -void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s); - -struct comedi_device *comedi_dev_get_from_minor(unsigned int minor); -int comedi_dev_put(struct comedi_device *dev); - -bool comedi_is_subdevice_running(struct comedi_subdevice *s); - -void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size); -void comedi_set_spriv_auto_free(struct comedi_subdevice *s); - -int comedi_check_chanlist(struct comedi_subdevice *s, - int n, - unsigned int *chanlist); - -/* range stuff */ - -#define RANGE(a, b) {(a) * 1e6, (b) * 1e6, 0} -#define RANGE_ext(a, b) {(a) * 1e6, (b) * 1e6, RF_EXTERNAL} -#define RANGE_mA(a, b) {(a) * 1e6, (b) * 1e6, UNIT_mA} -#define RANGE_unitless(a, b) {(a) * 1e6, (b) * 1e6, 0} -#define BIP_RANGE(a) {-(a) * 1e6, (a) * 1e6, 0} -#define UNI_RANGE(a) {0, (a) * 1e6, 0} - -extern const struct comedi_lrange range_bipolar10; -extern const struct comedi_lrange range_bipolar5; -extern const struct comedi_lrange range_bipolar2_5; -extern const struct comedi_lrange range_unipolar10; -extern const struct comedi_lrange range_unipolar5; -extern const struct comedi_lrange range_unipolar2_5; -extern const struct comedi_lrange range_0_20mA; -extern const struct comedi_lrange range_4_20mA; -extern const struct comedi_lrange range_0_32mA; -extern const struct comedi_lrange range_unknown; - -#define range_digital range_unipolar5 - -/** - * struct comedi_lrange - Describes a COMEDI range table - * @length: Number of entries in the range table. - * @range: Array of &struct comedi_krange, one for each range. - * - * Each element of @range[] describes the minimum and maximum physical range - * and the type of units. Typically, the type of unit is %UNIT_volt - * (i.e. 
volts) and the minimum and maximum are in millionths of a volt. - * There may also be a flag that indicates the minimum and maximum are merely - * scale factors for an unknown, external reference. - */ -struct comedi_lrange { - int length; - struct comedi_krange range[]; -}; - -/** - * comedi_range_is_bipolar() - Test if subdevice range is bipolar - * @s: COMEDI subdevice. - * @range: Index of range within a range table. - * - * Tests whether a range is bipolar by checking whether its minimum value - * is negative. - * - * Assumes @range is valid. Does not work for subdevices using a - * channel-specific range table list. - * - * Return: - * %true if the range is bipolar. - * %false if the range is unipolar. - */ -static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s, - unsigned int range) -{ - return s->range_table->range[range].min < 0; -} - -/** - * comedi_range_is_unipolar() - Test if subdevice range is unipolar - * @s: COMEDI subdevice. - * @range: Index of range within a range table. - * - * Tests whether a range is unipolar by checking whether its minimum value - * is at least 0. - * - * Assumes @range is valid. Does not work for subdevices using a - * channel-specific range table list. - * - * Return: - * %true if the range is unipolar. - * %false if the range is bipolar. - */ -static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s, - unsigned int range) -{ - return s->range_table->range[range].min >= 0; -} - -/** - * comedi_range_is_external() - Test if subdevice range is external - * @s: COMEDI subdevice. - * @range: Index of range within a range table. - * - * Tests whether a range is externally referenced by checking whether its - * %RF_EXTERNAL flag is set. - * - * Assumes @range is valid. Does not work for subdevices using a - * channel-specific range table list. - * - * Return: - * %true if the range is external. - * %false if the range is internal. - */ -static inline bool comedi_range_is_external(struct comedi_subdevice *s, - unsigned int range) -{ - return !!(s->range_table->range[range].flags & RF_EXTERNAL); -} - -/** - * comedi_chan_range_is_bipolar() - Test if channel-specific range is bipolar - * @s: COMEDI subdevice. - * @chan: The channel number. - * @range: Index of range within a range table. - * - * Tests whether a range is bipolar by checking whether its minimum value - * is negative. - * - * Assumes @chan and @range are valid. Only works for subdevices with a - * channel-specific range table list. - * - * Return: - * %true if the range is bipolar. - * %false if the range is unipolar. - */ -static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s, - unsigned int chan, - unsigned int range) -{ - return s->range_table_list[chan]->range[range].min < 0; -} - -/** - * comedi_chan_range_is_unipolar() - Test if channel-specific range is unipolar - * @s: COMEDI subdevice. - * @chan: The channel number. - * @range: Index of range within a range table. - * - * Tests whether a range is unipolar by checking whether its minimum value - * is at least 0. - * - * Assumes @chan and @range are valid. Only works for subdevices with a - * channel-specific range table list. - * - * Return: - * %true if the range is unipolar. - * %false if the range is bipolar.
- */ -static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s, - unsigned int chan, - unsigned int range) -{ - return s->range_table_list[chan]->range[range].min >= 0; -} - -/** - * comedi_chan_range_is_external() - Test if channel-specific range is external - * @s: COMEDI subdevice. - * @chan: The channel number. - * @range: Index of range within a range table. - * - * Tests whether a range is externally referenced by checking whether its - * %RF_EXTERNAL flag is set. - * - * Assumes @chan and @range are valid. Only works for subdevices with a - * channel-specific range table list. - * - * Return: - * %true if the range is external. - * %false if the range is internal. - */ -static inline bool comedi_chan_range_is_external(struct comedi_subdevice *s, - unsigned int chan, - unsigned int range) -{ - return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL); -} - -/** - * comedi_offset_munge() - Convert between offset binary and 2's complement - * @s: COMEDI subdevice. - * @val: Value to be converted. - * - * Toggles the highest bit of a sample value to toggle between offset binary - * and 2's complement. Assumes that @s->maxdata is a power of 2 minus 1. - * - * Return: The converted value. - */ -static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s, - unsigned int val) -{ - return val ^ s->maxdata ^ (s->maxdata >> 1); -} - -/** - * comedi_bytes_per_sample() - Determine subdevice sample size - * @s: COMEDI subdevice. - * - * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on - * whether the %SDF_LSAMPL subdevice flag is set or not. - * - * Return: The subdevice sample size. - */ -static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s) -{ - return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short); -} - -/** - * comedi_sample_shift() - Determine log2 of subdevice sample size - * @s: COMEDI subdevice. - * - * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on - * whether the %SDF_LSAMPL subdevice flag is set or not. The log2 of the - * sample size will be 2 or 1 and can be used as the right operand of a - * bit-shift operator to multiply or divide something by the sample size. - * - * Return: log2 of the subdevice sample size. - */ -static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s) -{ - return s->subdev_flags & SDF_LSAMPL ? 2 : 1; -} - -/** - * comedi_bytes_to_samples() - Convert a number of bytes to a number of samples - * @s: COMEDI subdevice. - * @nbytes: Number of bytes. - * - * Return: The number of bytes divided by the subdevice sample size. - */ -static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s, - unsigned int nbytes) -{ - return nbytes >> comedi_sample_shift(s); -} - -/** - * comedi_samples_to_bytes() - Convert a number of samples to a number of bytes - * @s: COMEDI subdevice. - * @nsamples: Number of samples. - * - * Return: The number of samples multiplied by the subdevice sample size. - * (Does not check for arithmetic overflow.) - */ -static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s, - unsigned int nsamples) -{ - return nsamples << comedi_sample_shift(s); -} - -/** - * comedi_check_trigger_src() - Trivially validate a comedi_cmd trigger source - * @src: Pointer to the trigger source to validate. - * @flags: Bitmask of valid %TRIG_* for the trigger. - * - * This is used in "step 1" of the do_cmdtest functions of comedi drivers - * to validate the comedi_cmd triggers.
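For a 16-bit subdevice (maxdata of 0xffff), comedi_offset_munge() above reduces to toggling bit 15, since 0xffff ^ (0xffff >> 1) == 0x8000: offset-binary mid-scale maps to two's-complement zero and back. A minimal standalone check of that identity (plain C, not comedi code):

    #include <assert.h>

    int main(void)
    {
        unsigned int maxdata = 0xffff;  /* 16-bit sample: 2^16 - 1 */
        unsigned int mid = 0x8000;      /* offset-binary mid-scale */

        /* Same expression as comedi_offset_munge(): toggles the top bit. */
        assert((mid ^ maxdata ^ (maxdata >> 1)) == 0x0000);
        assert((0x0000u ^ maxdata ^ (maxdata >> 1)) == 0x8000);
        return 0;
    }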
The mask of the @src against the - * @flags allows the userspace comedilib to pass all the comedi_cmd - * triggers as %TRIG_ANY and get back a bitmask of the valid trigger sources. - * - * Return: - * 0 if trigger sources in *@src are all supported. - * -EINVAL if any trigger source in *@src is unsupported. - */ -static inline int comedi_check_trigger_src(unsigned int *src, - unsigned int flags) -{ - unsigned int orig_src = *src; - - *src = orig_src & flags; - if (*src == TRIG_INVALID || *src != orig_src) - return -EINVAL; - return 0; -} - -/** - * comedi_check_trigger_is_unique() - Make sure a trigger source is unique - * @src: The trigger source to check. - * - * Return: - * 0 if no more than one trigger source is set. - * -EINVAL if more than one trigger source is set. - */ -static inline int comedi_check_trigger_is_unique(unsigned int src) -{ - /* this test is true if more than one _src bit is set */ - if ((src & (src - 1)) != 0) - return -EINVAL; - return 0; -} - -/** - * comedi_check_trigger_arg_is() - Trivially validate a trigger argument - * @arg: Pointer to the trigger arg to validate. - * @val: The value the argument should be. - * - * Forces *@arg to be @val. - * - * Return: - * 0 if *@arg was already @val. - * -EINVAL if *@arg differed from @val. - */ -static inline int comedi_check_trigger_arg_is(unsigned int *arg, - unsigned int val) -{ - if (*arg != val) { - *arg = val; - return -EINVAL; - } - return 0; -} - -/** - * comedi_check_trigger_arg_min() - Trivially validate a trigger argument min - * @arg: Pointer to the trigger arg to validate. - * @val: The minimum value the argument should be. - * - * Forces *@arg to be at least @val, setting it to @val if necessary. - * - * Return: - * 0 if *@arg was already at least @val. - * -EINVAL if *@arg was less than @val. - */ -static inline int comedi_check_trigger_arg_min(unsigned int *arg, - unsigned int val) -{ - if (*arg < val) { - *arg = val; - return -EINVAL; - } - return 0; -} - -/** - * comedi_check_trigger_arg_max() - Trivially validate a trigger argument max - * @arg: Pointer to the trigger arg to validate. - * @val: The maximum value the argument should be. - * - * Forces *@arg to be no more than @val, setting it to @val if necessary. - * - * Return: - * 0 if *@arg was already no more than @val. - * -EINVAL if *@arg was greater than @val. - */ -static inline int comedi_check_trigger_arg_max(unsigned int *arg, - unsigned int val) -{ - if (*arg > val) { - *arg = val; - return -EINVAL; - } - return 0; -} - -/* - * Must set dev->hw_dev if you wish to dma directly into comedi's buffer. - * Also useful for retrieving a previously configured hardware device of - * known bus type. Set automatically for auto-configured devices. - * Automatically set to NULL when detaching hardware device. - */ -int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev); - -/** - * comedi_buf_n_bytes_ready() - Determine amount of unread data in buffer - * @s: COMEDI subdevice. - * - * Determines the number of bytes of unread data in the asynchronous - * acquisition data buffer for a subdevice. The data in question might not - * have been fully "munged" yet. - * - * Return: The amount of unread data in bytes.
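Taken together, the comedi_check_trigger_*() helpers above make a driver's do_cmdtest "step 1" nearly declarative. A hedged sketch of typical usage (my_ai_cmdtest and the supported TRIG_* sets are invented for illustration; real drivers also validate the remaining sources and arguments):

    static int my_ai_cmdtest(struct comedi_device *dev,
                             struct comedi_subdevice *s,
                             struct comedi_cmd *cmd)
    {
        int err = 0;

        /* Step 1: mask each source against what the hardware allows. */
        err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
        err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
        err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
        err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
        err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
        if (err)
            return 1;

        /* Step 2a: each source must now be a single TRIG_* bit. */
        err |= comedi_check_trigger_is_unique(cmd->start_src);
        err |= comedi_check_trigger_is_unique(cmd->stop_src);
        if (err)
            return 2;

        return 0;
    }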
- */ -static inline unsigned int comedi_buf_n_bytes_ready(struct comedi_subdevice *s) -{ - return s->async->buf_write_count - s->async->buf_read_count; -} - -unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n); -unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n); - -unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s); -unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n); -unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n); - -unsigned int comedi_buf_write_samples(struct comedi_subdevice *s, - const void *data, unsigned int nsamples); -unsigned int comedi_buf_read_samples(struct comedi_subdevice *s, - void *data, unsigned int nsamples); - -/* drivers.c - general comedi driver functions */ - -#define COMEDI_TIMEOUT_MS 1000 - -int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s, - struct comedi_insn *insn, - int (*cb)(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned long context), - unsigned long context); - -unsigned int comedi_handle_events(struct comedi_device *dev, - struct comedi_subdevice *s); - -int comedi_dio_insn_config(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data, - unsigned int mask); -unsigned int comedi_dio_update_state(struct comedi_subdevice *s, - unsigned int *data); -unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, - struct comedi_cmd *cmd); -unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); -unsigned int comedi_nscans_left(struct comedi_subdevice *s, - unsigned int nscans); -unsigned int comedi_nsamples_left(struct comedi_subdevice *s, - unsigned int nsamples); -void comedi_inc_scan_progress(struct comedi_subdevice *s, - unsigned int num_bytes); - -void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size); -int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices); -int comedi_alloc_subdev_readback(struct comedi_subdevice *s); - -int comedi_readback_insn_read(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); - -int comedi_load_firmware(struct comedi_device *dev, struct device *hw_dev, - const char *name, - int (*cb)(struct comedi_device *dev, - const u8 *data, size_t size, - unsigned long context), - unsigned long context); - -int __comedi_request_region(struct comedi_device *dev, - unsigned long start, unsigned long len); -int comedi_request_region(struct comedi_device *dev, - unsigned long start, unsigned long len); -void comedi_legacy_detach(struct comedi_device *dev); - -int comedi_auto_config(struct device *hardware_device, - struct comedi_driver *driver, unsigned long context); -void comedi_auto_unconfig(struct device *hardware_device); - -int comedi_driver_register(struct comedi_driver *driver); -void comedi_driver_unregister(struct comedi_driver *driver); - -/** - * module_comedi_driver() - Helper macro for registering a comedi driver - * @__comedi_driver: comedi_driver struct - * - * Helper macro for comedi drivers which do not do anything special in module - * init/exit. This eliminates a lot of boilerplate. Each module may only use - * this macro once, and calling it replaces module_init() and module_exit(). 
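As the comment above notes, module_comedi_driver() stands in for module_init()/module_exit(). A minimal hedged sketch of the registration block at the bottom of a driver (my_driver, my_attach and my_detach are placeholders):

    static struct comedi_driver my_driver = {
        .driver_name = "my_board",
        .module      = THIS_MODULE,
        .attach      = my_attach,
        .detach      = my_detach,
    };
    module_comedi_driver(my_driver);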
- */ -#define module_comedi_driver(__comedi_driver) \ - module_driver(__comedi_driver, comedi_driver_register, \ - comedi_driver_unregister) - -#endif /* _COMEDIDEV_H */ diff --git a/drivers/comedi/comedilib.h b/drivers/comedi/comedilib.h deleted file mode 100644 index 0223c9cd9215..000000000000 --- a/drivers/comedi/comedilib.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * comedilib.h - * Header file for kcomedilib - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org> - */ - -#ifndef _LINUX_COMEDILIB_H -#define _LINUX_COMEDILIB_H - -struct comedi_device *comedi_open(const char *path); -int comedi_close(struct comedi_device *dev); -int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev, - unsigned int chan, unsigned int *io); -int comedi_dio_config(struct comedi_device *dev, unsigned int subdev, - unsigned int chan, unsigned int io); -int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev, - unsigned int mask, unsigned int *bits, - unsigned int base_channel); -int comedi_find_subdevice_by_type(struct comedi_device *dev, int type, - unsigned int subd); -int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice); - -#endif diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c index 750a6ff3c03c..8eb1f699a857 100644 --- a/drivers/comedi/drivers.c +++ b/drivers/comedi/drivers.c @@ -17,8 +17,7 @@ #include <linux/dma-direction.h> #include <linux/interrupt.h> #include <linux/firmware.h> - -#include "comedidev.h" +#include <linux/comedi/comedidev.h> #include "comedi_internal.h" struct comedi_driver *comedi_drivers; diff --git a/drivers/comedi/drivers/8255.c b/drivers/comedi/drivers/8255.c index e23335c75867..ced8ea09d4fa 100644 --- a/drivers/comedi/drivers/8255.c +++ b/drivers/comedi/drivers/8255.c @@ -40,9 +40,8 @@ */ #include <linux/module.h> -#include "../comedidev.h" - -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> static int dev_8255_attach(struct comedi_device *dev, struct comedi_devconfig *it) diff --git a/drivers/comedi/drivers/8255.h b/drivers/comedi/drivers/8255.h deleted file mode 100644 index ceae3ca52e60..000000000000 --- a/drivers/comedi/drivers/8255.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * module/8255.h - * Header file for 8255 - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 1998 David A. 
Schleef <ds@schleef.org> - */ - -#ifndef _8255_H -#define _8255_H - -#define I8255_SIZE 0x04 - -#define I8255_DATA_A_REG 0x00 -#define I8255_DATA_B_REG 0x01 -#define I8255_DATA_C_REG 0x02 -#define I8255_CTRL_REG 0x03 -#define I8255_CTRL_C_LO_IO BIT(0) -#define I8255_CTRL_B_IO BIT(1) -#define I8255_CTRL_B_MODE BIT(2) -#define I8255_CTRL_C_HI_IO BIT(3) -#define I8255_CTRL_A_IO BIT(4) -#define I8255_CTRL_A_MODE(x) ((x) << 5) -#define I8255_CTRL_CW BIT(7) - -struct comedi_device; -struct comedi_subdevice; - -int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io)(struct comedi_device *dev, int dir, int port, - int data, unsigned long regbase), - unsigned long regbase); - -int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io)(struct comedi_device *dev, int dir, int port, - int data, unsigned long regbase), - unsigned long regbase); - -unsigned long subdev_8255_regbase(struct comedi_subdevice *s); - -#endif diff --git a/drivers/comedi/drivers/8255_pci.c b/drivers/comedi/drivers/8255_pci.c index 5a810f0e532a..0fec048e3a53 100644 --- a/drivers/comedi/drivers/8255_pci.c +++ b/drivers/comedi/drivers/8255_pci.c @@ -53,10 +53,8 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" - -#include "8255.h" +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> enum pci_8255_boardid { BOARD_ADLINK_PCI7224, diff --git a/drivers/comedi/drivers/addi_apci_1032.c b/drivers/comedi/drivers/addi_apci_1032.c index 81a246fbcc01..8eec6d9402de 100644 --- a/drivers/comedi/drivers/addi_apci_1032.c +++ b/drivers/comedi/drivers/addi_apci_1032.c @@ -63,8 +63,8 @@ #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "amcc_s5933.h" /* diff --git a/drivers/comedi/drivers/addi_apci_1500.c b/drivers/comedi/drivers/addi_apci_1500.c index b04c15dcfb57..c94c78588889 100644 --- a/drivers/comedi/drivers/addi_apci_1500.c +++ b/drivers/comedi/drivers/addi_apci_1500.c @@ -14,8 +14,8 @@ #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "amcc_s5933.h" #include "z8536.h" diff --git a/drivers/comedi/drivers/addi_apci_1516.c b/drivers/comedi/drivers/addi_apci_1516.c index 274ec9fb030c..3c48b72dad9d 100644 --- a/drivers/comedi/drivers/addi_apci_1516.c +++ b/drivers/comedi/drivers/addi_apci_1516.c @@ -14,8 +14,8 @@ */ #include <linux/module.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "addi_watchdog.h" /* diff --git a/drivers/comedi/drivers/addi_apci_1564.c b/drivers/comedi/drivers/addi_apci_1564.c index 06fc7ed96200..0cd40948bee7 100644 --- a/drivers/comedi/drivers/addi_apci_1564.c +++ b/drivers/comedi/drivers/addi_apci_1564.c @@ -68,8 +68,8 @@ #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "addi_tcw.h" #include "addi_watchdog.h" diff --git a/drivers/comedi/drivers/addi_apci_16xx.c b/drivers/comedi/drivers/addi_apci_16xx.c index c306aa41df97..ec2c321d2431 100644 --- a/drivers/comedi/drivers/addi_apci_16xx.c +++ b/drivers/comedi/drivers/addi_apci_16xx.c @@ -14,8 +14,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/addi_apci_2032.c b/drivers/comedi/drivers/addi_apci_2032.c index e9a2b37a4ae0..e048dfc3ec77 100644 --- a/drivers/comedi/drivers/addi_apci_2032.c +++ 
b/drivers/comedi/drivers/addi_apci_2032.c @@ -16,8 +16,8 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "addi_watchdog.h" /* diff --git a/drivers/comedi/drivers/addi_apci_2200.c b/drivers/comedi/drivers/addi_apci_2200.c index 4c5aee784bd9..00378c9dddc8 100644 --- a/drivers/comedi/drivers/addi_apci_2200.c +++ b/drivers/comedi/drivers/addi_apci_2200.c @@ -14,8 +14,8 @@ */ #include <linux/module.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "addi_watchdog.h" /* diff --git a/drivers/comedi/drivers/addi_apci_3120.c b/drivers/comedi/drivers/addi_apci_3120.c index 1ed3b33d1a30..28a242e69721 100644 --- a/drivers/comedi/drivers/addi_apci_3120.c +++ b/drivers/comedi/drivers/addi_apci_3120.c @@ -14,8 +14,8 @@ #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "amcc_s5933.h" /* diff --git a/drivers/comedi/drivers/addi_apci_3501.c b/drivers/comedi/drivers/addi_apci_3501.c index f0c9642f3f1a..ecb5552f1785 100644 --- a/drivers/comedi/drivers/addi_apci_3501.c +++ b/drivers/comedi/drivers/addi_apci_3501.c @@ -41,8 +41,8 @@ */ #include <linux/module.h> +#include <linux/comedi/comedi_pci.h> -#include "../comedi_pci.h" #include "amcc_s5933.h" /* diff --git a/drivers/comedi/drivers/addi_apci_3xxx.c b/drivers/comedi/drivers/addi_apci_3xxx.c index a90d59377e18..bc72273e6a29 100644 --- a/drivers/comedi/drivers/addi_apci_3xxx.c +++ b/drivers/comedi/drivers/addi_apci_3xxx.c @@ -15,8 +15,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #define CONV_UNIT_NS BIT(0) #define CONV_UNIT_US BIT(1) diff --git a/drivers/comedi/drivers/addi_watchdog.c b/drivers/comedi/drivers/addi_watchdog.c index 69b323fb869f..ed87ab432020 100644 --- a/drivers/comedi/drivers/addi_watchdog.c +++ b/drivers/comedi/drivers/addi_watchdog.c @@ -10,7 +10,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #include "addi_tcw.h" #include "addi_watchdog.h" diff --git a/drivers/comedi/drivers/adl_pci6208.c b/drivers/comedi/drivers/adl_pci6208.c index 9ae4cc523dd4..b27354a51f5c 100644 --- a/drivers/comedi/drivers/adl_pci6208.c +++ b/drivers/comedi/drivers/adl_pci6208.c @@ -24,8 +24,7 @@ #include <linux/module.h> #include <linux/delay.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI-6208/6216-GL register map diff --git a/drivers/comedi/drivers/adl_pci7x3x.c b/drivers/comedi/drivers/adl_pci7x3x.c index 8fc45638ff59..e9f22de9b6f1 100644 --- a/drivers/comedi/drivers/adl_pci7x3x.c +++ b/drivers/comedi/drivers/adl_pci7x3x.c @@ -46,8 +46,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "plx9052.h" diff --git a/drivers/comedi/drivers/adl_pci8164.c b/drivers/comedi/drivers/adl_pci8164.c index d5e1bda81557..0c513a67a264 100644 --- a/drivers/comedi/drivers/adl_pci8164.c +++ b/drivers/comedi/drivers/adl_pci8164.c @@ -19,8 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #define PCI8164_AXIS(x) ((x) * 0x08) #define PCI8164_CMD_MSTS_REG 0x00 diff --git a/drivers/comedi/drivers/adl_pci9111.c b/drivers/comedi/drivers/adl_pci9111.c index a062c5ab20e9..c50f94272a74 100644 --- a/drivers/comedi/drivers/adl_pci9111.c +++ b/drivers/comedi/drivers/adl_pci9111.c @@ 
-42,11 +42,10 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8254.h> #include "plx9052.h" -#include "comedi_8254.h" #define PCI9111_FIFO_HALF_SIZE 512 diff --git a/drivers/comedi/drivers/adl_pci9118.c b/drivers/comedi/drivers/adl_pci9118.c index cda3a4267dca..9a816c718303 100644 --- a/drivers/comedi/drivers/adl_pci9118.c +++ b/drivers/comedi/drivers/adl_pci9118.c @@ -78,11 +78,10 @@ #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/io.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8254.h> #include "amcc_s5933.h" -#include "comedi_8254.h" /* * PCI BAR2 Register map (dev->iobase) diff --git a/drivers/comedi/drivers/adq12b.c b/drivers/comedi/drivers/adq12b.c index d719f76709ef..19d765182006 100644 --- a/drivers/comedi/drivers/adq12b.c +++ b/drivers/comedi/drivers/adq12b.c @@ -48,8 +48,7 @@ #include <linux/module.h> #include <linux/delay.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* address scheme (page 2.17 of the manual) */ #define ADQ12B_CTREG 0x00 diff --git a/drivers/comedi/drivers/adv_pci1710.c b/drivers/comedi/drivers/adv_pci1710.c index 090607760be6..4f2639968260 100644 --- a/drivers/comedi/drivers/adv_pci1710.c +++ b/drivers/comedi/drivers/adv_pci1710.c @@ -30,10 +30,9 @@ #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8254.h> -#include "../comedi_pci.h" - -#include "comedi_8254.h" #include "amcc_s5933.h" /* diff --git a/drivers/comedi/drivers/adv_pci1720.c b/drivers/comedi/drivers/adv_pci1720.c index 2fcd7e8e7d85..2619591ba301 100644 --- a/drivers/comedi/drivers/adv_pci1720.c +++ b/drivers/comedi/drivers/adv_pci1720.c @@ -42,8 +42,7 @@ #include <linux/module.h> #include <linux/delay.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI BAR2 Register map (dev->iobase) diff --git a/drivers/comedi/drivers/adv_pci1723.c b/drivers/comedi/drivers/adv_pci1723.c index 23660a9fdb9c..e2aedb152068 100644 --- a/drivers/comedi/drivers/adv_pci1723.c +++ b/drivers/comedi/drivers/adv_pci1723.c @@ -32,8 +32,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI Bar 2 I/O Register map (dev->iobase) diff --git a/drivers/comedi/drivers/adv_pci1724.c b/drivers/comedi/drivers/adv_pci1724.c index e8ab573c839f..bb43b7deeb56 100644 --- a/drivers/comedi/drivers/adv_pci1724.c +++ b/drivers/comedi/drivers/adv_pci1724.c @@ -38,8 +38,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI bar 2 Register I/O map (dev->iobase) diff --git a/drivers/comedi/drivers/adv_pci1760.c b/drivers/comedi/drivers/adv_pci1760.c index 6de8ab97d346..fcfc2e299110 100644 --- a/drivers/comedi/drivers/adv_pci1760.c +++ b/drivers/comedi/drivers/adv_pci1760.c @@ -22,8 +22,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI-1760 Register Map diff --git a/drivers/comedi/drivers/adv_pci_dio.c b/drivers/comedi/drivers/adv_pci_dio.c index 54c7419c8ca6..efa3e46b554b 100644 --- a/drivers/comedi/drivers/adv_pci_dio.c +++ b/drivers/comedi/drivers/adv_pci_dio.c @@ -23,11 +23,9 @@ #include <linux/module.h> #include <linux/delay.h> - -#include "../comedi_pci.h" - -#include "8255.h" -#include "comedi_8254.h" +#include <linux/comedi/comedi_pci.h> 
+#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> /* * Register offset definitions diff --git a/drivers/comedi/drivers/aio_aio12_8.c b/drivers/comedi/drivers/aio_aio12_8.c index 4829115921a3..30b8a32204d8 100644 --- a/drivers/comedi/drivers/aio_aio12_8.c +++ b/drivers/comedi/drivers/aio_aio12_8.c @@ -22,10 +22,9 @@ */ #include <linux/module.h> -#include "../comedidev.h" - -#include "comedi_8254.h" -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> /* * Register map diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c index fe3876235075..b00fab0b89d4 100644 --- a/drivers/comedi/drivers/aio_iiro_16.c +++ b/drivers/comedi/drivers/aio_iiro_16.c @@ -30,8 +30,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #define AIO_IIRO_16_RELAY_0_7 0x00 #define AIO_IIRO_16_INPUT_0_7 0x01 diff --git a/drivers/comedi/drivers/amplc_dio200.c b/drivers/comedi/drivers/amplc_dio200.c index fa19c9e7c56b..4544bcdd8a70 100644 --- a/drivers/comedi/drivers/amplc_dio200.c +++ b/drivers/comedi/drivers/amplc_dio200.c @@ -185,7 +185,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #include "amplc_dio200.h" diff --git a/drivers/comedi/drivers/amplc_dio200_common.c b/drivers/comedi/drivers/amplc_dio200_common.c index a3454130d5f8..ff651f2eb86c 100644 --- a/drivers/comedi/drivers/amplc_dio200_common.c +++ b/drivers/comedi/drivers/amplc_dio200_common.c @@ -12,12 +12,11 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> /* only for register defines */ +#include <linux/comedi/comedi_8254.h> #include "amplc_dio200.h" -#include "comedi_8254.h" -#include "8255.h" /* only for register defines */ /* 200 series registers */ #define DIO200_IO_SIZE 0x20 diff --git a/drivers/comedi/drivers/amplc_dio200_pci.c b/drivers/comedi/drivers/amplc_dio200_pci.c index 1bd7a42c8464..527994d82a1f 100644 --- a/drivers/comedi/drivers/amplc_dio200_pci.c +++ b/drivers/comedi/drivers/amplc_dio200_pci.c @@ -214,8 +214,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "amplc_dio200.h" diff --git a/drivers/comedi/drivers/amplc_pc236.c b/drivers/comedi/drivers/amplc_pc236.c index c377af1d5246..b21e0c906aab 100644 --- a/drivers/comedi/drivers/amplc_pc236.c +++ b/drivers/comedi/drivers/amplc_pc236.c @@ -32,8 +32,7 @@ */ #include <linux/module.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #include "amplc_pc236.h" diff --git a/drivers/comedi/drivers/amplc_pc236_common.c b/drivers/comedi/drivers/amplc_pc236_common.c index 981d281e87a1..9f4f89b1ef23 100644 --- a/drivers/comedi/drivers/amplc_pc236_common.c +++ b/drivers/comedi/drivers/amplc_pc236_common.c @@ -11,11 +11,10 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> #include "amplc_pc236.h" -#include "8255.h" static void pc236_intr_update(struct comedi_device *dev, bool enable) { diff --git a/drivers/comedi/drivers/amplc_pc263.c b/drivers/comedi/drivers/amplc_pc263.c index 68da6098ee84..d7f088a8a5e3 100644 --- a/drivers/comedi/drivers/amplc_pc263.c +++ b/drivers/comedi/drivers/amplc_pc263.c @@ -25,7 +25,7 @@ */ 
#include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* PC263 registers */ #define PC263_DO_0_7_REG 0x00 diff --git a/drivers/comedi/drivers/amplc_pci224.c b/drivers/comedi/drivers/amplc_pci224.c index bcf6d61af863..5a04e55daeea 100644 --- a/drivers/comedi/drivers/amplc_pci224.c +++ b/drivers/comedi/drivers/amplc_pci224.c @@ -96,10 +96,8 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> - -#include "../comedi_pci.h" - -#include "comedi_8254.h" +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8254.h> /* * PCI224/234 i/o space 1 (PCIBAR2) registers. diff --git a/drivers/comedi/drivers/amplc_pci230.c b/drivers/comedi/drivers/amplc_pci230.c index 8911dc2bd2c6..92ba8b8c0172 100644 --- a/drivers/comedi/drivers/amplc_pci230.c +++ b/drivers/comedi/drivers/amplc_pci230.c @@ -174,11 +174,9 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" - -#include "comedi_8254.h" -#include "8255.h" +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> /* * PCI230 PCI configuration register information diff --git a/drivers/comedi/drivers/amplc_pci236.c b/drivers/comedi/drivers/amplc_pci236.c index e7f6fa4d101a..482eb261c333 100644 --- a/drivers/comedi/drivers/amplc_pci236.c +++ b/drivers/comedi/drivers/amplc_pci236.c @@ -34,8 +34,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "amplc_pc236.h" #include "plx9052.h" diff --git a/drivers/comedi/drivers/amplc_pci263.c b/drivers/comedi/drivers/amplc_pci263.c index 9217973f1141..1609665c4b18 100644 --- a/drivers/comedi/drivers/amplc_pci263.c +++ b/drivers/comedi/drivers/amplc_pci263.c @@ -24,8 +24,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* PCI263 registers */ #define PCI263_DO_0_7_REG 0x00 diff --git a/drivers/comedi/drivers/c6xdigio.c b/drivers/comedi/drivers/c6xdigio.c index 786fd15698df..14b90d1c64dc 100644 --- a/drivers/comedi/drivers/c6xdigio.c +++ b/drivers/comedi/drivers/c6xdigio.c @@ -30,8 +30,7 @@ #include <linux/timer.h> #include <linux/io.h> #include <linux/pnp.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/cb_das16_cs.c b/drivers/comedi/drivers/cb_das16_cs.c index a5d171e71c33..8e0d2fa5f95d 100644 --- a/drivers/comedi/drivers/cb_das16_cs.c +++ b/drivers/comedi/drivers/cb_das16_cs.c @@ -27,10 +27,8 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> - -#include "../comedi_pcmcia.h" - -#include "comedi_8254.h" +#include <linux/comedi/comedi_pcmcia.h> +#include <linux/comedi/comedi_8254.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/cb_pcidas.c b/drivers/comedi/drivers/cb_pcidas.c index 2f20bd56ec6c..0c7576b967fc 100644 --- a/drivers/comedi/drivers/cb_pcidas.c +++ b/drivers/comedi/drivers/cb_pcidas.c @@ -54,11 +54,10 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> -#include "../comedi_pci.h" - -#include "comedi_8254.h" -#include "8255.h" #include "amcc_s5933.h" #define AI_BUFFER_SIZE 1024 /* max ai fifo size */ diff --git a/drivers/comedi/drivers/cb_pcidas64.c b/drivers/comedi/drivers/cb_pcidas64.c index 41a8fea7f48a..ca6038a25f26 
100644 --- a/drivers/comedi/drivers/cb_pcidas64.c +++ b/drivers/comedi/drivers/cb_pcidas64.c @@ -73,10 +73,9 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> -#include "../comedi_pci.h" - -#include "8255.h" #include "plx9080.h" #define TIMER_BASE 25 /* 40MHz master clock */ diff --git a/drivers/comedi/drivers/cb_pcidda.c b/drivers/comedi/drivers/cb_pcidda.c index 78cf1603638c..c52204a6bda4 100644 --- a/drivers/comedi/drivers/cb_pcidda.c +++ b/drivers/comedi/drivers/cb_pcidda.c @@ -27,10 +27,8 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" - -#include "8255.h" +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> #define EEPROM_SIZE 128 /* number of entries in eeprom */ /* maximum number of ao channels for supported boards */ diff --git a/drivers/comedi/drivers/cb_pcimdas.c b/drivers/comedi/drivers/cb_pcimdas.c index 2292f69da4f4..8bdb00774f11 100644 --- a/drivers/comedi/drivers/cb_pcimdas.c +++ b/drivers/comedi/drivers/cb_pcimdas.c @@ -34,12 +34,11 @@ #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> -#include "../comedi_pci.h" - -#include "comedi_8254.h" #include "plx9052.h" -#include "8255.h" /* * PCI Bar 1 Register map diff --git a/drivers/comedi/drivers/cb_pcimdda.c b/drivers/comedi/drivers/cb_pcimdda.c index 21fc7b3c5f60..bf8093a10315 100644 --- a/drivers/comedi/drivers/cb_pcimdda.c +++ b/drivers/comedi/drivers/cb_pcimdda.c @@ -67,10 +67,8 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" - -#include "8255.h" +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> /* device ids of the cards we support -- currently only 1 card supported */ #define PCI_ID_PCIM_DDA06_16 0x0053 diff --git a/drivers/comedi/drivers/comedi_8254.c b/drivers/comedi/drivers/comedi_8254.c index 4bf5daa9e885..b4185c1b2695 100644 --- a/drivers/comedi/drivers/comedi_8254.c +++ b/drivers/comedi/drivers/comedi_8254.c @@ -116,10 +116,8 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/io.h> - -#include "../comedidev.h" - -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> static unsigned int __i8254_read(struct comedi_8254 *i8254, unsigned int reg) { diff --git a/drivers/comedi/drivers/comedi_8254.h b/drivers/comedi/drivers/comedi_8254.h deleted file mode 100644 index d8264417e53c..000000000000 --- a/drivers/comedi/drivers/comedi_8254.h +++ /dev/null @@ -1,134 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * comedi_8254.h - * Generic 8254 timer/counter support - * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com> - * - * COMEDI - Linux Control and Measurement Device Interface - * Copyright (C) 2000 David A. 
Schleef <ds@schleef.org> - */ - -#ifndef _COMEDI_8254_H -#define _COMEDI_8254_H - -#include <linux/types.h> - -struct comedi_device; -struct comedi_insn; -struct comedi_subdevice; - -/* - * Common oscillator base values in nanoseconds - */ -#define I8254_OSC_BASE_10MHZ 100 -#define I8254_OSC_BASE_5MHZ 200 -#define I8254_OSC_BASE_4MHZ 250 -#define I8254_OSC_BASE_2MHZ 500 -#define I8254_OSC_BASE_1MHZ 1000 -#define I8254_OSC_BASE_100KHZ 10000 -#define I8254_OSC_BASE_10KHZ 100000 -#define I8254_OSC_BASE_1KHZ 1000000 - -/* - * I/O access size used to read/write registers - */ -#define I8254_IO8 1 -#define I8254_IO16 2 -#define I8254_IO32 4 - -/* - * Register map for generic 8254 timer (I8254_IO8 with 0 regshift) - */ -#define I8254_COUNTER0_REG 0x00 -#define I8254_COUNTER1_REG 0x01 -#define I8254_COUNTER2_REG 0x02 -#define I8254_CTRL_REG 0x03 -#define I8254_CTRL_SEL_CTR(x) ((x) << 6) -#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x)) -#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4) -#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5) -#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x)) -#define I8254_CTRL_RW(x) (((x) & 0x3) << 4) -#define I8254_CTRL_LATCH I8254_CTRL_RW(0) -#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1) -#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2) -#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3) - -/* counter maps zero to 0x10000 */ -#define I8254_MAX_COUNT 0x10000 - -/** - * struct comedi_8254 - private data used by this module - * @iobase: PIO base address of the registers (in/out) - * @mmio: MMIO base address of the registers (read/write) - * @iosize: I/O size used to access the registers (b/w/l) - * @regshift: register gap shift - * @osc_base: cascaded oscillator speed in ns - * @divisor: divisor for single counter - * @divisor1: divisor loaded into first cascaded counter - * @divisor2: divisor loaded into second cascaded counter - * @next_div: next divisor for single counter - * @next_div1: next divisor to use for first cascaded counter - * @next_div2: next divisor to use for second cascaded counter - * @clock_src: current clock source for each counter (driver specific) - * @gate_src: current gate source for each counter (driver specific) - * @busy: flags used to indicate that a counter is "busy" - * @insn_config: driver specific (*insn_config) callback - */ -struct comedi_8254 { - unsigned long iobase; - void __iomem *mmio; - unsigned int iosize; - unsigned int regshift; - unsigned int osc_base; - unsigned int divisor; - unsigned int divisor1; - unsigned int divisor2; - unsigned int next_div; - unsigned int next_div1; - unsigned int next_div2; - unsigned int clock_src[3]; - unsigned int gate_src[3]; - bool busy[3]; - - int (*insn_config)(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data); -}; - -unsigned int comedi_8254_status(struct comedi_8254 *i8254, - unsigned int counter); -unsigned int comedi_8254_read(struct comedi_8254 *i8254, unsigned int counter); -void comedi_8254_write(struct comedi_8254 *i8254, - unsigned int counter, unsigned int val); - -int comedi_8254_set_mode(struct comedi_8254 *i8254, - unsigned int counter, unsigned int mode); -int comedi_8254_load(struct comedi_8254 *i8254, - unsigned int counter, unsigned int val, unsigned int mode); - -void comedi_8254_pacer_enable(struct comedi_8254 *i8254, - unsigned int counter1, unsigned int counter2, - bool enable); -void comedi_8254_update_divisors(struct comedi_8254 *i8254); -void comedi_8254_cascade_ns_to_timer(struct comedi_8254
*i8254, - unsigned int *nanosec, unsigned int flags); -void comedi_8254_ns_to_timer(struct comedi_8254 *i8254, - unsigned int *nanosec, unsigned int flags); - -void comedi_8254_set_busy(struct comedi_8254 *i8254, - unsigned int counter, bool busy); - -void comedi_8254_subdevice_init(struct comedi_subdevice *s, - struct comedi_8254 *i8254); - -struct comedi_8254 *comedi_8254_init(unsigned long iobase, - unsigned int osc_base, - unsigned int iosize, - unsigned int regshift); -struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio, - unsigned int osc_base, - unsigned int iosize, - unsigned int regshift); - -#endif /* _COMEDI_8254_H */ diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c index b7ca465933ee..5562b9cd0a17 100644 --- a/drivers/comedi/drivers/comedi_8255.c +++ b/drivers/comedi/drivers/comedi_8255.c @@ -29,9 +29,8 @@ */ #include <linux/module.h> -#include "../comedidev.h" - -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> struct subdev_8255_private { unsigned long regbase; diff --git a/drivers/comedi/drivers/comedi_bond.c b/drivers/comedi/drivers/comedi_bond.c index 4392b5927a99..78c39fa84177 100644 --- a/drivers/comedi/drivers/comedi_bond.c +++ b/drivers/comedi/drivers/comedi_bond.c @@ -40,9 +40,9 @@ #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> -#include "../comedi.h" -#include "../comedilib.h" -#include "../comedidev.h" +#include <linux/comedi.h> +#include <linux/comedi/comedilib.h> +#include <linux/comedi/comedidev.h> struct bonded_device { struct comedi_device *dev; diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c index 479b58e209ba..700982464c53 100644 --- a/drivers/comedi/drivers/comedi_isadma.c +++ b/drivers/comedi/drivers/comedi_isadma.c @@ -9,10 +9,8 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <asm/dma.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_isadma.h> /** * comedi_isadma_program - program and enable an ISA DMA transfer diff --git a/drivers/comedi/drivers/comedi_isadma.h b/drivers/comedi/drivers/comedi_isadma.h deleted file mode 100644 index 9d2b12db7e6e..000000000000 --- a/drivers/comedi/drivers/comedi_isadma.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * COMEDI ISA DMA support functions - * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com> - */ - -#ifndef _COMEDI_ISADMA_H -#define _COMEDI_ISADMA_H - -#include <linux/types.h> - -struct comedi_device; -struct device; - -/* - * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_ - * defines are not available. 
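For reference, the comedi_8254 API removed above is typically used in two steps: allocate the timer at probe time, then round and program the divisors when a command starts. A hedged sketch of that pattern (the 0x08 register offset and 1 MHz base are illustrative, not from any particular board):

    /* Probe: byte-wide 8254 at an assumed offset, no register gap. */
    dev->pacer = comedi_8254_init(dev->iobase + 0x08, I8254_OSC_BASE_1MHZ,
                                  I8254_IO8, 0);
    if (!dev->pacer)
        return -ENOMEM;

    /* Command setup: round scan_begin_arg to an achievable period,
     * latch the divisors into counters 1 and 2, and start the pacer. */
    comedi_8254_cascade_ns_to_timer(dev->pacer, &cmd->scan_begin_arg,
                                    cmd->flags);
    comedi_8254_update_divisors(dev->pacer);
    comedi_8254_pacer_enable(dev->pacer, 1, 2, true);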
- */ -#define COMEDI_ISADMA_READ 0 -#define COMEDI_ISADMA_WRITE 1 - -/** - * struct comedi_isadma_desc - cookie for ISA DMA - * @virt_addr: virtual address of buffer - * @hw_addr: hardware (bus) address of buffer - * @chan: DMA channel - * @maxsize: allocated size of buffer (in bytes) - * @size: transfer size (in bytes) - * @mode: DMA_MODE_READ or DMA_MODE_WRITE - */ -struct comedi_isadma_desc { - void *virt_addr; - dma_addr_t hw_addr; - unsigned int chan; - unsigned int maxsize; - unsigned int size; - char mode; -}; - -/** - * struct comedi_isadma - ISA DMA data - * @dev: device to allocate non-coherent memory for - * @desc: cookie for each DMA buffer - * @n_desc: the number of cookies - * @cur_dma: the current cookie in use - * @chan: the first DMA channel requested - * @chan2: the second DMA channel requested - */ -struct comedi_isadma { - struct device *dev; - struct comedi_isadma_desc *desc; - int n_desc; - int cur_dma; - unsigned int chan; - unsigned int chan2; -}; - -#if IS_ENABLED(CONFIG_ISA_DMA_API) - -void comedi_isadma_program(struct comedi_isadma_desc *desc); -unsigned int comedi_isadma_disable(unsigned int dma_chan); -unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan, - unsigned int size); -unsigned int comedi_isadma_poll(struct comedi_isadma *dma); -void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir); - -struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev, - int n_desc, unsigned int dma_chan1, - unsigned int dma_chan2, - unsigned int maxsize, char dma_dir); -void comedi_isadma_free(struct comedi_isadma *dma); - -#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */ - -static inline void comedi_isadma_program(struct comedi_isadma_desc *desc) -{ -} - -static inline unsigned int comedi_isadma_disable(unsigned int dma_chan) -{ - return 0; -} - -static inline unsigned int -comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size) -{ - return 0; -} - -static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma) -{ - return 0; -} - -static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, - char dma_dir) -{ -} - -static inline struct comedi_isadma * -comedi_isadma_alloc(struct comedi_device *dev, int n_desc, - unsigned int dma_chan1, unsigned int dma_chan2, - unsigned int maxsize, char dma_dir) -{ - return NULL; -} - -static inline void comedi_isadma_free(struct comedi_isadma *dma) -{ -} - -#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */ - -#endif /* #ifndef _COMEDI_ISADMA_H */ diff --git a/drivers/comedi/drivers/comedi_parport.c b/drivers/comedi/drivers/comedi_parport.c index 5338b5eea440..098738a688fe 100644 --- a/drivers/comedi/drivers/comedi_parport.c +++ b/drivers/comedi/drivers/comedi_parport.c @@ -57,8 +57,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register map diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c index cbc225eb1991..0b5c0af1cebf 100644 --- a/drivers/comedi/drivers/comedi_test.c +++ b/drivers/comedi/drivers/comedi_test.c @@ -45,10 +45,8 @@ */ #include <linux/module.h> -#include "../comedidev.h" - +#include <linux/comedi/comedidev.h> #include <asm/div64.h> - #include <linux/timer.h> #include <linux/ktime.h> #include <linux/jiffies.h> diff --git a/drivers/comedi/drivers/contec_pci_dio.c b/drivers/comedi/drivers/contec_pci_dio.c index b8fdd9c1f166..41d42ff14144 100644 --- a/drivers/comedi/drivers/contec_pci_dio.c +++ 
b/drivers/comedi/drivers/contec_pci_dio.c @@ -18,8 +18,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * Register map diff --git a/drivers/comedi/drivers/dac02.c b/drivers/comedi/drivers/dac02.c index 5ef8114c2c85..4b011d66d7b0 100644 --- a/drivers/comedi/drivers/dac02.c +++ b/drivers/comedi/drivers/dac02.c @@ -25,8 +25,7 @@ */ #include <linux/module.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * The output range is selected by jumpering pins on the I/O connector. diff --git a/drivers/comedi/drivers/daqboard2000.c b/drivers/comedi/drivers/daqboard2000.c index f64e747078bd..c0a4e1b06fb3 100644 --- a/drivers/comedi/drivers/daqboard2000.c +++ b/drivers/comedi/drivers/daqboard2000.c @@ -96,10 +96,9 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8255.h> -#include "../comedi_pci.h" - -#include "8255.h" #include "plx9080.h" #define DB2K_FIRMWARE "daqboard2000_firmware.bin" diff --git a/drivers/comedi/drivers/das08.c b/drivers/comedi/drivers/das08.c index b50743c5b822..f8ab3af2e391 100644 --- a/drivers/comedi/drivers/das08.c +++ b/drivers/comedi/drivers/das08.c @@ -10,11 +10,10 @@ */ #include <linux/module.h> +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> -#include "../comedidev.h" - -#include "8255.h" -#include "comedi_8254.h" #include "das08.h" /* diff --git a/drivers/comedi/drivers/das08_cs.c b/drivers/comedi/drivers/das08_cs.c index 223479f9ea3c..6075efcf10d6 100644 --- a/drivers/comedi/drivers/das08_cs.c +++ b/drivers/comedi/drivers/das08_cs.c @@ -30,8 +30,7 @@ */ #include <linux/module.h> - -#include "../comedi_pcmcia.h" +#include <linux/comedi/comedi_pcmcia.h> #include "das08.h" diff --git a/drivers/comedi/drivers/das08_isa.c b/drivers/comedi/drivers/das08_isa.c index 8c4cfa821423..3d43b77cc9f4 100644 --- a/drivers/comedi/drivers/das08_isa.c +++ b/drivers/comedi/drivers/das08_isa.c @@ -29,7 +29,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #include "das08.h" diff --git a/drivers/comedi/drivers/das08_pci.c b/drivers/comedi/drivers/das08_pci.c index 1cd903336a4c..982f3ab0ccbd 100644 --- a/drivers/comedi/drivers/das08_pci.c +++ b/drivers/comedi/drivers/das08_pci.c @@ -23,8 +23,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "das08.h" diff --git a/drivers/comedi/drivers/das16.c b/drivers/comedi/drivers/das16.c index 4ac2622b0fac..937a69ce0977 100644 --- a/drivers/comedi/drivers/das16.c +++ b/drivers/comedi/drivers/das16.c @@ -63,12 +63,10 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" -#include "comedi_8254.h" -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> +#include <linux/comedi/comedi_isadma.h> #define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */ diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c index 75f3dbbe97ac..275effb77746 100644 --- a/drivers/comedi/drivers/das16m1.c +++ b/drivers/comedi/drivers/das16m1.c @@ -42,10 +42,9 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include "../comedidev.h" - -#include "8255.h" -#include "comedi_8254.h" +#include 
<linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> /* * Register map (dev->iobase) diff --git a/drivers/comedi/drivers/das1800.c b/drivers/comedi/drivers/das1800.c index f50891a6ee7d..f09608c0f4ff 100644 --- a/drivers/comedi/drivers/das1800.c +++ b/drivers/comedi/drivers/das1800.c @@ -73,11 +73,9 @@ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/io.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> +#include <linux/comedi/comedi_isadma.h> /* misc. defines */ #define DAS1800_SIZE 16 /* uses 16 io addresses */ diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c index 96f4107b8054..1af394591e74 100644 --- a/drivers/comedi/drivers/das6402.c +++ b/drivers/comedi/drivers/das6402.c @@ -24,10 +24,8 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedidev.h" - -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/das800.c b/drivers/comedi/drivers/das800.c index bc08324f422f..4ca33f46eaa7 100644 --- a/drivers/comedi/drivers/das800.c +++ b/drivers/comedi/drivers/das800.c @@ -46,10 +46,8 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> - -#include "../comedidev.h" - -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> #define N_CHAN_AI 8 /* number of analog input channels */ diff --git a/drivers/comedi/drivers/dmm32at.c b/drivers/comedi/drivers/dmm32at.c index 56682f01242f..fe023c722aa3 100644 --- a/drivers/comedi/drivers/dmm32at.c +++ b/drivers/comedi/drivers/dmm32at.c @@ -29,9 +29,8 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include "../comedidev.h" - -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> /* Board register addresses */ #define DMM32AT_AI_START_CONV_REG 0x00 diff --git a/drivers/comedi/drivers/dt2801.c b/drivers/comedi/drivers/dt2801.c index 0d571d817b4e..230d25010f58 100644 --- a/drivers/comedi/drivers/dt2801.c +++ b/drivers/comedi/drivers/dt2801.c @@ -31,7 +31,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #include <linux/delay.h> #define DT2801_TIMEOUT 1000 diff --git a/drivers/comedi/drivers/dt2811.c b/drivers/comedi/drivers/dt2811.c index 0eb5e6ba6916..dbb9f38da289 100644 --- a/drivers/comedi/drivers/dt2811.c +++ b/drivers/comedi/drivers/dt2811.c @@ -40,8 +40,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/dt2814.c b/drivers/comedi/drivers/dt2814.c index ed44ce0d151b..c98a5a4a7aec 100644 --- a/drivers/comedi/drivers/dt2814.c +++ b/drivers/comedi/drivers/dt2814.c @@ -27,8 +27,7 @@ #include <linux/module.h> #include <linux/interrupt.h> -#include "../comedidev.h" - +#include <linux/comedi/comedidev.h> #include <linux/delay.h> #define DT2814_CSR 0 diff --git a/drivers/comedi/drivers/dt2815.c b/drivers/comedi/drivers/dt2815.c index 5906f32aa01f..03ba2fd18a21 100644 --- a/drivers/comedi/drivers/dt2815.c +++ b/drivers/comedi/drivers/dt2815.c @@ -43,8 +43,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" - +#include <linux/comedi/comedidev.h> #include 
<linux/delay.h> #define DT2815_DATA 0 diff --git a/drivers/comedi/drivers/dt2817.c b/drivers/comedi/drivers/dt2817.c index 7c1463e835d3..6738045c7531 100644 --- a/drivers/comedi/drivers/dt2817.c +++ b/drivers/comedi/drivers/dt2817.c @@ -25,7 +25,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #define DT2817_CR 0 #define DT2817_DATA 1 diff --git a/drivers/comedi/drivers/dt282x.c b/drivers/comedi/drivers/dt282x.c index 2656b4b0e3d0..4ae80e6c7266 100644 --- a/drivers/comedi/drivers/dt282x.c +++ b/drivers/comedi/drivers/dt282x.c @@ -51,10 +51,8 @@ #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/io.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_isadma.h> /* * Register map diff --git a/drivers/comedi/drivers/dt3000.c b/drivers/comedi/drivers/dt3000.c index ec27aa4730d4..fc6e9c30e522 100644 --- a/drivers/comedi/drivers/dt3000.c +++ b/drivers/comedi/drivers/dt3000.c @@ -43,8 +43,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI BAR0 - dual-ported RAM location definitions (dev->mmio) diff --git a/drivers/comedi/drivers/dt9812.c b/drivers/comedi/drivers/dt9812.c index 704b04d2980d..b37b9d8eca0d 100644 --- a/drivers/comedi/drivers/dt9812.c +++ b/drivers/comedi/drivers/dt9812.c @@ -34,8 +34,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/uaccess.h> - -#include "../comedi_usb.h" +#include <linux/comedi/comedi_usb.h> #define DT9812_DIAGS_BOARD_INFO_ADDR 0xFBFF #define DT9812_MAX_WRITE_CMD_PIPE_SIZE 32 diff --git a/drivers/comedi/drivers/dyna_pci10xx.c b/drivers/comedi/drivers/dyna_pci10xx.c index c224422bb126..407a038fb3e0 100644 --- a/drivers/comedi/drivers/dyna_pci10xx.c +++ b/drivers/comedi/drivers/dyna_pci10xx.c @@ -26,8 +26,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/mutex.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #define READ_TIMEOUT 50 diff --git a/drivers/comedi/drivers/fl512.c b/drivers/comedi/drivers/fl512.c index b715f30659fa..139e801fc358 100644 --- a/drivers/comedi/drivers/fl512.c +++ b/drivers/comedi/drivers/fl512.c @@ -21,8 +21,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" - +#include <linux/comedi/comedidev.h> #include <linux/delay.h> /* diff --git a/drivers/comedi/drivers/gsc_hpdi.c b/drivers/comedi/drivers/gsc_hpdi.c index e35e4a743714..c09d135df38d 100644 --- a/drivers/comedi/drivers/gsc_hpdi.c +++ b/drivers/comedi/drivers/gsc_hpdi.c @@ -34,8 +34,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "plx9080.h" diff --git a/drivers/comedi/drivers/icp_multi.c b/drivers/comedi/drivers/icp_multi.c index 16d2b78de83c..ac4b11dbd741 100644 --- a/drivers/comedi/drivers/icp_multi.c +++ b/drivers/comedi/drivers/icp_multi.c @@ -36,8 +36,7 @@ #include <linux/module.h> #include <linux/delay.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #define ICP_MULTI_ADC_CSR 0x00 /* R/W: ADC command/status register */ #define ICP_MULTI_ADC_CSR_ST BIT(0) /* Start ADC */ diff --git a/drivers/comedi/drivers/ii_pci20kc.c b/drivers/comedi/drivers/ii_pci20kc.c index 399255dbe388..4a19bf8462be 100644 --- a/drivers/comedi/drivers/ii_pci20kc.c +++ b/drivers/comedi/drivers/ii_pci20kc.c @@ -30,7 +30,7 @@ #include <linux/module.h> #include 
<linux/io.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c index f963080dd61f..951c23fa0369 100644 --- a/drivers/comedi/drivers/jr3_pci.c +++ b/drivers/comedi/drivers/jr3_pci.c @@ -35,8 +35,7 @@ #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/timer.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "jr3_pci.h" diff --git a/drivers/comedi/drivers/ke_counter.c b/drivers/comedi/drivers/ke_counter.c index bef1b20c1c8d..b825cf60e1e0 100644 --- a/drivers/comedi/drivers/ke_counter.c +++ b/drivers/comedi/drivers/ke_counter.c @@ -19,8 +19,7 @@ */ #include <linux/module.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI BAR 0 Register I/O map diff --git a/drivers/comedi/drivers/me4000.c b/drivers/comedi/drivers/me4000.c index 0d3d4cafce2e..9aea02b86ed9 100644 --- a/drivers/comedi/drivers/me4000.c +++ b/drivers/comedi/drivers/me4000.c @@ -32,10 +32,9 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8254.h> -#include "../comedi_pci.h" - -#include "comedi_8254.h" #include "plx9052.h" #define ME4000_FIRMWARE "me4000_firmware.bin" diff --git a/drivers/comedi/drivers/me_daq.c b/drivers/comedi/drivers/me_daq.c index ef18e387471b..076b15097afd 100644 --- a/drivers/comedi/drivers/me_daq.c +++ b/drivers/comedi/drivers/me_daq.c @@ -23,8 +23,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/sched.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "plx9052.h" diff --git a/drivers/comedi/drivers/mf6x4.c b/drivers/comedi/drivers/mf6x4.c index 9da8dd748078..14f1d5e9cd59 100644 --- a/drivers/comedi/drivers/mf6x4.c +++ b/drivers/comedi/drivers/mf6x4.c @@ -18,8 +18,7 @@ #include <linux/module.h> #include <linux/delay.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* Registers present in BAR0 memory region */ #define MF624_GPIOC_REG 0x54 diff --git a/drivers/comedi/drivers/mite.c b/drivers/comedi/drivers/mite.c index 70960e3ba878..88f3cd6f54f1 100644 --- a/drivers/comedi/drivers/mite.c +++ b/drivers/comedi/drivers/mite.c @@ -38,8 +38,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/log2.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "mite.h" diff --git a/drivers/comedi/drivers/mpc624.c b/drivers/comedi/drivers/mpc624.c index 646f4c086204..9e51ff528ed1 100644 --- a/drivers/comedi/drivers/mpc624.c +++ b/drivers/comedi/drivers/mpc624.c @@ -44,8 +44,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" - +#include <linux/comedi/comedidev.h> #include <linux/delay.h> /* Offsets of different ports */ diff --git a/drivers/comedi/drivers/multiq3.c b/drivers/comedi/drivers/multiq3.c index c1897aee9a9a..07ff5383da99 100644 --- a/drivers/comedi/drivers/multiq3.c +++ b/drivers/comedi/drivers/multiq3.c @@ -26,8 +26,7 @@ */ #include <linux/module.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register map diff --git a/drivers/comedi/drivers/ni_6527.c b/drivers/comedi/drivers/ni_6527.c index f1a45cf7342a..ac5820085231 100644 --- a/drivers/comedi/drivers/ni_6527.c +++ b/drivers/comedi/drivers/ni_6527.c @@ -20,8 +20,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI BAR1 - Register memory map 
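Every comedi hunk in this series follows the same pattern: the core comedi headers move out of drivers/comedi/ into include/linux/comedi/, so the old relative includes become ordinary <linux/comedi/*.h> includes. As a quick reference, a sketch of a converted include block (hypothetical driver; the mappings mirror the das16.c hunk above):

#include <linux/module.h>
#include <linux/comedi/comedidev.h>     /* was "../comedidev.h" */
#include <linux/comedi/comedi_8255.h>   /* was "8255.h" */
#include <linux/comedi/comedi_8254.h>   /* was "comedi_8254.h" */
#include <linux/comedi/comedi_isadma.h> /* was "comedi_isadma.h" */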
diff --git a/drivers/comedi/drivers/ni_65xx.c b/drivers/comedi/drivers/ni_65xx.c index 7cd8497420f2..58334de3b253 100644 --- a/drivers/comedi/drivers/ni_65xx.c +++ b/drivers/comedi/drivers/ni_65xx.c @@ -49,8 +49,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> /* * PCI BAR1 Register Map diff --git a/drivers/comedi/drivers/ni_660x.c b/drivers/comedi/drivers/ni_660x.c index e60d0125bcb2..0679bc39e0bc 100644 --- a/drivers/comedi/drivers/ni_660x.c +++ b/drivers/comedi/drivers/ni_660x.c @@ -26,8 +26,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "mite.h" #include "ni_tio.h" diff --git a/drivers/comedi/drivers/ni_670x.c b/drivers/comedi/drivers/ni_670x.c index c197e47486be..c875d251c230 100644 --- a/drivers/comedi/drivers/ni_670x.c +++ b/drivers/comedi/drivers/ni_670x.c @@ -24,8 +24,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #define AO_VALUE_OFFSET 0x00 #define AO_CHAN_OFFSET 0x0c diff --git a/drivers/comedi/drivers/ni_at_a2150.c b/drivers/comedi/drivers/ni_at_a2150.c index 10ad7b88713e..df8d219e6723 100644 --- a/drivers/comedi/drivers/ni_at_a2150.c +++ b/drivers/comedi/drivers/ni_at_a2150.c @@ -39,11 +39,9 @@ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/io.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> +#include <linux/comedi/comedi_isadma.h> #define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */ diff --git a/drivers/comedi/drivers/ni_at_ao.c b/drivers/comedi/drivers/ni_at_ao.c index 2a0fb4d460db..9f3147b72aa8 100644 --- a/drivers/comedi/drivers/ni_at_ao.c +++ b/drivers/comedi/drivers/ni_at_ao.c @@ -25,10 +25,8 @@ */ #include <linux/module.h> - -#include "../comedidev.h" - -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> /* * Register map diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c index 56c78da475e7..8876a1d24c56 100644 --- a/drivers/comedi/drivers/ni_atmio.c +++ b/drivers/comedi/drivers/ni_atmio.c @@ -73,12 +73,11 @@ #include <linux/module.h> #include <linux/interrupt.h> -#include "../comedidev.h" - +#include <linux/comedi/comedidev.h> #include <linux/isapnp.h> +#include <linux/comedi/comedi_8255.h> #include "ni_stc.h" -#include "8255.h" /* AT specific setup */ static const struct ni_board_struct ni_boards[] = { diff --git a/drivers/comedi/drivers/ni_atmio16d.c b/drivers/comedi/drivers/ni_atmio16d.c index dffce1aa3e69..9fa902529a8e 100644 --- a/drivers/comedi/drivers/ni_atmio16d.c +++ b/drivers/comedi/drivers/ni_atmio16d.c @@ -39,9 +39,8 @@ #include <linux/module.h> #include <linux/interrupt.h> -#include "../comedidev.h" - -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> /* Configuration and Status Registers */ #define COM_REG_1 0x00 /* wo 16 */ diff --git a/drivers/comedi/drivers/ni_daq_700.c b/drivers/comedi/drivers/ni_daq_700.c index d40fc89f9cef..0ef20e9a8bc4 100644 --- a/drivers/comedi/drivers/ni_daq_700.c +++ b/drivers/comedi/drivers/ni_daq_700.c @@ -41,8 +41,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> - -#include "../comedi_pcmcia.h" +#include <linux/comedi/comedi_pcmcia.h> /* 
daqcard700 registers */ #define DIO_W 0x04 /* WO 8bit */ diff --git a/drivers/comedi/drivers/ni_daq_dio24.c b/drivers/comedi/drivers/ni_daq_dio24.c index 44fb65afc218..487733111023 100644 --- a/drivers/comedi/drivers/ni_daq_dio24.c +++ b/drivers/comedi/drivers/ni_daq_dio24.c @@ -23,9 +23,8 @@ */ #include <linux/module.h> -#include "../comedi_pcmcia.h" - -#include "8255.h" +#include <linux/comedi/comedi_pcmcia.h> +#include <linux/comedi/comedi_8255.h> static int dio24_auto_attach(struct comedi_device *dev, unsigned long context) diff --git a/drivers/comedi/drivers/ni_labpc.c b/drivers/comedi/drivers/ni_labpc.c index 1f4a07bd1d26..b25a8e117072 100644 --- a/drivers/comedi/drivers/ni_labpc.c +++ b/drivers/comedi/drivers/ni_labpc.c @@ -48,8 +48,7 @@ */ #include <linux/module.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #include "ni_labpc.h" #include "ni_labpc_isadma.h" diff --git a/drivers/comedi/drivers/ni_labpc_common.c b/drivers/comedi/drivers/ni_labpc_common.c index dd97946eacaf..763249653228 100644 --- a/drivers/comedi/drivers/ni_labpc_common.c +++ b/drivers/comedi/drivers/ni_labpc_common.c @@ -12,11 +12,10 @@ #include <linux/io.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> +#include <linux/comedi/comedi_8254.h> -#include "../comedidev.h" - -#include "comedi_8254.h" -#include "8255.h" #include "ni_labpc.h" #include "ni_labpc_regs.h" #include "ni_labpc_isadma.h" diff --git a/drivers/comedi/drivers/ni_labpc_cs.c b/drivers/comedi/drivers/ni_labpc_cs.c index 4f7e2fe21254..62fecb50ec6e 100644 --- a/drivers/comedi/drivers/ni_labpc_cs.c +++ b/drivers/comedi/drivers/ni_labpc_cs.c @@ -38,8 +38,7 @@ */ #include <linux/module.h> - -#include "../comedi_pcmcia.h" +#include <linux/comedi/comedi_pcmcia.h> #include "ni_labpc.h" diff --git a/drivers/comedi/drivers/ni_labpc_isadma.c b/drivers/comedi/drivers/ni_labpc_isadma.c index a551aca6e615..0652ca8345b6 100644 --- a/drivers/comedi/drivers/ni_labpc_isadma.c +++ b/drivers/comedi/drivers/ni_labpc_isadma.c @@ -10,10 +10,9 @@ #include <linux/module.h> #include <linux/slab.h> +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_isadma.h> -#include "../comedidev.h" - -#include "comedi_isadma.h" #include "ni_labpc.h" #include "ni_labpc_regs.h" #include "ni_labpc_isadma.h" diff --git a/drivers/comedi/drivers/ni_labpc_pci.c b/drivers/comedi/drivers/ni_labpc_pci.c index ec180b0fedf7..e2a44bbd9fa6 100644 --- a/drivers/comedi/drivers/ni_labpc_pci.c +++ b/drivers/comedi/drivers/ni_labpc_pci.c @@ -22,8 +22,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "ni_labpc.h" diff --git a/drivers/comedi/drivers/ni_mio_common.c b/drivers/comedi/drivers/ni_mio_common.c index 4f80a4991f95..d39998565808 100644 --- a/drivers/comedi/drivers/ni_mio_common.c +++ b/drivers/comedi/drivers/ni_mio_common.c @@ -43,7 +43,7 @@ #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/delay.h> -#include "8255.h" +#include <linux/comedi/comedi_8255.h> #include "mite.h" /* A timeout count */ diff --git a/drivers/comedi/drivers/ni_mio_cs.c b/drivers/comedi/drivers/ni_mio_cs.c index 4f37b4e58f09..796f0b743772 100644 --- a/drivers/comedi/drivers/ni_mio_cs.c +++ b/drivers/comedi/drivers/ni_mio_cs.c @@ -28,10 +28,10 @@ #include <linux/module.h> #include <linux/delay.h> +#include <linux/comedi/comedi_pcmcia.h> +#include <linux/comedi/comedi_8255.h> -#include "../comedi_pcmcia.h"
#include "ni_stc.h" -#include "8255.h" /* * AT specific setup diff --git a/drivers/comedi/drivers/ni_pcidio.c b/drivers/comedi/drivers/ni_pcidio.c index 623f8d08d13a..2d58e83420e8 100644 --- a/drivers/comedi/drivers/ni_pcidio.c +++ b/drivers/comedi/drivers/ni_pcidio.c @@ -42,8 +42,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/sched.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "mite.h" diff --git a/drivers/comedi/drivers/ni_pcimio.c b/drivers/comedi/drivers/ni_pcimio.c index 6c813a490ba5..0b055321023d 100644 --- a/drivers/comedi/drivers/ni_pcimio.c +++ b/drivers/comedi/drivers/ni_pcimio.c @@ -94,9 +94,7 @@ #include <linux/module.h> #include <linux/delay.h> - -#include "../comedi_pci.h" - +#include <linux/comedi/comedi_pci.h> #include <asm/byteorder.h> #include "ni_stc.h" diff --git a/drivers/comedi/drivers/ni_routes.c b/drivers/comedi/drivers/ni_routes.c index f0f8cd424b30..f24eeb464eba 100644 --- a/drivers/comedi/drivers/ni_routes.c +++ b/drivers/comedi/drivers/ni_routes.c @@ -21,8 +21,7 @@ #include <linux/slab.h> #include <linux/bsearch.h> #include <linux/sort.h> - -#include "../comedi.h" +#include <linux/comedi.h> #include "ni_routes.h" #include "ni_routing/ni_route_values.h" diff --git a/drivers/comedi/drivers/ni_routes.h b/drivers/comedi/drivers/ni_routes.h index 036982315584..cff8a463a03f 100644 --- a/drivers/comedi/drivers/ni_routes.h +++ b/drivers/comedi/drivers/ni_routes.h @@ -27,7 +27,7 @@ #include <linux/bitops.h> #endif -#include "../comedi.h" +#include <linux/comedi.h> /** * struct ni_route_set - Set of destinations with a common source. diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values.h b/drivers/comedi/drivers/ni_routing/ni_route_values.h index 6e358efa6f7f..80880083ea41 100644 --- a/drivers/comedi/drivers/ni_routing/ni_route_values.h +++ b/drivers/comedi/drivers/ni_routing/ni_route_values.h @@ -20,7 +20,7 @@ #ifndef _COMEDI_DRIVERS_NI_ROUTINT_NI_ROUTE_VALUES_H #define _COMEDI_DRIVERS_NI_ROUTINT_NI_ROUTE_VALUES_H -#include "../../comedi.h" +#include <linux/comedi.h> #include <linux/types.h> /* diff --git a/drivers/comedi/drivers/ni_routing/tools/.gitignore b/drivers/comedi/drivers/ni_routing/tools/.gitignore index e3ebffcd900e..c12f825db266 100644 --- a/drivers/comedi/drivers/ni_routing/tools/.gitignore +++ b/drivers/comedi/drivers/ni_routing/tools/.gitignore @@ -5,4 +5,5 @@ ni_values.py convert_c_to_py c/ csv/ +linux/ all_cfiles.c diff --git a/drivers/comedi/drivers/ni_routing/tools/Makefile b/drivers/comedi/drivers/ni_routing/tools/Makefile index 6e92a06a44cb..31212101b3bc 100644 --- a/drivers/comedi/drivers/ni_routing/tools/Makefile +++ b/drivers/comedi/drivers/ni_routing/tools/Makefile @@ -3,7 +3,7 @@ # ni_route_values.h # ni_device_routes.h # in order to do this, we are also generating a python representation (using -# ctypesgen) of ../../comedi.h. +# ctypesgen) of ../../../../../include/uapi/linux/comedi.h. # This allows us to sort NI signal/terminal names numerically to use a binary # search through the device_routes tables to find valid routes. 
@@ -30,13 +30,21 @@ ALL: everything : csv-files c-files csv-blank -CPPFLAGS=-D"BIT(x)=(1UL<<(x))" -D__user= +CPPFLAGS = -D__user= +INC_UAPI = ../../../../../include/uapi -comedi_h.py : ../../../comedi.h +comedi_h.py: $(INC_UAPI)/linux/comedi.h ctypesgen $< --include "sys/ioctl.h" --cpp 'gcc -E $(CPPFLAGS)' -o $@ -convert_c_to_py: all_cfiles.c - gcc -g convert_c_to_py.c -o convert_c_to_py -std=c99 +convert_c_to_py: all_cfiles.c linux/comedi.h + gcc -g -I. convert_c_to_py.c -o convert_c_to_py -std=c99 + +# Create a local 'linux/comedi.h' for use when compiling 'convert_c_to_py.c' +# with the '-I.' option. (Cannot specify '-I../../../../../include/uapi' +# because that interferes with inclusion of other system headers.) +linux/comedi.h: $(INC_UAPI)/linux/comedi.h + mkdir -p linux + ln -snf ../$< $@ ni_values.py: convert_c_to_py ./convert_c_to_py @@ -44,7 +52,7 @@ ni_values.py: convert_c_to_py csv-files : ni_values.py comedi_h.py ./convert_py_to_csv.py -csv-blank : +csv-blank : comedi_h.py ./make_blank_csv.py @echo New blank csv signal table in csv/blank_route_table.csv @@ -62,17 +70,16 @@ clean-partial : $(RM) -rf comedi_h.py ni_values.py convert_c_to_py all_cfiles.c *.pyc \ __pycache__/ -clean : partial_clean - $(RM) -rf c/ csv/ +clean : clean-partial + $(RM) -rf c/ csv/ linux/ # Note: One could also use ctypeslib in order to generate these files. The # caveat is that ctypeslib does not do a great job at handling macro functions. # The make rules are as follows: -# comedi.h.xml : $(INC_UAPI)/linux/comedi.h # # note that we have to use PWD here to avoid h2xml finding a system # # installed version of the comedilib/comedi.h file -# h2xml ${PWD}/../../comedi.h -c -D__user="" -D"BIT(x)=(1<<(x))" \ -# -o comedi.h.xml +# h2xml ${PWD}/$(INC_UAPI)/linux/comedi.h -c -D__user="" -o comedi.h.xml # # comedi_h.py : comedi.h.xml # xml2py ./comedi.h.xml -o comedi_h.py diff --git a/drivers/comedi/drivers/ni_tio.h b/drivers/comedi/drivers/ni_tio.h index e7b05718df9b..9ae2221c3c18 100644 --- a/drivers/comedi/drivers/ni_tio.h +++ b/drivers/comedi/drivers/ni_tio.h @@ -8,7 +8,7 @@ #ifndef _COMEDI_NI_TIO_H #define _COMEDI_NI_TIO_H -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> enum ni_gpct_register { NITIO_G0_AUTO_INC, diff --git a/drivers/comedi/drivers/ni_usb6501.c b/drivers/comedi/drivers/ni_usb6501.c index c42987b74b1d..0dd9edf7bced 100644 --- a/drivers/comedi/drivers/ni_usb6501.c +++ b/drivers/comedi/drivers/ni_usb6501.c @@ -87,8 +87,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> - -#include "../comedi_usb.h" +#include <linux/comedi/comedi_usb.h> #define NI6501_TIMEOUT 1000 diff --git a/drivers/comedi/drivers/pcl711.c b/drivers/comedi/drivers/pcl711.c index bd6f42fe9e3c..05172c553c8a 100644 --- a/drivers/comedi/drivers/pcl711.c +++ b/drivers/comedi/drivers/pcl711.c @@ -29,10 +29,8 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> - -#include "../comedidev.h" - -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> /* * I/O port register map diff --git a/drivers/comedi/drivers/pcl724.c b/drivers/comedi/drivers/pcl724.c index 1a5799278a7a..948a0576c9ef 100644 --- a/drivers/comedi/drivers/pcl724.c +++ b/drivers/comedi/drivers/pcl724.c @@ -25,9 +25,8 @@ */ #include <linux/module.h> -#include "../comedidev.h" - -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> struct pcl724_board { const char *name; diff 
--git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c index 88f25d7e76f7..0430630e6ebb 100644 --- a/drivers/comedi/drivers/pcl726.c +++ b/drivers/comedi/drivers/pcl726.c @@ -50,8 +50,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #define PCL726_AO_MSB_REG(x) (0x00 + ((x) * 2)) #define PCL726_AO_LSB_REG(x) (0x01 + ((x) * 2)) diff --git a/drivers/comedi/drivers/pcl730.c b/drivers/comedi/drivers/pcl730.c index 32a29129e6e8..d2733cd5383d 100644 --- a/drivers/comedi/drivers/pcl730.c +++ b/drivers/comedi/drivers/pcl730.c @@ -25,7 +25,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register map diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c index b87ab3840eee..70dbc129fcf5 100644 --- a/drivers/comedi/drivers/pcl812.c +++ b/drivers/comedi/drivers/pcl812.c @@ -114,11 +114,9 @@ #include <linux/gfp.h> #include <linux/delay.h> #include <linux/io.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> +#include <linux/comedi/comedi_isadma.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/pcl816.c b/drivers/comedi/drivers/pcl816.c index c368a337a0ae..a5e5320be648 100644 --- a/drivers/comedi/drivers/pcl816.c +++ b/drivers/comedi/drivers/pcl816.c @@ -35,11 +35,9 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/interrupt.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> +#include <linux/comedi/comedi_isadma.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/pcl818.c b/drivers/comedi/drivers/pcl818.c index f4b4a686c710..29e503de8267 100644 --- a/drivers/comedi/drivers/pcl818.c +++ b/drivers/comedi/drivers/pcl818.c @@ -97,11 +97,9 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/interrupt.h> - -#include "../comedidev.h" - -#include "comedi_isadma.h" -#include "comedi_8254.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8254.h> +#include <linux/comedi/comedi_isadma.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/pcm3724.c b/drivers/comedi/drivers/pcm3724.c index 0cb1ad060402..e4103f9eeced 100644 --- a/drivers/comedi/drivers/pcm3724.c +++ b/drivers/comedi/drivers/pcm3724.c @@ -24,9 +24,8 @@ */ #include <linux/module.h> -#include "../comedidev.h" - -#include "8255.h" +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedi_8255.h> /* * Register I/O Map diff --git a/drivers/comedi/drivers/pcmad.c b/drivers/comedi/drivers/pcmad.c index eec89a0afb2f..976eda43881b 100644 --- a/drivers/comedi/drivers/pcmad.c +++ b/drivers/comedi/drivers/pcmad.c @@ -29,7 +29,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> #define PCMAD_STATUS 0 #define PCMAD_LSB 1 diff --git a/drivers/comedi/drivers/pcmda12.c b/drivers/comedi/drivers/pcmda12.c index 14ab1f0d1e9f..611f13bedca0 100644 --- a/drivers/comedi/drivers/pcmda12.c +++ b/drivers/comedi/drivers/pcmda12.c @@ -40,7 +40,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* AI range is not configurable, it's set by jumpers on the board */ static const struct comedi_lrange pcmda12_ranges = { diff --git a/drivers/comedi/drivers/pcmmio.c b/drivers/comedi/drivers/pcmmio.c index 
24a9568d3378..c2402239d551 100644 --- a/drivers/comedi/drivers/pcmmio.c +++ b/drivers/comedi/drivers/pcmmio.c @@ -66,8 +66,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/pcmuio.c b/drivers/comedi/drivers/pcmuio.c index b299d648a0eb..33b24dbbb919 100644 --- a/drivers/comedi/drivers/pcmuio.c +++ b/drivers/comedi/drivers/pcmuio.c @@ -65,8 +65,7 @@ #include <linux/module.h> #include <linux/interrupt.h> - -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/quatech_daqp_cs.c b/drivers/comedi/drivers/quatech_daqp_cs.c index fe4408ebf6b3..2a76c75c513b 100644 --- a/drivers/comedi/drivers/quatech_daqp_cs.c +++ b/drivers/comedi/drivers/quatech_daqp_cs.c @@ -41,8 +41,7 @@ */ #include <linux/module.h> - -#include "../comedi_pcmcia.h" +#include <linux/comedi/comedi_pcmcia.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/rtd520.c b/drivers/comedi/drivers/rtd520.c index 2d99a648b054..7e0ec1a2a2ca 100644 --- a/drivers/comedi/drivers/rtd520.c +++ b/drivers/comedi/drivers/rtd520.c @@ -85,10 +85,9 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/comedi/comedi_pci.h> +#include <linux/comedi/comedi_8254.h> -#include "../comedi_pci.h" - -#include "comedi_8254.h" #include "plx9080.h" /* diff --git a/drivers/comedi/drivers/rti800.c b/drivers/comedi/drivers/rti800.c index 327fd93b8b12..1b02e47bdb4c 100644 --- a/drivers/comedi/drivers/rti800.c +++ b/drivers/comedi/drivers/rti800.c @@ -42,7 +42,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register map diff --git a/drivers/comedi/drivers/rti802.c b/drivers/comedi/drivers/rti802.c index 195e2b1ac4c1..d66762a22258 100644 --- a/drivers/comedi/drivers/rti802.c +++ b/drivers/comedi/drivers/rti802.c @@ -22,7 +22,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/s526.c b/drivers/comedi/drivers/s526.c index 085cf5b449e5..9245c679a3c4 100644 --- a/drivers/comedi/drivers/s526.c +++ b/drivers/comedi/drivers/s526.c @@ -27,7 +27,7 @@ */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* * Register I/O map diff --git a/drivers/comedi/drivers/s626.c b/drivers/comedi/drivers/s626.c index e7aba937d896..0e5f9a9a7fd3 100644 --- a/drivers/comedi/drivers/s626.c +++ b/drivers/comedi/drivers/s626.c @@ -55,8 +55,7 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> - -#include "../comedi_pci.h" +#include <linux/comedi/comedi_pci.h> #include "s626.h" diff --git a/drivers/comedi/drivers/ssv_dnp.c b/drivers/comedi/drivers/ssv_dnp.c index 016d315aa584..813bd0853b0b 100644 --- a/drivers/comedi/drivers/ssv_dnp.c +++ b/drivers/comedi/drivers/ssv_dnp.c @@ -19,7 +19,7 @@ /* include files ----------------------------------------------------------- */ #include <linux/module.h> -#include "../comedidev.h" +#include <linux/comedi/comedidev.h> /* Some global definitions: the registers of the DNP ----------------------- */ /* */ diff --git a/drivers/comedi/drivers/usbdux.c b/drivers/comedi/drivers/usbdux.c index 0350f303d557..92d514b3c1c3 100644 --- a/drivers/comedi/drivers/usbdux.c +++ b/drivers/comedi/drivers/usbdux.c @@ -73,8 +73,7 
@@ #include <linux/input.h> #include <linux/fcntl.h> #include <linux/compiler.h> - -#include "../comedi_usb.h" +#include <linux/comedi/comedi_usb.h> /* constants for firmware upload and download */ #define USBDUX_FIRMWARE "usbdux_firmware.bin" diff --git a/drivers/comedi/drivers/usbduxfast.c b/drivers/comedi/drivers/usbduxfast.c index 4af012968cb6..39faae0ecb19 100644 --- a/drivers/comedi/drivers/usbduxfast.c +++ b/drivers/comedi/drivers/usbduxfast.c @@ -40,7 +40,7 @@ #include <linux/input.h> #include <linux/fcntl.h> #include <linux/compiler.h> -#include "../comedi_usb.h" +#include <linux/comedi/comedi_usb.h> /* * timeout for the USB-transfer diff --git a/drivers/comedi/drivers/usbduxsigma.c b/drivers/comedi/drivers/usbduxsigma.c index 54d7605e909f..2aaeaf44fbe5 100644 --- a/drivers/comedi/drivers/usbduxsigma.c +++ b/drivers/comedi/drivers/usbduxsigma.c @@ -40,8 +40,7 @@ #include <linux/fcntl.h> #include <linux/compiler.h> #include <asm/unaligned.h> - -#include "../comedi_usb.h" +#include <linux/comedi/comedi_usb.h> /* timeout for the USB-transfer in ms*/ #define BULK_TIMEOUT 1000 diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c index 4b00a9ea611a..46023adc5395 100644 --- a/drivers/comedi/drivers/vmk80xx.c +++ b/drivers/comedi/drivers/vmk80xx.c @@ -35,8 +35,7 @@ #include <linux/slab.h> #include <linux/poll.h> #include <linux/uaccess.h> - -#include "../comedi_usb.h" +#include <linux/comedi/comedi_usb.h> enum { DEVICE_VMK8055, diff --git a/drivers/comedi/kcomedilib/kcomedilib_main.c b/drivers/comedi/kcomedilib/kcomedilib_main.c index df9bba1b69ed..43fbe1a63b14 100644 --- a/drivers/comedi/kcomedilib/kcomedilib_main.c +++ b/drivers/comedi/kcomedilib/kcomedilib_main.c @@ -16,9 +16,9 @@ #include <linux/mm.h> #include <linux/io.h> -#include "../comedi.h" -#include "../comedilib.h" -#include "../comedidev.h" +#include <linux/comedi.h> +#include <linux/comedi/comedidev.h> +#include <linux/comedi/comedilib.h> MODULE_AUTHOR("David Schleef <ds@schleef.org>"); MODULE_DESCRIPTION("Comedi kernel library"); diff --git a/drivers/comedi/proc.c b/drivers/comedi/proc.c index 8bc8e42beb90..2e4496633d3d 100644 --- a/drivers/comedi/proc.c +++ b/drivers/comedi/proc.c @@ -13,7 +13,7 @@ * was cool. 
*/ -#include "comedidev.h" +#include <linux/comedi/comedidev.h> #include "comedi_internal.h" #include <linux/proc_fs.h> #include <linux/seq_file.h> diff --git a/drivers/comedi/range.c b/drivers/comedi/range.c index a4e6fe0fb729..8f43cf88d784 100644 --- a/drivers/comedi/range.c +++ b/drivers/comedi/range.c @@ -8,7 +8,7 @@ */ #include <linux/uaccess.h> -#include "comedidev.h" +#include <linux/comedi/comedidev.h> #include "comedi_internal.h" const struct comedi_lrange range_bipolar10 = { 1, {BIP_RANGE(10)} }; diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index 1cbd60aaed69..a17e51d65aca 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -14,6 +14,7 @@ #include <linux/interrupt.h> #include <linux/isa.h> #include <linux/kernel.h> +#include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> @@ -44,7 +45,6 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers"); * @ab_enable: array of A and B inputs enable configurations * @preset_enable: array of set_to_preset_on_index attribute configurations * @irq_trigger: array of current IRQ trigger function configurations - * @next_irq_trigger: array of next IRQ trigger function configurations * @synchronous_mode: array of index function synchronous mode configurations * @index_polarity: array of index function polarity configurations * @cable_fault_enable: differential encoder cable status enable configurations @@ -52,7 +52,6 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers"); */ struct quad8 { spinlock_t lock; - struct counter_device counter; unsigned int fck_prescaler[QUAD8_NUM_COUNTERS]; unsigned int preset[QUAD8_NUM_COUNTERS]; unsigned int count_mode[QUAD8_NUM_COUNTERS]; @@ -61,7 +60,6 @@ struct quad8 { unsigned int ab_enable[QUAD8_NUM_COUNTERS]; unsigned int preset_enable[QUAD8_NUM_COUNTERS]; unsigned int irq_trigger[QUAD8_NUM_COUNTERS]; - unsigned int next_irq_trigger[QUAD8_NUM_COUNTERS]; unsigned int synchronous_mode[QUAD8_NUM_COUNTERS]; unsigned int index_polarity[QUAD8_NUM_COUNTERS]; unsigned int cable_fault_enable; @@ -113,7 +111,7 @@ static int quad8_signal_read(struct counter_device *counter, struct counter_signal *signal, enum counter_signal_level *level) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); unsigned int state; /* Only Index signal levels can be read */ @@ -131,7 +129,7 @@ static int quad8_signal_read(struct counter_device *counter, static int quad8_count_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const int base_offset = priv->base + 2 * count->id; unsigned int flags; unsigned int borrow; @@ -163,7 +161,7 @@ static int quad8_count_read(struct counter_device *counter, static int quad8_count_write(struct counter_device *counter, struct counter_count *count, u64 val) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const int base_offset = priv->base + 2 * count->id; unsigned long irqflags; int i; @@ -213,7 +211,7 @@ static int quad8_function_read(struct counter_device *counter, struct counter_count *count, enum counter_function *function) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const int id = count->id; unsigned long irqflags; @@ -243,7 +241,7 @@ static int quad8_function_write(struct counter_device *counter, 
struct counter_count *count, enum counter_function function) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const int id = count->id; unsigned int *const quadrature_mode = priv->quadrature_mode + id; unsigned int *const scale = priv->quadrature_scale + id; @@ -305,7 +303,7 @@ static int quad8_direction_read(struct counter_device *counter, struct counter_count *count, enum counter_count_direction *direction) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); unsigned int ud_flag; const unsigned int flag_addr = priv->base + 2 * count->id + 1; @@ -335,7 +333,7 @@ static int quad8_action_read(struct counter_device *counter, struct counter_synapse *synapse, enum counter_synapse_action *action) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); int err; enum counter_function function; const size_t signal_a_id = count->synapses[0].signal->id; @@ -390,7 +388,6 @@ static int quad8_action_read(struct counter_device *counter, } enum { - QUAD8_EVENT_NONE = -1, QUAD8_EVENT_CARRY = 0, QUAD8_EVENT_COMPARE = 1, QUAD8_EVENT_CARRY_BORROW = 2, @@ -399,37 +396,52 @@ enum { static int quad8_events_configure(struct counter_device *counter) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); unsigned long irq_enabled = 0; unsigned long irqflags; - size_t channel; + struct counter_event_node *event_node; + unsigned int next_irq_trigger; unsigned long ior_cfg; unsigned long base_offset; spin_lock_irqsave(&priv->lock, irqflags); - /* Enable interrupts for the requested channels, disable for the rest */ - for (channel = 0; channel < QUAD8_NUM_COUNTERS; channel++) { - if (priv->next_irq_trigger[channel] == QUAD8_EVENT_NONE) + list_for_each_entry(event_node, &counter->events_list, l) { + switch (event_node->event) { + case COUNTER_EVENT_OVERFLOW: + next_irq_trigger = QUAD8_EVENT_CARRY; + break; + case COUNTER_EVENT_THRESHOLD: + next_irq_trigger = QUAD8_EVENT_COMPARE; + break; + case COUNTER_EVENT_OVERFLOW_UNDERFLOW: + next_irq_trigger = QUAD8_EVENT_CARRY_BORROW; + break; + case COUNTER_EVENT_INDEX: + next_irq_trigger = QUAD8_EVENT_INDEX; + break; + default: + /* should never reach this path */ + spin_unlock_irqrestore(&priv->lock, irqflags); + return -EINVAL; + } + + /* Skip configuration if it is the same as previously set */ + if (priv->irq_trigger[event_node->channel] == next_irq_trigger) continue; - if (priv->irq_trigger[channel] != priv->next_irq_trigger[channel]) { - /* Save new IRQ function configuration */ - priv->irq_trigger[channel] = priv->next_irq_trigger[channel]; + /* Save new IRQ function configuration */ + priv->irq_trigger[event_node->channel] = next_irq_trigger; - /* Load configuration to I/O Control Register */ - ior_cfg = priv->ab_enable[channel] | - priv->preset_enable[channel] << 1 | - priv->irq_trigger[channel] << 3; - base_offset = priv->base + 2 * channel + 1; - outb(QUAD8_CTR_IOR | ior_cfg, base_offset); - } - - /* Reset next IRQ trigger function configuration */ - priv->next_irq_trigger[channel] = QUAD8_EVENT_NONE; + /* Load configuration to I/O Control Register */ + ior_cfg = priv->ab_enable[event_node->channel] | + priv->preset_enable[event_node->channel] << 1 | + priv->irq_trigger[event_node->channel] << 3; + base_offset = priv->base + 2 * event_node->channel + 1; + outb(QUAD8_CTR_IOR | ior_cfg, base_offset); /* Enable IRQ line */ - irq_enabled |= BIT(channel); + irq_enabled |= 
BIT(event_node->channel); } outb(irq_enabled, priv->base + QUAD8_REG_INDEX_INTERRUPT); @@ -442,35 +454,20 @@ static int quad8_events_configure(struct counter_device *counter) static int quad8_watch_validate(struct counter_device *counter, const struct counter_watch *watch) { - struct quad8 *const priv = counter->priv; + struct counter_event_node *event_node; if (watch->channel > QUAD8_NUM_COUNTERS - 1) return -EINVAL; switch (watch->event) { case COUNTER_EVENT_OVERFLOW: - if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE) - priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY; - else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY) - return -EINVAL; - return 0; case COUNTER_EVENT_THRESHOLD: - if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE) - priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_COMPARE; - else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_COMPARE) - return -EINVAL; - return 0; case COUNTER_EVENT_OVERFLOW_UNDERFLOW: - if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE) - priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY_BORROW; - else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY_BORROW) - return -EINVAL; - return 0; case COUNTER_EVENT_INDEX: - if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE) - priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_INDEX; - else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_INDEX) - return -EINVAL; + list_for_each_entry(event_node, &counter->next_events_list, l) + if (watch->channel == event_node->channel && + watch->event != event_node->event) + return -EINVAL; return 0; default: return -EINVAL; @@ -497,7 +494,7 @@ static int quad8_index_polarity_get(struct counter_device *counter, struct counter_signal *signal, u32 *index_polarity) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id - 16; *index_polarity = priv->index_polarity[channel_id]; @@ -509,7 +506,7 @@ static int quad8_index_polarity_set(struct counter_device *counter, struct counter_signal *signal, u32 index_polarity) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id - 16; const int base_offset = priv->base + 2 * channel_id + 1; unsigned long irqflags; @@ -538,7 +535,7 @@ static int quad8_synchronous_mode_get(struct counter_device *counter, struct counter_signal *signal, u32 *synchronous_mode) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id - 16; *synchronous_mode = priv->synchronous_mode[channel_id]; @@ -550,7 +547,7 @@ static int quad8_synchronous_mode_set(struct counter_device *counter, struct counter_signal *signal, u32 synchronous_mode) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id - 16; const int base_offset = priv->base + 2 * channel_id + 1; unsigned long irqflags; @@ -589,7 +586,7 @@ static int quad8_count_mode_read(struct counter_device *counter, struct counter_count *count, enum counter_count_mode *cnt_mode) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); /* Map 104-QUAD-8 count mode to Generic Counter count mode */ switch (priv->count_mode[count->id]) { @@ -614,7 +611,7 @@ static int quad8_count_mode_write(struct 
counter_device *counter, struct counter_count *count, enum counter_count_mode cnt_mode) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); unsigned int count_mode; unsigned int mode_cfg; const int base_offset = priv->base + 2 * count->id + 1; @@ -661,7 +658,7 @@ static int quad8_count_mode_write(struct counter_device *counter, static int quad8_count_enable_read(struct counter_device *counter, struct counter_count *count, u8 *enable) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); *enable = priv->ab_enable[count->id]; @@ -671,7 +668,7 @@ static int quad8_count_enable_read(struct counter_device *counter, static int quad8_count_enable_write(struct counter_device *counter, struct counter_count *count, u8 enable) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const int base_offset = priv->base + 2 * count->id; unsigned long irqflags; unsigned int ior_cfg; @@ -699,7 +696,7 @@ static const char *const quad8_noise_error_states[] = { static int quad8_error_noise_get(struct counter_device *counter, struct counter_count *count, u32 *noise_error) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); const int base_offset = priv->base + 2 * count->id + 1; *noise_error = !!(inb(base_offset) & QUAD8_FLAG_E); @@ -710,7 +707,7 @@ static int quad8_error_noise_get(struct counter_device *counter, static int quad8_count_preset_read(struct counter_device *counter, struct counter_count *count, u64 *preset) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); *preset = priv->preset[count->id]; @@ -736,7 +733,7 @@ static void quad8_preset_register_set(struct quad8 *const priv, const int id, static int quad8_count_preset_write(struct counter_device *counter, struct counter_count *count, u64 preset) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); unsigned long irqflags; /* Only 24-bit values are supported */ @@ -755,7 +752,7 @@ static int quad8_count_preset_write(struct counter_device *counter, static int quad8_count_ceiling_read(struct counter_device *counter, struct counter_count *count, u64 *ceiling) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); unsigned long irqflags; spin_lock_irqsave(&priv->lock, irqflags); @@ -780,7 +777,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter, static int quad8_count_ceiling_write(struct counter_device *counter, struct counter_count *count, u64 ceiling) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); unsigned long irqflags; /* Only 24-bit values are supported */ @@ -807,7 +804,7 @@ static int quad8_count_preset_enable_read(struct counter_device *counter, struct counter_count *count, u8 *preset_enable) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); *preset_enable = !priv->preset_enable[count->id]; @@ -818,7 +815,7 @@ static int quad8_count_preset_enable_write(struct counter_device *counter, struct counter_count *count, u8 preset_enable) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const int base_offset = priv->base + 2 * count->id + 1; unsigned long irqflags; unsigned int ior_cfg; @@ -845,7 +842,7 @@ static int 
quad8_signal_cable_fault_read(struct counter_device *counter, struct counter_signal *signal, u8 *cable_fault) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id / 2; unsigned long irqflags; bool disabled; @@ -875,7 +872,7 @@ static int quad8_signal_cable_fault_enable_read(struct counter_device *counter, struct counter_signal *signal, u8 *enable) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id / 2; *enable = !!(priv->cable_fault_enable & BIT(channel_id)); @@ -887,7 +884,7 @@ static int quad8_signal_cable_fault_enable_write(struct counter_device *counter, struct counter_signal *signal, u8 enable) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id / 2; unsigned long irqflags; unsigned int cable_fault_enable; @@ -913,7 +910,7 @@ static int quad8_signal_fck_prescaler_read(struct counter_device *counter, struct counter_signal *signal, u8 *prescaler) { - const struct quad8 *const priv = counter->priv; + const struct quad8 *const priv = counter_priv(counter); *prescaler = priv->fck_prescaler[signal->id / 2]; @@ -924,7 +921,7 @@ static int quad8_signal_fck_prescaler_write(struct counter_device *counter, struct counter_signal *signal, u8 prescaler) { - struct quad8 *const priv = counter->priv; + struct quad8 *const priv = counter_priv(counter); const size_t channel_id = signal->id / 2; const int base_offset = priv->base + 2 * channel_id; unsigned long irqflags; @@ -1085,7 +1082,8 @@ static struct counter_count quad8_counts[] = { static irqreturn_t quad8_irq_handler(int irq, void *private) { - struct quad8 *const priv = private; + struct counter_device *counter = private; + struct quad8 *const priv = counter_priv(counter); const unsigned long base = priv->base; unsigned long irq_status; unsigned long channel; @@ -1116,7 +1114,7 @@ static irqreturn_t quad8_irq_handler(int irq, void *private) continue; } - counter_push_event(&priv->counter, event, channel); + counter_push_event(counter, event, channel); } /* Clear pending interrupts on device */ @@ -1127,6 +1125,7 @@ static irqreturn_t quad8_irq_handler(int irq, void *private) static int quad8_probe(struct device *dev, unsigned int id) { + struct counter_device *counter; struct quad8 *priv; int i, j; unsigned int base_offset; @@ -1138,19 +1137,19 @@ static int quad8_probe(struct device *dev, unsigned int id) return -EBUSY; } - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + counter = devm_counter_alloc(dev, sizeof(*priv)); + if (!counter) return -ENOMEM; + priv = counter_priv(counter); /* Initialize Counter device and driver data */ - priv->counter.name = dev_name(dev); - priv->counter.parent = dev; - priv->counter.ops = &quad8_ops; - priv->counter.counts = quad8_counts; - priv->counter.num_counts = ARRAY_SIZE(quad8_counts); - priv->counter.signals = quad8_signals; - priv->counter.num_signals = ARRAY_SIZE(quad8_signals); - priv->counter.priv = priv; + counter->name = dev_name(dev); + counter->parent = dev; + counter->ops = &quad8_ops; + counter->counts = quad8_counts; + counter->num_counts = ARRAY_SIZE(quad8_counts); + counter->signals = quad8_signals; + counter->num_signals = ARRAY_SIZE(quad8_signals); priv->base = base[id]; spin_lock_init(&priv->lock); @@ -1183,20 +1182,22 @@ static int quad8_probe(struct device *dev, unsigned int id) outb(QUAD8_CTR_IOR, base_offset + 
1); /* Disable index function; negative index polarity */ outb(QUAD8_CTR_IDR, base_offset + 1); - /* Initialize next IRQ trigger function configuration */ - priv->next_irq_trigger[i] = QUAD8_EVENT_NONE; } /* Disable Differential Encoder Cable Status for all channels */ outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS); /* Enable all counters and enable interrupt function */ outb(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, base[id] + QUAD8_REG_CHAN_OP); - err = devm_request_irq(dev, irq[id], quad8_irq_handler, IRQF_SHARED, - priv->counter.name, priv); + err = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler, + IRQF_SHARED, counter->name, counter); if (err) return err; - return devm_counter_register(dev, &priv->counter); + err = devm_counter_add(dev, counter); + if (err < 0) + return dev_err_probe(dev, err, "Failed to add counter\n"); + + return 0; } static struct isa_driver quad8_driver = { diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c index 5acc54539623..7e0957eea094 100644 --- a/drivers/counter/counter-core.c +++ b/drivers/counter/counter-core.c @@ -15,6 +15,7 @@ #include <linux/kdev_t.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/slab.h> #include <linux/types.h> #include <linux/wait.h> @@ -24,12 +25,25 @@ /* Provides a unique ID for each counter device */ static DEFINE_IDA(counter_ida); +struct counter_device_allochelper { + struct counter_device counter; + + /* + * This is cache line aligned to ensure private data behaves like if it + * were kmalloced separately. + */ + unsigned long privdata[] ____cacheline_aligned; +}; + static void counter_device_release(struct device *dev) { - struct counter_device *const counter = dev_get_drvdata(dev); + struct counter_device *const counter = + container_of(dev, struct counter_device, dev); counter_chrdev_remove(counter); ida_free(&counter_ida, dev->id); + + kfree(container_of(counter, struct counter_device_allochelper, counter)); } static struct device_type counter_device_type = { @@ -45,62 +59,108 @@ static struct bus_type counter_bus_type = { static dev_t counter_devt; /** - * counter_register - register Counter to the system - * @counter: pointer to Counter to register + * counter_priv - access counter device private data + * @counter: counter device * - * This function registers a Counter to the system. A sysfs "counter" directory - * will be created and populated with sysfs attributes correlating with the - * Counter Signals, Synapses, and Counts respectively. + * Get the counter device private data + */ +void *counter_priv(const struct counter_device *const counter) +{ + struct counter_device_allochelper *ch = + container_of(counter, struct counter_device_allochelper, counter); + + return &ch->privdata; +} +EXPORT_SYMBOL_GPL(counter_priv); + +/** + * counter_alloc - allocate a counter_device + * @sizeof_priv: size of the driver private data + * + * This is part one of counter registration. The structure is allocated + * dynamically to ensure the right lifetime for the embedded struct device. * - * RETURNS: - * 0 on success, negative error number on failure. + * If this succeeds, call counter_put() to get rid of the counter_device again. 
*/ -int counter_register(struct counter_device *const counter) +struct counter_device *counter_alloc(size_t sizeof_priv) { - struct device *const dev = &counter->dev; - int id; + struct counter_device_allochelper *ch; + struct counter_device *counter; + struct device *dev; int err; + ch = kzalloc(sizeof(*ch) + sizeof_priv, GFP_KERNEL); + if (!ch) { + err = -ENOMEM; + goto err_alloc_ch; + } + + counter = &ch->counter; + dev = &counter->dev; + /* Acquire unique ID */ - id = ida_alloc(&counter_ida, GFP_KERNEL); - if (id < 0) - return id; + err = ida_alloc(&counter_ida, GFP_KERNEL); + if (err < 0) + goto err_ida_alloc; + dev->id = err; mutex_init(&counter->ops_exist_lock); - - /* Configure device structure for Counter */ - dev->id = id; dev->type = &counter_device_type; dev->bus = &counter_bus_type; - dev->devt = MKDEV(MAJOR(counter_devt), id); + dev->devt = MKDEV(MAJOR(counter_devt), dev->id); + + err = counter_chrdev_add(counter); + if (err < 0) + goto err_chrdev_add; + + device_initialize(dev); + + return counter; + +err_chrdev_add: + + ida_free(&counter_ida, dev->id); +err_ida_alloc: + + kfree(ch); +err_alloc_ch: + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(counter_alloc); + +void counter_put(struct counter_device *counter) +{ + put_device(&counter->dev); +} +EXPORT_SYMBOL_GPL(counter_put); + +/** + * counter_add - complete registration of a counter + * @counter: the counter to add + * + * This is part two of counter registration. + * + * If this succeeds, call counter_unregister() to get rid of the counter_device again. + */ +int counter_add(struct counter_device *counter) +{ + int err; + struct device *dev = &counter->dev; + if (counter->parent) { dev->parent = counter->parent; dev->of_node = counter->parent->of_node; } - device_initialize(dev); - dev_set_drvdata(dev, counter); err = counter_sysfs_add(counter); if (err < 0) - goto err_free_id; - - err = counter_chrdev_add(counter); - if (err < 0) - goto err_free_id; - - err = cdev_device_add(&counter->chrdev, dev); - if (err < 0) - goto err_remove_chrdev; - - return 0; + return err; -err_remove_chrdev: - counter_chrdev_remove(counter); -err_free_id: - put_device(dev); - return err; + /* implies device_add(dev) */ + return cdev_device_add(&counter->chrdev, dev); } -EXPORT_SYMBOL_GPL(counter_register); +EXPORT_SYMBOL_GPL(counter_add); /** * counter_unregister - unregister Counter from the system @@ -121,8 +181,6 @@ void counter_unregister(struct counter_device *const counter) wake_up(&counter->events_wait); mutex_unlock(&counter->ops_exist_lock); - - put_device(&counter->dev); } EXPORT_SYMBOL_GPL(counter_unregister); @@ -131,30 +189,56 @@ static void devm_counter_release(void *counter) counter_unregister(counter); } +static void devm_counter_put(void *counter) +{ + counter_put(counter); +} + /** - * devm_counter_register - Resource-managed counter_register - * @dev: device to allocate counter_device for - * @counter: pointer to Counter to register + * devm_counter_alloc - allocate a counter_device + * @dev: the device to register the release callback for + * @sizeof_priv: size of the driver private data * - * Managed counter_register. The Counter registered with this function is - * automatically unregistered on driver detach. This function calls - * counter_register internally. Refer to that function for more information. + * This is the device managed version of counter_alloc(). It registers a cleanup + * callback to care for calling counter_put(). 
+ */ +struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv) +{ + struct counter_device *counter; + int err; + + counter = counter_alloc(sizeof_priv); + if (IS_ERR(counter)) + return counter; + + err = devm_add_action_or_reset(dev, devm_counter_put, counter); + if (err < 0) + return ERR_PTR(err); + + return counter; +} +EXPORT_SYMBOL_GPL(devm_counter_alloc); + +/** + * devm_counter_add - complete registration of a counter + * @dev: the device to register the release callback for + * @counter: the counter to add * - * RETURNS: - * 0 on success, negative error number on failure. + * This is the device managed version of counter_add(). It registers a cleanup + * callback to care for calling counter_unregister(). */ -int devm_counter_register(struct device *dev, - struct counter_device *const counter) +int devm_counter_add(struct device *dev, + struct counter_device *const counter) { int err; - err = counter_register(counter); + err = counter_add(counter); if (err < 0) return err; return devm_add_action_or_reset(dev, devm_counter_release, counter); } -EXPORT_SYMBOL_GPL(devm_counter_register); +EXPORT_SYMBOL_GPL(devm_counter_add); #define COUNTER_DEV_MAX 256 diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c index 5ef0478709cd..2a58582a9df4 100644 --- a/drivers/counter/ftm-quaddec.c +++ b/drivers/counter/ftm-quaddec.c @@ -26,7 +26,6 @@ }) struct ftm_quaddec { - struct counter_device counter; struct platform_device *pdev; void __iomem *ftm_base; bool big_endian; @@ -118,7 +117,7 @@ static void ftm_quaddec_disable(void *ftm) static int ftm_quaddec_get_prescaler(struct counter_device *counter, struct counter_count *count, u32 *cnt_mode) { - struct ftm_quaddec *ftm = counter->priv; + struct ftm_quaddec *ftm = counter_priv(counter); uint32_t scflags; ftm_read(ftm, FTM_SC, &scflags); @@ -131,7 +130,7 @@ static int ftm_quaddec_get_prescaler(struct counter_device *counter, static int ftm_quaddec_set_prescaler(struct counter_device *counter, struct counter_count *count, u32 cnt_mode) { - struct ftm_quaddec *ftm = counter->priv; + struct ftm_quaddec *ftm = counter_priv(counter); mutex_lock(&ftm->ftm_quaddec_mutex); @@ -162,7 +161,7 @@ static int ftm_quaddec_count_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct ftm_quaddec *const ftm = counter->priv; + struct ftm_quaddec *const ftm = counter_priv(counter); uint32_t cntval; ftm_read(ftm, FTM_CNT, &cntval); @@ -176,7 +175,7 @@ static int ftm_quaddec_count_write(struct counter_device *counter, struct counter_count *count, const u64 val) { - struct ftm_quaddec *const ftm = counter->priv; + struct ftm_quaddec *const ftm = counter_priv(counter); if (val != 0) { dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n"); @@ -259,17 +258,17 @@ static struct counter_count ftm_quaddec_counts = { static int ftm_quaddec_probe(struct platform_device *pdev) { + struct counter_device *counter; struct ftm_quaddec *ftm; struct device_node *node = pdev->dev.of_node; struct resource *io; int ret; - ftm = devm_kzalloc(&pdev->dev, sizeof(*ftm), GFP_KERNEL); - if (!ftm) + counter = devm_counter_alloc(&pdev->dev, sizeof(*ftm)); + if (!counter) return -ENOMEM; - - platform_set_drvdata(pdev, ftm); + ftm = counter_priv(counter); io = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!io) { @@ -285,14 +284,13 @@ static int ftm_quaddec_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Failed to map memory region\n"); return -EINVAL; } - ftm->counter.name = 
dev_name(&pdev->dev); - ftm->counter.parent = &pdev->dev; - ftm->counter.ops = &ftm_quaddec_cnt_ops; - ftm->counter.counts = &ftm_quaddec_counts; - ftm->counter.num_counts = 1; - ftm->counter.signals = ftm_quaddec_signals; - ftm->counter.num_signals = ARRAY_SIZE(ftm_quaddec_signals); - ftm->counter.priv = ftm; + counter->name = dev_name(&pdev->dev); + counter->parent = &pdev->dev; + counter->ops = &ftm_quaddec_cnt_ops; + counter->counts = &ftm_quaddec_counts; + counter->num_counts = 1; + counter->signals = ftm_quaddec_signals; + counter->num_signals = ARRAY_SIZE(ftm_quaddec_signals); mutex_init(&ftm->ftm_quaddec_mutex); @@ -302,9 +300,9 @@ static int ftm_quaddec_probe(struct platform_device *pdev) if (ret) return ret; - ret = devm_counter_register(&pdev->dev, &ftm->counter); + ret = devm_counter_add(&pdev->dev, counter); if (ret) - return ret; + return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n"); return 0; } diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c index 0924d16de6e2..47a6a9dfc9e8 100644 --- a/drivers/counter/intel-qep.c +++ b/drivers/counter/intel-qep.c @@ -63,7 +63,6 @@ #define INTEL_QEP_CLK_PERIOD_NS 10 struct intel_qep { - struct counter_device counter; struct mutex lock; struct device *dev; void __iomem *regs; @@ -109,7 +108,7 @@ static void intel_qep_init(struct intel_qep *qep) static int intel_qep_count_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct intel_qep *const qep = counter->priv; + struct intel_qep *const qep = counter_priv(counter); pm_runtime_get_sync(qep->dev); *val = intel_qep_readl(qep, INTEL_QEPCOUNT); @@ -176,7 +175,7 @@ static struct counter_synapse intel_qep_count_synapses[] = { static int intel_qep_ceiling_read(struct counter_device *counter, struct counter_count *count, u64 *ceiling) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); pm_runtime_get_sync(qep->dev); *ceiling = intel_qep_readl(qep, INTEL_QEPMAX); @@ -188,7 +187,7 @@ static int intel_qep_ceiling_read(struct counter_device *counter, static int intel_qep_ceiling_write(struct counter_device *counter, struct counter_count *count, u64 max) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); int ret = 0; /* Intel QEP ceiling configuration only supports 32-bit values */ @@ -213,7 +212,7 @@ out: static int intel_qep_enable_read(struct counter_device *counter, struct counter_count *count, u8 *enable) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); *enable = qep->enabled; @@ -223,7 +222,7 @@ static int intel_qep_enable_read(struct counter_device *counter, static int intel_qep_enable_write(struct counter_device *counter, struct counter_count *count, u8 val) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); u32 reg; bool changed; @@ -256,7 +255,7 @@ static int intel_qep_spike_filter_ns_read(struct counter_device *counter, struct counter_count *count, u64 *length) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); u32 reg; pm_runtime_get_sync(qep->dev); @@ -277,7 +276,7 @@ static int intel_qep_spike_filter_ns_write(struct counter_device *counter, struct counter_count *count, u64 length) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); u32 reg; bool enable; int ret = 0; @@ -326,7 +325,7 @@ static int intel_qep_preset_enable_read(struct counter_device *counter, struct counter_count 
*count, u8 *preset_enable) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); u32 reg; pm_runtime_get_sync(qep->dev); @@ -341,7 +340,7 @@ static int intel_qep_preset_enable_read(struct counter_device *counter, static int intel_qep_preset_enable_write(struct counter_device *counter, struct counter_count *count, u8 val) { - struct intel_qep *qep = counter->priv; + struct intel_qep *qep = counter_priv(counter); u32 reg; int ret = 0; @@ -392,14 +391,16 @@ static struct counter_count intel_qep_counter_count[] = { static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id) { + struct counter_device *counter; struct intel_qep *qep; struct device *dev = &pci->dev; void __iomem *regs; int ret; - qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL); - if (!qep) + counter = devm_counter_alloc(dev, sizeof(*qep)); + if (!counter) return -ENOMEM; + qep = counter_priv(counter); ret = pcim_enable_device(pci); if (ret) @@ -422,20 +423,23 @@ static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id) intel_qep_init(qep); pci_set_drvdata(pci, qep); - qep->counter.name = pci_name(pci); - qep->counter.parent = dev; - qep->counter.ops = &intel_qep_counter_ops; - qep->counter.counts = intel_qep_counter_count; - qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count); - qep->counter.signals = intel_qep_signals; - qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals); - qep->counter.priv = qep; + counter->name = pci_name(pci); + counter->parent = dev; + counter->ops = &intel_qep_counter_ops; + counter->counts = intel_qep_counter_count; + counter->num_counts = ARRAY_SIZE(intel_qep_counter_count); + counter->signals = intel_qep_signals; + counter->num_signals = ARRAY_SIZE(intel_qep_signals); qep->enabled = false; pm_runtime_put(dev); pm_runtime_allow(dev); - return devm_counter_register(&pci->dev, &qep->counter); + ret = devm_counter_add(&pci->dev, counter); + if (ret < 0) + return dev_err_probe(&pci->dev, ret, "Failed to add counter\n"); + + return 0; } static void intel_qep_remove(struct pci_dev *pci) diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c index 8514a87fcbee..9e99702470c2 100644 --- a/drivers/counter/interrupt-cnt.c +++ b/drivers/counter/interrupt-cnt.c @@ -16,7 +16,6 @@ struct interrupt_cnt_priv { atomic_t count; - struct counter_device counter; struct gpio_desc *gpio; int irq; bool enabled; @@ -37,7 +36,7 @@ static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id) static int interrupt_cnt_enable_read(struct counter_device *counter, struct counter_count *count, u8 *enable) { - struct interrupt_cnt_priv *priv = counter->priv; + struct interrupt_cnt_priv *priv = counter_priv(counter); *enable = priv->enabled; @@ -47,7 +46,7 @@ static int interrupt_cnt_enable_read(struct counter_device *counter, static int interrupt_cnt_enable_write(struct counter_device *counter, struct counter_count *count, u8 enable) { - struct interrupt_cnt_priv *priv = counter->priv; + struct interrupt_cnt_priv *priv = counter_priv(counter); if (priv->enabled == enable) return 0; @@ -85,7 +84,7 @@ static int interrupt_cnt_action_read(struct counter_device *counter, static int interrupt_cnt_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct interrupt_cnt_priv *priv = counter->priv; + struct interrupt_cnt_priv *priv = counter_priv(counter); *val = atomic_read(&priv->count); @@ -95,7 +94,7 @@ static int interrupt_cnt_read(struct counter_device *counter, static int 
interrupt_cnt_write(struct counter_device *counter, struct counter_count *count, const u64 val) { - struct interrupt_cnt_priv *priv = counter->priv; + struct interrupt_cnt_priv *priv = counter_priv(counter); if (val != (typeof(priv->count.counter))val) return -ERANGE; @@ -122,7 +121,7 @@ static int interrupt_cnt_signal_read(struct counter_device *counter, struct counter_signal *signal, enum counter_signal_level *level) { - struct interrupt_cnt_priv *priv = counter->priv; + struct interrupt_cnt_priv *priv = counter_priv(counter); int ret; if (!priv->gpio) @@ -148,12 +147,14 @@ static const struct counter_ops interrupt_cnt_ops = { static int interrupt_cnt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct counter_device *counter; struct interrupt_cnt_priv *priv; int ret; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + counter = devm_counter_alloc(dev, sizeof(*priv)); + if (!counter) return -ENOMEM; + priv = counter_priv(counter); priv->irq = platform_get_irq_optional(pdev, 0); if (priv->irq == -ENXIO) @@ -184,8 +185,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev) if (!priv->signals.name) return -ENOMEM; - priv->counter.signals = &priv->signals; - priv->counter.num_signals = 1; + counter->signals = &priv->signals; + counter->num_signals = 1; priv->synapses.actions_list = interrupt_cnt_synapse_actions; priv->synapses.num_actions = ARRAY_SIZE(interrupt_cnt_synapse_actions); @@ -199,12 +200,11 @@ static int interrupt_cnt_probe(struct platform_device *pdev) priv->cnts.ext = interrupt_cnt_ext; priv->cnts.num_ext = ARRAY_SIZE(interrupt_cnt_ext); - priv->counter.priv = priv; - priv->counter.name = dev_name(dev); - priv->counter.parent = dev; - priv->counter.ops = &interrupt_cnt_ops; - priv->counter.counts = &priv->cnts; - priv->counter.num_counts = 1; + counter->name = dev_name(dev); + counter->parent = dev; + counter->ops = &interrupt_cnt_ops; + counter->counts = &priv->cnts; + counter->num_counts = 1; irq_set_status_flags(priv->irq, IRQ_NOAUTOEN); ret = devm_request_irq(dev, priv->irq, interrupt_cnt_isr, @@ -213,7 +213,11 @@ static int interrupt_cnt_probe(struct platform_device *pdev) if (ret) return ret; - return devm_counter_register(dev, &priv->counter); + ret = devm_counter_add(dev, counter); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to add counter\n"); + + return 0; } static const struct of_device_id interrupt_cnt_of_match[] = { diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index 0ab1b2716784..00844445143b 100644 --- a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -24,7 +24,6 @@ struct mchp_tc_data { const struct atmel_tcb_config *tc_cfg; - struct counter_device counter; struct regmap *regmap; int qdec_mode; int num_channels; @@ -72,7 +71,7 @@ static int mchp_tc_count_function_read(struct counter_device *counter, struct counter_count *count, enum counter_function *function) { - struct mchp_tc_data *const priv = counter->priv; + struct mchp_tc_data *const priv = counter_priv(counter); if (priv->qdec_mode) *function = COUNTER_FUNCTION_QUADRATURE_X4; @@ -86,7 +85,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter, struct counter_count *count, enum counter_function function) { - struct mchp_tc_data *const priv = counter->priv; + struct mchp_tc_data *const priv = counter_priv(counter); u32 bmr, cmr; regmap_read(priv->regmap, ATMEL_TC_BMR, &bmr); @@ -148,7 +147,7 @@ static int 
mchp_tc_count_signal_read(struct counter_device *counter, struct counter_signal *signal, enum counter_signal_level *lvl) { - struct mchp_tc_data *const priv = counter->priv; + struct mchp_tc_data *const priv = counter_priv(counter); bool sigstatus; u32 sr; @@ -169,7 +168,7 @@ static int mchp_tc_count_action_read(struct counter_device *counter, struct counter_synapse *synapse, enum counter_synapse_action *action) { - struct mchp_tc_data *const priv = counter->priv; + struct mchp_tc_data *const priv = counter_priv(counter); u32 cmr; regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr); @@ -197,7 +196,7 @@ static int mchp_tc_count_action_write(struct counter_device *counter, struct counter_synapse *synapse, enum counter_synapse_action action) { - struct mchp_tc_data *const priv = counter->priv; + struct mchp_tc_data *const priv = counter_priv(counter); u32 edge = ATMEL_TC_ETRGEDG_NONE; /* QDEC mode is rising edge only */ @@ -230,7 +229,7 @@ static int mchp_tc_count_action_write(struct counter_device *counter, static int mchp_tc_count_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct mchp_tc_data *const priv = counter->priv; + struct mchp_tc_data *const priv = counter_priv(counter); u32 cnt; regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CV), &cnt); @@ -296,6 +295,7 @@ static int mchp_tc_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; const struct atmel_tcb_config *tcb_config; const struct of_device_id *match; + struct counter_device *counter; struct mchp_tc_data *priv; char clk_name[7]; struct regmap *regmap; @@ -303,11 +303,10 @@ static int mchp_tc_probe(struct platform_device *pdev) int channel; int ret, i; - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + counter = devm_counter_alloc(&pdev->dev, sizeof(*priv)); + if (!counter) return -ENOMEM; - - platform_set_drvdata(pdev, priv); + priv = counter_priv(counter); match = of_match_node(atmel_tc_of_match, np->parent); tcb_config = match->data; @@ -362,16 +361,19 @@ static int mchp_tc_probe(struct platform_device *pdev) priv->tc_cfg = tcb_config; priv->regmap = regmap; - priv->counter.name = dev_name(&pdev->dev); - priv->counter.parent = &pdev->dev; - priv->counter.ops = &mchp_tc_ops; - priv->counter.num_counts = ARRAY_SIZE(mchp_tc_counts); - priv->counter.counts = mchp_tc_counts; - priv->counter.num_signals = ARRAY_SIZE(mchp_tc_count_signals); - priv->counter.signals = mchp_tc_count_signals; - priv->counter.priv = priv; - - return devm_counter_register(&pdev->dev, &priv->counter); + counter->name = dev_name(&pdev->dev); + counter->parent = &pdev->dev; + counter->ops = &mchp_tc_ops; + counter->num_counts = ARRAY_SIZE(mchp_tc_counts); + counter->counts = mchp_tc_counts; + counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals); + counter->signals = mchp_tc_count_signals; + + ret = devm_counter_add(&pdev->dev, counter); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n"); + + return 0; } static const struct of_device_id mchp_tc_dt_ids[] = { diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index 5168833b1fdf..68031d93ce89 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -20,7 +20,6 @@ #include <linux/types.h> struct stm32_lptim_cnt { - struct counter_device counter; struct device *dev; struct regmap *regmap; struct clk *clk; @@ -141,7 +140,7 @@ static const enum counter_synapse_action 
stm32_lptim_cnt_synapse_actions[] = { static int stm32_lptim_cnt_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); u32 cnt; int ret; @@ -158,7 +157,7 @@ static int stm32_lptim_cnt_function_read(struct counter_device *counter, struct counter_count *count, enum counter_function *function) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); if (!priv->quadrature_mode) { *function = COUNTER_FUNCTION_INCREASE; @@ -177,7 +176,7 @@ static int stm32_lptim_cnt_function_write(struct counter_device *counter, struct counter_count *count, enum counter_function function) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); if (stm32_lptim_is_enabled(priv)) return -EBUSY; @@ -200,7 +199,7 @@ static int stm32_lptim_cnt_enable_read(struct counter_device *counter, struct counter_count *count, u8 *enable) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); int ret; ret = stm32_lptim_is_enabled(priv); @@ -216,7 +215,7 @@ static int stm32_lptim_cnt_enable_write(struct counter_device *counter, struct counter_count *count, u8 enable) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); int ret; /* Check nobody uses the timer, or already disabled/enabled */ @@ -241,7 +240,7 @@ static int stm32_lptim_cnt_ceiling_read(struct counter_device *counter, struct counter_count *count, u64 *ceiling) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); *ceiling = priv->ceiling; @@ -252,7 +251,7 @@ static int stm32_lptim_cnt_ceiling_write(struct counter_device *counter, struct counter_count *count, u64 ceiling) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); if (stm32_lptim_is_enabled(priv)) return -EBUSY; @@ -277,7 +276,7 @@ static int stm32_lptim_cnt_action_read(struct counter_device *counter, struct counter_synapse *synapse, enum counter_synapse_action *action) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); enum counter_function function; int err; @@ -321,7 +320,7 @@ static int stm32_lptim_cnt_action_write(struct counter_device *counter, struct counter_synapse *synapse, enum counter_synapse_action action) { - struct stm32_lptim_cnt *const priv = counter->priv; + struct stm32_lptim_cnt *const priv = counter_priv(counter); enum counter_function function; int err; @@ -411,14 +410,17 @@ static struct counter_count stm32_lptim_in1_counts = { static int stm32_lptim_cnt_probe(struct platform_device *pdev) { struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent); + struct counter_device *counter; struct stm32_lptim_cnt *priv; + int ret; if (IS_ERR_OR_NULL(ddata)) return -EINVAL; - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + counter = devm_counter_alloc(&pdev->dev, sizeof(*priv)); + if (!counter) return -ENOMEM; + priv = counter_priv(counter); priv->dev = &pdev->dev; priv->regmap = ddata->regmap; @@ -426,23 +428,26 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev) priv->ceiling = STM32_LPTIM_MAX_ARR; /* Initialize Counter device */ - priv->counter.name = dev_name(&pdev->dev); 
- priv->counter.parent = &pdev->dev; - priv->counter.ops = &stm32_lptim_cnt_ops; + counter->name = dev_name(&pdev->dev); + counter->parent = &pdev->dev; + counter->ops = &stm32_lptim_cnt_ops; if (ddata->has_encoder) { - priv->counter.counts = &stm32_lptim_enc_counts; - priv->counter.num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals); + counter->counts = &stm32_lptim_enc_counts; + counter->num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals); } else { - priv->counter.counts = &stm32_lptim_in1_counts; - priv->counter.num_signals = 1; + counter->counts = &stm32_lptim_in1_counts; + counter->num_signals = 1; } - priv->counter.num_counts = 1; - priv->counter.signals = stm32_lptim_cnt_signals; - priv->counter.priv = priv; + counter->num_counts = 1; + counter->signals = stm32_lptim_cnt_signals; platform_set_drvdata(pdev, priv); - return devm_counter_register(&pdev->dev, &priv->counter); + ret = devm_counter_add(&pdev->dev, counter); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n"); + + return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index 0546e932db0c..5779ae7c73cf 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -29,7 +29,6 @@ struct stm32_timer_regs { }; struct stm32_timer_cnt { - struct counter_device counter; struct regmap *regmap; struct clk *clk; u32 max_arr; @@ -47,7 +46,7 @@ static const enum counter_function stm32_count_functions[] = { static int stm32_count_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 cnt; regmap_read(priv->regmap, TIM_CNT, &cnt); @@ -59,7 +58,7 @@ static int stm32_count_read(struct counter_device *counter, static int stm32_count_write(struct counter_device *counter, struct counter_count *count, const u64 val) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 ceiling; regmap_read(priv->regmap, TIM_ARR, &ceiling); @@ -73,7 +72,7 @@ static int stm32_count_function_read(struct counter_device *counter, struct counter_count *count, enum counter_function *function) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 smcr; regmap_read(priv->regmap, TIM_SMCR, &smcr); @@ -100,7 +99,7 @@ static int stm32_count_function_write(struct counter_device *counter, struct counter_count *count, enum counter_function function) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 cr1, sms; switch (function) { @@ -140,7 +139,7 @@ static int stm32_count_direction_read(struct counter_device *counter, struct counter_count *count, enum counter_count_direction *direction) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 cr1; regmap_read(priv->regmap, TIM_CR1, &cr1); @@ -153,7 +152,7 @@ static int stm32_count_direction_read(struct counter_device *counter, static int stm32_count_ceiling_read(struct counter_device *counter, struct counter_count *count, u64 *ceiling) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 arr; regmap_read(priv->regmap, TIM_ARR, &arr); @@ -166,7 +165,7 @@ static int stm32_count_ceiling_read(struct counter_device *counter, static int 
stm32_count_ceiling_write(struct counter_device *counter, struct counter_count *count, u64 ceiling) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); if (ceiling > priv->max_arr) return -ERANGE; @@ -181,7 +180,7 @@ static int stm32_count_ceiling_write(struct counter_device *counter, static int stm32_count_enable_read(struct counter_device *counter, struct counter_count *count, u8 *enable) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 cr1; regmap_read(priv->regmap, TIM_CR1, &cr1); @@ -194,7 +193,7 @@ static int stm32_count_enable_read(struct counter_device *counter, static int stm32_count_enable_write(struct counter_device *counter, struct counter_count *count, u8 enable) { - struct stm32_timer_cnt *const priv = counter->priv; + struct stm32_timer_cnt *const priv = counter_priv(counter); u32 cr1; if (enable) { @@ -317,31 +316,38 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent); struct device *dev = &pdev->dev; struct stm32_timer_cnt *priv; + struct counter_device *counter; + int ret; if (IS_ERR_OR_NULL(ddata)) return -EINVAL; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + counter = devm_counter_alloc(dev, sizeof(*priv)); + if (!counter) return -ENOMEM; + priv = counter_priv(counter); + priv->regmap = ddata->regmap; priv->clk = ddata->clk; priv->max_arr = ddata->max_arr; - priv->counter.name = dev_name(dev); - priv->counter.parent = dev; - priv->counter.ops = &stm32_timer_cnt_ops; - priv->counter.counts = &stm32_counts; - priv->counter.num_counts = 1; - priv->counter.signals = stm32_signals; - priv->counter.num_signals = ARRAY_SIZE(stm32_signals); - priv->counter.priv = priv; + counter->name = dev_name(dev); + counter->parent = dev; + counter->ops = &stm32_timer_cnt_ops; + counter->counts = &stm32_counts; + counter->num_counts = 1; + counter->signals = stm32_signals; + counter->num_signals = ARRAY_SIZE(stm32_signals); platform_set_drvdata(pdev, priv); /* Register Counter device */ - return devm_counter_register(dev, &priv->counter); + ret = devm_counter_add(dev, counter); + if (ret < 0) + dev_err_probe(dev, ret, "Failed to add counter\n"); + + return ret; } static int __maybe_unused stm32_timer_cnt_suspend(struct device *dev) diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c index 09817c953f9a..0489d26eb47c 100644 --- a/drivers/counter/ti-eqep.c +++ b/drivers/counter/ti-eqep.c @@ -87,10 +87,15 @@ struct ti_eqep_cnt { struct regmap *regmap16; }; +static struct ti_eqep_cnt *ti_eqep_count_from_counter(struct counter_device *counter) +{ + return counter_priv(counter); +} + static int ti_eqep_count_read(struct counter_device *counter, struct counter_count *count, u64 *val) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); u32 cnt; regmap_read(priv->regmap32, QPOSCNT, &cnt); @@ -102,7 +107,7 @@ static int ti_eqep_count_read(struct counter_device *counter, static int ti_eqep_count_write(struct counter_device *counter, struct counter_count *count, u64 val) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); u32 max; regmap_read(priv->regmap32, QPOSMAX, &max); @@ -116,7 +121,7 @@ static int ti_eqep_function_read(struct counter_device *counter, struct counter_count *count, enum counter_function *function) { - struct 
ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); u32 qdecctl; regmap_read(priv->regmap16, QDECCTL, &qdecctl); @@ -143,7 +148,7 @@ static int ti_eqep_function_write(struct counter_device *counter, struct counter_count *count, enum counter_function function) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); enum ti_eqep_count_func qsrc; switch (function) { @@ -173,7 +178,7 @@ static int ti_eqep_action_read(struct counter_device *counter, struct counter_synapse *synapse, enum counter_synapse_action *action) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); enum counter_function function; u32 qdecctl; int err; @@ -245,7 +250,7 @@ static int ti_eqep_position_ceiling_read(struct counter_device *counter, struct counter_count *count, u64 *ceiling) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); u32 qposmax; regmap_read(priv->regmap32, QPOSMAX, &qposmax); @@ -259,7 +264,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter, struct counter_count *count, u64 ceiling) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); if (ceiling != (u32)ceiling) return -ERANGE; @@ -272,7 +277,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter, static int ti_eqep_position_enable_read(struct counter_device *counter, struct counter_count *count, u8 *enable) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); u32 qepctl; regmap_read(priv->regmap16, QEPCTL, &qepctl); @@ -285,7 +290,7 @@ static int ti_eqep_position_enable_read(struct counter_device *counter, static int ti_eqep_position_enable_write(struct counter_device *counter, struct counter_count *count, u8 enable) { - struct ti_eqep_cnt *priv = counter->priv; + struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter); regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0); @@ -368,13 +373,15 @@ static const struct regmap_config ti_eqep_regmap16_config = { static int ti_eqep_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct counter_device *counter; struct ti_eqep_cnt *priv; void __iomem *base; int err; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + counter = devm_counter_alloc(dev, sizeof(*priv)); + if (!counter) return -ENOMEM; + priv = counter_priv(counter); base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) @@ -390,16 +397,15 @@ static int ti_eqep_probe(struct platform_device *pdev) if (IS_ERR(priv->regmap16)) return PTR_ERR(priv->regmap16); - priv->counter.name = dev_name(dev); - priv->counter.parent = dev; - priv->counter.ops = &ti_eqep_counter_ops; - priv->counter.counts = ti_eqep_counts; - priv->counter.num_counts = ARRAY_SIZE(ti_eqep_counts); - priv->counter.signals = ti_eqep_signals; - priv->counter.num_signals = ARRAY_SIZE(ti_eqep_signals); - priv->counter.priv = priv; + counter->name = dev_name(dev); + counter->parent = dev; + counter->ops = &ti_eqep_counter_ops; + counter->counts = ti_eqep_counts; + counter->num_counts = ARRAY_SIZE(ti_eqep_counts); + counter->signals = ti_eqep_signals; + counter->num_signals = ARRAY_SIZE(ti_eqep_signals); - platform_set_drvdata(pdev, priv); + platform_set_drvdata(pdev, counter); /* * Need to make sure power is turned on. 
On AM33xx, this comes from the @@ -409,7 +415,7 @@ static int ti_eqep_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_sync(dev); - err = counter_register(&priv->counter); + err = counter_add(counter); if (err < 0) { pm_runtime_put_sync(dev); pm_runtime_disable(dev); @@ -421,10 +427,10 @@ static int ti_eqep_remove(struct platform_device *pdev) { - struct ti_eqep_cnt *priv = platform_get_drvdata(pdev); + struct counter_device *counter = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; - counter_unregister(&priv->counter); + counter_unregister(counter); pm_runtime_put_sync(dev); pm_runtime_disable(dev); diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index e2375d992308..8e977b7627cb 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -404,7 +404,7 @@ static int virtcrypto_probe(struct virtio_device *vdev) free_engines: virtcrypto_clear_crypto_engines(vcrypto); free_vqs: - vcrypto->vdev->config->reset(vdev); + virtio_reset_device(vdev); virtcrypto_del_vqs(vcrypto); free_dev: virtcrypto_devmgr_rm_dev(vcrypto); @@ -436,7 +436,7 @@ static void virtcrypto_remove(struct virtio_device *vdev) if (virtcrypto_dev_started(vcrypto)) virtcrypto_dev_stop(vcrypto); - vdev->config->reset(vdev); + virtio_reset_device(vdev); virtcrypto_free_unused_reqs(vcrypto); virtcrypto_clear_crypto_engines(vcrypto); virtcrypto_del_vqs(vcrypto); @@ -456,7 +456,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev) { struct virtio_crypto *vcrypto = vdev->priv; - vdev->config->reset(vdev); + virtio_reset_device(vdev); virtcrypto_free_unused_reqs(vcrypto); if (virtcrypto_dev_started(vcrypto)) virtcrypto_dev_stop(vcrypto); @@ -492,7 +492,7 @@ static int virtcrypto_restore(struct virtio_device *vdev) free_engines: virtcrypto_clear_crypto_engines(vcrypto); free_vqs: - vcrypto->vdev->config->reset(vdev); + virtio_reset_device(vdev); virtcrypto_del_vqs(vcrypto); return err; } diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index ee4568ef757c..1dad813ee4a6 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -127,11 +127,35 @@ ATTRIBUTE_GROUPS(dax_drv); static int dax_bus_match(struct device *dev, struct device_driver *drv); +/* + * Static dax regions are regions created by an external subsystem, such as + * nvdimm, where a single range is assigned. Their boundaries are set by the + * external subsystem and are usually limited to one physical memory range. + * For example, for PMEM they are usually defined by NVDIMM Namespace + * boundaries (i.e. a single contiguous range). + * + * In dynamic dax regions, the assigned region can be partitioned by the dax + * core into multiple subdivisions. A subdivision is represented by one + * /dev/daxN.M device composed of one or more potentially discontiguous ranges. + * + * When allocating a dax region, drivers must set whether it's static + * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned + * to dax core when calling devm_create_dev_dax(), whereas in dynamic dax + * devices it is NULL but afterwards allocated by dax core on device ->probe(). + * Care is needed to make sure that dynamic dax devices are torn down with a + * cleared @pgmap field (see kill_dev_dax()).
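+ *
+ * As an illustration, a region driver handing dax core one fixed range would
+ * pass the flag at region creation time (a sketch; the surrounding arguments
+ * are placeholders for the usual alloc_dax_region() parameters):
+ *
+ *	dax_region = alloc_dax_region(dev, region_id, &range, target_node,
+ *				      align, IORESOURCE_DAX_STATIC);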
+ */ static bool is_static(struct dax_region *dax_region) { return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0; } +bool static_dev_dax(struct dev_dax *dev_dax) +{ + return is_static(dev_dax->region); +} +EXPORT_SYMBOL_GPL(static_dev_dax); + static u64 dev_dax_size(struct dev_dax *dev_dax) { u64 size = 0; @@ -361,6 +385,14 @@ void kill_dev_dax(struct dev_dax *dev_dax) kill_dax(dax_dev); unmap_mapping_range(inode->i_mapping, 0, 0, 1); + + /* + * Dynamic dax regions have the pgmap allocated via devm_kzalloc() + * and thus freed by devm. Clear the pgmap so that probe() does not see + * stale pgmap ranges from previous reconfigurations of region devices. + */ + if (!static_dev_dax(dev_dax)) + dev_dax->pgmap = NULL; } EXPORT_SYMBOL_GPL(kill_dev_dax); diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h index 381cec9ff05c..fbb940293d6d 100644 --- a/drivers/dax/bus.h +++ b/drivers/dax/bus.h @@ -39,6 +39,7 @@ int __dax_driver_register(struct dax_device_driver *dax_drv, __dax_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) void dax_driver_unregister(struct dax_device_driver *dax_drv); void kill_dev_dax(struct dev_dax *dev_dax); +bool static_dev_dax(struct dev_dax *dev_dax); /* * While run_dax() is potentially a generic operation that could be diff --git a/drivers/dax/device.c b/drivers/dax/device.c index e58d597f0415..d33a0613ed0c 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -73,11 +73,39 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, return -1; } +static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn, + unsigned long fault_size) +{ + unsigned long i, nr_pages = fault_size / PAGE_SIZE; + struct file *filp = vmf->vma->vm_file; + struct dev_dax *dev_dax = filp->private_data; + pgoff_t pgoff; + + /* mapping is only set on the head */ + if (dev_dax->pgmap->vmemmap_shift) + nr_pages = 1; + + pgoff = linear_page_index(vmf->vma, + ALIGN(vmf->address, fault_size)); + + for (i = 0; i < nr_pages; i++) { + struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i); + + page = compound_head(page); + if (page->mapping) + continue; + + page->mapping = filp->f_mapping; + page->index = pgoff + i; + } +} + static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax, - struct vm_fault *vmf, pfn_t *pfn) + struct vm_fault *vmf) { struct device *dev = &dev_dax->dev; phys_addr_t phys; + pfn_t pfn; unsigned int fault_size = PAGE_SIZE; if (check_vma(dev_dax, vmf->vma, __func__)) @@ -98,18 +126,21 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); - return vmf_insert_mixed(vmf->vma, vmf->address, *pfn); + dax_set_mapping(vmf, pfn, fault_size); + + return vmf_insert_mixed(vmf->vma, vmf->address, pfn); } static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, - struct vm_fault *vmf, pfn_t *pfn) + struct vm_fault *vmf) { unsigned long pmd_addr = vmf->address & PMD_MASK; struct device *dev = &dev_dax->dev; phys_addr_t phys; pgoff_t pgoff; + pfn_t pfn; unsigned int fault_size = PMD_SIZE; if (check_vma(dev_dax, vmf->vma, __func__)) @@ -138,19 +169,22 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); - return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); + dax_set_mapping(vmf, pfn, fault_size); + + return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); } #ifdef
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, - struct vm_fault *vmf, pfn_t *pfn) + struct vm_fault *vmf) { unsigned long pud_addr = vmf->address & PUD_MASK; struct device *dev = &dev_dax->dev; phys_addr_t phys; pgoff_t pgoff; + pfn_t pfn; unsigned int fault_size = PUD_SIZE; @@ -180,13 +214,15 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); - return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); + dax_set_mapping(vmf, pfn, fault_size); + + return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); } #else static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, - struct vm_fault *vmf, pfn_t *pfn) + struct vm_fault *vmf) { return VM_FAULT_FALLBACK; } @@ -196,10 +232,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size) { struct file *filp = vmf->vma->vm_file; - unsigned long fault_size; vm_fault_t rc = VM_FAULT_SIGBUS; int id; - pfn_t pfn; struct dev_dax *dev_dax = filp->private_data; dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm, @@ -209,43 +243,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, id = dax_read_lock(); switch (pe_size) { case PE_SIZE_PTE: - fault_size = PAGE_SIZE; - rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn); + rc = __dev_dax_pte_fault(dev_dax, vmf); break; case PE_SIZE_PMD: - fault_size = PMD_SIZE; - rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn); + rc = __dev_dax_pmd_fault(dev_dax, vmf); break; case PE_SIZE_PUD: - fault_size = PUD_SIZE; - rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn); + rc = __dev_dax_pud_fault(dev_dax, vmf); break; default: rc = VM_FAULT_SIGBUS; } - if (rc == VM_FAULT_NOPAGE) { - unsigned long i; - pgoff_t pgoff; - - /* - * In the device-dax case the only possibility for a - * VM_FAULT_NOPAGE result is when device-dax capacity is - * mapped. No need to consider the zero page, or racing - * conflicting mappings. 
- */ - pgoff = linear_page_index(vmf->vma, vmf->address - & ~(fault_size - 1)); - for (i = 0; i < fault_size / PAGE_SIZE; i++) { - struct page *page; - - page = pfn_to_page(pfn_t_to_pfn(pfn) + i); - if (page->mapping) - continue; - page->mapping = filp->f_mapping; - page->index = pgoff + i; - } - } dax_read_unlock(id); return rc; @@ -398,17 +407,34 @@ int dev_dax_probe(struct dev_dax *dev_dax) void *addr; int rc, i; - pgmap = dev_dax->pgmap; - if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1, - "static pgmap / multi-range device conflict\n")) - return -EINVAL; + if (static_dev_dax(dev_dax)) { + if (dev_dax->nr_range > 1) { + dev_warn(dev, + "static pgmap / multi-range device conflict\n"); + return -EINVAL; + } - if (!pgmap) { - pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range) - * (dev_dax->nr_range - 1), GFP_KERNEL); + pgmap = dev_dax->pgmap; + } else { + if (dev_dax->pgmap) { + dev_warn(dev, + "dynamic-dax with pre-populated page map\n"); + return -EINVAL; + } + + pgmap = devm_kzalloc(dev, + struct_size(pgmap, ranges, dev_dax->nr_range - 1), + GFP_KERNEL); if (!pgmap) return -ENOMEM; + pgmap->nr_range = dev_dax->nr_range; + dev_dax->pgmap = pgmap; + + for (i = 0; i < dev_dax->nr_range; i++) { + struct range *range = &dev_dax->ranges[i].range; + pgmap->ranges[i] = *range; + } } for (i = 0; i < dev_dax->nr_range; i++) { @@ -420,12 +446,12 @@ int dev_dax_probe(struct dev_dax *dev_dax) i, range->start, range->end); return -EBUSY; } - /* don't update the range for static pgmap */ - if (!dev_dax->pgmap) - pgmap->ranges[i] = *range; } pgmap->type = MEMORY_DEVICE_GENERIC; + if (dev_dax->align > PAGE_SIZE) + pgmap->vmemmap_shift = + order_base_2(dev_dax->align >> PAGE_SHIFT); addr = devm_memremap_pages(dev, pgmap); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 0c05b79870f9..83f02bd51dda 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -124,10 +124,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, struct cma_heap_buffer *buffer = dmabuf->priv; struct dma_heap_attachment *a; + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); - mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; @@ -144,10 +145,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, struct cma_heap_buffer *buffer = dmabuf->priv; struct dma_heap_attachment *a; + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) flush_kernel_vmap_range(buffer->vaddr, buffer->len); - mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 275a76f188ae..a1da2b4b6d73 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -99,6 +99,7 @@ #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ +#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27) #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ @@ -252,15 +253,15 @@ struct at_xdmac { /* Linked List 
Descriptor */ struct at_xdmac_lld { - dma_addr_t mbr_nda; /* Next Descriptor Member */ - u32 mbr_ubc; /* Microblock Control Member */ - dma_addr_t mbr_sa; /* Source Address Member */ - dma_addr_t mbr_da; /* Destination Address Member */ - u32 mbr_cfg; /* Configuration Register */ - u32 mbr_bc; /* Block Control Register */ - u32 mbr_ds; /* Data Stride Register */ - u32 mbr_sus; /* Source Microblock Stride Register */ - u32 mbr_dus; /* Destination Microblock Stride Register */ + u32 mbr_nda; /* Next Descriptor Member */ + u32 mbr_ubc; /* Microblock Control Member */ + u32 mbr_sa; /* Source Address Member */ + u32 mbr_da; /* Destination Address Member */ + u32 mbr_cfg; /* Configuration Register */ + u32 mbr_bc; /* Block Control Register */ + u32 mbr_ds; /* Data Stride Register */ + u32 mbr_sus; /* Source Microblock Stride Register */ + u32 mbr_dus; /* Destination Microblock Stride Register */ }; /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ @@ -385,9 +386,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); - if (at_xdmac_chan_is_enabled(atchan)) - return; - /* Set transfer as active to not try to start it again. */ first->active_xfer = true; @@ -405,7 +403,8 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, */ if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) + else if ((first->lld.mbr_ubc & + AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; @@ -476,13 +475,12 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&atchan->lock, irqflags); cookie = dma_cookie_assign(tx); + list_add_tail(&desc->xfer_node, &atchan->xfers_list); + spin_unlock_irqrestore(&atchan->lock, irqflags); + dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", __func__, atchan, desc); - list_add_tail(&desc->xfer_node, &atchan->xfers_list); - if (list_is_singular(&atchan->xfers_list)) - at_xdmac_start_xfer(atchan, desc); - spin_unlock_irqrestore(&atchan->lock, irqflags); return cookie; } @@ -733,7 +731,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) - list_splice_init(&first->descs_list, &atchan->free_descs_list); + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); goto spin_unlock; } @@ -821,7 +820,8 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) - list_splice_init(&first->descs_list, &atchan->free_descs_list); + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); spin_unlock_irqrestore(&atchan->lock, irqflags); return NULL; } @@ -1055,8 +1055,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, src_addr, dst_addr, xt, chunk); if (!desc) { - list_splice_init(&first->descs_list, - &atchan->free_descs_list); + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); return NULL; } @@ -1136,7 +1136,8 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) - list_splice_init(&first->descs_list, &atchan->free_descs_list); + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); return NULL; } @@ -1312,8 +1313,8 
@@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, sg_dma_len(sg), value); if (!desc && first) - list_splice_init(&first->descs_list, - &atchan->free_descs_list); + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); if (!first) first = desc; @@ -1586,20 +1587,6 @@ spin_unlock: return ret; } -/* Call must be protected by lock. */ -static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, - struct at_xdmac_desc *desc) -{ - dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); - - /* - * Remove the transfer from the transfer list then move the transfer - * descriptors into the free descriptors list. - */ - list_del(&desc->xfer_node); - list_splice_init(&desc->descs_list, &atchan->free_descs_list); -} - static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) { struct at_xdmac_desc *desc; @@ -1608,14 +1595,14 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) * If channel is enabled, do nothing, advance_work will be triggered * after the interruption. */ - if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { - desc = list_first_entry(&atchan->xfers_list, - struct at_xdmac_desc, - xfer_node); - dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); - if (!desc->active_xfer) - at_xdmac_start_xfer(atchan, desc); - } + if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list)) + return; + + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, + xfer_node); + dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); + if (!desc->active_xfer) + at_xdmac_start_xfer(atchan, desc); } static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) @@ -1623,16 +1610,22 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) struct at_xdmac_desc *desc; struct dma_async_tx_descriptor *txd; - if (!list_empty(&atchan->xfers_list)) { - desc = list_first_entry(&atchan->xfers_list, - struct at_xdmac_desc, xfer_node); - txd = &desc->tx_dma_desc; - - if (txd->flags & DMA_PREP_INTERRUPT) - dmaengine_desc_get_callback_invoke(txd, NULL); + spin_lock_irq(&atchan->lock); + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", + __func__, atchan->irq_status); + if (list_empty(&atchan->xfers_list)) { + spin_unlock_irq(&atchan->lock); + return; } + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, + xfer_node); + spin_unlock_irq(&atchan->lock); + txd = &desc->tx_dma_desc; + if (txd->flags & DMA_PREP_INTERRUPT) + dmaengine_desc_get_callback_invoke(txd, NULL); } +/* Called with atchan->lock held. 
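+ * (In this rework, at_xdmac_tasklet() takes the lock before calling in;
+ * the handler only disables the channel and reports the faulty descriptor,
+ * and completion of that descriptor is left to the tasklet afterwards.)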
*/ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) { struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); @@ -1651,8 +1644,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) if (atchan->irq_status & AT_XDMAC_CIS_ROIS) dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); - spin_lock_irq(&atchan->lock); - /* Channel must be disabled first as it's not done automatically */ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) @@ -1662,8 +1653,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) struct at_xdmac_desc, xfer_node); - spin_unlock_irq(&atchan->lock); - /* Print bad descriptor's details if needed */ dev_dbg(chan2dev(&atchan->chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", @@ -1677,50 +1666,52 @@ static void at_xdmac_tasklet(struct tasklet_struct *t) { struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet); struct at_xdmac_desc *desc; + struct dma_async_tx_descriptor *txd; u32 error_mask; + if (at_xdmac_chan_is_cyclic(atchan)) + return at_xdmac_handle_cyclic(atchan); + + error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS | + AT_XDMAC_CIS_ROIS; + + spin_lock_irq(&atchan->lock); + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", __func__, atchan->irq_status); - error_mask = AT_XDMAC_CIS_RBEIS - | AT_XDMAC_CIS_WBEIS - | AT_XDMAC_CIS_ROIS; - - if (at_xdmac_chan_is_cyclic(atchan)) { - at_xdmac_handle_cyclic(atchan); - } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) - || (atchan->irq_status & error_mask)) { - struct dma_async_tx_descriptor *txd; - - if (atchan->irq_status & error_mask) - at_xdmac_handle_error(atchan); - - spin_lock_irq(&atchan->lock); - desc = list_first_entry(&atchan->xfers_list, - struct at_xdmac_desc, - xfer_node); - dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); - if (!desc->active_xfer) { - dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); - spin_unlock_irq(&atchan->lock); - return; - } + if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) && + !(atchan->irq_status & error_mask)) + return; - txd = &desc->tx_dma_desc; + if (atchan->irq_status & error_mask) + at_xdmac_handle_error(atchan); - at_xdmac_remove_xfer(atchan, desc); + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, + xfer_node); + dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); + if (!desc->active_xfer) { + dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); spin_unlock_irq(&atchan->lock); + return; + } - dma_cookie_complete(txd); - if (txd->flags & DMA_PREP_INTERRUPT) - dmaengine_desc_get_callback_invoke(txd, NULL); + txd = &desc->tx_dma_desc; + dma_cookie_complete(txd); + /* Remove the transfer from the transfer list. */ + list_del(&desc->xfer_node); + spin_unlock_irq(&atchan->lock); - dma_run_dependencies(txd); + if (txd->flags & DMA_PREP_INTERRUPT) + dmaengine_desc_get_callback_invoke(txd, NULL); - spin_lock_irq(&atchan->lock); - at_xdmac_advance_work(atchan); - spin_unlock_irq(&atchan->lock); - } + dma_run_dependencies(txd); + + spin_lock_irq(&atchan->lock); + /* Move the xfer descriptors into the free descriptors list. 
*/ + list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list); + at_xdmac_advance_work(atchan); + spin_unlock_irq(&atchan->lock); } static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) @@ -1784,11 +1775,9 @@ static void at_xdmac_issue_pending(struct dma_chan *chan) dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); - if (!at_xdmac_chan_is_cyclic(atchan)) { - spin_lock_irqsave(&atchan->lock, flags); - at_xdmac_advance_work(atchan); - spin_unlock_irqrestore(&atchan->lock, flags); - } + spin_lock_irqsave(&atchan->lock, flags); + at_xdmac_advance_work(atchan); + spin_unlock_irqrestore(&atchan->lock, flags); return; } @@ -1866,8 +1855,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) cpu_relax(); /* Cancel all pending transfers. */ - list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) - at_xdmac_remove_xfer(atchan, desc); + list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) { + list_del(&desc->xfer_node); + list_splice_tail_init(&desc->descs_list, + &atchan->free_descs_list); + } clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); @@ -2031,7 +2023,7 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev) static int at_xdmac_probe(struct platform_device *pdev) { struct at_xdmac *atxdmac; - int irq, size, nr_channels, i, ret; + int irq, nr_channels, i, ret; void __iomem *base; u32 reg; @@ -2056,9 +2048,9 @@ static int at_xdmac_probe(struct platform_device *pdev) return -EINVAL; } - size = sizeof(*atxdmac); - size += nr_channels * sizeof(struct at_xdmac_chan); - atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + atxdmac = devm_kzalloc(&pdev->dev, + struct_size(atxdmac, chan, nr_channels), + GFP_KERNEL); if (!atxdmac) { dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); return -ENOMEM; diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 96701dedcac8..fc513eb2b289 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -104,10 +104,10 @@ * descriptor base address in the upper 8 bits. */ struct jz4780_dma_hwdesc { - uint32_t dcm; - uint32_t dsa; - uint32_t dta; - uint32_t dtc; + u32 dcm; + u32 dsa; + u32 dta; + u32 dtc; }; /* Size of allocations for hardware descriptor blocks. 
*/ @@ -122,7 +122,8 @@ struct jz4780_dma_desc { dma_addr_t desc_phys; unsigned int count; enum dma_transaction_type type; - uint32_t status; + u32 transfer_type; + u32 status; }; struct jz4780_dma_chan { @@ -130,8 +131,8 @@ struct jz4780_dma_chan { unsigned int id; struct dma_pool *desc_pool; - uint32_t transfer_type; - uint32_t transfer_shift; + u32 transfer_type_tx, transfer_type_rx; + u32 transfer_shift; struct dma_slave_config config; struct jz4780_dma_desc *desc; @@ -152,12 +153,12 @@ struct jz4780_dma_dev { unsigned int irq; const struct jz4780_dma_soc_data *soc_data; - uint32_t chan_reserved; + u32 chan_reserved; struct jz4780_dma_chan chan[]; }; struct jz4780_dma_filter_data { - uint32_t transfer_type; + u32 transfer_type_tx, transfer_type_rx; int channel; }; @@ -179,26 +180,26 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent( dma_device); } -static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma, +static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma, unsigned int chn, unsigned int reg) { return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn)); } static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma, - unsigned int chn, unsigned int reg, uint32_t val) + unsigned int chn, unsigned int reg, u32 val) { writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn)); } -static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma, +static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma, unsigned int reg) { return readl(jzdma->ctrl_base + reg); } static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma, - unsigned int reg, uint32_t val) + unsigned int reg, u32 val) { writel(val, jzdma->ctrl_base + reg); } @@ -226,9 +227,10 @@ static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma, jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn)); } -static struct jz4780_dma_desc *jz4780_dma_desc_alloc( - struct jz4780_dma_chan *jzchan, unsigned int count, - enum dma_transaction_type type) +static struct jz4780_dma_desc * +jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count, + enum dma_transaction_type type, + enum dma_transfer_direction direction) { struct jz4780_dma_desc *desc; @@ -248,6 +250,12 @@ static struct jz4780_dma_desc *jz4780_dma_desc_alloc( desc->count = count; desc->type = type; + + if (direction == DMA_DEV_TO_MEM) + desc->transfer_type = jzchan->transfer_type_rx; + else + desc->transfer_type = jzchan->transfer_type_tx; + return desc; } @@ -260,8 +268,8 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc) kfree(desc); } -static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan, - unsigned long val, uint32_t *shift) +static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan, + unsigned long val, u32 *shift) { struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); int ord = ffs(val) - 1; @@ -303,7 +311,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, enum dma_transfer_direction direction) { struct dma_slave_config *config = &jzchan->config; - uint32_t width, maxburst, tsz; + u32 width, maxburst, tsz; if (direction == DMA_MEM_TO_DEV) { desc->dcm = JZ_DMA_DCM_SAI; @@ -361,7 +369,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg( unsigned int i; int err; - desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE); + desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction); if (!desc) return NULL; @@ -410,7 +418,7 @@ static struct dma_async_tx_descriptor 
*jz4780_dma_prep_dma_cyclic( periods = buf_len / period_len; - desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC); + desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction); if (!desc) return NULL; @@ -453,16 +461,16 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy( { struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); struct jz4780_dma_desc *desc; - uint32_t tsz; + u32 tsz; - desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY); + desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0); if (!desc) return NULL; tsz = jz4780_dma_transfer_size(jzchan, dest | src | len, &jzchan->transfer_shift); - jzchan->transfer_type = JZ_DMA_DRT_AUTO; + desc->transfer_type = JZ_DMA_DRT_AUTO; desc->desc[0].dsa = src; desc->desc[0].dta = dest; @@ -528,7 +536,7 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan) /* Set transfer type. */ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT, - jzchan->transfer_type); + jzchan->desc->transfer_type); /* * Set the transfer count. This is redundant for a descriptor-driven @@ -670,7 +678,7 @@ static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma, { const unsigned int soc_flags = jzdma->soc_data->flags; struct jz4780_dma_desc *desc = jzchan->desc; - uint32_t dcs; + u32 dcs; bool ack = true; spin_lock(&jzchan->vchan.lock); @@ -727,7 +735,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data) struct jz4780_dma_dev *jzdma = data; unsigned int nb_channels = jzdma->soc_data->nb_channels; unsigned long pending; - uint32_t dmac; + u32 dmac; int i; pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP); @@ -788,7 +796,8 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param) return false; } - jzchan->transfer_type = data->transfer_type; + jzchan->transfer_type_tx = data->transfer_type_tx; + jzchan->transfer_type_rx = data->transfer_type_rx; return true; } @@ -800,11 +809,17 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, dma_cap_mask_t mask = jzdma->dma_device.cap_mask; struct jz4780_dma_filter_data data; - if (dma_spec->args_count != 2) + if (dma_spec->args_count == 2) { + data.transfer_type_tx = dma_spec->args[0]; + data.transfer_type_rx = dma_spec->args[0]; + data.channel = dma_spec->args[1]; + } else if (dma_spec->args_count == 3) { + data.transfer_type_tx = dma_spec->args[0]; + data.transfer_type_rx = dma_spec->args[1]; + data.channel = dma_spec->args[2]; + } else { return NULL; - - data.transfer_type = dma_spec->args[0]; - data.channel = dma_spec->args[1]; + } if (data.channel > -1) { if (data.channel >= jzdma->soc_data->nb_channels) { @@ -822,7 +837,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, return NULL; } - jzdma->chan[data.channel].transfer_type = data.transfer_type; + jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx; + jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx; return dma_get_slave_channel( &jzdma->chan[data.channel].vchan.chan); @@ -938,6 +954,14 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzchan->vchan.desc_free = jz4780_dma_desc_free; } + /* + * On JZ4760, chan0 won't enable properly the first time. + * Enabling then disabling chan1 will magically make chan0 work + * correctly. 
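The dma-jz4780.c changes in this hunk move the request ("transfer") type out of the channel and into each descriptor, split into RX and TX variants, so one channel can serve both directions and a memcpy descriptor can carry the driver's JZ_DMA_DRT_AUTO value without clobbering shared channel state. A condensed sketch of the selection logic, with hypothetical demo_* names and a placeholder DEMO_DRT_AUTO constant standing in for the driver's own:

#include <linux/dmaengine.h>
#include <linux/types.h>

#define DEMO_DRT_AUTO	0x8	/* stand-in for JZ_DMA_DRT_AUTO */

struct demo_chan {
	u32 transfer_type_tx, transfer_type_rx;
};

static u32 demo_pick_transfer_type(const struct demo_chan *chan,
				   enum dma_transaction_type type,
				   enum dma_transfer_direction dir)
{
	/* memcpy uses no request line, so it gets the automatic type */
	if (type == DMA_MEMCPY)
		return DEMO_DRT_AUTO;

	return dir == DMA_DEV_TO_MEM ? chan->transfer_type_rx
				     : chan->transfer_type_tx;
}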
+ */ + jz4780_dma_chan_enable(jzdma, 1); + jz4780_dma_chan_disable(jzdma, 1); + ret = platform_get_irq(pdev, 0); if (ret < 0) goto err_disable_clk; @@ -1011,12 +1035,36 @@ static const struct jz4780_dma_soc_data jz4760_dma_soc_data = { .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, }; +static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = { + .nb_channels = 2, + .transfer_ord_max = 6, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, +}; + +static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = { + .nb_channels = 3, + .transfer_ord_max = 6, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, +}; + static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = { .nb_channels = 5, .transfer_ord_max = 6, .flags = JZ_SOC_DATA_PER_CHAN_PM, }; +static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = { + .nb_channels = 2, + .transfer_ord_max = 6, + .flags = JZ_SOC_DATA_PER_CHAN_PM, +}; + +static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = { + .nb_channels = 3, + .transfer_ord_max = 6, + .flags = JZ_SOC_DATA_PER_CHAN_PM, +}; + static const struct jz4780_dma_soc_data jz4770_dma_soc_data = { .nb_channels = 6, .transfer_ord_max = 6, @@ -1045,7 +1093,11 @@ static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, { .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data }, + { .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data }, + { .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data }, { .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data }, + { .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data }, + { .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data }, { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data }, { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data }, { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data }, diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index d9f7c097cfd6..2cfa8458b51b 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1159,6 +1159,13 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_MEMCPY_SG"); + return -EIO; + } + if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index fab412349f7f..573ad8b86804 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -19,30 +19,6 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd); static void idxd_wq_disable_cleanup(struct idxd_wq *wq); /* Interrupt control bits */ -void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id) -{ - struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector); - - pci_msi_mask_irq(data); -} - -void idxd_mask_msix_vectors(struct idxd_device *idxd) -{ - struct pci_dev *pdev = idxd->pdev; - int msixcnt = pci_msix_vec_count(pdev); - int i; - - for (i = 0; i < msixcnt; i++) - idxd_mask_msix_vector(idxd, i); -} - -void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) -{ - struct irq_data 
*data = irq_get_irq_data(idxd->irq_entries[vec_id].vector); - - pci_msi_unmask_irq(data); -} - void idxd_unmask_error_interrupts(struct idxd_device *idxd) { union genctrl_reg genctrl; @@ -382,14 +358,24 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq) lockdep_assert_held(&wq->wq_lock); memset(wq->wqcfg, 0, idxd->wqcfg_size); wq->type = IDXD_WQT_NONE; - wq->size = 0; - wq->group = NULL; wq->threshold = 0; wq->priority = 0; wq->ats_dis = 0; + wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; clear_bit(WQ_FLAG_DEDICATED, &wq->flags); clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); memset(wq->name, 0, WQ_NAME_SIZE); + wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; + wq->max_batch_size = WQ_DEFAULT_MAX_BATCH; +} + +static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq) +{ + lockdep_assert_held(&wq->wq_lock); + + idxd_wq_disable_cleanup(wq); + wq->size = 0; + wq->group = NULL; } static void idxd_wq_ref_release(struct percpu_ref *ref) @@ -404,19 +390,31 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq) int rc; memset(&wq->wq_active, 0, sizeof(wq->wq_active)); - rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL); + rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, + PERCPU_REF_ALLOW_REINIT, GFP_KERNEL); if (rc < 0) return rc; reinit_completion(&wq->wq_dead); + reinit_completion(&wq->wq_resurrect); return 0; } -void idxd_wq_quiesce(struct idxd_wq *wq) +void __idxd_wq_quiesce(struct idxd_wq *wq) { + lockdep_assert_held(&wq->wq_lock); + reinit_completion(&wq->wq_resurrect); percpu_ref_kill(&wq->wq_active); + complete_all(&wq->wq_resurrect); wait_for_completion(&wq->wq_dead); } +void idxd_wq_quiesce(struct idxd_wq *wq) +{ + mutex_lock(&wq->wq_lock); + __idxd_wq_quiesce(wq); + mutex_unlock(&wq->wq_lock); +} + /* Device control bits */ static inline bool idxd_is_enabled(struct idxd_device *idxd) { @@ -572,7 +570,6 @@ void idxd_device_reset(struct idxd_device *idxd) idxd_device_clear_state(idxd); idxd->state = IDXD_DEV_DISABLED; idxd_unmask_error_interrupts(idxd); - idxd_msix_perm_setup(idxd); spin_unlock(&idxd->dev_lock); } @@ -681,9 +678,9 @@ static void idxd_groups_clear_state(struct idxd_device *idxd) memset(&group->grpcfg, 0, sizeof(group->grpcfg)); group->num_engines = 0; group->num_wqs = 0; - group->use_token_limit = false; - group->tokens_allowed = 0; - group->tokens_reserved = 0; + group->use_rdbuf_limit = false; + group->rdbufs_allowed = 0; + group->rdbufs_reserved = 0; group->tc_a = -1; group->tc_b = -1; } @@ -699,6 +696,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) if (wq->state == IDXD_WQ_ENABLED) { idxd_wq_disable_cleanup(wq); + idxd_wq_device_reset_cleanup(wq); wq->state = IDXD_WQ_DISABLED; } } @@ -711,36 +709,6 @@ void idxd_device_clear_state(struct idxd_device *idxd) idxd_device_wqs_clear_state(idxd); } -void idxd_msix_perm_setup(struct idxd_device *idxd) -{ - union msix_perm mperm; - int i, msixcnt; - - msixcnt = pci_msix_vec_count(idxd->pdev); - if (msixcnt < 0) - return; - - mperm.bits = 0; - mperm.pasid = idxd->pasid; - mperm.pasid_en = device_pasid_enabled(idxd); - for (i = 1; i < msixcnt; i++) - iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8); -} - -void idxd_msix_perm_clear(struct idxd_device *idxd) -{ - union msix_perm mperm; - int i, msixcnt; - - msixcnt = pci_msix_vec_count(idxd->pdev); - if (msixcnt < 0) - return; - - mperm.bits = 0; - for (i = 1; i < msixcnt; i++) - iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8); -} - static void idxd_group_config_write(struct idxd_group 
*group) { struct idxd_device *idxd = group->idxd; @@ -780,10 +748,10 @@ static int idxd_groups_config_write(struct idxd_device *idxd) int i; struct device *dev = &idxd->pdev->dev; - /* Setup bandwidth token limit */ - if (idxd->hw.gen_cap.config_en && idxd->token_limit) { + /* Setup bandwidth rdbuf limit */ + if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) { reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); - reg.token_limit = idxd->token_limit; + reg.rdbuf_limit = idxd->rdbuf_limit; iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); } @@ -827,15 +795,12 @@ static int idxd_wq_config_write(struct idxd_wq *wq) wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset); } + if (wq->size == 0 && wq->type != IDXD_WQT_NONE) + wq->size = WQ_DEFAULT_QUEUE_DEPTH; + /* byte 0-3 */ wq->wqcfg->wq_size = wq->size; - if (wq->size == 0) { - idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE; - dev_warn(dev, "Incorrect work queue size: 0\n"); - return -EINVAL; - } - /* bytes 4-7 */ wq->wqcfg->wq_thresh = wq->threshold; @@ -924,13 +889,12 @@ static void idxd_group_flags_setup(struct idxd_device *idxd) group->tc_b = group->grpcfg.flags.tc_b = 1; else group->grpcfg.flags.tc_b = group->tc_b; - group->grpcfg.flags.use_token_limit = group->use_token_limit; - group->grpcfg.flags.tokens_reserved = group->tokens_reserved; - if (group->tokens_allowed) - group->grpcfg.flags.tokens_allowed = - group->tokens_allowed; + group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit; + group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved; + if (group->rdbufs_allowed) + group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed; else - group->grpcfg.flags.tokens_allowed = idxd->max_tokens; + group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs; } } @@ -981,8 +945,6 @@ static int idxd_wqs_setup(struct idxd_device *idxd) if (!wq->group) continue; - if (!wq->size) - continue; if (wq_shared(wq) && !device_swq_supported(idxd)) { idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT; @@ -1123,7 +1085,7 @@ int idxd_device_load_config(struct idxd_device *idxd) int i, rc; reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); - idxd->token_limit = reg.token_limit; + idxd->rdbuf_limit = reg.rdbuf_limit; for (i = 0; i < idxd->max_groups; i++) { struct idxd_group *group = idxd->groups[i]; @@ -1142,6 +1104,106 @@ int idxd_device_load_config(struct idxd_device *idxd) return 0; } +static void idxd_flush_pending_descs(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *itr; + struct llist_node *head; + LIST_HEAD(flist); + enum idxd_complete_type ctype; + + spin_lock(&ie->list_lock); + head = llist_del_all(&ie->pending_llist); + if (head) { + llist_for_each_entry_safe(desc, itr, head, llnode) + list_add_tail(&desc->list, &ie->work_list); + } + + list_for_each_entry_safe(desc, itr, &ie->work_list, list) + list_move_tail(&desc->list, &flist); + spin_unlock(&ie->list_lock); + + list_for_each_entry_safe(desc, itr, &flist, list) { + list_del(&desc->list); + ctype = desc->completion->status ? 
IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT; + idxd_dma_complete_txd(desc, ctype, true); + } +} + +static void idxd_device_set_perm_entry(struct idxd_device *idxd, + struct idxd_irq_entry *ie) +{ + union msix_perm mperm; + + if (ie->pasid == INVALID_IOASID) + return; + + mperm.bits = 0; + mperm.pasid = ie->pasid; + mperm.pasid_en = 1; + iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8); +} + +static void idxd_device_clear_perm_entry(struct idxd_device *idxd, + struct idxd_irq_entry *ie) +{ + iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8); +} + +void idxd_wq_free_irq(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_irq_entry *ie = &wq->ie; + + synchronize_irq(ie->vector); + free_irq(ie->vector, ie); + idxd_flush_pending_descs(ie); + if (idxd->request_int_handles) + idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX); + idxd_device_clear_perm_entry(idxd, ie); + ie->vector = -1; + ie->int_handle = INVALID_INT_HANDLE; + ie->pasid = INVALID_IOASID; +} + +int idxd_wq_request_irq(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct idxd_irq_entry *ie; + int rc; + + ie = &wq->ie; + ie->vector = pci_irq_vector(pdev, ie->id); + ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID; + idxd_device_set_perm_entry(idxd, ie); + + rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie); + if (rc < 0) { + dev_err(dev, "Failed to request irq %d.\n", ie->vector); + goto err_irq; + } + + if (idxd->request_int_handles) { + rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle, + IDXD_IRQ_MSIX); + if (rc < 0) + goto err_int_handle; + } else { + ie->int_handle = ie->id; + } + + return 0; + +err_int_handle: + ie->int_handle = INVALID_INT_HANDLE; + free_irq(ie->vector, ie); +err_irq: + idxd_device_clear_perm_entry(idxd, ie); + ie->pasid = INVALID_IOASID; + return rc; +} + int __drv_enable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index c39e9483206a..bfff59617d04 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -21,20 +21,27 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) } void idxd_dma_complete_txd(struct idxd_desc *desc, - enum idxd_complete_type comp_type) + enum idxd_complete_type comp_type, + bool free_desc) { + struct idxd_device *idxd = desc->wq->idxd; struct dma_async_tx_descriptor *tx; struct dmaengine_result res; int complete = 1; - if (desc->completion->status == DSA_COMP_SUCCESS) + if (desc->completion->status == DSA_COMP_SUCCESS) { res.result = DMA_TRANS_NOERROR; - else if (desc->completion->status) + } else if (desc->completion->status) { + if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT && + desc->completion->status == DSA_COMP_INT_HANDLE_INVAL && + idxd_queue_int_handle_resubmit(desc)) + return; res.result = DMA_TRANS_WRITE_FAILED; - else if (comp_type == IDXD_COMPLETE_ABORT) + } else if (comp_type == IDXD_COMPLETE_ABORT) { res.result = DMA_TRANS_ABORTED; - else + } else { complete = 0; + } tx = &desc->txd; if (complete && tx->cookie) { @@ -44,6 +51,9 @@ void idxd_dma_complete_txd(struct idxd_desc *desc, tx->callback = NULL; tx->callback_result = NULL; } + + if (free_desc) + idxd_free_desc(desc->wq, desc); } static void op_flag_setup(unsigned long flags, u32 *desc_flags) @@ -153,8 +163,10 @@ static dma_cookie_t idxd_dma_tx_submit(struct 
dma_async_tx_descriptor *tx) cookie = dma_cookie_assign(tx); rc = idxd_submit_desc(wq, desc); - if (rc < 0) + if (rc < 0) { + idxd_free_desc(wq, desc); return rc; + } return cookie; } @@ -277,6 +289,14 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_KERNEL; + + rc = idxd_wq_request_irq(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; + dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); + goto err_irq; + } + rc = __drv_enable_wq(wq); if (rc < 0) { dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); @@ -310,13 +330,15 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) return 0; err_dma: - idxd_wq_quiesce(wq); + __idxd_wq_quiesce(wq); percpu_ref_exit(&wq->wq_active); err_ref: idxd_wq_free_resources(wq); err_res_alloc: __drv_disable_wq(wq); err: + idxd_wq_free_irq(wq); +err_irq: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); return rc; @@ -327,11 +349,13 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); mutex_lock(&wq->wq_lock); - idxd_wq_quiesce(wq); + __idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); idxd_wq_free_resources(wq); __drv_disable_wq(wq); percpu_ref_exit(&wq->wq_active); + idxd_wq_free_irq(wq); + wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); } diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 0cf8d3145870..da72eb15f610 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -10,6 +10,7 @@ #include <linux/cdev.h> #include <linux/idr.h> #include <linux/pci.h> +#include <linux/ioasid.h> #include <linux/perf_event.h> #include <uapi/linux/idxd.h> #include "registers.h" @@ -51,6 +52,9 @@ enum idxd_type { #define IDXD_NAME_SIZE 128 #define IDXD_PMU_EVENT_MAX 64 +#define IDXD_ENQCMDS_RETRIES 32 +#define IDXD_ENQCMDS_MAX_RETRIES 64 + struct idxd_device_driver { const char *name; enum idxd_dev_type *type; @@ -64,8 +68,8 @@ extern struct idxd_device_driver idxd_drv; extern struct idxd_device_driver idxd_dmaengine_drv; extern struct idxd_device_driver idxd_user_drv; +#define INVALID_INT_HANDLE -1 struct idxd_irq_entry { - struct idxd_device *idxd; int id; int vector; struct llist_head pending_llist; @@ -75,6 +79,8 @@ struct idxd_irq_entry { * and irq thread processing error descriptor. 
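The dma.c hunk above funnels every completion through idxd_dma_complete_txd() and gives it an explicit free_desc flag, in place of the unconditional complete_desc() wrapper that the idxd.h hunk below deletes. A hedged sketch of that helper shape, with hypothetical demo_* names (the cookie and callback helpers are from the dmaengine core headers):

#include <linux/dmaengine.h>
#include "../dmaengine.h"	/* dma_cookie_complete(), callback invoke */

struct demo_desc { struct dma_async_tx_descriptor txd; };
static void demo_free_desc(struct demo_desc *desc);

static void demo_complete_txd(struct demo_desc *desc,
			      enum dmaengine_tx_result result,
			      bool free_desc)
{
	struct dmaengine_result res = { .result = result };
	struct dma_async_tx_descriptor *tx = &desc->txd;

	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
	}

	/*
	 * Freeing is the caller's decision: abort paths that still walk
	 * or resubmit the descriptor afterwards pass free_desc = false,
	 * which is exactly what dropping the old wrapper buys.
	 */
	if (free_desc)
		demo_free_desc(desc);
}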
*/ spinlock_t list_lock; + int int_handle; + ioasid_t pasid; }; struct idxd_group { @@ -84,9 +90,9 @@ struct idxd_group { int id; int num_engines; int num_wqs; - bool use_token_limit; - u8 tokens_allowed; - u8 tokens_reserved; + bool use_rdbuf_limit; + u8 rdbufs_allowed; + u8 rdbufs_reserved; int tc_a; int tc_b; }; @@ -145,6 +151,10 @@ struct idxd_cdev { #define WQ_NAME_SIZE 1024 #define WQ_TYPE_SIZE 10 +#define WQ_DEFAULT_QUEUE_DEPTH 16 +#define WQ_DEFAULT_MAX_XFER SZ_2M +#define WQ_DEFAULT_MAX_BATCH 32 + enum idxd_op_type { IDXD_OP_BLOCK = 0, IDXD_OP_NONBLOCK = 1, @@ -164,13 +174,16 @@ struct idxd_dma_chan { struct idxd_wq { void __iomem *portal; u32 portal_offset; + unsigned int enqcmds_retries; struct percpu_ref wq_active; struct completion wq_dead; + struct completion wq_resurrect; struct idxd_dev idxd_dev; struct idxd_cdev *idxd_cdev; struct wait_queue_head err_queue; struct idxd_device *idxd; int id; + struct idxd_irq_entry ie; enum idxd_wq_type type; struct idxd_group *group; int client_count; @@ -251,6 +264,7 @@ struct idxd_device { int id; int major; u32 cmd_status; + struct idxd_irq_entry ie; /* misc irq, msix 0 */ struct pci_dev *pdev; void __iomem *reg_base; @@ -266,6 +280,8 @@ struct idxd_device { unsigned int pasid; int num_groups; + int irq_cnt; + bool request_int_handles; u32 msix_perm_offset; u32 wqcfg_offset; @@ -276,24 +292,20 @@ struct idxd_device { u32 max_batch_size; int max_groups; int max_engines; - int max_tokens; + int max_rdbufs; int max_wqs; int max_wq_size; - int token_limit; - int nr_tokens; /* non-reserved tokens */ + int rdbuf_limit; + int nr_rdbufs; /* non-reserved read buffers */ unsigned int wqcfg_size; union sw_err_reg sw_err; wait_queue_head_t cmd_waitq; - int num_wq_irqs; - struct idxd_irq_entry *irq_entries; struct idxd_dma_dev *idxd_dma; struct workqueue_struct *wq; struct work_struct work; - int *int_handles; - struct idxd_pmu *idxd_pmu; }; @@ -380,6 +392,21 @@ static inline void idxd_dev_set_type(struct idxd_dev *idev, int type) idev->type = type; } +static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx) +{ + return (idx == 0) ? 
&idxd->ie : &idxd->wqs[idx - 1]->ie; +} + +static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie) +{ + return container_of(ie, struct idxd_wq, ie); +} + +static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie) +{ + return container_of(ie, struct idxd_device, ie); +} + extern struct bus_type dsa_bus_type; extern bool support_enqcmd; @@ -518,17 +545,13 @@ void idxd_unregister_devices(struct idxd_device *idxd); int idxd_register_driver(void); void idxd_unregister_driver(void); void idxd_wqs_quiesce(struct idxd_device *idxd); +bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc); /* device interrupt control */ -void idxd_msix_perm_setup(struct idxd_device *idxd); -void idxd_msix_perm_clear(struct idxd_device *idxd); irqreturn_t idxd_misc_thread(int vec, void *data); irqreturn_t idxd_wq_thread(int irq, void *data); void idxd_mask_error_interrupts(struct idxd_device *idxd); void idxd_unmask_error_interrupts(struct idxd_device *idxd); -void idxd_mask_msix_vectors(struct idxd_device *idxd); -void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id); -void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id); /* device control */ int idxd_register_idxd_drv(void); @@ -564,13 +587,17 @@ int idxd_wq_map_portal(struct idxd_wq *wq); void idxd_wq_unmap_portal(struct idxd_wq *wq); int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid); int idxd_wq_disable_pasid(struct idxd_wq *wq); +void __idxd_wq_quiesce(struct idxd_wq *wq); void idxd_wq_quiesce(struct idxd_wq *wq); int idxd_wq_init_percpu_ref(struct idxd_wq *wq); +void idxd_wq_free_irq(struct idxd_wq *wq); +int idxd_wq_request_irq(struct idxd_wq *wq); /* submission */ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); +int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); /* dmaengine */ int idxd_register_dma_device(struct idxd_device *idxd); @@ -579,7 +606,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq); void idxd_unregister_dma_channel(struct idxd_wq *wq); void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); void idxd_dma_complete_txd(struct idxd_desc *desc, - enum idxd_complete_type comp_type); + enum idxd_complete_type comp_type, bool free_desc); /* cdev */ int idxd_cdev_register(void); @@ -603,10 +630,4 @@ static inline void perfmon_init(void) {} static inline void perfmon_exit(void) {} #endif -static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason) -{ - idxd_dma_complete_txd(desc, reason); - idxd_free_desc(desc->wq, desc); -} - #endif diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 7bf03f371ce1..08a5f4310188 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -72,7 +72,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; - struct idxd_irq_entry *irq_entry; + struct idxd_irq_entry *ie; int i, msixcnt; int rc = 0; @@ -81,6 +81,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) dev_err(dev, "Not MSI-X interrupt capable.\n"); return -ENOSPC; } + idxd->irq_cnt = msixcnt; rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX); if (rc != msixcnt) { @@ -89,87 +90,34 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) } dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); - /* - * We implement 1 completion list per 
MSI-X entry except for - * entry 0, which is for errors and others. - */ - idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry), - GFP_KERNEL, dev_to_node(dev)); - if (!idxd->irq_entries) { - rc = -ENOMEM; - goto err_irq_entries; - } - - for (i = 0; i < msixcnt; i++) { - idxd->irq_entries[i].id = i; - idxd->irq_entries[i].idxd = idxd; - idxd->irq_entries[i].vector = pci_irq_vector(pdev, i); - spin_lock_init(&idxd->irq_entries[i].list_lock); - } - - idxd_msix_perm_setup(idxd); - irq_entry = &idxd->irq_entries[0]; - rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread, - 0, "idxd-misc", irq_entry); + ie = idxd_get_ie(idxd, 0); + ie->vector = pci_irq_vector(pdev, 0); + rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie); if (rc < 0) { dev_err(dev, "Failed to allocate misc interrupt.\n"); goto err_misc_irq; } + dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector); - dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector); - - /* first MSI-X entry is not for wq interrupts */ - idxd->num_wq_irqs = msixcnt - 1; + for (i = 0; i < idxd->max_wqs; i++) { + int msix_idx = i + 1; - for (i = 1; i < msixcnt; i++) { - irq_entry = &idxd->irq_entries[i]; + ie = idxd_get_ie(idxd, msix_idx); + ie->id = msix_idx; + ie->int_handle = INVALID_INT_HANDLE; + ie->pasid = INVALID_IOASID; - init_llist_head(&idxd->irq_entries[i].pending_llist); - INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); - rc = request_threaded_irq(irq_entry->vector, NULL, - idxd_wq_thread, 0, "idxd-portal", irq_entry); - if (rc < 0) { - dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector); - goto err_wq_irqs; - } - - dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector); - if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) { - /* - * The MSIX vector enumeration starts at 1 with vector 0 being the - * misc interrupt that handles non I/O completion events. The - * interrupt handles are for IMS enumeration on guest. The misc - * interrupt vector does not require a handle and therefore we start - * the int_handles at index 0. Since 'i' starts at 1, the first - * int_handles index will be 0. 
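The init.c rework around this hunk replaces the kcalloc'ed irq_entries array with one idxd_irq_entry embedded per WQ, but the MSI-X bring-up skeleton it keeps is a standard shape. A hedged sketch with hypothetical demo names:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int demo_setup_misc_irq(struct pci_dev *pdev,
			       irq_handler_t thread_fn, void *ctx)
{
	int nvec, rc;

	nvec = pci_msix_vec_count(pdev);
	if (nvec <= 0)
		return -ENOSPC;

	/* min == max: all-or-nothing allocation of the MSI-X vectors */
	rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (rc != nvec)
		return rc < 0 ? rc : -ENOSPC;

	/* vector 0 carries misc (non-I/O-completion) events */
	rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL, thread_fn,
				  0, "demo-misc", ctx);
	if (rc < 0)
		pci_free_irq_vectors(pdev);
	return rc;
}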
- */ - rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1], - IDXD_IRQ_MSIX); - if (rc < 0) { - free_irq(irq_entry->vector, irq_entry); - goto err_wq_irqs; - } - dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]); - } + spin_lock_init(&ie->list_lock); + init_llist_head(&ie->pending_llist); + INIT_LIST_HEAD(&ie->work_list); } idxd_unmask_error_interrupts(idxd); return 0; - err_wq_irqs: - while (--i >= 0) { - irq_entry = &idxd->irq_entries[i]; - free_irq(irq_entry->vector, irq_entry); - if (i != 0) - idxd_device_release_int_handle(idxd, - idxd->int_handles[i], IDXD_IRQ_MSIX); - } err_misc_irq: - /* Disable error interrupt generation */ idxd_mask_error_interrupts(idxd); - idxd_msix_perm_clear(idxd); - err_irq_entries: pci_free_irq_vectors(pdev); dev_err(dev, "No usable interrupts\n"); return rc; @@ -178,26 +126,16 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) static void idxd_cleanup_interrupts(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; - struct idxd_irq_entry *irq_entry; - int i, msixcnt; + struct idxd_irq_entry *ie; + int msixcnt; msixcnt = pci_msix_vec_count(pdev); if (msixcnt <= 0) return; - irq_entry = &idxd->irq_entries[0]; - free_irq(irq_entry->vector, irq_entry); - - for (i = 1; i < msixcnt; i++) { - - irq_entry = &idxd->irq_entries[i]; - if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) - idxd_device_release_int_handle(idxd, idxd->int_handles[i], - IDXD_IRQ_MSIX); - free_irq(irq_entry->vector, irq_entry); - } - + ie = idxd_get_ie(idxd, 0); idxd_mask_error_interrupts(idxd); + free_irq(ie->vector, ie); pci_free_irq_vectors(pdev); } @@ -237,8 +175,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd) mutex_init(&wq->wq_lock); init_waitqueue_head(&wq->err_queue); init_completion(&wq->wq_dead); - wq->max_xfer_bytes = idxd->max_xfer_bytes; - wq->max_batch_size = idxd->max_batch_size; + init_completion(&wq->wq_resurrect); + wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; + wq->max_batch_size = WQ_DEFAULT_MAX_BATCH; + wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { put_device(conf_dev); @@ -379,13 +319,6 @@ static int idxd_setup_internals(struct idxd_device *idxd) init_waitqueue_head(&idxd->cmd_waitq); - if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) { - idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL, - dev_to_node(dev)); - if (!idxd->int_handles) - return -ENOMEM; - } - rc = idxd_setup_wqs(idxd); if (rc < 0) goto err_wqs; @@ -416,7 +349,6 @@ static int idxd_setup_internals(struct idxd_device *idxd) for (i = 0; i < idxd->max_wqs; i++) put_device(wq_confdev(idxd->wqs[i])); err_wqs: - kfree(idxd->int_handles); return rc; } @@ -451,6 +383,10 @@ static void idxd_read_caps(struct idxd_device *idxd) dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap); } + /* reading command capabilities */ + if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) + idxd->request_int_handles = true; + idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; @@ -464,9 +400,9 @@ static void idxd_read_caps(struct idxd_device *idxd) dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); idxd->max_groups = idxd->hw.group_cap.num_groups; dev_dbg(dev, "max groups: %u\n", idxd->max_groups); - idxd->max_tokens = idxd->hw.group_cap.total_tokens; - dev_dbg(dev, "max tokens: %u\n", 
idxd->max_tokens); - idxd->nr_tokens = idxd->max_tokens; + idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; + dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); + idxd->nr_rdbufs = idxd->max_rdbufs; /* read engine capabilities */ idxd->hw.engine_cap.bits = @@ -611,8 +547,6 @@ static int idxd_probe(struct idxd_device *idxd) if (rc) goto err_config; - dev_dbg(dev, "IDXD interrupt setup complete.\n"); - idxd->major = idxd_cdev_get_major(idxd); rc = perfmon_pmu_init(idxd); @@ -708,32 +642,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return rc; } -static void idxd_flush_pending_llist(struct idxd_irq_entry *ie) -{ - struct idxd_desc *desc, *itr; - struct llist_node *head; - - head = llist_del_all(&ie->pending_llist); - if (!head) - return; - - llist_for_each_entry_safe(desc, itr, head, llnode) { - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); - idxd_free_desc(desc->wq, desc); - } -} - -static void idxd_flush_work_list(struct idxd_irq_entry *ie) -{ - struct idxd_desc *desc, *iter; - - list_for_each_entry_safe(desc, iter, &ie->work_list, list) { - list_del(&desc->list); - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); - idxd_free_desc(desc->wq, desc); - } -} - void idxd_wqs_quiesce(struct idxd_device *idxd) { struct idxd_wq *wq; @@ -746,47 +654,19 @@ void idxd_wqs_quiesce(struct idxd_device *idxd) } } -static void idxd_release_int_handles(struct idxd_device *idxd) -{ - struct device *dev = &idxd->pdev->dev; - int i, rc; - - for (i = 0; i < idxd->num_wq_irqs; i++) { - if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) { - rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i], - IDXD_IRQ_MSIX); - if (rc < 0) - dev_warn(dev, "irq handle %d release failed\n", - idxd->int_handles[i]); - else - dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i]); - } - } -} - static void idxd_shutdown(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); - int rc, i; struct idxd_irq_entry *irq_entry; - int msixcnt = pci_msix_vec_count(pdev); + int rc; rc = idxd_device_disable(idxd); if (rc) dev_err(&pdev->dev, "Disabling device failed\n"); - dev_dbg(&pdev->dev, "%s called\n", __func__); - idxd_mask_msix_vectors(idxd); + irq_entry = &idxd->ie; + synchronize_irq(irq_entry->vector); idxd_mask_error_interrupts(idxd); - - for (i = 0; i < msixcnt; i++) { - irq_entry = &idxd->irq_entries[i]; - synchronize_irq(irq_entry->vector); - if (i == 0) - continue; - idxd_flush_pending_llist(irq_entry); - idxd_flush_work_list(irq_entry); - } flush_workqueue(idxd->wq); } @@ -794,8 +674,6 @@ static void idxd_remove(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); struct idxd_irq_entry *irq_entry; - int msixcnt = pci_msix_vec_count(pdev); - int i; idxd_unregister_devices(idxd); /* @@ -811,12 +689,8 @@ static void idxd_remove(struct pci_dev *pdev) if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - for (i = 0; i < msixcnt; i++) { - irq_entry = &idxd->irq_entries[i]; - free_irq(irq_entry->vector, irq_entry); - } - idxd_msix_perm_clear(idxd); - idxd_release_int_handles(idxd); + irq_entry = idxd_get_ie(idxd, 0); + free_irq(irq_entry->vector, irq_entry); pci_free_irq_vectors(pdev); pci_iounmap(pdev, idxd->reg_base); iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index cf2c8bc4f147..743ead5ebc57 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -6,6 +6,7 @@ #include <linux/pci.h> #include 
<linux/io-64-nonatomic-lo-hi.h> #include <linux/dmaengine.h> +#include <linux/delay.h> #include <uapi/linux/idxd.h> #include "../dmaengine.h" #include "idxd.h" @@ -22,6 +23,16 @@ struct idxd_fault { struct idxd_device *idxd; }; +struct idxd_resubmit { + struct work_struct work; + struct idxd_desc *desc; +}; + +struct idxd_int_handle_revoke { + struct work_struct work; + struct idxd_device *idxd; +}; + static void idxd_device_reinit(struct work_struct *work) { struct idxd_device *idxd = container_of(work, struct idxd_device, work); @@ -55,6 +66,162 @@ static void idxd_device_reinit(struct work_struct *work) idxd_device_clear_state(idxd); }
+/* + * The function sends a drain descriptor for the interrupt handle. The drain ensures + * all descriptors with this interrupt handle are flushed and the interrupt + * will allow the cleanup of the outstanding descriptors. + */ +static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie) +{ + struct idxd_wq *wq = ie_to_wq(ie); + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + struct dsa_hw_desc desc = {}; + void __iomem *portal; + int rc; + + /* Issue a simple drain operation with interrupt but no completion record */ + desc.flags = IDXD_OP_FLAG_RCI; + desc.opcode = DSA_OPCODE_DRAIN; + desc.priv = 1; + + if (ie->pasid != INVALID_IOASID) + desc.pasid = ie->pasid; + desc.int_handle = ie->int_handle; + portal = idxd_wq_portal_addr(wq); + + /* + * The wmb() makes sure that the descriptor is all there before we + * issue. + */ + wmb(); + if (wq_dedicated(wq)) { + iosubmit_cmds512(portal, &desc, 1); + } else { + rc = idxd_enqcmds(wq, portal, &desc); + /* This should not fail unless hardware failed. */ + if (rc < 0) + dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id); + } +}
+ +static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie) +{ + LIST_HEAD(flist); + struct idxd_desc *d, *t; + struct llist_node *head; + + spin_lock(&ie->list_lock); + head = llist_del_all(&ie->pending_llist); + if (head) { + llist_for_each_entry_safe(d, t, head, llnode) + list_add_tail(&d->list, &ie->work_list); + } + + list_for_each_entry_safe(d, t, &ie->work_list, list) { + if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL) + list_move_tail(&d->list, &flist); + } + spin_unlock(&ie->list_lock); + + list_for_each_entry_safe(d, t, &flist, list) { + list_del(&d->list); + idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true); + } +}
+ +static void idxd_int_handle_revoke(struct work_struct *work) +{ + struct idxd_int_handle_revoke *revoke = + container_of(work, struct idxd_int_handle_revoke, work); + struct idxd_device *idxd = revoke->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + int i, new_handle, rc; + + if (!idxd->request_int_handles) { + kfree(revoke); + dev_warn(dev, "Unexpected int handle refresh interrupt.\n"); + return; + } + + /* + * The loop attempts to acquire a new interrupt handle for all interrupt + * vectors that support a handle. If a new interrupt handle is acquired and the + * wq is kernel type, the driver will kill the percpu_ref to pause all + * ongoing descriptor submissions. The interrupt handle is then changed. + * After the change, the percpu_ref is revived and all the pending submissions + * are woken to try again. A drain is sent for the interrupt handle + * at the end to make sure all invalid int handle descriptors are processed. + */ + for (i = 1; i < idxd->irq_cnt; i++) { + struct idxd_irq_entry *ie = idxd_get_ie(idxd, i); + struct idxd_wq *wq = ie_to_wq(ie); + + if (ie->int_handle == INVALID_INT_HANDLE) + continue; + + rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX); + if (rc < 0) { + dev_warn(dev, "get int handle %d failed: %d\n", i, rc); + /* + * Failed to acquire a new interrupt handle. Kill the WQ + * and release all the pending submitters. The submitters will + * get an error return code and handle it appropriately. + */ + ie->int_handle = INVALID_INT_HANDLE; + idxd_wq_quiesce(wq); + idxd_abort_invalid_int_handle_descs(ie); + continue; + } + + /* No change in interrupt handle, nothing needs to be done */ + if (ie->int_handle == new_handle) + continue; + + if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL) { + /* + * All the MSIX interrupts are allocated at once during probe. + * Therefore we need to update all interrupts even if the WQ + * isn't supporting interrupt operations. + */ + ie->int_handle = new_handle; + continue; + } + + mutex_lock(&wq->wq_lock); + reinit_completion(&wq->wq_resurrect); + + /* Kill percpu_ref to pause additional descriptor submissions */ + percpu_ref_kill(&wq->wq_active); + + /* Wait for all submitters to quiesce before we change the interrupt handle */ + wait_for_completion(&wq->wq_dead); + + ie->int_handle = new_handle; + + /* Revive percpu ref and wake up all the waiting submitters */ + percpu_ref_reinit(&wq->wq_active); + complete_all(&wq->wq_resurrect); + mutex_unlock(&wq->wq_lock); + + /* + * The delay here waits for all MOVDIR64B submissions issued + * before percpu_ref_kill() to reach the PCIe domain before the + * drain is issued. The driver + * needs to ensure that the drain descriptor issued does not pass + * all the other issued descriptors that contain the invalid + * interrupt handle, in order to ensure that the drain descriptor's + * interrupt will allow the cleanup of all the descriptors with an + * invalid interrupt handle. + */ + if (wq_dedicated(wq)) + udelay(100); + idxd_int_handle_revoke_drain(ie); + } + kfree(revoke); +} +
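The kill/reinit sequence in idxd_int_handle_revoke() above is a reusable pause-and-resume pattern for percpu_ref-guarded submission paths. A condensed sketch under hypothetical demo_* names; it assumes the ref was created with PERCPU_REF_ALLOW_REINIT (which the device.c hunk above now passes to percpu_ref_init()) and that the ref's release callback completes wq->dead:

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/percpu-refcount.h>

struct demo_wq {
	struct mutex lock;
	struct percpu_ref active;
	struct completion dead, resurrect;
	int int_handle;
};

static void demo_swap_handle(struct demo_wq *wq, int new_handle)
{
	mutex_lock(&wq->lock);
	reinit_completion(&wq->resurrect);

	percpu_ref_kill(&wq->active);	/* new submitters fail tryget_live */
	wait_for_completion(&wq->dead);	/* release callback: last ref gone */

	wq->int_handle = new_handle;	/* safe: nothing is submitting */

	percpu_ref_reinit(&wq->active);	/* re-arm the reference */
	complete_all(&wq->resurrect);	/* wake submitters parked on it */
	mutex_unlock(&wq->lock);
}

This pairs with the idxd_submit_desc() change in submit.c further down, where a failed percpu_ref_tryget_live() now waits on wq_resurrect and retries once before giving up with -ENXIO.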
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) { struct device *dev = &idxd->pdev->dev; @@ -101,6 +268,23 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) err = true; } + if (cause & IDXD_INTC_INT_HANDLE_REVOKED) { + struct idxd_int_handle_revoke *revoke; + + val |= IDXD_INTC_INT_HANDLE_REVOKED; + + revoke = kzalloc(sizeof(*revoke), GFP_ATOMIC); + if (revoke) { + revoke->idxd = idxd; + INIT_WORK(&revoke->work, idxd_int_handle_revoke); + queue_work(idxd->wq, &revoke->work); + } else { + dev_err(dev, "Failed to allocate work for int handle revoke\n"); + idxd_wqs_quiesce(idxd); + } + } + if (cause & IDXD_INTC_CMD) { val |= IDXD_INTC_CMD; complete(idxd->cmd_done); @@ -157,7 +341,7 @@ halt: irqreturn_t idxd_misc_thread(int vec, void *data) { struct idxd_irq_entry *irq_entry = data; - struct idxd_device *idxd = irq_entry->idxd; + struct idxd_device *idxd = ie_to_idxd(irq_entry); int rc; u32 cause; @@ -177,6 +361,51 @@ irqreturn_t idxd_misc_thread(int vec, void *data) return IRQ_HANDLED; }
+static void idxd_int_handle_resubmit_work(struct work_struct *work) +{ + struct idxd_resubmit *irw = container_of(work, struct idxd_resubmit, work); + struct idxd_desc *desc = irw->desc; + struct idxd_wq *wq = desc->wq; + int rc; + + desc->completion->status = 0; + rc = idxd_submit_desc(wq, desc); + if (rc < 0) { + dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n", + desc->id, wq->id); + /* + * If the error is not -EAGAIN, it means the submission failed because + * the wq has been killed, not because ENQCMDS failed. Here the driver + * needs to notify the submitter of the failure by reporting abort status. + * + * -EAGAIN comes from ENQCMDS failure. idxd_submit_desc() will handle the + * abort. + */ + if (rc != -EAGAIN) { + desc->completion->status = IDXD_COMP_DESC_ABORT; + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false); + } + idxd_free_desc(wq, desc); + } + kfree(irw); +} + +bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc) +{ + struct idxd_wq *wq = desc->wq; + struct idxd_device *idxd = wq->idxd; + struct idxd_resubmit *irw; + + irw = kzalloc(sizeof(*irw), GFP_KERNEL); + if (!irw) + return false; + + irw->desc = desc; + INIT_WORK(&irw->work, idxd_int_handle_resubmit_work); + queue_work(idxd->wq, &irw->work); + return true; +} + static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry) { struct idxd_desc *desc, *t; @@ -195,11 +424,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry) * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { - complete_desc(desc, IDXD_COMPLETE_ABORT); + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true); continue; } - complete_desc(desc, IDXD_COMPLETE_NORMAL); + idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true); } }
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 262c8220adbd..aa642aecdc0b 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -64,9 +64,9 @@ union wq_cap_reg { union group_cap_reg { struct { u64 num_groups:8; - u64 total_tokens:8; - u64 token_en:1; - u64 token_limit:1; + u64 total_rdbufs:8; /* formerly total_tokens */ + u64 rdbuf_ctrl:1; /* formerly token_en */ + u64 rdbuf_limit:1; /* formerly token_limit */ u64 rsvd:46; }; u64 bits; @@ -110,7 +110,7 @@ union offsets_reg { #define IDXD_GENCFG_OFFSET 0x80 union gencfg_reg { struct { - u32 token_limit:8; + u32 rdbuf_limit:8; u32 rsvd:4; u32 user_int_en:1; u32 rsvd2:19; @@ -158,6 +158,7 @@ enum idxd_device_reset_type { #define IDXD_INTC_OCCUPY 0x04 #define IDXD_INTC_PERFMON_OVFL 0x08 #define IDXD_INTC_HALT_STATE 0x10 +#define IDXD_INTC_INT_HANDLE_REVOKED 0x80000000 #define IDXD_CMD_OFFSET 0xa0 union idxd_command_reg { @@ -287,10 +288,10 @@ union group_flags { u32 tc_a:3; u32 tc_b:3; u32 rsvd:1; - u32 use_token_limit:1; - u32 tokens_reserved:8; + u32 use_rdbuf_limit:1; + u32 rdbufs_reserved:8; u32 rsvd2:4; - u32 tokens_allowed:8; + u32 rdbufs_allowed:8; u32 rsvd3:4; }; u32 bits;
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c index 83452fbbb168..e289fd48711a 100644 --- a/drivers/dma/idxd/submit.c +++ b/drivers/dma/idxd/submit.c @@ -21,15 +21,6 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu) if (device_pasid_enabled(idxd)) desc->hw->pasid = idxd->pasid; - /* - * On host, MSIX vecotr 0 is used for misc interrupt. Therefore when we match - * vector 1:1 to the WQ id, we need to add 1 - */ - if (!idxd->int_handles) - desc->hw->int_handle = wq->id + 1; - else - desc->hw->int_handle = idxd->int_handles[wq->id]; - return desc; } @@ -134,35 +125,58 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, spin_unlock(&ie->list_lock); if (found) - complete_desc(found, IDXD_COMPLETE_ABORT); + idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false); /* - * complete_desc() will return desc to allocator and the desc can be - * acquired by a different process and the desc->list can be modified. - * Delete desc from list so the list trasversing does not get corrupted - * by the other process. + * completing the descriptor will return desc to the allocator and + * the desc can be acquired by a different process and the + * desc->list can be modified. Delete desc from list so the + * list traversing does not get corrupted by the other process. */ list_for_each_entry_safe(d, t, &flist, list) { list_del_init(&d->list); - complete_desc(d, IDXD_COMPLETE_NORMAL); + idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true); } } +/* + * ENQCMDS typically fails when the WQ is inactive or busy. On host submission, the driver + * has better control of the number of descriptors being submitted to a shared wq by limiting + * the number of driver allocated descriptors to the wq size. However, when the swq is + * exported to a guest kernel, it may be shared with multiple guest kernels. This means + * the likelihood of getting busy returned on the swq when submitting goes significantly up.
+ * Having a tunable retry mechanism allows the driver to keep trying for a bit before giving + * up. The sysfs knob can be tuned by the system administrator. + */ +int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc) +{ + int rc, retries = 0; + + do { + rc = enqcmds(portal, desc); + if (rc == 0) + break; + cpu_relax(); + } while (retries++ < wq->enqcmds_retries); + + return rc; +} + int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) { struct idxd_device *idxd = wq->idxd; struct idxd_irq_entry *ie = NULL; + u32 desc_flags = desc->hw->flags; void __iomem *portal; int rc; - if (idxd->state != IDXD_DEV_ENABLED) { - idxd_free_desc(wq, desc); + if (idxd->state != IDXD_DEV_ENABLED) return -EIO; - } if (!percpu_ref_tryget_live(&wq->wq_active)) { - idxd_free_desc(wq, desc); - return -ENXIO; + wait_for_completion(&wq->wq_resurrect); + if (!percpu_ref_tryget_live(&wq->wq_active)) + return -ENXIO; } portal = idxd_wq_portal_addr(wq); @@ -178,28 +192,21 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) * Pending the descriptor to the lockless list for the irq_entry * that we designated the descriptor to. */ - if (desc->hw->flags & IDXD_OP_FLAG_RCI) { - ie = &idxd->irq_entries[wq->id + 1]; + if (desc_flags & IDXD_OP_FLAG_RCI) { + ie = &wq->ie; + desc->hw->int_handle = ie->int_handle; llist_add(&desc->llnode, &ie->pending_llist); } if (wq_dedicated(wq)) { iosubmit_cmds512(portal, desc->hw, 1); } else { - /* - * It's not likely that we would receive queue full rejection - * since the descriptor allocation gates at wq size. If we - * receive a -EAGAIN, that means something went wrong such as the - * device is not accepting descriptor at all. - */ - rc = enqcmds(portal, desc->hw); + rc = idxd_enqcmds(wq, portal, desc->hw); if (rc < 0) { percpu_ref_put(&wq->wq_active); /* abort operation frees the descriptor */ if (ie) llist_abort_desc(wq, ie, desc); - else - idxd_free_desc(wq, desc); return rc; } } diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index a9025be940db..7e19ab92b61a 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -99,31 +99,39 @@ struct device_type idxd_engine_device_type = { /* Group attributes */ -static void idxd_set_free_tokens(struct idxd_device *idxd) +static void idxd_set_free_rdbufs(struct idxd_device *idxd) { - int i, tokens; + int i, rdbufs; - for (i = 0, tokens = 0; i < idxd->max_groups; i++) { + for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { struct idxd_group *g = idxd->groups[i]; - tokens += g->tokens_reserved; + rdbufs += g->rdbufs_reserved; } - idxd->nr_tokens = idxd->max_tokens - tokens; + idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; +} + +static ssize_t group_read_buffers_reserved_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = confdev_to_group(dev); + + return sysfs_emit(buf, "%u\n", group->rdbufs_reserved); } static ssize_t group_tokens_reserved_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct idxd_group *group = confdev_to_group(dev); - - return sysfs_emit(buf, "%u\n", group->tokens_reserved); + dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n"); + return group_read_buffers_reserved_show(dev, attr, buf); } -static ssize_t group_tokens_reserved_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t group_read_buffers_reserved_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { 
struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; @@ -143,33 +151,53 @@ static ssize_t group_tokens_reserved_store(struct device *dev, if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; - if (val > idxd->max_tokens) + if (val > idxd->max_rdbufs) return -EINVAL; - if (val > idxd->nr_tokens + group->tokens_reserved) + if (val > idxd->nr_rdbufs + group->rdbufs_reserved) return -EINVAL; - group->tokens_reserved = val; - idxd_set_free_tokens(idxd); + group->rdbufs_reserved = val; + idxd_set_free_rdbufs(idxd); return count; } +static ssize_t group_tokens_reserved_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n"); + return group_read_buffers_reserved_store(dev, attr, buf, count); +} + static struct device_attribute dev_attr_group_tokens_reserved = __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, group_tokens_reserved_store); +static struct device_attribute dev_attr_group_read_buffers_reserved = + __ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show, + group_read_buffers_reserved_store); + +static ssize_t group_read_buffers_allowed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = confdev_to_group(dev); + + return sysfs_emit(buf, "%u\n", group->rdbufs_allowed); +} + static ssize_t group_tokens_allowed_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct idxd_group *group = confdev_to_group(dev); - - return sysfs_emit(buf, "%u\n", group->tokens_allowed); + dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n"); + return group_read_buffers_allowed_show(dev, attr, buf); } -static ssize_t group_tokens_allowed_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t group_read_buffers_allowed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; @@ -190,29 +218,49 @@ static ssize_t group_tokens_allowed_store(struct device *dev, return -EPERM; if (val < 4 * group->num_engines || - val > group->tokens_reserved + idxd->nr_tokens) + val > group->rdbufs_reserved + idxd->nr_rdbufs) return -EINVAL; - group->tokens_allowed = val; + group->rdbufs_allowed = val; return count; } +static ssize_t group_tokens_allowed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n"); + return group_read_buffers_allowed_store(dev, attr, buf, count); +} + static struct device_attribute dev_attr_group_tokens_allowed = __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, group_tokens_allowed_store); +static struct device_attribute dev_attr_group_read_buffers_allowed = + __ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show, + group_read_buffers_allowed_store); + +static ssize_t group_use_read_buffer_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = confdev_to_group(dev); + + return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit); +} + static ssize_t group_use_token_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct idxd_group *group = confdev_to_group(dev); - - return sysfs_emit(buf, "%u\n", group->use_token_limit); + dev_warn_once(dev, "attribute deprecated, 
see use_read_buffer_limit.\n"); + return group_use_read_buffer_limit_show(dev, attr, buf); } -static ssize_t group_use_token_limit_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t group_use_read_buffer_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; @@ -232,17 +280,29 @@ static ssize_t group_use_token_limit_store(struct device *dev, if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; - if (idxd->token_limit == 0) + if (idxd->rdbuf_limit == 0) return -EPERM; - group->use_token_limit = !!val; + group->use_rdbuf_limit = !!val; return count; } +static ssize_t group_use_token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n"); + return group_use_read_buffer_limit_store(dev, attr, buf, count); +} + static struct device_attribute dev_attr_group_use_token_limit = __ATTR(use_token_limit, 0644, group_use_token_limit_show, group_use_token_limit_store); +static struct device_attribute dev_attr_group_use_read_buffer_limit = + __ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show, + group_use_read_buffer_limit_store); + static ssize_t group_engines_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -387,8 +447,11 @@ static struct attribute *idxd_group_attributes[] = { &dev_attr_group_work_queues.attr, &dev_attr_group_engines.attr, &dev_attr_group_use_token_limit.attr, + &dev_attr_group_use_read_buffer_limit.attr, &dev_attr_group_tokens_allowed.attr, + &dev_attr_group_read_buffers_allowed.attr, &dev_attr_group_tokens_reserved.attr, + &dev_attr_group_read_buffers_reserved.attr, &dev_attr_group_traffic_class_a.attr, &dev_attr_group_traffic_class_b.attr, NULL, @@ -945,6 +1008,41 @@ static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *at static struct device_attribute dev_attr_wq_occupancy = __ATTR(occupancy, 0444, wq_occupancy_show, NULL); +static ssize_t wq_enqcmds_retries_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = confdev_to_wq(dev); + + if (wq_dedicated(wq)) + return -EOPNOTSUPP; + + return sysfs_emit(buf, "%u\n", wq->enqcmds_retries); +} + +static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = confdev_to_wq(dev); + int rc; + unsigned int retries; + + if (wq_dedicated(wq)) + return -EOPNOTSUPP; + + rc = kstrtouint(buf, 10, &retries); + if (rc < 0) + return rc; + + if (retries > IDXD_ENQCMDS_MAX_RETRIES) + retries = IDXD_ENQCMDS_MAX_RETRIES; + + wq->enqcmds_retries = retries; + return count; +} + +static struct device_attribute dev_attr_wq_enqcmds_retries = + __ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store); + static struct attribute *idxd_wq_attributes[] = { &dev_attr_wq_clients.attr, &dev_attr_wq_state.attr, @@ -961,6 +1059,7 @@ static struct attribute *idxd_wq_attributes[] = { &dev_attr_wq_max_batch_size.attr, &dev_attr_wq_ats_disable.attr, &dev_attr_wq_occupancy.attr, + &dev_attr_wq_enqcmds_retries.attr, NULL, }; @@ -1156,26 +1255,42 @@ static ssize_t errors_show(struct device *dev, } static DEVICE_ATTR_RO(errors); +static ssize_t max_read_buffers_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd 
= confdev_to_idxd(dev); + + return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); +} + static ssize_t max_tokens_show(struct device *dev, struct device_attribute *attr, char *buf) { + dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n"); + return max_read_buffers_show(dev, attr, buf); +} + +static DEVICE_ATTR_RO(max_tokens); /* deprecated */ +static DEVICE_ATTR_RO(max_read_buffers); + +static ssize_t read_buffer_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ struct idxd_device *idxd = confdev_to_idxd(dev); - return sysfs_emit(buf, "%u\n", idxd->max_tokens); + return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); } -static DEVICE_ATTR_RO(max_tokens); static ssize_t token_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct idxd_device *idxd = confdev_to_idxd(dev); - - return sysfs_emit(buf, "%u\n", idxd->token_limit); + dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n"); + return read_buffer_limit_show(dev, attr, buf); } -static ssize_t token_limit_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t read_buffer_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct idxd_device *idxd = confdev_to_idxd(dev); unsigned long val; @@ -1191,16 +1306,26 @@ static ssize_t token_limit_store(struct device *dev, if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; - if (!idxd->hw.group_cap.token_limit) + if (!idxd->hw.group_cap.rdbuf_limit) return -EPERM; - if (val > idxd->hw.group_cap.total_tokens) + if (val > idxd->hw.group_cap.total_rdbufs) return -EINVAL; - idxd->token_limit = val; + idxd->rdbuf_limit = val; return count; } -static DEVICE_ATTR_RW(token_limit); + +static ssize_t token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n"); + return read_buffer_limit_store(dev, attr, buf, count); +} + +static DEVICE_ATTR_RW(token_limit); /* deprecated */ +static DEVICE_ATTR_RW(read_buffer_limit); static ssize_t cdev_major_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1246,7 +1371,9 @@ static struct attribute *idxd_device_attributes[] = { &dev_attr_state.attr, &dev_attr_errors.attr, &dev_attr_max_tokens.attr, + &dev_attr_max_read_buffers.attr, &dev_attr_token_limit.attr, + &dev_attr_read_buffer_limit.attr, &dev_attr_cdev_major.attr, &dev_attr_cmd_status.attr, NULL, @@ -1268,8 +1395,6 @@ static void idxd_conf_device_release(struct device *dev) kfree(idxd->groups); kfree(idxd->wqs); kfree(idxd->engines); - kfree(idxd->irq_entries); - kfree(idxd->int_handles); ida_free(&idxd_ida, idxd->id); kfree(idxd); } diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c index aa44bcd6a356..168adf28c5b1 100644 --- a/drivers/dma/ioat/sysfs.c +++ b/drivers/dma/ioat/sysfs.c @@ -158,8 +158,9 @@ static struct attribute *ioat_attrs[] = { &intr_coalesce_attr.attr, NULL, }; +ATTRIBUTE_GROUPS(ioat); struct kobj_type ioat_ktype = { .sysfs_ops = &ioat_sysfs_ops, - .default_attrs = ioat_attrs, + .default_groups = ioat_groups, }; diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index a23563cd118b..5a53d7fcef01 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -727,12 +727,6 @@ static int mmp_pdma_config_write(struct dma_chan *dchan, chan->dir = direction; chan->dev_addr = addr; - /* FIXME: drivers should be ported over to use the filter - * 
function. Once that's done, the following two lines can - * be removed. - */ - if (cfg->slave_id) - chan->drcmr = cfg->slave_id; return 0; } diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 9b0d463f89bb..9c8b4084ba2f 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -149,7 +149,7 @@ struct mv_xor_v2_descriptor { * @desc_size: HW descriptor size * @npendings: number of pending descriptors (for which tx_submit has * been called, but not yet issue_pending) * @hw_queue_idx: HW queue index - * @msi_desc: local interrupt descriptor information + * @irq: The Linux interrupt number */ struct mv_xor_v2_device { @@ -168,7 +168,7 @@ struct mv_xor_v2_device { int desc_size; unsigned int npendings; unsigned int hw_queue_idx; - struct msi_desc *msi_desc; + unsigned int irq; }; /** @@ -718,7 +718,6 @@ static int mv_xor_v2_probe(struct platform_device *pdev) int i, ret = 0; struct dma_device *dma_dev; struct mv_xor_v2_sw_desc *sw_desc; - struct msi_desc *msi_desc; BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) != MV_XOR_V2_EXT_DESC_SIZE); @@ -770,14 +769,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev) if (ret) goto disable_clk; - msi_desc = first_msi_entry(&pdev->dev); - if (!msi_desc) { - ret = -ENODEV; - goto free_msi_irqs; - } - xor_dev->msi_desc = msi_desc; + xor_dev->irq = msi_get_virq(&pdev->dev, 0); - ret = devm_request_irq(&pdev->dev, msi_desc->irq, + ret = devm_request_irq(&pdev->dev, xor_dev->irq, mv_xor_v2_interrupt_handler, 0, dev_name(&pdev->dev), xor_dev); if (ret) @@ -892,7 +886,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev) xor_dev->desc_size * MV_XOR_V2_DESC_NUM, xor_dev->hw_desq_virt, xor_dev->hw_desq); - devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev); + devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev); platform_msi_domain_free_irqs(&pdev->dev); diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 1da04112fcdb..c359decc07a3 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -835,7 +835,7 @@ static int pch_dma_probe(struct pci_dev *pdev, goto err_disable_pdev; } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Cannot set proper DMA config\n"); goto err_free_res; diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index e2b5129c5f84..5e46e347e28b 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -3240,7 +3240,6 @@ static int ppc440spe_adma_dma2rxor_prep_src( struct ppc440spe_rxor *cursor, int index, int src_cnt, u32 addr) { - int rval = 0; u32 sign; struct ppc440spe_adma_desc_slot *desc = hdesc; int i; @@ -3348,7 +3347,7 @@ static int ppc440spe_adma_dma2rxor_prep_src( break; } - return rval; + return 0; } /** diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 52d04641e361..6078cc81892e 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -909,13 +909,6 @@ static void pxad_get_config(struct pxad_chan *chan, *dcmd |= PXA_DCMD_BURST16; else if (maxburst == 32) *dcmd |= PXA_DCMD_BURST32; - - /* FIXME: drivers should be ported over to use the filter * function. Once that's done, the following two lines can * be removed. 
- */ - if (chan->cfg.slave_id) - chan->drcmr = chan->cfg.slave_id; } static struct dma_async_tx_descriptor * diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 1a1b7d8458c9..94f3648f7483 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -2206,10 +2206,8 @@ static int gpi_probe(struct platform_device *pdev) /* set up irq */ ret = platform_get_irq(pdev, i); - if (ret < 0) { - dev_err(gpi_dev->dev, "platform_get_irq failed for %d:%d\n", i, ret); + if (ret < 0) return ret; - } gpii->irq = ret; /* set up channel specific register info */ diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 23d64489d25f..65d054bb11aa 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -666,7 +666,7 @@ static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) struct device *dev = msi_desc_to_dev(desc); struct hidma_dev *dmadev = dev_get_drvdata(dev); - if (!desc->platform.msi_index) { + if (!desc->msi_index) { writel(msg->address_lo, dmadev->dev_evca + 0x118); writel(msg->address_hi, dmadev->dev_evca + 0x11C); writel(msg->data, dmadev->dev_evca + 0x120); @@ -678,11 +678,13 @@ static void hidma_free_msis(struct hidma_dev *dmadev) { #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN struct device *dev = dmadev->ddev.dev; - struct msi_desc *desc; + int i, virq; - /* free allocated MSI interrupts above */ - for_each_msi_entry(desc, dev) - devm_free_irq(dev, desc->irq, &dmadev->lldev); + for (i = 0; i < HIDMA_MSI_INTS; i++) { + virq = msi_get_virq(dev, i); + if (virq) + devm_free_irq(dev, virq, &dmadev->lldev); + } platform_msi_domain_free_irqs(dev); #endif @@ -692,45 +694,37 @@ static int hidma_request_msi(struct hidma_dev *dmadev, struct platform_device *pdev) { #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN - int rc; - struct msi_desc *desc; - struct msi_desc *failed_desc = NULL; + int rc, i, virq; rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS, hidma_write_msi_msg); if (rc) return rc; - for_each_msi_entry(desc, &pdev->dev) { - if (!desc->platform.msi_index) - dmadev->msi_virqbase = desc->irq; - - rc = devm_request_irq(&pdev->dev, desc->irq, + for (i = 0; i < HIDMA_MSI_INTS; i++) { + virq = msi_get_virq(&pdev->dev, i); + rc = devm_request_irq(&pdev->dev, virq, hidma_chirq_handler_msi, 0, "qcom-hidma-msi", &dmadev->lldev); - if (rc) { - failed_desc = desc; + if (rc) break; - } + if (!i) + dmadev->msi_virqbase = virq; } if (rc) { /* free allocated MSI interrupts above */ - for_each_msi_entry(desc, &pdev->dev) { - if (desc == failed_desc) - break; - devm_free_irq(&pdev->dev, desc->irq, - &dmadev->lldev); + for (--i; i >= 0; i--) { + virq = msi_get_virq(&pdev->dev, i); + devm_free_irq(&pdev->dev, virq, &dmadev->lldev); } + dev_warn(&pdev->dev, + "failed to request MSI irq, falling back to wired IRQ\n"); } else { /* Add callback to free MSIs on teardown */ hidma_ll_setup_irq(dmadev->lldev, true); - } - if (rc) - dev_warn(&pdev->dev, - "failed to request MSI irq, falling back to wired IRQ\n"); return rc; #else return -EINVAL; diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c index ee78bed8d60d..facdacf8aede 100644 --- a/drivers/dma/qcom/qcom_adm.c +++ b/drivers/dma/qcom/qcom_adm.c @@ -8,6 +8,7 @@ #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> +#include <linux/dma/qcom_adm.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -140,6 +141,8 @@ struct adm_chan { struct adm_async_desc *curr_txd; struct dma_slave_config slave; + u32 crci; + u32 mux; struct 
list_head node; int error; @@ -379,8 +382,8 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan, return ERR_PTR(-EINVAL); } - crci = achan->slave.slave_id & 0xf; - if (!crci || achan->slave.slave_id > 0x1f) { + crci = achan->crci & 0xf; + if (!crci || achan->crci > 0x1f) { dev_err(adev->dev, "invalid crci value\n"); return ERR_PTR(-EINVAL); } @@ -403,9 +406,7 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan, if (!async_desc) return ERR_PTR(-ENOMEM); - if (crci) - async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ? - ADM_CRCI_CTL_MUX_SEL : 0; + async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0; async_desc->crci = crci; async_desc->blk_size = blk_size; async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) + @@ -488,10 +489,13 @@ static int adm_terminate_all(struct dma_chan *chan) static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct adm_chan *achan = to_adm_chan(chan); + struct qcom_adm_peripheral_config *config = cfg->peripheral_config; unsigned long flag; spin_lock_irqsave(&achan->vc.lock, flag); memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config)); + if (cfg->peripheral_size == sizeof(*config)) + achan->crci = config->crci; spin_unlock_irqrestore(&achan->vc.lock, flag); return 0; } @@ -694,6 +698,45 @@ static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan, achan->vc.desc_free = adm_dma_free_desc; } +/** + * adm_dma_xlate - map a device-tree DMA specifier to a DMA channel + * @dma_spec: pointer to DMA specifier as found in the device tree + * @ofdma: pointer to DMA controller data + * + * This can use either the 1-cell or the 2-cell format: the first cell + * identifies the slave device, while the optional second cell + * contains the crci value. + * + * Returns pointer to appropriate dma channel on success or NULL on error. 
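For context, a minimal consumer-side sketch (not part of this diff) of how a client would now hand the CRCI to the ADM driver through dma_slave_config::peripheral_config instead of the removed slave_id; the function name and the CRCI value are illustrative only, and only the crci member consumed by adm_slave_config() above is set:

#include <linux/dmaengine.h>
#include <linux/dma/qcom_adm.h>

static int example_setup_adm_chan(struct dma_chan *chan)
{
	/* hypothetical CRCI index; adm_slave_config() above copies it */
	struct qcom_adm_peripheral_config periph = {
		.crci = 0x2,
	};
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.peripheral_config = &periph,
		.peripheral_size = sizeof(periph),
	};

	return dmaengine_slave_config(chan, &cfg);
}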
+ */ +static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct dma_device *dev = ofdma->of_dma_data; + struct dma_chan *chan, *candidate = NULL; + struct adm_chan *achan; + + if (!dev || dma_spec->args_count > 2) + return NULL; + + list_for_each_entry(chan, &dev->channels, device_node) + if (chan->chan_id == dma_spec->args[0]) { + candidate = chan; + break; + } + + if (!candidate) + return NULL; + + achan = to_adm_chan(candidate); + if (dma_spec->args_count == 2) + achan->crci = dma_spec->args[1]; + else + achan->crci = 0; + + return dma_get_slave_channel(candidate); +} + static int adm_dma_probe(struct platform_device *pdev) { struct adm_device *adev; @@ -838,8 +881,7 @@ static int adm_dma_probe(struct platform_device *pdev) goto err_disable_clks; } - ret = of_dma_controller_register(pdev->dev.of_node, - of_dma_xlate_by_chan_id, + ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate, &adev->common); if (ret) goto err_unregister_dma; diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 5c7716fd6bc5..481f45c77ce1 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -236,7 +236,7 @@ struct rcar_dmac_of_data { #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8) #define RCAR_DMAOR_AE (1 << 2) #define RCAR_DMAOR_DME (1 << 0) -#define RCAR_DMACHCLR 0x0080 /* Not on R-Car V3U */ +#define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */ #define RCAR_DMADPSEC 0x00a0 #define RCAR_DMASAR 0x0000 @@ -299,8 +299,8 @@ struct rcar_dmac_of_data { #define RCAR_DMAFIXDAR 0x0014 #define RCAR_DMAFIXDPBASE 0x0060 -/* For R-Car V3U */ -#define RCAR_V3U_DMACHCLR 0x0100 +/* For R-Car Gen4 */ +#define RCAR_GEN4_DMACHCLR 0x0100 /* Hardcode the MEMCPY transfer size to 4 bytes. */ #define RCAR_DMAC_MEMCPY_XFER_SIZE 4 @@ -345,7 +345,7 @@ static void rcar_dmac_chan_clear(struct rcar_dmac *dmac, struct rcar_dmac_chan *chan) { if (dmac->chan_base) - rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1); + rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1); else rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index)); } @@ -357,7 +357,7 @@ static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac) if (dmac->chan_base) { for_each_rcar_dmac_chan(i, dmac, chan) - rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1); + rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1); } else { rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); } @@ -2009,7 +2009,7 @@ static const struct rcar_dmac_of_data rcar_dmac_data = { .chan_offset_stride = 0x80, }; -static const struct rcar_dmac_of_data rcar_v3u_dmac_data = { +static const struct rcar_dmac_of_data rcar_gen4_dmac_data = { .chan_offset_base = 0x0, .chan_offset_stride = 0x1000, }; @@ -2019,8 +2019,11 @@ static const struct of_device_id rcar_dmac_of_ids[] = { .compatible = "renesas,rcar-dmac", .data = &rcar_dmac_data, }, { + .compatible = "renesas,rcar-gen4-dmac", + .data = &rcar_gen4_dmac_data, + }, { .compatible = "renesas,dmac-r8a779a0", - .data = &rcar_v3u_dmac_data, + .data = &rcar_gen4_dmac_data, }, { /* Sentinel */ } }; diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 7f72b3f4cd1a..158e5e7defae 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -787,14 +787,6 @@ static int shdma_config(struct dma_chan *chan, return -EINVAL; /* - * overriding the slave_id through dma_slave_config is deprecated, - * but possibly some out-of-tree drivers still do it. 
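The deprecation notice being deleted here sums up the theme of the whole series: slave_id is going away, and consumers get their channel routing from the DT specifier instead. A sketch of the replacement pattern, assuming a device whose dma-names property contains an "rx" entry (all names here are illustrative, not from this diff):

#include <linux/dmaengine.h>

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		/* no .slave_id: the routing came with the DT specifier */
	};
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");	/* matches dma-names */
	if (IS_ERR(chan))
		return chan;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return ERR_PTR(-EINVAL);
	}

	return chan;
}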
- */ - if (WARN_ON_ONCE(config->slave_id && - config->slave_id != schan->real_slave_id)) - schan->real_slave_id = config->slave_id; - - /* * We could lock this, but you shouldn't be configuring the * channel, while using it... */ @@ -1042,9 +1034,7 @@ EXPORT_SYMBOL(shdma_cleanup); static int __init shdma_enter(void) { - shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG), - sizeof(long), - GFP_KERNEL); + shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL); if (!shdma_slave_used) return -ENOMEM; return 0; @@ -1053,7 +1043,7 @@ module_init(shdma_enter); static void __exit shdma_exit(void) { - kfree(shdma_slave_used); + bitmap_free(shdma_slave_used); } module_exit(shdma_exit); diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 4357d2395e6b..7f158ef5672d 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -795,9 +795,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, return dst_datawidth; } - if (slave_cfg->slave_id) - schan->dev_id = slave_cfg->slave_id; - hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET; /* diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index d30a4a28d3bf..6f57ff0e7b37 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -10,6 +10,7 @@ * Inspired by stm32-dma.c and dma-jz4780.c */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> @@ -32,13 +33,6 @@ #include "virt-dma.h" -/* MDMA Generic getter/setter */ -#define STM32_MDMA_SHIFT(n) (ffs(n) - 1) -#define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \ - (mask)) -#define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \ - STM32_MDMA_SHIFT(mask)) - #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ #define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ @@ -80,8 +74,7 @@ #define STM32_MDMA_CCR_HEX BIT(13) #define STM32_MDMA_CCR_BEX BIT(12) #define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6) -#define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CCR_PL_MASK) +#define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n)) #define STM32_MDMA_CCR_TCIE BIT(5) #define STM32_MDMA_CCR_BTIE BIT(4) #define STM32_MDMA_CCR_BRTIE BIT(3) @@ -99,48 +92,33 @@ #define STM32_MDMA_CTCR_BWM BIT(31) #define STM32_MDMA_CTCR_SWRM BIT(30) #define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28) -#define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \ - STM32_MDMA_CTCR_TRGM_MSK) -#define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \ - STM32_MDMA_CTCR_TRGM_MSK) +#define STM32_MDMA_CTCR_TRGM(n) FIELD_PREP(STM32_MDMA_CTCR_TRGM_MSK, (n)) +#define STM32_MDMA_CTCR_TRGM_GET(n) FIELD_GET(STM32_MDMA_CTCR_TRGM_MSK, (n)) #define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26) -#define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CTCR_PAM_MASK) +#define STM32_MDMA_CTCR_PAM(n) FIELD_PREP(STM32_MDMA_CTCR_PAM_MASK, (n)) #define STM32_MDMA_CTCR_PKE BIT(25) #define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18) -#define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \ - STM32_MDMA_CTCR_TLEN_MSK) -#define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \ - STM32_MDMA_CTCR_TLEN_MSK) +#define STM32_MDMA_CTCR_TLEN(n) FIELD_PREP(STM32_MDMA_CTCR_TLEN_MSK, (n)) +#define STM32_MDMA_CTCR_TLEN_GET(n) FIELD_GET(STM32_MDMA_CTCR_TLEN_MSK, (n)) #define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18) -#define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \ - STM32_MDMA_CTCR_LEN2_MSK) -#define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \ - STM32_MDMA_CTCR_LEN2_MSK) +#define STM32_MDMA_CTCR_LEN2(n) 
FIELD_PREP(STM32_MDMA_CTCR_LEN2_MSK, (n)) +#define STM32_MDMA_CTCR_LEN2_GET(n) FIELD_GET(STM32_MDMA_CTCR_LEN2_MSK, (n)) #define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15) -#define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CTCR_DBURST_MASK) +#define STM32_MDMA_CTCR_DBURST(n) FIELD_PREP(STM32_MDMA_CTCR_DBURST_MASK, (n)) #define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12) -#define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CTCR_SBURST_MASK) +#define STM32_MDMA_CTCR_SBURST(n) FIELD_PREP(STM32_MDMA_CTCR_SBURST_MASK, (n)) #define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10) -#define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \ - STM32_MDMA_CTCR_DINCOS_MASK) +#define STM32_MDMA_CTCR_DINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_DINCOS_MASK, (n)) #define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8) -#define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \ - STM32_MDMA_CTCR_SINCOS_MASK) +#define STM32_MDMA_CTCR_SINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_SINCOS_MASK, (n)) #define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6) -#define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CTCR_DSIZE_MASK) +#define STM32_MDMA_CTCR_DSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_DSIZE_MASK, (n)) #define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4) -#define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CTCR_SSIZE_MASK) +#define STM32_MDMA_CTCR_SSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_SSIZE_MASK, (n)) #define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2) -#define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \ - STM32_MDMA_CTCR_DINC_MASK) +#define STM32_MDMA_CTCR_DINC(n) FIELD_PREP(STM32_MDMA_CTCR_DINC_MASK, (n)) #define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0) -#define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \ - STM32_MDMA_CTCR_SINC_MASK) +#define STM32_MDMA_CTCR_SINC(n) FIELD_PREP(STM32_MDMA_CTCR_SINC_MASK, (n)) #define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \ | STM32_MDMA_CTCR_DINC_MASK \ | STM32_MDMA_CTCR_SINCOS_MASK \ @@ -151,16 +129,13 @@ /* MDMA Channel x block number of data register */ #define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x)) #define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20) -#define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CBNDTR_BRC_MK) -#define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \ - STM32_MDMA_CBNDTR_BRC_MK) +#define STM32_MDMA_CBNDTR_BRC(n) FIELD_PREP(STM32_MDMA_CBNDTR_BRC_MK, (n)) +#define STM32_MDMA_CBNDTR_BRC_GET(n) FIELD_GET(STM32_MDMA_CBNDTR_BRC_MK, (n)) #define STM32_MDMA_CBNDTR_BRDUM BIT(19) #define STM32_MDMA_CBNDTR_BRSUM BIT(18) #define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0) -#define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CBNDTR_BNDT_MASK) +#define STM32_MDMA_CBNDTR_BNDT(n) FIELD_PREP(STM32_MDMA_CBNDTR_BNDT_MASK, (n)) /* MDMA Channel x source address register */ #define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x)) @@ -171,11 +146,9 @@ /* MDMA Channel x block repeat address update register */ #define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x)) #define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16) -#define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CBRUR_DUV_MASK) +#define STM32_MDMA_CBRUR_DUV(n) FIELD_PREP(STM32_MDMA_CBRUR_DUV_MASK, (n)) #define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0) -#define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CBRUR_SUV_MASK) +#define STM32_MDMA_CBRUR_SUV(n) FIELD_PREP(STM32_MDMA_CBRUR_SUV_MASK, (n)) /* MDMA Channel x link address register */ #define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x)) @@ -184,9 +157,8 @@ #define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x)) #define 
STM32_MDMA_CTBR_DBUS BIT(17) #define STM32_MDMA_CTBR_SBUS BIT(16) -#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0) -#define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CTBR_TSEL_MASK) +#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0) +#define STM32_MDMA_CTBR_TSEL(n) FIELD_PREP(STM32_MDMA_CTBR_TSEL_MASK, (n)) /* MDMA Channel x mask address register */ #define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x)) @@ -1279,7 +1251,7 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan, u32 curr_hwdesc) { struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); - struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc; + struct stm32_mdma_hwdesc *hwdesc; u32 cbndtr, residue, modulo, burst_size; int i; diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index b7260749e8ee..eaafcbe4ca94 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -343,12 +343,6 @@ static int tegra_dma_slave_config(struct dma_chan *dc, } memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); - if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID && - sconfig->device_fc) { - if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK) - return -EINVAL; - tdc->slave_id = sconfig->slave_id; - } tdc->config_init = true; return 0; diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile index bd496efadff7..1d4081a049b7 100644 --- a/drivers/dma/ti/Makefile +++ b/drivers/dma/ti/Makefile @@ -8,5 +8,6 @@ obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \ k3-psil-am654.o \ k3-psil-j721e.o \ k3-psil-j7200.o \ - k3-psil-am64.o + k3-psil-am64.o \ + k3-psil-j721s2.o obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 35d81bd857f1..08e47f44d325 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -1681,8 +1681,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data) dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); emr = val; - for (i = find_next_bit(&emr, 32, 0); i < 32; - i = find_next_bit(&emr, 32, i + 1)) { + for_each_set_bit(i, &emr, 32) { int k = (j << 5) + i; /* Clear the corresponding EMR bits */ diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c new file mode 100644 index 000000000000..4c4172a4d271 --- /dev/null +++ b/drivers/dma/ti/k3-psil-j721s2.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j721s2_src_ep_map[] = { + /* PDMA_MCASP - McASP0-4 */ + PSIL_PDMA_MCASP(0x4400), + PSIL_PDMA_MCASP(0x4401), + PSIL_PDMA_MCASP(0x4402), + PSIL_PDMA_MCASP(0x4403), + 
PSIL_PDMA_MCASP(0x4404), + /* PDMA_SPI_G0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0x4600), + PSIL_PDMA_XY_PKT(0x4601), + PSIL_PDMA_XY_PKT(0x4602), + PSIL_PDMA_XY_PKT(0x4603), + PSIL_PDMA_XY_PKT(0x4604), + PSIL_PDMA_XY_PKT(0x4605), + PSIL_PDMA_XY_PKT(0x4606), + PSIL_PDMA_XY_PKT(0x4607), + PSIL_PDMA_XY_PKT(0x4608), + PSIL_PDMA_XY_PKT(0x4609), + PSIL_PDMA_XY_PKT(0x460a), + PSIL_PDMA_XY_PKT(0x460b), + PSIL_PDMA_XY_PKT(0x460c), + PSIL_PDMA_XY_PKT(0x460d), + PSIL_PDMA_XY_PKT(0x460e), + PSIL_PDMA_XY_PKT(0x460f), + /* PDMA_SPI_G1 - SPI4-7 */ + PSIL_PDMA_XY_PKT(0x4610), + PSIL_PDMA_XY_PKT(0x4611), + PSIL_PDMA_XY_PKT(0x4612), + PSIL_PDMA_XY_PKT(0x4613), + PSIL_PDMA_XY_PKT(0x4614), + PSIL_PDMA_XY_PKT(0x4615), + PSIL_PDMA_XY_PKT(0x4616), + PSIL_PDMA_XY_PKT(0x4617), + PSIL_PDMA_XY_PKT(0x4618), + PSIL_PDMA_XY_PKT(0x4619), + PSIL_PDMA_XY_PKT(0x461a), + PSIL_PDMA_XY_PKT(0x461b), + PSIL_PDMA_XY_PKT(0x461c), + PSIL_PDMA_XY_PKT(0x461d), + PSIL_PDMA_XY_PKT(0x461e), + PSIL_PDMA_XY_PKT(0x461f), + /* PDMA_USART_G0 - UART0-1 */ + PSIL_PDMA_XY_PKT(0x4700), + PSIL_PDMA_XY_PKT(0x4701), + /* PDMA_USART_G1 - UART2-3 */ + PSIL_PDMA_XY_PKT(0x4702), + PSIL_PDMA_XY_PKT(0x4703), + /* PDMA_USART_G2 - UART4-9 */ + PSIL_PDMA_XY_PKT(0x4704), + PSIL_PDMA_XY_PKT(0x4705), + PSIL_PDMA_XY_PKT(0x4706), + PSIL_PDMA_XY_PKT(0x4707), + PSIL_PDMA_XY_PKT(0x4708), + PSIL_PDMA_XY_PKT(0x4709), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ + PSIL_PDMA_XY_PKT(0x7100), + PSIL_PDMA_XY_PKT(0x7101), + PSIL_PDMA_XY_PKT(0x7102), + PSIL_PDMA_XY_PKT(0x7103), + /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ + PSIL_PDMA_XY_PKT(0x7300), + /* MCU_PDMA_ADC - ADC0-1 */ + PSIL_PDMA_XY_TR(0x7400), + PSIL_PDMA_XY_TR(0x7401), + PSIL_PDMA_XY_TR(0x7402), + PSIL_PDMA_XY_TR(0x7403), + /* SA2UL */ + PSIL_SA2UL(0x7500, 0), + PSIL_SA2UL(0x7501, 0), + PSIL_SA2UL(0x7502, 0), + PSIL_SA2UL(0x7503, 0), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j721s2_dst_ep_map[] = { + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* SA2UL */ + PSIL_SA2UL(0xf500, 1), + PSIL_SA2UL(0xf501, 1), +}; + +struct psil_ep_map j721s2_ep_map = { + .name = "j721s2", + .src = j721s2_src_ep_map, + .src_count = ARRAY_SIZE(j721s2_src_ep_map), + .dst = j721s2_dst_ep_map, + .dst_count = ARRAY_SIZE(j721s2_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h index b74e192e3c2d..e51e179cdb56 100644 --- a/drivers/dma/ti/k3-psil-priv.h +++ b/drivers/dma/ti/k3-psil-priv.h @@ -41,5 +41,6 @@ extern struct psil_ep_map am654_ep_map; extern struct psil_ep_map j721e_ep_map; extern struct psil_ep_map j7200_ep_map; extern struct psil_ep_map am64_ep_map; +extern struct psil_ep_map j721s2_ep_map; #endif /* K3_PSIL_PRIV_H_ */ diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c index 13ce7367d870..8867b4bd0c51 100644 --- a/drivers/dma/ti/k3-psil.c +++ b/drivers/dma/ti/k3-psil.c @@ -21,6 +21,7 @@ static const struct soc_device_attribute k3_soc_devices[] = { { .family = "J721E", .data = &j721e_ep_map }, { .family = "J7200", .data = &j7200_ep_map }, { .family = "AM64X", 
.data = &am64_ep_map }, + { .family = "J721S2", .data = &j721s2_ep_map }, { /* sentinel */ } }; diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c index aada84f40723..d4f1e4e9603a 100644 --- a/drivers/dma/ti/k3-udma-private.c +++ b/drivers/dma/ti/k3-udma-private.c @@ -168,8 +168,7 @@ int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id) { const struct udma_oes_offsets *oes = &ud->soc_data->oes; - return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id + - oes->pktdma_tchan_flow); + return msi_get_virq(ud->dev, udma_tflow_id + oes->pktdma_tchan_flow); } EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq); @@ -177,7 +176,6 @@ int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id) { const struct udma_oes_offsets *oes = &ud->soc_data->oes; - return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id + - oes->pktdma_rchan_flow); + return msi_get_virq(ud->dev, udma_rflow_id + oes->pktdma_rchan_flow); } EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 6e56d1cef5ee..d2d4cbe63e44 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -2313,8 +2313,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) /* Event from UDMA (TR events) only needed for slave TR mode channels */ if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { - uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, - irq_udma_idx); + uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); if (uc->irq_num_udma <= 0) { dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", irq_udma_idx); @@ -2486,7 +2485,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan) uc->psil_paired = true; } - uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx); + uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); if (uc->irq_num_ring <= 0) { dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", irq_ring_idx); @@ -2503,8 +2502,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan) /* Event from BCDMA (TR events) only needed for slave channels */ if (is_slave_direction(uc->config.dir)) { - uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, - irq_udma_idx); + uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); if (uc->irq_num_udma <= 0) { dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", irq_udma_idx); @@ -2672,7 +2670,7 @@ static int pktdma_alloc_chan_resources(struct dma_chan *chan) uc->psil_paired = true; - uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx); + uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); if (uc->irq_num_ring <= 0) { dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", irq_ring_idx); @@ -4376,6 +4374,7 @@ static const struct soc_device_attribute k3_soc_devices[] = { { .family = "J721E", .data = &j721e_soc_data }, { .family = "J7200", .data = &j7200_soc_data }, { .family = "AM64X", .data = &am64_soc_data }, + { .family = "J721S2", .data = &j721e_soc_data}, { /* sentinel */ } }; @@ -5336,9 +5335,9 @@ static int udma_probe(struct platform_device *pdev) if (IS_ERR(ud->ringacc)) return PTR_ERR(ud->ringacc); - dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_TI_SCI_INTA_MSI); - if (!dev->msi_domain) { + if (!dev->msi.domain) { dev_err(dev, "Failed to get MSI domain\n"); return -EPROBE_DEFER; } diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index d6b8a202474f..290836b7e1be 100644 --- 
a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -131,8 +131,9 @@ uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc) static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc, struct uniphier_xdmac_desc *xd) { - u32 src_mode, src_addr, src_width; - u32 dst_mode, dst_addr, dst_width; + u32 src_mode, src_width; + u32 dst_mode, dst_width; + dma_addr_t src_addr, dst_addr; u32 val, its, tnum; enum dma_slave_buswidth buswidth; diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 4677ce08ed40..cd62bbb50e8b 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -2128,6 +2128,126 @@ error: } /** + * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction + * @dchan: DMA channel + * @dst_sg: Destination scatter list + * @dst_sg_len: Number of entries in destination scatter list + * @src_sg: Source scatter list + * @src_sg_len: Number of entries in source scatter list + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg( + struct dma_chan *dchan, struct scatterlist *dst_sg, + unsigned int dst_sg_len, struct scatterlist *src_sg, + unsigned int src_sg_len, unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_cdma_tx_segment *segment, *prev = NULL; + struct xilinx_cdma_desc_hw *hw; + size_t len, dst_avail, src_avail; + dma_addr_t dma_dst, dma_src; + + if (unlikely(dst_sg_len == 0 || src_sg_len == 0)) + return NULL; + + if (unlikely(!dst_sg || !src_sg)) + return NULL; + + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + dst_avail = sg_dma_len(dst_sg); + src_avail = sg_dma_len(src_sg); + /* + * loop until there is either no more source or no more destination + * scatterlist entry + */ + while (true) { + len = min_t(size_t, src_avail, dst_avail); + len = min_t(size_t, len, chan->xdev->max_buffer_len); + if (len == 0) + goto fetch; + + /* Allocate the link descriptor from DMA pool */ + segment = xilinx_cdma_alloc_tx_segment(chan); + if (!segment) + goto error; + + dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - + dst_avail; + dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - + src_avail; + hw = &segment->hw; + hw->control = len; + hw->src_addr = dma_src; + hw->dest_addr = dma_dst; + if (chan->ext_addr) { + hw->src_addr_msb = upper_32_bits(dma_src); + hw->dest_addr_msb = upper_32_bits(dma_dst); + } + + if (prev) { + prev->hw.next_desc = segment->phys; + if (chan->ext_addr) + prev->hw.next_desc_msb = + upper_32_bits(segment->phys); + } + + prev = segment; + dst_avail -= len; + src_avail -= len; + list_add_tail(&segment->node, &desc->segments); + +fetch: + /* Fetch the next dst scatterlist entry */ + if (dst_avail == 0) { + if (dst_sg_len == 0) + break; + dst_sg = sg_next(dst_sg); + if (dst_sg == NULL) + break; + dst_sg_len--; + dst_avail = sg_dma_len(dst_sg); + } + /* Fetch the next src scatterlist entry */ + if (src_avail == 0) { + if (src_sg_len == 0) + break; + src_sg = sg_next(src_sg); + if (src_sg == NULL) + break; + src_sg_len--; + src_avail = sg_dma_len(src_sg); + } + } + + if (list_empty(&desc->segments)) { + dev_err(chan->xdev->dev, + "%s: Zero-size SG transfer requested\n", __func__); + goto error; + } + + /* Link the 
last hardware descriptor with the first. */ + segment = list_first_entry(&desc->segments, + struct xilinx_cdma_tx_segment, node); + desc->async_tx.phys = segment->phys; + prev->hw.next_desc = segment->phys; + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction * @dchan: DMA channel * @sgl: scatterlist to transfer to/from @@ -2860,7 +2980,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, } /* Request the interrupt */ - chan->irq = irq_of_parse_and_map(node, chan->tdest); + chan->irq = of_irq_get(node, chan->tdest); + if (chan->irq < 0) + return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n"); err = request_irq(chan->irq, xdev->dma_config->irq_handler, IRQF_SHARED, "xilinx-dma-controller", chan); if (err) { @@ -2934,8 +3056,11 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) dev_warn(xdev->dev, "missing dma-channels property\n"); - for (i = 0; i < nr_channels; i++) - xilinx_dma_chan_probe(xdev, node); + for (i = 0; i < nr_channels; i++) { + ret = xilinx_dma_chan_probe(xdev, node); + if (ret) + return ret; + } return 0; } @@ -3115,7 +3240,9 @@ static int xilinx_dma_probe(struct platform_device *pdev) DMA_RESIDUE_GRANULARITY_SEGMENT; } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); + dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask); xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; + xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg; /* Residue calculation is supported by only AXI DMA and CDMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index ce5c66e6897d..b0f4948b00a5 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -12,6 +12,7 @@ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> +#include <linux/dma/xilinx_dpdma.h> #include <linux/dmaengine.h> #include <linux/dmapool.h> #include <linux/interrupt.h> @@ -1273,6 +1274,7 @@ static int xilinx_dpdma_config(struct dma_chan *dchan, struct dma_slave_config *config) { struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dpdma_peripheral_config *pconfig; unsigned long flags; /* @@ -1282,15 +1284,18 @@ static int xilinx_dpdma_config(struct dma_chan *dchan, * fixed both on the DPDMA side and on the DP controller side. */ - spin_lock_irqsave(&chan->lock, flags); - /* - * Abuse the slave_id to indicate that the channel is part of a video - * group. + * Use the peripheral_config to indicate that the channel is part + * of a video group. This requires matching use of the custom + * structure in each driver. 
*/ - if (chan->id <= ZYNQMP_DPDMA_VIDEO2) - chan->video_group = config->slave_id != 0; + pconfig = config->peripheral_config; + if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig))) + return -EINVAL; + spin_lock_irqsave(&chan->lock, flags); + if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig) + chan->video_group = pconfig->video_group; spin_unlock_irqrestore(&chan->lock, flags); return 0; diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c index 0cb440bdd5cb..f2b65d967384 100644 --- a/drivers/extcon/extcon-usb-gpio.c +++ b/drivers/extcon/extcon-usb-gpio.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * drivers/extcon/extcon-usb-gpio.c - USB GPIO extcon driver * * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index e7a9561a826d..a09e704fd0fa 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -576,19 +576,7 @@ EXPORT_SYMBOL_GPL(extcon_set_state); */ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state) { - int ret, index; - unsigned long flags; - - index = find_cable_index_by_id(edev, id); - if (index < 0) - return index; - - /* Check whether the external connector's state is changed. */ - spin_lock_irqsave(&edev->lock, flags); - ret = is_extcon_changed(edev, index, state); - spin_unlock_irqrestore(&edev->lock, flags); - if (!ret) - return 0; + int ret; ret = extcon_set_state(edev, id, state); if (ret < 0) diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 87039c5c03fd..eefcc4146749 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -452,7 +452,7 @@ static void scmi_vio_remove(struct virtio_device *vdev) * outstanding message on any vqueue to be ignored by complete_cb: now * we can just stop processing buffers and destroy the vqueues. 
*/ - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); /* Ensure scmi_vdev is visible as NULL */ smp_store_mb(scmi_vdev, NULL); diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 948dd8382686..e48108e694f8 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -12,16 +12,10 @@ #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> -#include <linux/device.h> -#include <linux/firmware.h> -#include <linux/interrupt.h> -#include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/regmap.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <linux/workqueue.h> #include <linux/firmware/cirrus/cs_dsp.h> #include <linux/firmware/cirrus/wmfw.h> @@ -622,7 +616,8 @@ static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp) offs[0], offs[1], offs[2], offs[3]); } -static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg) +static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg, + unsigned int off) { const struct cs_dsp_alg_region *alg_region = &ctl->alg_region; struct cs_dsp *dsp = ctl->dsp; @@ -635,7 +630,7 @@ static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg return -EINVAL; } - *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset); + *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset + off); return 0; } @@ -659,10 +654,12 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int unsigned int reg; int i, ret; + lockdep_assert_held(&dsp->pwr_lock); + if (!dsp->running) return -EPERM; - ret = cs_dsp_coeff_base_reg(ctl, ®); + ret = cs_dsp_coeff_base_reg(ctl, ®, 0); if (ret) return ret; @@ -716,14 +713,14 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_acked_control); static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, - const void *buf, size_t len) + unsigned int off, const void *buf, size_t len) { struct cs_dsp *dsp = ctl->dsp; void *scratch; int ret; unsigned int reg; - ret = cs_dsp_coeff_base_reg(ctl, ®); + ret = cs_dsp_coeff_base_reg(ctl, ®, off); if (ret) return ret; @@ -749,38 +746,49 @@ static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, /** * cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control * @ctl: pointer to coefficient control + * @off: word offset at which data should be written * @buf: the buffer to write to the given control - * @len: the length of the buffer + * @len: the length of the buffer in bytes * * Must be called with pwr_lock held. * * Return: Zero for success, a negative number on error. 
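A usage sketch of the reworked helper (not part of this diff): the new @off argument is a word offset while @len stays in bytes, and the caller must hold pwr_lock as the kernel-doc states. The control name, algorithm ID, and value below are illustrative:

#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/firmware/cirrus/wmfw.h>

static int example_poke_ctl(struct cs_dsp *dsp)
{
	struct cs_dsp_coeff_ctl *ctl;
	__be32 val = cpu_to_be32(1);	/* assumed big-endian word */
	int ret;

	mutex_lock(&dsp->pwr_lock);
	/* a NULL lookup result is now handled: write_ctrl returns -ENOENT */
	ctl = cs_dsp_get_ctl(dsp, "EXAMPLE_CTL", WMFW_ADSP2_YM, 0x10001);
	/* write one 32-bit word at word offset 2 into the control */
	ret = cs_dsp_coeff_write_ctrl(ctl, 2, &val, sizeof(val));
	mutex_unlock(&dsp->pwr_lock);

	return ret;
}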
*/ -int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len) +int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, const void *buf, size_t len) { int ret = 0; + if (!ctl) + return -ENOENT; + + lockdep_assert_held(&ctl->dsp->pwr_lock); + + if (len + off * sizeof(u32) > ctl->len) + return -EINVAL; + if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) ret = -EPERM; else if (buf != ctl->cache) - memcpy(ctl->cache, buf, len); + memcpy(ctl->cache + off * sizeof(u32), buf, len); ctl->set = 1; if (ctl->enabled && ctl->dsp->running) - ret = cs_dsp_coeff_write_ctrl_raw(ctl, buf, len); + ret = cs_dsp_coeff_write_ctrl_raw(ctl, off, buf, len); return ret; } EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_ctrl); -static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len) +static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, void *buf, size_t len) { struct cs_dsp *dsp = ctl->dsp; void *scratch; int ret; unsigned int reg; - ret = cs_dsp_coeff_base_reg(ctl, ®); + ret = cs_dsp_coeff_base_reg(ctl, ®, off); if (ret) return ret; @@ -806,28 +814,38 @@ static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, s /** * cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer * @ctl: pointer to coefficient control + * @off: word offset at which data should be read * @buf: the buffer to store to the given control - * @len: the length of the buffer + * @len: the length of the buffer in bytes * * Must be called with pwr_lock held. * * Return: Zero for success, a negative number on error. */ -int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len) +int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, void *buf, size_t len) { int ret = 0; + if (!ctl) + return -ENOENT; + + lockdep_assert_held(&ctl->dsp->pwr_lock); + + if (len + off * sizeof(u32) > ctl->len) + return -EINVAL; + if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) { if (ctl->enabled && ctl->dsp->running) - return cs_dsp_coeff_read_ctrl_raw(ctl, buf, len); + return cs_dsp_coeff_read_ctrl_raw(ctl, off, buf, len); else return -EPERM; } else { if (!ctl->flags && ctl->enabled && ctl->dsp->running) - ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len); + ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len); if (buf != ctl->cache) - memcpy(buf, ctl->cache, len); + memcpy(buf, ctl->cache + off * sizeof(u32), len); } return ret; @@ -851,7 +869,7 @@ static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp) * created so we don't need to do anything. 
*/ if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) { - ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len); + ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len); if (ret < 0) return ret; } @@ -869,7 +887,7 @@ static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp) if (!ctl->enabled) continue; if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) { - ret = cs_dsp_coeff_write_ctrl_raw(ctl, ctl->cache, + ret = cs_dsp_coeff_write_ctrl_raw(ctl, 0, ctl->cache, ctl->len); if (ret < 0) return ret; @@ -1159,6 +1177,7 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp, return -EINVAL; break; case WMFW_CTL_TYPE_HOSTEVENT: + case WMFW_CTL_TYPE_FWEVENT: ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk, WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | @@ -1459,6 +1478,8 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in { struct cs_dsp_coeff_ctl *pos, *rslt = NULL; + lockdep_assert_held(&dsp->pwr_lock); + list_for_each_entry(pos, &dsp->ctl_list, list) { if (!pos->subname) continue; @@ -1554,6 +1575,8 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp, { struct cs_dsp_alg_region *alg_region; + lockdep_assert_held(&dsp->pwr_lock); + list_for_each_entry(alg_region, &dsp->alg_regions, list) { if (id == alg_region->alg && type == alg_region->type) return alg_region; @@ -1565,7 +1588,7 @@ EXPORT_SYMBOL_GPL(cs_dsp_find_alg_region); static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp, int type, __be32 id, - __be32 base) + __be32 ver, __be32 base) { struct cs_dsp_alg_region *alg_region; @@ -1575,6 +1598,7 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp, alg_region->type = type; alg_region->alg = be32_to_cpu(id); + alg_region->ver = be32_to_cpu(ver); alg_region->base = be32_to_cpu(base); list_add_tail(&alg_region->list, &dsp->alg_regions); @@ -1624,14 +1648,14 @@ static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp, nalgs); } -static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, int nregions, - const int *type, __be32 *base) +static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver, + int nregions, const int *type, __be32 *base) { struct cs_dsp_alg_region *alg_region; int i; for (i = 0; i < nregions; i++) { - alg_region = cs_dsp_create_region(dsp, type[i], id, base[i]); + alg_region = cs_dsp_create_region(dsp, type[i], id, ver, base[i]); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); } @@ -1666,12 +1690,14 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM, - adsp1_id.fw.id, adsp1_id.zm); + adsp1_id.fw.id, adsp1_id.fw.ver, + adsp1_id.zm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM, - adsp1_id.fw.id, adsp1_id.dm); + adsp1_id.fw.id, adsp1_id.fw.ver, + adsp1_id.dm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); @@ -1694,6 +1720,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM, adsp1_alg[i].alg.id, + adsp1_alg[i].alg.ver, adsp1_alg[i].dm); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1715,6 +1742,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM, adsp1_alg[i].alg.id, + adsp1_alg[i].alg.ver, adsp1_alg[i].zm); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1767,17 +1795,20 @@ static int cs_dsp_adsp2_setup_algs(struct 
cs_dsp *dsp) cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM, - adsp2_id.fw.id, adsp2_id.xm); + adsp2_id.fw.id, adsp2_id.fw.ver, + adsp2_id.xm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM, - adsp2_id.fw.id, adsp2_id.ym); + adsp2_id.fw.id, adsp2_id.fw.ver, + adsp2_id.ym); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM, - adsp2_id.fw.id, adsp2_id.zm); + adsp2_id.fw.id, adsp2_id.fw.ver, + adsp2_id.zm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); @@ -1802,6 +1833,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM, adsp2_alg[i].alg.id, + adsp2_alg[i].alg.ver, adsp2_alg[i].xm); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1823,6 +1855,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM, adsp2_alg[i].alg.id, + adsp2_alg[i].alg.ver, adsp2_alg[i].ym); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1844,6 +1877,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM, adsp2_alg[i].alg.id, + adsp2_alg[i].alg.ver, adsp2_alg[i].zm); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1869,7 +1903,7 @@ out: return ret; } -static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, +static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver, __be32 xm_base, __be32 ym_base) { static const int types[] = { @@ -1878,7 +1912,7 @@ static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, }; __be32 bases[] = { xm_base, xm_base, ym_base, ym_base }; - return cs_dsp_create_regions(dsp, id, ARRAY_SIZE(types), types, bases); + return cs_dsp_create_regions(dsp, id, ver, ARRAY_SIZE(types), types, bases); } static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp) @@ -1906,7 +1940,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp) cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs); - ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, + ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, halo_id.fw.ver, halo_id.xm_base, halo_id.ym_base); if (ret) return ret; @@ -1930,6 +1964,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp) be32_to_cpu(halo_alg[i].ym_base)); ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id, + halo_alg[i].alg.ver, halo_alg[i].xm_base, halo_alg[i].ym_base); if (ret) @@ -1951,7 +1986,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware const struct cs_dsp_region *mem; struct cs_dsp_alg_region *alg_region; const char *region_name; - int ret, pos, blocks, type, offset, reg; + int ret, pos, blocks, type, offset, reg, version; + char *text = NULL; struct cs_dsp_buf *buf; if (!firmware) @@ -1973,6 +2009,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware switch (be32_to_cpu(hdr->rev) & 0xff) { case 1: + case 2: break; default: cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n", @@ -1995,6 +2032,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware type = le16_to_cpu(blk->type); offset = le16_to_cpu(blk->offset); + version = le32_to_cpu(blk->ver) >> 8; cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n", file, blocks, le32_to_cpu(blk->id), @@ -2008,6 +2046,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware region_name = 
"Unknown"; switch (type) { case (WMFW_NAME_TEXT << 8): + text = kzalloc(le32_to_cpu(blk->len) + 1, GFP_KERNEL); + break; case (WMFW_INFO_TEXT << 8): case (WMFW_METADATA << 8): break; @@ -2052,6 +2092,16 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware alg_region = cs_dsp_find_alg_region(dsp, type, le32_to_cpu(blk->id)); if (alg_region) { + if (version != alg_region->ver) + cs_dsp_warn(dsp, + "Algorithm coefficient version %d.%d.%d but expected %d.%d.%d\n", + (version >> 16) & 0xFF, + (version >> 8) & 0xFF, + version & 0xFF, + (alg_region->ver >> 16) & 0xFF, + (alg_region->ver >> 8) & 0xFF, + alg_region->ver & 0xFF); + reg = alg_region->base; reg = dsp->ops->region_to_reg(mem, reg); reg += offset; @@ -2067,6 +2117,13 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware break; } + if (text) { + memcpy(text, blk->data, le32_to_cpu(blk->len)); + cs_dsp_info(dsp, "%s: %s\n", dsp->fw_name, text); + kfree(text); + text = NULL; + } + if (reg) { if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) { @@ -2117,6 +2174,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware out_fw: regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); + kfree(text); return ret; } @@ -2600,6 +2658,12 @@ int cs_dsp_run(struct cs_dsp *dsp) goto err; } + if (dsp->client_ops->pre_run) { + ret = dsp->client_ops->pre_run(dsp); + if (ret) + goto err; + } + /* Sync set controls */ ret = cs_dsp_coeff_sync_controls(dsp); if (ret != 0) @@ -2680,10 +2744,16 @@ EXPORT_SYMBOL_GPL(cs_dsp_stop); static int cs_dsp_halo_start_core(struct cs_dsp *dsp) { - return regmap_update_bits(dsp->regmap, - dsp->base + HALO_CCM_CORE_CONTROL, - HALO_CORE_RESET | HALO_CORE_EN, - HALO_CORE_RESET | HALO_CORE_EN); + int ret; + + ret = regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL, + HALO_CORE_RESET | HALO_CORE_EN, + HALO_CORE_RESET | HALO_CORE_EN); + if (ret) + return ret; + + return regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL, + HALO_CORE_RESET, 0); } static void cs_dsp_halo_stop_core(struct cs_dsp *dsp) @@ -2789,6 +2859,8 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me unsigned int reg; int ret; + lockdep_assert_held(&dsp->pwr_lock); + if (!mem) return -EINVAL; @@ -2842,6 +2914,8 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad __be32 val = cpu_to_be32(data & 0x00ffffffu); unsigned int reg; + lockdep_assert_held(&dsp->pwr_lock); + if (!mem) return -EINVAL; diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index e87e7f1b1a33..da93864d7abc 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -40,6 +40,8 @@ #ifdef CONFIG_ARM64 # define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64 +#elif defined(CONFIG_RISCV) +# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_MIN #else # define EFI_RT_VIRTUAL_LIMIT TASK_SIZE #endif diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 97968aece54f..931544c9f63d 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -3,9 +3,9 @@ menuconfig GOOGLE_FIRMWARE bool "Google Firmware Drivers" default n help - These firmware drivers are used by Google's servers. They are - only useful if you are working directly on one of their - proprietary servers. If in doubt, say "N". 
+ These firmware drivers are used by Google servers, + Chromebooks and other devices using coreboot firmware. + If in doubt, say "N". if GOOGLE_FIRMWARE diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index c62f05420d32..a69399a6b7c0 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -388,9 +388,8 @@ static void fw_cfg_sysfs_cache_cleanup(void) struct fw_cfg_sysfs_entry *entry, *next; list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) { - /* will end up invoking fw_cfg_sysfs_cache_delist() - * via each object's release() method (i.e. destructor) - */ + fw_cfg_sysfs_cache_delist(entry); + kobject_del(&entry->kobj); kobject_put(&entry->kobj); } } @@ -449,7 +448,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) { struct fw_cfg_sysfs_entry *entry = to_entry(kobj); - fw_cfg_sysfs_cache_delist(entry); kfree(entry); } @@ -602,20 +600,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* set file entry information */ entry->size = be32_to_cpu(f->size); entry->select = be16_to_cpu(f->select); - memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); + strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) { - kobject_put(&entry->kobj); - return err; - } + if (err) + goto err_put_entry; /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); if (err) - goto err_add_raw; + goto err_del_entry; /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name); @@ -624,9 +620,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) fw_cfg_sysfs_cache_enlist(entry); return 0; -err_add_raw: +err_del_entry: kobject_del(&entry->kobj); - kfree(entry); +err_put_entry: + kobject_put(&entry->kobj); return err; } diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 0dd117860b63..450c5f6a1cbf 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -23,6 +23,7 @@ #include <linux/hashtable.h> #include <linux/firmware/xlnx-zynqmp.h> +#include <linux/firmware/xlnx-event-manager.h> #include "zynqmp-debug.h" /* Max HashMap Order for PM API feature check (1<<7 = 128) */ @@ -38,6 +39,8 @@ static bool feature_check_enabled; static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); +static struct platform_device *em_dev; + /** * struct pm_api_feature_data - PM API Feature data * @pm_api_id: PM API Id, used as key to index into hashmap @@ -160,7 +163,7 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2, * * Return: Returns status, either success or error+reason */ -static int zynqmp_pm_feature(u32 api_id) +int zynqmp_pm_feature(const u32 api_id) { int ret; u32 ret_payload[PAYLOAD_ARG_CNT]; @@ -197,6 +200,7 @@ static int zynqmp_pm_feature(u32 api_id) return ret; } +EXPORT_SYMBOL_GPL(zynqmp_pm_feature); /** * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer @@ -1117,6 +1121,29 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out) EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine); /** + * zynqmp_pm_register_notifier() - PM API for registering a subsystem + * to be notified about specific + * event/error. + * @node: Node ID to which the event is related.
+ * @event: Event Mask of Error events for which the subsystem wants to get notified. + * @wake: Wake subsystem upon capturing the event if value 1 + * @enable: Enable the registration for value 1, disable for value 0 + * + * This function is used to register/un-register for a particular node-event + * combination in firmware. + * + * Return: Returns status, either success or error+reason + */ + +int zynqmp_pm_register_notifier(const u32 node, const u32 event, + const u32 wake, const u32 enable) +{ + return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event, + wake, enable, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier); + +/** * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart * @type: Shutdown or restart? 0 for shutdown, 1 for restart * @subtype: Specifies which system should be restarted or shut down @@ -1471,6 +1498,15 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) zynqmp_pm_api_debugfs_init(); + np = of_find_compatible_node(NULL, NULL, "xlnx,versal"); + if (np) { + em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager", + -1, NULL, 0); + if (IS_ERR(em_dev)) + dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "EM register fail with error\n"); + } + of_node_put(np); + return of_platform_populate(dev->of_node, NULL, NULL, dev); } @@ -1488,6 +1524,8 @@ static int zynqmp_firmware_remove(struct platform_device *pdev) kfree(feature_data); } + platform_device_unregister(em_dev); + return 0; } diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index ccf4546eff29..4ffb9da537d8 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -652,19 +652,15 @@ static int altera_cvp_probe(struct pci_dev *pdev, snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s", ALTERA_CVP_MGR_NAME, pci_name(pdev)); - mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name, - &altera_cvp_ops, conf); - if (!mgr) { - ret = -ENOMEM; + mgr = fpga_mgr_register(&pdev->dev, conf->mgr_name, + &altera_cvp_ops, conf); + if (IS_ERR(mgr)) { + ret = PTR_ERR(mgr); goto err_unmap; } pci_set_drvdata(pdev, mgr); - ret = fpga_mgr_register(mgr); - if (ret) - goto err_unmap; - return 0; err_unmap: diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c index a78e49c63c64..ff3a646fd9e3 100644 --- a/drivers/fpga/altera-fpga2sdram.c +++ b/drivers/fpga/altera-fpga2sdram.c @@ -121,17 +121,13 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) /* Get f2s bridge configuration saved in handoff register */ regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask); - br = devm_fpga_bridge_create(dev, F2S_BRIDGE_NAME, - &altera_fpga2sdram_br_ops, priv); - if (!br) - return -ENOMEM; + br = fpga_bridge_register(dev, F2S_BRIDGE_NAME, + &altera_fpga2sdram_br_ops, priv); + if (IS_ERR(br)) + return PTR_ERR(br); platform_set_drvdata(pdev, br); - ret = fpga_bridge_register(br); - if (ret) - return ret; - dev_info(dev, "driver initialized with handoff %08x\n", priv->mask); if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) { diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c index 7d22a44d652e..445f4b011167 100644 --- a/drivers/fpga/altera-freeze-bridge.c +++ b/drivers/fpga/altera-freeze-bridge.c @@ -246,14 +246,14 @@ static int altera_freeze_br_probe(struct platform_device *pdev) priv->base_addr = base_addr; - br = devm_fpga_bridge_create(dev, FREEZE_BRIDGE_NAME, - &altera_freeze_br_br_ops, priv); - if (!br) - return -ENOMEM; + br = fpga_bridge_register(dev, FREEZE_BRIDGE_NAME, +
&altera_freeze_br_br_ops, priv); + if (IS_ERR(br)) + return PTR_ERR(br); platform_set_drvdata(pdev, br); - return fpga_bridge_register(br); + return 0; } static int altera_freeze_br_remove(struct platform_device *pdev) diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c index 77b95f251821..aa758426c22b 100644 --- a/drivers/fpga/altera-hps2fpga.c +++ b/drivers/fpga/altera-hps2fpga.c @@ -180,19 +180,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) } } - br = devm_fpga_bridge_create(dev, priv->name, - &altera_hps2fpga_br_ops, priv); - if (!br) { - ret = -ENOMEM; + br = fpga_bridge_register(dev, priv->name, + &altera_hps2fpga_br_ops, priv); + if (IS_ERR(br)) { + ret = PTR_ERR(br); goto err; } platform_set_drvdata(pdev, br); - ret = fpga_bridge_register(br); - if (ret) - goto err; - return 0; err: diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c index dfdf21ed34c4..be0667968d33 100644 --- a/drivers/fpga/altera-pr-ip-core.c +++ b/drivers/fpga/altera-pr-ip-core.c @@ -191,11 +191,8 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base) (val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT, (int)(val & ALT_PR_CSR_PR_START)); - mgr = devm_fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, dev_name(dev), &alt_pr_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } EXPORT_SYMBOL_GPL(alt_pr_register); diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 23bfd4d1ad0f..5e1e009dba89 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -302,12 +302,9 @@ static int altera_ps_probe(struct spi_device *spi) snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s", dev_driver_string(&spi->dev), dev_name(&spi->dev)); - mgr = devm_fpga_mgr_create(&spi->dev, conf->mgr_name, - &altera_ps_ops, conf); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(&spi->dev, mgr); + mgr = devm_fpga_mgr_register(&spi->dev, conf->mgr_name, + &altera_ps_ops, conf); + return PTR_ERR_OR_ZERO(mgr); } static const struct spi_device_id altera_ps_spi_ids[] = { diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c index 3ff9f3a687ce..808d1f4d76df 100644 --- a/drivers/fpga/dfl-fme-br.c +++ b/drivers/fpga/dfl-fme-br.c @@ -68,14 +68,14 @@ static int fme_br_probe(struct platform_device *pdev) priv->pdata = dev_get_platdata(dev); - br = devm_fpga_bridge_create(dev, "DFL FPGA FME Bridge", - &fme_bridge_ops, priv); - if (!br) - return -ENOMEM; + br = fpga_bridge_register(dev, "DFL FPGA FME Bridge", + &fme_bridge_ops, priv); + if (IS_ERR(br)) + return PTR_ERR(br); platform_set_drvdata(pdev, br); - return fpga_bridge_register(br); + return 0; } static int fme_br_remove(struct platform_device *pdev) diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c index 313420405d5e..af0785783b52 100644 --- a/drivers/fpga/dfl-fme-mgr.c +++ b/drivers/fpga/dfl-fme-mgr.c @@ -276,7 +276,7 @@ static void fme_mgr_get_compat_id(void __iomem *fme_pr, static int fme_mgr_probe(struct platform_device *pdev) { struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev); - struct fpga_compat_id *compat_id; + struct fpga_manager_info info = { 0 }; struct device *dev = &pdev->dev; struct fme_mgr_priv *priv; struct fpga_manager *mgr; @@ -296,20 +296,16 @@ static int fme_mgr_probe(struct platform_device *pdev) return PTR_ERR(priv->ioaddr); } - compat_id = devm_kzalloc(dev, 
sizeof(*compat_id), GFP_KERNEL); - if (!compat_id) + info.name = "DFL FME FPGA Manager"; + info.mops = &fme_mgr_ops; + info.priv = priv; + info.compat_id = devm_kzalloc(dev, sizeof(*info.compat_id), GFP_KERNEL); + if (!info.compat_id) return -ENOMEM; - fme_mgr_get_compat_id(priv->ioaddr, compat_id); - - mgr = devm_fpga_mgr_create(dev, "DFL FME FPGA Manager", - &fme_mgr_ops, priv); - if (!mgr) - return -ENOMEM; - - mgr->compat_id = compat_id; - - return devm_fpga_mgr_register(dev, mgr); + fme_mgr_get_compat_id(priv->ioaddr, info.compat_id); + mgr = devm_fpga_mgr_register_full(dev, &info); + return PTR_ERR_OR_ZERO(mgr); } static struct platform_driver fme_mgr_driver = { diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c index 1eeb42af1012..4aebde0a7f1c 100644 --- a/drivers/fpga/dfl-fme-region.c +++ b/drivers/fpga/dfl-fme-region.c @@ -30,6 +30,7 @@ static int fme_region_get_bridges(struct fpga_region *region) static int fme_region_probe(struct platform_device *pdev) { struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev); + struct fpga_region_info info = { 0 }; struct device *dev = &pdev->dev; struct fpga_region *region; struct fpga_manager *mgr; @@ -39,20 +40,18 @@ static int fme_region_probe(struct platform_device *pdev) if (IS_ERR(mgr)) return -EPROBE_DEFER; - region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges); - if (!region) { - ret = -ENOMEM; + info.mgr = mgr; + info.compat_id = mgr->compat_id; + info.get_bridges = fme_region_get_bridges; + info.priv = pdata; + region = fpga_region_register_full(dev, &info); + if (IS_ERR(region)) { + ret = PTR_ERR(region); goto eprobe_mgr_put; } - region->priv = pdata; - region->compat_id = mgr->compat_id; platform_set_drvdata(pdev, region); - ret = fpga_region_register(region); - if (ret) - goto eprobe_mgr_put; - dev_dbg(dev, "DFL FME FPGA Region probed\n"); return 0; diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index f86666cf2c6a..599bb21d86af 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -1407,19 +1407,15 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info) if (!cdev) return ERR_PTR(-ENOMEM); - cdev->region = devm_fpga_region_create(info->dev, NULL, NULL); - if (!cdev->region) { - ret = -ENOMEM; - goto free_cdev_exit; - } - cdev->parent = info->dev; mutex_init(&cdev->lock); INIT_LIST_HEAD(&cdev->port_dev_list); - ret = fpga_region_register(cdev->region); - if (ret) + cdev->region = fpga_region_register(info->dev, NULL, NULL); + if (IS_ERR(cdev->region)) { + ret = PTR_ERR(cdev->region); goto free_cdev_exit; + } /* create and init build info for enumeration */ binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL); diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 798f55670646..16f2b164a178 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -312,36 +312,41 @@ static struct attribute *fpga_bridge_attrs[] = { ATTRIBUTE_GROUPS(fpga_bridge); /** - * fpga_bridge_create - create and initialize a struct fpga_bridge + * fpga_bridge_register - create and register an FPGA Bridge device * @parent: FPGA bridge device from pdev * @name: FPGA bridge name * @br_ops: pointer to structure of fpga bridge ops * @priv: FPGA bridge private data * - * The caller of this function is responsible for freeing the bridge with - * fpga_bridge_free(). Using devm_fpga_bridge_create() instead is recommended. 
- * - * Return: struct fpga_bridge or NULL + * Return: struct fpga_bridge pointer or ERR_PTR() */ -struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, - void *priv) +struct fpga_bridge * +fpga_bridge_register(struct device *parent, const char *name, + const struct fpga_bridge_ops *br_ops, + void *priv) { struct fpga_bridge *bridge; int id, ret; + if (!br_ops) { + dev_err(parent, "Attempt to register without fpga_bridge_ops\n"); + return ERR_PTR(-EINVAL); + } + if (!name || !strlen(name)) { dev_err(parent, "Attempt to register with no name!\n"); - return NULL; + return ERR_PTR(-EINVAL); } bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) - return NULL; + return ERR_PTR(-ENOMEM); id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL); - if (id < 0) + if (id < 0) { + ret = id; goto error_kfree; + } mutex_init(&bridge->mutex); INIT_LIST_HEAD(&bridge->node); @@ -350,17 +355,23 @@ struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name, bridge->br_ops = br_ops; bridge->priv = priv; - device_initialize(&bridge->dev); bridge->dev.groups = br_ops->groups; bridge->dev.class = fpga_bridge_class; bridge->dev.parent = parent; bridge->dev.of_node = parent->of_node; bridge->dev.id = id; + of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev); ret = dev_set_name(&bridge->dev, "br%d", id); if (ret) goto error_device; + ret = device_register(&bridge->dev); + if (ret) { + put_device(&bridge->dev); + return ERR_PTR(ret); + } + return bridge; error_device: @@ -368,88 +379,7 @@ error_device: error_kfree: kfree(bridge); - return NULL; -} -EXPORT_SYMBOL_GPL(fpga_bridge_create); - -/** - * fpga_bridge_free - free an fpga bridge created by fpga_bridge_create() - * @bridge: FPGA bridge struct - */ -void fpga_bridge_free(struct fpga_bridge *bridge) -{ - ida_simple_remove(&fpga_bridge_ida, bridge->dev.id); - kfree(bridge); -} -EXPORT_SYMBOL_GPL(fpga_bridge_free); - -static void devm_fpga_bridge_release(struct device *dev, void *res) -{ - struct fpga_bridge *bridge = *(struct fpga_bridge **)res; - - fpga_bridge_free(bridge); -} - -/** - * devm_fpga_bridge_create - create and init a managed struct fpga_bridge - * @parent: FPGA bridge device from pdev - * @name: FPGA bridge name - * @br_ops: pointer to structure of fpga bridge ops - * @priv: FPGA bridge private data - * - * This function is intended for use in an FPGA bridge driver's probe function. - * After the bridge driver creates the struct with devm_fpga_bridge_create(), it - * should register the bridge with fpga_bridge_register(). The bridge driver's - * remove function should call fpga_bridge_unregister(). The bridge struct - * allocated with this function will be freed automatically on driver detach. - * This includes the case of a probe function returning error before calling - * fpga_bridge_register(), the struct will still get cleaned up. 
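The net effect of the bridge rework is that allocation, initialization, and registration collapse into a single call, with teardown moving to the class release hook. Below is a minimal, illustrative sketch of the resulting driver pattern, shaped after the dfl-fme-br.c and altera conversions earlier in this diff; the my_bridge names and my_br_ops are hypothetical placeholders, not code from this series:

#include <linux/fpga/fpga-bridge.h>
#include <linux/platform_device.h>

static int my_bridge_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	/* One step: allocate, initialize and register the bridge. */
	br = fpga_bridge_register(&pdev->dev, "My FPGA Bridge",
				  &my_br_ops, NULL);
	if (IS_ERR(br))
		return PTR_ERR(br);

	platform_set_drvdata(pdev, br);
	return 0;
}

static int my_bridge_remove(struct platform_device *pdev)
{
	/* The class release hook now frees the id and the struct. */
	fpga_bridge_unregister(platform_get_drvdata(pdev));
	return 0;
}
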
- * - * Return: struct fpga_bridge or NULL - */ -struct fpga_bridge -*devm_fpga_bridge_create(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, void *priv) -{ - struct fpga_bridge **ptr, *bridge; - - ptr = devres_alloc(devm_fpga_bridge_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - bridge = fpga_bridge_create(parent, name, br_ops, priv); - if (!bridge) { - devres_free(ptr); - } else { - *ptr = bridge; - devres_add(parent, ptr); - } - - return bridge; -} -EXPORT_SYMBOL_GPL(devm_fpga_bridge_create); - -/** - * fpga_bridge_register - register an FPGA bridge - * - * @bridge: FPGA bridge struct - * - * Return: 0 for success, error code otherwise. - */ -int fpga_bridge_register(struct fpga_bridge *bridge) -{ - struct device *dev = &bridge->dev; - int ret; - - ret = device_add(dev); - if (ret) - return ret; - - of_platform_populate(dev->of_node, NULL, NULL, dev); - - dev_info(dev->parent, "fpga bridge [%s] registered\n", bridge->name); - - return 0; + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(fpga_bridge_register); @@ -475,6 +405,10 @@ EXPORT_SYMBOL_GPL(fpga_bridge_unregister); static void fpga_bridge_dev_release(struct device *dev) { + struct fpga_bridge *bridge = to_fpga_bridge(dev); + + ida_simple_remove(&fpga_bridge_ida, bridge->dev.id); + kfree(bridge); } static int __init fpga_bridge_dev_init(void) diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index aa30889e2320..d49a9ce34568 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -592,49 +592,49 @@ void fpga_mgr_unlock(struct fpga_manager *mgr) EXPORT_SYMBOL_GPL(fpga_mgr_unlock); /** - * fpga_mgr_create - create and initialize an FPGA manager struct + * fpga_mgr_register_full - create and register an FPGA Manager device * @parent: fpga manager device from pdev - * @name: fpga manager name - * @mops: pointer to structure of fpga manager ops - * @priv: fpga manager private data + * @info: parameters for fpga manager * - * The caller of this function is responsible for freeing the struct with - * fpga_mgr_free(). Using devm_fpga_mgr_create() instead is recommended. + * The caller of this function is responsible for calling fpga_mgr_unregister(). + * Using devm_fpga_mgr_register_full() instead is recommended. 
* - * Return: pointer to struct fpga_manager or NULL + * Return: pointer to struct fpga_manager or ERR_PTR() */ -struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, - void *priv) +struct fpga_manager * +fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) { + const struct fpga_manager_ops *mops = info->mops; struct fpga_manager *mgr; int id, ret; if (!mops) { dev_err(parent, "Attempt to register without fpga_manager_ops\n"); - return NULL; + return ERR_PTR(-EINVAL); } - if (!name || !strlen(name)) { + if (!info->name || !strlen(info->name)) { dev_err(parent, "Attempt to register with no name!\n"); - return NULL; + return ERR_PTR(-EINVAL); } mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) - return NULL; + return ERR_PTR(-ENOMEM); id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL); - if (id < 0) + if (id < 0) { + ret = id; goto error_kfree; + } mutex_init(&mgr->ref_mutex); - mgr->name = name; - mgr->mops = mops; - mgr->priv = priv; + mgr->name = info->name; + mgr->mops = info->mops; + mgr->priv = info->priv; + mgr->compat_id = info->compat_id; - device_initialize(&mgr->dev); mgr->dev.class = fpga_mgr_class; mgr->dev.groups = mops->groups; mgr->dev.parent = parent; @@ -645,6 +645,19 @@ struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name, if (ret) goto error_device; + /* + * Initialize framework state by requesting low level driver read state + * from device. FPGA may be in reset mode or may have been programmed + * by bootloader or EEPROM. + */ + mgr->state = fpga_mgr_state(mgr); + + ret = device_register(&mgr->dev); + if (ret) { + put_device(&mgr->dev); + return ERR_PTR(ret); + } + return mgr; error_device: @@ -652,96 +665,36 @@ error_device: error_kfree: kfree(mgr); - return NULL; + return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_mgr_create); +EXPORT_SYMBOL_GPL(fpga_mgr_register_full); /** - * fpga_mgr_free - free an FPGA manager created with fpga_mgr_create() - * @mgr: fpga manager struct - */ -void fpga_mgr_free(struct fpga_manager *mgr) -{ - ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); - kfree(mgr); -} -EXPORT_SYMBOL_GPL(fpga_mgr_free); - -static void devm_fpga_mgr_release(struct device *dev, void *res) -{ - struct fpga_mgr_devres *dr = res; - - fpga_mgr_free(dr->mgr); -} - -/** - * devm_fpga_mgr_create - create and initialize a managed FPGA manager struct + * fpga_mgr_register - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data * - * This function is intended for use in an FPGA manager driver's probe function. - * After the manager driver creates the manager struct with - * devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The - * manager driver's remove function should call fpga_mgr_unregister(). The - * manager struct allocated with this function will be freed automatically on - * driver detach. This includes the case of a probe function returning error - * before calling fpga_mgr_register(), the struct will still get cleaned up. + * The caller of this function is responsible for calling fpga_mgr_unregister(). + * Using devm_fpga_mgr_register() instead is recommended. This simple + * version of the register function should be sufficient for most users. The + * fpga_mgr_register_full() function is available for users that need to pass + * additional, optional parameters.
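As a hedged sketch of the _full() use case just described, the probe below passes the optional compat_id the way the dfl-fme-mgr.c hunk above does; my_mgr_ops and the my_mgr_ names are assumed placeholders, not part of this series:

#include <linux/fpga/fpga-mgr.h>
#include <linux/platform_device.h>

static int my_mgr_probe(struct platform_device *pdev)
{
	struct fpga_manager_info info = { 0 };
	struct fpga_manager *mgr;

	info.name = "My FPGA Manager";
	info.mops = &my_mgr_ops;	/* assumed defined elsewhere */
	info.compat_id = devm_kzalloc(&pdev->dev, sizeof(*info.compat_id),
				      GFP_KERNEL);
	if (!info.compat_id)
		return -ENOMEM;

	mgr = fpga_mgr_register_full(&pdev->dev, &info);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	platform_set_drvdata(pdev, mgr);
	return 0;
}

The matching remove path is a single fpga_mgr_unregister() call; the struct itself is now freed by the class release hook.
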
* - * Return: pointer to struct fpga_manager or NULL + * Return: pointer to struct fpga_manager or ERR_PTR() */ -struct fpga_manager *devm_fpga_mgr_create(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, - void *priv) +struct fpga_manager * +fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv) { - struct fpga_mgr_devres *dr; + struct fpga_manager_info info = { 0 }; - dr = devres_alloc(devm_fpga_mgr_release, sizeof(*dr), GFP_KERNEL); - if (!dr) - return NULL; + info.name = name; + info.mops = mops; + info.priv = priv; - dr->mgr = fpga_mgr_create(parent, name, mops, priv); - if (!dr->mgr) { - devres_free(dr); - return NULL; - } - - devres_add(parent, dr); - - return dr->mgr; -} -EXPORT_SYMBOL_GPL(devm_fpga_mgr_create); - -/** - * fpga_mgr_register - register an FPGA manager - * @mgr: fpga manager struct - * - * Return: 0 on success, negative error code otherwise. - */ -int fpga_mgr_register(struct fpga_manager *mgr) -{ - int ret; - - /* - * Initialize framework state by requesting low level driver read state - * from device. FPGA may be in reset mode or may have been programmed - * by bootloader or EEPROM. - */ - mgr->state = fpga_mgr_state(mgr); - - ret = device_add(&mgr->dev); - if (ret) - goto error_device; - - dev_info(&mgr->dev, "%s registered\n", mgr->name); - - return 0; - -error_device: - ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); - - return ret; + return fpga_mgr_register_full(parent, &info); } EXPORT_SYMBOL_GPL(fpga_mgr_register); @@ -765,14 +718,6 @@ void fpga_mgr_unregister(struct fpga_manager *mgr) } EXPORT_SYMBOL_GPL(fpga_mgr_unregister); -static int fpga_mgr_devres_match(struct device *dev, void *res, - void *match_data) -{ - struct fpga_mgr_devres *dr = res; - - return match_data == dr->mgr; -} - static void devm_fpga_mgr_unregister(struct device *dev, void *res) { struct fpga_mgr_devres *dr = res; @@ -781,45 +726,67 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) } /** - * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() - * @dev: managing device for this FPGA manager - * @mgr: fpga manager struct + * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register_full() + * @parent: fpga manager device from pdev + * @info: parameters for fpga manager * - * This is the devres variant of fpga_mgr_register() for which the unregister + * This is the devres variant of fpga_mgr_register_full() for which the unregister * function will be called automatically when the managing device is detached. */ -int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr) +struct fpga_manager * +devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) { struct fpga_mgr_devres *dr; - int ret; - - /* - * Make sure that the struct fpga_manager * that is passed in is - * managed itself.
- */ - if (WARN_ON(!devres_find(dev, devm_fpga_mgr_release, - fpga_mgr_devres_match, mgr))) - return -EINVAL; + struct fpga_manager *mgr; dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL); if (!dr) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - ret = fpga_mgr_register(mgr); - if (ret) { + mgr = fpga_mgr_register_full(parent, info); + if (IS_ERR(mgr)) { devres_free(dr); - return ret; + return mgr; } dr->mgr = mgr; - devres_add(dev, dr); + devres_add(parent, dr); - return 0; + return mgr; +} +EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); + +/** + * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() + * @parent: fpga manager device from pdev + * @name: fpga manager name + * @mops: pointer to structure of fpga manager ops + * @priv: fpga manager private data + * + * This is the devres variant of fpga_mgr_register() for which the + * unregister function will be called automatically when the managing + * device is detached. + */ +struct fpga_manager * +devm_fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv) +{ + struct fpga_manager_info info = { 0 }; + + info.name = name; + info.mops = mops; + info.priv = priv; + + return devm_fpga_mgr_register_full(parent, &info); } EXPORT_SYMBOL_GPL(devm_fpga_mgr_register); static void fpga_mgr_dev_release(struct device *dev) { + struct fpga_manager *mgr = to_fpga_manager(dev); + + ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); + kfree(mgr); } static int __init fpga_mgr_class_init(void) diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index a4838715221f..b0ac18de4885 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -180,39 +180,42 @@ static struct attribute *fpga_region_attrs[] = { ATTRIBUTE_GROUPS(fpga_region); /** - * fpga_region_create - alloc and init a struct fpga_region + * fpga_region_register_full - create and register an FPGA Region device * @parent: device parent - * @mgr: manager that programs this region - * @get_bridges: optional function to get bridges to a list - * - * The caller of this function is responsible for freeing the resulting region - * struct with fpga_region_free(). Using devm_fpga_region_create() instead is - * recommended. 
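For the common case, the devres variant reduces a probe tail to the two-line pattern this series applies across ice40, machxo2, socfpga, ts73xx, versal and zynqmp; a sketch under the same assumptions (my_ops is a hypothetical ops table):

#include <linux/fpga/fpga-mgr.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	/* No explicit unregister: devres unwinds it on driver detach. */
	mgr = devm_fpga_mgr_register(&pdev->dev, "My FPGA Manager",
				     &my_ops, NULL);
	return PTR_ERR_OR_ZERO(mgr);
}
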
+ * @info: parameters for FPGA Region * - * Return: struct fpga_region or NULL + * Return: struct fpga_region or ERR_PTR() */ -struct fpga_region -*fpga_region_create(struct device *parent, - struct fpga_manager *mgr, - int (*get_bridges)(struct fpga_region *)) +struct fpga_region * +fpga_region_register_full(struct device *parent, const struct fpga_region_info *info) { struct fpga_region *region; int id, ret = 0; + if (!info) { + dev_err(parent, + "Attempt to register without required info structure\n"); + return ERR_PTR(-EINVAL); + } + region = kzalloc(sizeof(*region), GFP_KERNEL); if (!region) - return NULL; + return ERR_PTR(-ENOMEM); id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL); - if (id < 0) + if (id < 0) { + ret = id; goto err_free; + } + + region->mgr = info->mgr; + region->compat_id = info->compat_id; + region->priv = info->priv; + region->get_bridges = info->get_bridges; - region->mgr = mgr; - region->get_bridges = get_bridges; mutex_init(®ion->mutex); INIT_LIST_HEAD(®ion->bridge_list); - device_initialize(®ion->dev); region->dev.class = fpga_region_class; region->dev.parent = parent; region->dev.of_node = parent->of_node; @@ -222,6 +225,12 @@ struct fpga_region if (ret) goto err_remove; + ret = device_register(®ion->dev); + if (ret) { + put_device(®ion->dev); + return ERR_PTR(ret); + } + return region; err_remove: @@ -229,76 +238,32 @@ err_remove: err_free: kfree(region); - return NULL; -} -EXPORT_SYMBOL_GPL(fpga_region_create); - -/** - * fpga_region_free - free an FPGA region created by fpga_region_create() - * @region: FPGA region - */ -void fpga_region_free(struct fpga_region *region) -{ - ida_simple_remove(&fpga_region_ida, region->dev.id); - kfree(region); -} -EXPORT_SYMBOL_GPL(fpga_region_free); - -static void devm_fpga_region_release(struct device *dev, void *res) -{ - struct fpga_region *region = *(struct fpga_region **)res; - - fpga_region_free(region); + return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(fpga_region_register_full); /** - * devm_fpga_region_create - create and initialize a managed FPGA region struct + * fpga_region_register - create and register an FPGA Region device * @parent: device parent * @mgr: manager that programs this region * @get_bridges: optional function to get bridges to a list * - * This function is intended for use in an FPGA region driver's probe function. - * After the region driver creates the region struct with - * devm_fpga_region_create(), it should register it with fpga_region_register(). - * The region driver's remove function should call fpga_region_unregister(). - * The region struct allocated with this function will be freed automatically on - * driver detach. This includes the case of a probe function returning error - * before calling fpga_region_register(), the struct will still get cleaned up. + * This simple version of the register function should be sufficient for most users. + * The fpga_region_register_full() function is available for users that need to + * pass additional, optional parameters. 
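A hedged sketch of the _full() region registration referred to above, shaped after the dfl-fme-region.c conversion earlier in this diff; my_region_setup and my_get_bridges are hypothetical placeholders:

#include <linux/fpga/fpga-region.h>
#include <linux/platform_device.h>

static int my_region_setup(struct device *dev, struct fpga_manager *mgr)
{
	struct fpga_region_info info = { 0 };
	struct fpga_region *region;

	info.mgr = mgr;
	info.compat_id = mgr->compat_id;
	info.get_bridges = my_get_bridges;	/* optional, assumed elsewhere */

	region = fpga_region_register_full(dev, &info);
	return PTR_ERR_OR_ZERO(region);
}
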
* - * Return: struct fpga_region or NULL + * Return: struct fpga_region or ERR_PTR() */ -struct fpga_region -*devm_fpga_region_create(struct device *parent, - struct fpga_manager *mgr, - int (*get_bridges)(struct fpga_region *)) +struct fpga_region * +fpga_region_register(struct device *parent, struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *)) { - struct fpga_region **ptr, *region; - - ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; + struct fpga_region_info info = { 0 }; - region = fpga_region_create(parent, mgr, get_bridges); - if (!region) { - devres_free(ptr); - } else { - *ptr = region; - devres_add(parent, ptr); - } + info.mgr = mgr; + info.get_bridges = get_bridges; - return region; -} -EXPORT_SYMBOL_GPL(devm_fpga_region_create); - -/** - * fpga_region_register - register an FPGA region - * @region: FPGA region - * - * Return: 0 or -errno - */ -int fpga_region_register(struct fpga_region *region) -{ - return device_add(®ion->dev); + return fpga_region_register_full(parent, &info); } EXPORT_SYMBOL_GPL(fpga_region_register); @@ -316,6 +281,10 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister); static void fpga_region_dev_release(struct device *dev) { + struct fpga_region *region = to_fpga_region(dev); + + ida_simple_remove(&fpga_region_ida, region->dev.id); + kfree(region); } /** diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c index 029d3cdb918d..7cbb3558b844 100644 --- a/drivers/fpga/ice40-spi.c +++ b/drivers/fpga/ice40-spi.c @@ -178,12 +178,9 @@ static int ice40_fpga_probe(struct spi_device *spi) return ret; } - mgr = devm_fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager", - &ice40_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager", + &ice40_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } static const struct of_device_id ice40_fpga_of_match[] = { diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c index ea2ec3c6815c..905607992a12 100644 --- a/drivers/fpga/machxo2-spi.c +++ b/drivers/fpga/machxo2-spi.c @@ -370,12 +370,9 @@ static int machxo2_spi_probe(struct spi_device *spi) return -EINVAL; } - mgr = devm_fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager", - &machxo2_ops, spi); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Lattice MachXO2 SPI FPGA Manager", + &machxo2_ops, spi); + return PTR_ERR_OR_ZERO(mgr); } #ifdef CONFIG_OF diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index e3c25576b6b9..50b83057c048 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -405,16 +405,12 @@ static int of_fpga_region_probe(struct platform_device *pdev) if (IS_ERR(mgr)) return -EPROBE_DEFER; - region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges); - if (!region) { - ret = -ENOMEM; + region = fpga_region_register(dev, mgr, of_fpga_region_get_bridges); + if (IS_ERR(region)) { + ret = PTR_ERR(region); goto eprobe_mgr_put; } - ret = fpga_region_register(region); - if (ret) - goto eprobe_mgr_put; - of_platform_populate(np, fpga_region_of_match, NULL, ®ion->dev); platform_set_drvdata(pdev, region); @@ -448,7 +444,7 @@ static struct platform_driver of_fpga_region_driver = { }; /** - * fpga_region_init - init function for fpga_region class + * of_fpga_region_init - init function for fpga_region class * Creates the fpga_region class and registers a reconfig 
notifier. */ static int __init of_fpga_region_init(void) diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c index 573d88bdf730..ac8e89b8a5cc 100644 --- a/drivers/fpga/socfpga-a10.c +++ b/drivers/fpga/socfpga-a10.c @@ -508,19 +508,15 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev) return -EBUSY; } - mgr = devm_fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager", - &socfpga_a10_fpga_mgr_ops, priv); - if (!mgr) - return -ENOMEM; - - platform_set_drvdata(pdev, mgr); - - ret = fpga_mgr_register(mgr); - if (ret) { + mgr = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager", + &socfpga_a10_fpga_mgr_ops, priv); + if (IS_ERR(mgr)) { clk_disable_unprepare(priv->clk); - return ret; + return PTR_ERR(mgr); } + platform_set_drvdata(pdev, mgr); + return 0; } diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c index 1f467173fc1f..7e0741f99696 100644 --- a/drivers/fpga/socfpga.c +++ b/drivers/fpga/socfpga.c @@ -571,12 +571,9 @@ static int socfpga_fpga_probe(struct platform_device *pdev) if (ret) return ret; - mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager", - &socfpga_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager", + &socfpga_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } #ifdef CONFIG_OF diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index 047fd7f23706..357cea58ec98 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -419,23 +419,16 @@ static int s10_probe(struct platform_device *pdev) init_completion(&priv->status_return_completion); - mgr = fpga_mgr_create(dev, "Stratix10 SOC FPGA Manager", - &s10_ops, priv); - if (!mgr) { - dev_err(dev, "unable to create FPGA manager\n"); - ret = -ENOMEM; - goto probe_err; - } - - ret = fpga_mgr_register(mgr); - if (ret) { + mgr = fpga_mgr_register(dev, "Stratix10 SOC FPGA Manager", + &s10_ops, priv); + if (IS_ERR(mgr)) { dev_err(dev, "unable to register FPGA manager\n"); - fpga_mgr_free(mgr); + ret = PTR_ERR(mgr); goto probe_err; } platform_set_drvdata(pdev, mgr); - return ret; + return 0; probe_err: stratix10_svc_free_channel(priv->chan); @@ -448,7 +441,6 @@ static int s10_remove(struct platform_device *pdev) struct s10_priv *priv = mgr->priv; fpga_mgr_unregister(mgr); - fpga_mgr_free(mgr); stratix10_svc_free_channel(priv->chan); return 0; diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c index 167abb0b08d4..8e6e9c840d9d 100644 --- a/drivers/fpga/ts73xx-fpga.c +++ b/drivers/fpga/ts73xx-fpga.c @@ -116,12 +116,9 @@ static int ts73xx_fpga_probe(struct platform_device *pdev) if (IS_ERR(priv->io_base)) return PTR_ERR(priv->io_base); - mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager", - &ts73xx_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(kdev, mgr); + mgr = devm_fpga_mgr_register(kdev, "TS-73xx FPGA Manager", + &ts73xx_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } static struct platform_driver ts73xx_fpga_driver = { diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c index 5b0dda304bd2..e1601b3a345b 100644 --- a/drivers/fpga/versal-fpga.c +++ b/drivers/fpga/versal-fpga.c @@ -54,12 +54,9 @@ static int versal_fpga_probe(struct platform_device *pdev) return ret; } - mgr = devm_fpga_mgr_create(dev, "Xilinx Versal FPGA Manager", - &versal_fpga_ops, NULL); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, 
"Xilinx Versal FPGA Manager", + &versal_fpga_ops, NULL); + return PTR_ERR_OR_ZERO(mgr); } static const struct of_device_id versal_fpga_of_match[] = { diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c index e986ed47c4ed..2d9c491f7be9 100644 --- a/drivers/fpga/xilinx-pr-decoupler.c +++ b/drivers/fpga/xilinx-pr-decoupler.c @@ -140,22 +140,17 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev) clk_disable(priv->clk); - br = devm_fpga_bridge_create(&pdev->dev, priv->ipconfig->name, - &xlnx_pr_decoupler_br_ops, priv); - if (!br) { - err = -ENOMEM; - goto err_clk; - } - - platform_set_drvdata(pdev, br); - - err = fpga_bridge_register(br); - if (err) { + br = fpga_bridge_register(&pdev->dev, priv->ipconfig->name, + &xlnx_pr_decoupler_br_ops, priv); + if (IS_ERR(br)) { + err = PTR_ERR(br); dev_err(&pdev->dev, "unable to register %s", priv->ipconfig->name); goto err_clk; } + platform_set_drvdata(pdev, br); + return 0; err_clk: diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c index b6bcf1d9233d..e1a227e7ff2a 100644 --- a/drivers/fpga/xilinx-spi.c +++ b/drivers/fpga/xilinx-spi.c @@ -247,13 +247,10 @@ static int xilinx_spi_probe(struct spi_device *spi) return dev_err_probe(&spi->dev, PTR_ERR(conf->done), "Failed to get DONE gpio\n"); - mgr = devm_fpga_mgr_create(&spi->dev, - "Xilinx Slave Serial FPGA Manager", - &xilinx_spi_ops, conf); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(&spi->dev, mgr); + mgr = devm_fpga_mgr_register(&spi->dev, + "Xilinx Slave Serial FPGA Manager", + &xilinx_spi_ops, conf); + return PTR_ERR_OR_ZERO(mgr); } #ifdef CONFIG_OF diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 9b75bd4f93d8..426aa34c6a0d 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -609,20 +609,16 @@ static int zynq_fpga_probe(struct platform_device *pdev) clk_disable(priv->clk); - mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager", - &zynq_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - platform_set_drvdata(pdev, mgr); - - err = fpga_mgr_register(mgr); - if (err) { + mgr = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager", + &zynq_fpga_ops, priv); + if (IS_ERR(mgr)) { dev_err(dev, "unable to register FPGA manager\n"); clk_unprepare(priv->clk); - return err; + return PTR_ERR(mgr); } + platform_set_drvdata(pdev, mgr); + return 0; } diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c index 7d3d5650c322..c60f20949c47 100644 --- a/drivers/fpga/zynqmp-fpga.c +++ b/drivers/fpga/zynqmp-fpga.c @@ -95,12 +95,9 @@ static int zynqmp_fpga_probe(struct platform_device *pdev) priv->dev = dev; - mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager", - &zynqmp_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Xilinx ZynqMP FPGA Manager", + &zynqmp_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } #ifdef CONFIG_OF diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig index bd12e3d57baa..d7fe265c2869 100644 --- a/drivers/gnss/Kconfig +++ b/drivers/gnss/Kconfig @@ -54,4 +54,15 @@ config GNSS_UBX_SERIAL If unsure, say N. +config GNSS_USB + tristate "USB GNSS receiver support" + depends on USB + help + Say Y here if you have a GNSS receiver which uses a USB interface. + + To compile this driver as a module, choose M here: the module will + be called gnss-usb. + + If unsure, say N. 
+ endif # GNSS diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile index 451f11401ecc..bb2cbada3435 100644 --- a/drivers/gnss/Makefile +++ b/drivers/gnss/Makefile @@ -17,3 +17,6 @@ gnss-sirf-y := sirf.o obj-$(CONFIG_GNSS_UBX_SERIAL) += gnss-ubx.o gnss-ubx-y := ubx.o + +obj-$(CONFIG_GNSS_USB) += gnss-usb.o +gnss-usb-y := usb.o diff --git a/drivers/gnss/mtk.c b/drivers/gnss/mtk.c index d1fc55560daf..c62b1211f4fe 100644 --- a/drivers/gnss/mtk.c +++ b/drivers/gnss/mtk.c @@ -126,7 +126,7 @@ static void mtk_remove(struct serdev_device *serdev) if (data->vbackup) regulator_disable(data->vbackup); gnss_serial_free(gserial); -}; +} #ifdef CONFIG_OF static const struct of_device_id mtk_of_match[] = { diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c index def64b36d994..5d8e9bfb24d0 100644 --- a/drivers/gnss/serial.c +++ b/drivers/gnss/serial.c @@ -165,7 +165,7 @@ void gnss_serial_free(struct gnss_serial *gserial) { gnss_put_device(gserial->gdev); kfree(gserial); -}; +} EXPORT_SYMBOL_GPL(gnss_serial_free); int gnss_serial_register(struct gnss_serial *gserial) diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index 2ecb1d3e8eeb..bcb53ccfee4d 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c @@ -551,7 +551,7 @@ static void sirf_remove(struct serdev_device *serdev) regulator_disable(data->vcc); gnss_put_device(data->gdev); -}; +} #ifdef CONFIG_OF static const struct of_device_id sirf_of_match[] = { diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c index 7b05bc40532e..c951be202ca2 100644 --- a/drivers/gnss/ubx.c +++ b/drivers/gnss/ubx.c @@ -126,7 +126,7 @@ static void ubx_remove(struct serdev_device *serdev) if (data->v_bckp) regulator_disable(data->v_bckp); gnss_serial_free(gserial); -}; +} #ifdef CONFIG_OF static const struct of_device_id ubx_of_match[] = { diff --git a/drivers/gnss/usb.c b/drivers/gnss/usb.c new file mode 100644 index 000000000000..028ce56b20ea --- /dev/null +++ b/drivers/gnss/usb.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic USB GNSS receiver driver + * + * Copyright (C) 2021 Johan Hovold <johan@kernel.org> + */ + +#include <linux/errno.h> +#include <linux/gnss.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/usb.h> + +#define GNSS_USB_READ_BUF_LEN 512 +#define GNSS_USB_WRITE_TIMEOUT 1000 + +static const struct usb_device_id gnss_usb_id_table[] = { + { USB_DEVICE(0x1199, 0xb000) }, /* Sierra Wireless XM1210 */ + { } +}; +MODULE_DEVICE_TABLE(usb, gnss_usb_id_table); + +struct gnss_usb { + struct usb_device *udev; + struct usb_interface *intf; + struct gnss_device *gdev; + struct urb *read_urb; + unsigned int write_pipe; +}; + +static void gnss_usb_rx_complete(struct urb *urb) +{ + struct gnss_usb *gusb = urb->context; + struct gnss_device *gdev = gusb->gdev; + int status = urb->status; + int len; + int ret; + + switch (status) { + case 0: + break; + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + dev_dbg(&gdev->dev, "urb stopped: %d\n", status); + return; + case -EPIPE: + dev_err(&gdev->dev, "urb stopped: %d\n", status); + return; + default: + dev_dbg(&gdev->dev, "nonzero urb status: %d\n", status); + goto resubmit; + } + + len = urb->actual_length; + if (len == 0) + goto resubmit; + + ret = gnss_insert_raw(gdev, urb->transfer_buffer, len); + if (ret < len) + dev_dbg(&gdev->dev, "dropped %d bytes\n", len - ret); +resubmit: + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret && ret != -EPERM && ret != -ENODEV) + dev_err(&gdev->dev, "failed to 
resubmit urb: %d\n", ret); +} + +static int gnss_usb_open(struct gnss_device *gdev) +{ + struct gnss_usb *gusb = gnss_get_drvdata(gdev); + int ret; + + ret = usb_submit_urb(gusb->read_urb, GFP_KERNEL); + if (ret) { + if (ret != -EPERM && ret != -ENODEV) + dev_err(&gdev->dev, "failed to submit urb: %d\n", ret); + return ret; + } + + return 0; +} + +static void gnss_usb_close(struct gnss_device *gdev) +{ + struct gnss_usb *gusb = gnss_get_drvdata(gdev); + + usb_kill_urb(gusb->read_urb); +} + +static int gnss_usb_write_raw(struct gnss_device *gdev, + const unsigned char *buf, size_t count) +{ + struct gnss_usb *gusb = gnss_get_drvdata(gdev); + void *tbuf; + int ret; + + tbuf = kmemdup(buf, count, GFP_KERNEL); + if (!tbuf) + return -ENOMEM; + + ret = usb_bulk_msg(gusb->udev, gusb->write_pipe, tbuf, count, NULL, + GNSS_USB_WRITE_TIMEOUT); + kfree(tbuf); + if (ret) + return ret; + + return count; +} + +static const struct gnss_operations gnss_usb_gnss_ops = { + .open = gnss_usb_open, + .close = gnss_usb_close, + .write_raw = gnss_usb_write_raw, +}; + +static int gnss_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *in, *out; + struct gnss_device *gdev; + struct gnss_usb *gusb; + struct urb *urb; + size_t buf_len; + void *buf; + int ret; + + ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, + NULL); + if (ret) + return ret; + + gusb = kzalloc(sizeof(*gusb), GFP_KERNEL); + if (!gusb) + return -ENOMEM; + + gdev = gnss_allocate_device(&intf->dev); + if (!gdev) { + ret = -ENOMEM; + goto err_free_gusb; + } + + gdev->ops = &gnss_usb_gnss_ops; + gdev->type = GNSS_TYPE_NMEA; + gnss_set_drvdata(gdev, gusb); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + ret = -ENOMEM; + goto err_put_gdev; + } + + buf_len = max(usb_endpoint_maxp(in), GNSS_USB_READ_BUF_LEN); + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_free_urb; + } + + usb_fill_bulk_urb(urb, udev, + usb_rcvbulkpipe(udev, usb_endpoint_num(in)), + buf, buf_len, gnss_usb_rx_complete, gusb); + + gusb->intf = intf; + gusb->udev = udev; + gusb->gdev = gdev; + gusb->read_urb = urb; + gusb->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out)); + + ret = gnss_register_device(gdev); + if (ret) + goto err_free_buf; + + usb_set_intfdata(intf, gusb); + + return 0; + +err_free_buf: + kfree(buf); +err_free_urb: + usb_free_urb(urb); +err_put_gdev: + gnss_put_device(gdev); +err_free_gusb: + kfree(gusb); + + return ret; +} + +static void gnss_usb_disconnect(struct usb_interface *intf) +{ + struct gnss_usb *gusb = usb_get_intfdata(intf); + + gnss_deregister_device(gusb->gdev); + + kfree(gusb->read_urb->transfer_buffer); + usb_free_urb(gusb->read_urb); + gnss_put_device(gusb->gdev); + kfree(gusb); +} + +static struct usb_driver gnss_usb_driver = { + .name = "gnss-usb", + .probe = gnss_usb_probe, + .disconnect = gnss_usb_disconnect, + .id_table = gnss_usb_id_table, +}; +module_usb_driver(gnss_usb_driver); + +MODULE_AUTHOR("Johan Hovold <johan@kernel.org>"); +MODULE_DESCRIPTION("Generic USB GNSS receiver driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c index 50003ad2e589..52b8b72ded77 100644 --- a/drivers/gpio/gpio-idt3243x.c +++ b/drivers/gpio/gpio-idt3243x.c @@ -132,7 +132,7 @@ static int idt_gpio_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct gpio_irq_chip *girq; struct idt_gpio_ctrl *ctrl; - unsigned int 
parent_irq; + int parent_irq; int ngpios; int ret; @@ -164,8 +164,8 @@ static int idt_gpio_probe(struct platform_device *pdev) return PTR_ERR(ctrl->pic); parent_irq = platform_get_irq(pdev, 0); - if (!parent_irq) - return -EINVAL; + if (parent_irq < 0) + return parent_irq; girq = &ctrl->gc.irq; girq->chip = &idt_gpio_irqchip; diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 70d6ae20b1da..a964e25ea620 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -47,7 +47,7 @@ struct mpc8xxx_gpio_chip { unsigned offset, int value); struct irq_domain *irq; - unsigned int irqn; + int irqn; }; /* @@ -388,8 +388,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) } mpc8xxx_gc->irqn = platform_get_irq(pdev, 0); - if (!mpc8xxx_gc->irqn) - return 0; + if (mpc8xxx_gc->irqn < 0) + return mpc8xxx_gc->irqn; mpc8xxx_gc->irq = irq_domain_create_linear(fwnode, MPC8XXX_GPIO_PINS, diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c index 9f4941bc5760..fcc5e8c08973 100644 --- a/drivers/gpio/gpio-virtio.c +++ b/drivers/gpio/gpio-virtio.c @@ -450,7 +450,7 @@ static void virtio_gpio_request_vq(struct virtqueue *vq) static void virtio_gpio_free_vqs(struct virtio_device *vdev) { - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 776a947b45df..6ca1db3c243f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -514,13 +514,6 @@ out_put: return r; } -uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev) -{ - struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - - return amdgpu_vram_mgr_usage(vram_man); -} - uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, struct amdgpu_device *src) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 61f899e54fd5..ac841ae8f5cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -223,7 +223,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, uint32_t *flags); -uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, struct amdgpu_device *src); int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0311d799a010..06d07502a1f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, { s64 time_us, increment_us; u64 free_vram, total_vram, used_vram; - struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); /* Allow a maximum of 200 accumulated ms. This is basically per-IB * throttling. * @@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, } total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size); - used_vram = amdgpu_vram_mgr_usage(vram_man); + used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); free_vram = used_vram >= total_vram ? 
0 : total_vram - used_vram; spin_lock(&adev->mm_stats.lock); @@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) { u64 total_vis_vram = adev->gmc.visible_vram_size; u64 used_vis_vram = - amdgpu_vram_mgr_vis_usage(vram_man); + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); if (used_vis_vram < total_vis_vram) { u64 free_vis_vram = total_vis_vram - used_vis_vram; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a8b08a72b71b..ed077de426d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -552,7 +552,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, } /** - * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range + * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range * * this function is invoked only for the debugfs register access */ @@ -567,6 +567,8 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, adev->gfx.rlc.funcs->is_rlcg_access_range) { if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0); + } else if ((reg * 4) >= adev->rmmio_size) { + adev->pcie_wreg(adev, reg * 4, v); } else { writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); } @@ -1448,7 +1450,7 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev) adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; break; default: - return -EINVAL; + break; } return 0; @@ -2352,7 +2354,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev)) - amdgpu_virt_exchange_data(adev); + amdgpu_virt_init_data_exchange(adev); r = amdgpu_ib_pool_init(adev); if (r) { @@ -3496,9 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); - r = amdgpu_device_init_apu_flags(adev); - if (r) - return r; + amdgpu_device_init_apu_flags(adev); r = amdgpu_device_check_arguments(adev); if (r) @@ -3833,6 +3833,7 @@ failed: static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) { + /* Clear all CPU mappings pointing to this device */ unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); @@ -3913,6 +3914,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) void amdgpu_device_fini_sw(struct amdgpu_device *adev) { + int idx; + amdgpu_fence_driver_sw_fini(adev); amdgpu_device_ip_fini(adev); release_firmware(adev->firmware.gpu_info_fw); @@ -3937,6 +3940,14 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_unregister(adev->pdev); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + + iounmap(adev->rmmio); + adev->rmmio = NULL; + amdgpu_device_doorbell_fini(adev); + drm_dev_exit(idx); + } + if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); if (adev->mman.discovery_bin) @@ -3957,8 +3968,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) */ static void amdgpu_device_evict_resources(struct amdgpu_device *adev) { - /* No need to evict vram on APUs for suspend to ram */ - if (adev->in_s3 && (adev->flags & AMD_IS_APU)) + /* No need to evict vram on APUs for suspend to ram or s2idle */ + if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) return; if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) @@ -4005,16 +4016,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (!adev->in_s0ix) amdgpu_amdkfd_suspend(adev, adev->in_runpm); - /* First evict vram memory */ amdgpu_device_evict_resources(adev); amdgpu_fence_driver_hw_fini(adev); amdgpu_device_ip_suspend_phase2(adev); - /* This second call to evict device resources is to evict - * the gart page table using the CPU. - */ - amdgpu_device_evict_resources(adev); return 0; } @@ -4359,8 +4365,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, goto error; amdgpu_virt_init_data_exchange(adev); - /* we need recover gart prior to run SMC/CP/SDMA resume */ - amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)); r = amdgpu_device_fw_loading(adev); if (r) @@ -4446,33 +4450,24 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) if (amdgpu_gpu_recovery == -1) { switch (adev->asic_type) { - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_TOPAZ: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - case CHIP_VEGA20: - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: - case CHIP_ALDEBARAN: - break; - default: +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_VERDE: + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_OLAND: + case CHIP_HAINAN: +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#endif + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_CYAN_SKILLFISH: goto disabled; + default: + break; } } @@ -4680,10 +4675,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, amdgpu_inc_vram_lost(tmp_adev); } - r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT)); - if (r) - goto out; - r = amdgpu_device_fw_loading(tmp_adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 028190d42bb2..81bfee978b74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -243,6 +243,30 @@ static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); } +static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) +{ + /* + * So far, apply this quirk only on those Navy Flounder boards which + * have a bad harvest table of VCN config. 
+ */ + if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { + switch (adev->pdev->revision) { + case 0xC1: + case 0xC2: + case 0xC3: + case 0xC5: + case 0xC7: + case 0xCF: + case 0xDF: + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + break; + default: + break; + } + } +} + static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; @@ -548,10 +572,9 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) break; } } - /* some IP discovery tables on Navy Flounder don't have this set correctly */ - if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) - adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + + amdgpu_discovery_harvest_config_quirk(adev); + if (vcn_harvest_count == adev->vcn.num_vcn_inst) { adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b63ed1ddf713..b21bcdc97460 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1930,11 +1930,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } - if (flags == 0) { - DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n"); - return -ENODEV; - } - if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; @@ -2194,9 +2189,9 @@ static int amdgpu_pmops_suspend(struct device *dev) if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; - adev->in_s3 = true; + else + adev->in_s3 = true; r = amdgpu_device_suspend(drm_dev, true); - adev->in_s3 = false; if (r) return r; if (!adev->in_s0ix) @@ -2217,6 +2212,8 @@ static int amdgpu_pmops_resume(struct device *dev) r = amdgpu_device_resume(drm_dev, true); if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = false; + else + adev->in_s3 = false; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index d3e4203f6217..645950a653a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -114,80 +114,12 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) */ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) { - int r; - - if (adev->gart.bo == NULL) { - struct amdgpu_bo_param bp; - - memset(&bp, 0, sizeof(bp)); - bp.size = adev->gart.table_size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; - bp.bo_ptr_size = sizeof(struct amdgpu_bo); - - r = amdgpu_bo_create(adev, &bp, &adev->gart.bo); - if (r) { - return r; - } - } - return 0; -} - -/** - * amdgpu_gart_table_vram_pin - pin gart page table in vram - * - * @adev: amdgpu_device pointer - * - * Pin the GART page table in vram so it will not be moved - * by the memory manager (pcie r4xx, r5xx+). These asics require the - * gart table to be in video memory. - * Returns 0 for success, error for failure. 
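
The amdgpu_pmops_suspend()/amdgpu_pmops_resume() hunk above makes in_s0ix and in_s3 mutually exclusive and moves the flag clearing from the tail of suspend into resume, so the device stays tagged with its entry state for the whole suspended period; that is what lets amdgpu_device_evict_resources() trust the flags when it skips VRAM eviction on APUs. A minimal sketch of the pairing, reduced to the two flags:

#include <stdbool.h>

struct dev_pm_flags { bool in_s0ix, in_s3; };

static void pmops_suspend(struct dev_pm_flags *d, bool s0ix_active)
{
        if (s0ix_active)
                d->in_s0ix = true;
        else
                d->in_s3 = true;
        /* amdgpu_device_suspend() now runs, and returns, with the flag set */
}

static void pmops_resume(struct dev_pm_flags *d, bool s0ix_active)
{
        /* ... amdgpu_device_resume() ... */
        if (s0ix_active)
                d->in_s0ix = false;
        else
                d->in_s3 = false;       /* cleared only after resume completes */
}
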
- */ -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gart.bo, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM); - if (r) { - amdgpu_bo_unreserve(adev->gart.bo); - return r; - } - r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr); - if (r) - amdgpu_bo_unpin(adev->gart.bo); - amdgpu_bo_unreserve(adev->gart.bo); - return r; -} - -/** - * amdgpu_gart_table_vram_unpin - unpin gart page table in vram - * - * @adev: amdgpu_device pointer - * - * Unpin the GART page table in vram (pcie r4xx, r5xx+). - * These asics require the gart table to be in video memory. - */ -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev) -{ - int r; + if (adev->gart.bo != NULL) + return 0; - if (adev->gart.bo == NULL) { - return; - } - r = amdgpu_bo_reserve(adev->gart.bo, true); - if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->gart.bo); - amdgpu_bo_unpin(adev->gart.bo); - amdgpu_bo_unreserve(adev->gart.bo); - adev->gart.ptr = NULL; - } + return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo, + NULL, (void *)&adev->gart.ptr); } /** @@ -201,11 +133,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev) */ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) { - if (adev->gart.bo == NULL) { - return; - } - amdgpu_bo_unref(&adev->gart.bo); - adev->gart.ptr = NULL; + amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 9a6507af1670..c0d8f40a5b45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -264,9 +264,6 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) vma->vm_flags &= ~VM_MAYWRITE; - if (bo->kfd_bo) - vma->vm_flags |= VM_DONTCOPY; - return drm_gem_ttm_mmap(obj, vma); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 675a72ef305d..72022df264f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -77,10 +77,8 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man; - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man)); + return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr)); } static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, @@ -206,30 +204,27 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, /** * amdgpu_gtt_mgr_usage - return usage of GTT domain * - * @man: TTM memory type manager + * @mgr: amdgpu_gtt_mgr pointer * * Return how many bytes are used in the GTT domain */ -uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) +uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr) { - struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - return atomic64_read(&mgr->used) * PAGE_SIZE; } /** * amdgpu_gtt_mgr_recover - re-init gart * - * @man: TTM memory type manager + * @mgr: amdgpu_gtt_mgr pointer * * Re-init the gart for each known BO in the GTT. 
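
The gart hunk above can delete the whole reserve/pin/kmap/unpin dance because amdgpu_bo_create_kernel() allocates, pins and CPU-maps a kernel BO in one call, and amdgpu_bo_free_kernel() undoes all three. The idiom, as a fragment that assumes the amdgpu headers (error handling trimmed):

struct amdgpu_bo *bo = NULL;
void *cpu_ptr = NULL;
int r;

r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_VRAM,
                            &bo, NULL /* GPU address not needed */, &cpu_ptr);
if (r)
        return r;

/* ... bo stays pinned and cpu_ptr stays valid until ... */

amdgpu_bo_free_kernel(&bo, NULL, &cpu_ptr);

Since the table now stays pinned for its whole lifetime, the gmc_v*_gart_enable() hunks later in this series can replace the per-enable amdgpu_gart_table_vram_pin() call with amdgpu_gtt_mgr_recover(), which only rewrites the GART entries.
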
*/ -int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man) +int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr) { - struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - struct amdgpu_device *adev; struct amdgpu_gtt_node *node; struct drm_mm_node *mm_node; + struct amdgpu_device *adev; int r = 0; adev = container_of(mgr, typeof(*adev), mman.gtt_mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 09ad17944eb2..1ebb91db2274 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -678,13 +678,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM)); + ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: - ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM)); + ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GTT_USAGE: - ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)); + ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GDS_CONFIG: { struct drm_amdgpu_info_gds gds_info; @@ -715,8 +715,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } case AMDGPU_INFO_MEMORY: { struct drm_amdgpu_memory_info mem; - struct ttm_resource_manager *vram_man = - ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); struct ttm_resource_manager *gtt_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); memset(&mem, 0, sizeof(mem)); @@ -725,7 +723,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) atomic64_read(&adev->vram_pin_size) - AMDGPU_VM_RESERVED_VRAM; mem.vram.heap_usage = - amdgpu_vram_mgr_usage(vram_man); + amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -735,7 +733,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) atomic64_read(&adev->visible_pin_size), mem.vram.usable_heap_size); mem.cpu_accessible_vram.heap_usage = - amdgpu_vram_mgr_vis_usage(vram_man); + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); mem.cpu_accessible_vram.max_allocation = mem.cpu_accessible_vram.usable_heap_size * 3 / 4; @@ -744,7 +742,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) mem.gtt.usable_heap_size = mem.gtt.total_heap_size - atomic64_read(&adev->gart_pin_size); mem.gtt.heap_usage = - amdgpu_gtt_mgr_usage(gtt_man); + amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; return copy_to_user(out, &mem, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 3a7b56e57cec..5661b82d84d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/dma-buf.h> +#include <drm/drm_drv.h> #include <drm/amdgpu_drm.h> #include <drm/drm_cache.h> #include "amdgpu.h" @@ -1061,7 +1062,18 @@ int amdgpu_bo_init(struct amdgpu_device *adev) */ void 
amdgpu_bo_fini(struct amdgpu_device *adev) { + int idx; + amdgpu_ttm_fini(adev); + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + + if (!adev->gmc.xgmi.connected_to_cpu) { + arch_phys_wc_del(adev->gmc.vram_mtrr); + arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); + } + drm_dev_exit(idx); + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 91e6e87562ac..8f47c14ecbc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1592,6 +1592,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) /* Let IP handle its data, maybe we need get the output * from the callback to udpate the error type/count, etc */ + memset(&err_data, 0, sizeof(err_data)); ret = data->cb(obj->adev, &err_data, &entry); /* ue will trigger an interrupt, and in that case * we need do a reset to recovery the whole system. @@ -1838,8 +1839,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, .size = AMDGPU_GPU_PAGE_SIZE, .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; - status = amdgpu_vram_mgr_query_page_status( - ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, data->bps[i].retired_page); if (status == -EBUSY) (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; @@ -1940,8 +1940,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, goto out; } - amdgpu_vram_mgr_reserve_range( - ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr, bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GPU_PAGE_SIZE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fb0d8bffdce2..5c3f24069f2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -43,6 +43,7 @@ #include <linux/sizes.h> #include <linux/module.h> +#include <drm/drm_drv.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> @@ -1804,6 +1805,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) */ void amdgpu_ttm_fini(struct amdgpu_device *adev) { + int idx; if (!adev->mman.initialized) return; @@ -1818,6 +1820,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + + if (adev->mman.aper_base_kaddr) + iounmap(adev->mman.aper_base_kaddr); + adev->mman.aper_base_kaddr = NULL; + + drm_dev_exit(idx); + } + amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); amdgpu_preempt_mgr_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 7346ecff4438..f8f48be16d80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -114,8 +114,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); -uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); -int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); +uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr); +int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr); uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man); @@ -129,11 +129,11 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, void amdgpu_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir, struct 
sg_table *sgt); -uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man); -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man); -int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, +uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr); +uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr); +int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size); -int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man, +int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); int amdgpu_ttm_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f8e574cc0e22..07bc0f504713 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -553,7 +553,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev) static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) { struct amd_sriov_msg_vf2pf_info *vf2pf_info; - struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf; @@ -576,8 +575,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->driver_cert = 0; vf2pf_info->os_info.all = 0; - vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20; - vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20; + vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20; + vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20; vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20; @@ -626,20 +625,20 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; - if (adev->bios != NULL) { - adev->virt.vf2pf_update_interval_ms = 2000; + if (adev->mman.fw_vram_usage_va != NULL) { + /* go through this logic in ip_init and reset to init workqueue*/ + amdgpu_virt_exchange_data(adev); + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); + } else if (adev->bios != NULL) { + /* go through this logic in early init stage to get necessary flags, e.g.
rlcg_acc related*/ adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); amdgpu_virt_read_pf2vf_data(adev); } - - if (adev->virt.vf2pf_update_interval_ms != 0) { - INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); - schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); - } } @@ -675,12 +674,6 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) if (adev->virt.ras_init_done) amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } - } else if (adev->bios != NULL) { - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - - amdgpu_virt_read_pf2vf_data(adev); } } @@ -727,6 +720,10 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) vi_set_virt_ops(adev); break; case CHIP_VEGA10: + soc15_set_virt_ops(adev); + /* send a dummy GPU_INIT_DATA request to host on vega10 */ + amdgpu_virt_request_init_data(adev); + break; case CHIP_VEGA20: case CHIP_ARCTURUS: case CHIP_ALDEBARAN: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 2dcc68e04e84..d99c8779b51e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -144,15 +144,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc, static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + unsigned long flags; if (crtc->state->event) { - spin_lock(&crtc->dev->event_lock); + spin_lock_irqsave(&crtc->dev->event_lock, flags); if (drm_crtc_vblank_get(crtc) != 0) drm_crtc_send_vblank_event(crtc, crtc->state->event); else drm_crtc_arm_vblank_event(crtc, crtc->state->event); - spin_unlock(&crtc->dev->event_lock); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 7b2b0980ec41..7a2b487db57c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -96,10 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man; - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man)); + return sysfs_emit(buf, "%llu\n", + amdgpu_vram_mgr_usage(&adev->mman.vram_mgr)); } /** @@ -116,10 +115,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man; - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); + return sysfs_emit(buf, "%llu\n", + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr)); } /** @@ -263,16 +261,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) /** * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * @start: start address of the range in VRAM * @size: size of the range * - * Reserve memory from start addess with the specified size in VRAM + * Reserve memory from start address with the specified size in VRAM */ -int 
amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, +int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_vram_reservation *rsv; rsv = kzalloc(sizeof(*rsv), GFP_KERNEL); @@ -285,7 +282,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, spin_lock(&mgr->lock); list_add_tail(&mgr->reservations_pending, &rsv->node); - amdgpu_vram_mgr_do_reserve(man); + amdgpu_vram_mgr_do_reserve(&mgr->manager); spin_unlock(&mgr->lock); return 0; @@ -294,7 +291,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, /** * amdgpu_vram_mgr_query_page_status - query the reservation status * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * @start: start address of a page in VRAM * * Returns: @@ -302,10 +299,9 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, * 0: the page has been reserved * -ENOENT: the input page is not a reservation */ -int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man, +int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_vram_reservation *rsv; int ret; @@ -632,28 +628,24 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev, /** * amdgpu_vram_mgr_usage - how many bytes are used in this domain * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * * Returns how many bytes are used in this domain. */ -uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man) +uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); - return atomic64_read(&mgr->usage); } /** * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * * Returns how many bytes are used in the visible part of VRAM */ -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man) +uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); - return atomic64_read(&mgr->vis_usage); } @@ -675,8 +667,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, spin_unlock(&mgr->lock); drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", - man->size, amdgpu_vram_mgr_usage(man) >> 20, - amdgpu_vram_mgr_vis_usage(man) >> 20); + man->size, amdgpu_vram_mgr_usage(mgr) >> 20, + amdgpu_vram_mgr_vis_usage(mgr) >> 20); } static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index a38c6a747fa4..e8b8f28c2f72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -208,6 +208,7 @@ static struct attribute *amdgpu_xgmi_hive_attrs[] = { &amdgpu_xgmi_hive_id, NULL }; +ATTRIBUTE_GROUPS(amdgpu_xgmi_hive); static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj, struct attribute *attr, char *buf) @@ -237,7 +238,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = { struct kobj_type amdgpu_xgmi_hive_type = { .release = amdgpu_xgmi_hive_release, .sysfs_ops = &amdgpu_xgmi_hive_ops, - .default_attrs = amdgpu_xgmi_hive_attrs, + .default_groups = amdgpu_xgmi_hive_groups, }; static ssize_t amdgpu_xgmi_show_device_id(struct device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 
54f28c075f21..f10ce740a29c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1428,6 +1428,10 @@ static int cik_asic_reset(struct amdgpu_device *adev) { int r; + /* APUs don't have full asic reset */ + if (adev->flags & AMD_IS_APU) + return 0; + if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 3d5d47a799e3..38bb42727715 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -989,7 +989,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) goto skip_pin_bo; - r = amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -1060,7 +1060,6 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) { adev->gfxhub.funcs->gart_disable(adev); adev->mmhub.funcs->gart_disable(adev); - amdgpu_gart_table_vram_unpin(adev); } static int gmc_v10_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 0fe714f54cca..cd6c38e083d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -476,7 +476,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -608,7 +608,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL3, VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK | (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); - amdgpu_gart_table_vram_unpin(adev); } static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0a50fdaced7e..ab8adbff9e2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -620,7 +620,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -758,7 +758,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); WREG32(mmVM_L2_CNTL, tmp); WREG32(mmVM_L2_CNTL2, 0); - amdgpu_gart_table_vram_unpin(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 63b890f1e8af..054733838292 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -844,7 +844,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -999,7 +999,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); WREG32(mmVM_L2_CNTL, tmp); WREG32(mmVM_L2_CNTL2, 0); - amdgpu_gart_table_vram_unpin(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 57f2729a7bd0..88c1eb9ad068 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -72,6 +72,9 @@ #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 + static const char *gfxhub_client_ids[] = { "CB", @@ -1134,6 +1137,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); unsigned size; + /* TODO move to DC so GMC doesn't need to hard-code DCN registers */ + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { @@ -1142,7 +1147,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): - case IP_VERSION(2, 1, 0): viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); size = (REG_GET_FIELD(viewport, HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * @@ -1150,6 +1154,14 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * 4); break; + case IP_VERSION(2, 1, 0): + viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * + 4); + break; default: viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * @@ -1743,7 +1755,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) goto skip_pin_bo; - r = amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -1821,7 +1833,6 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) { adev->gfxhub.funcs->gart_disable(adev); adev->mmhub.funcs->gart_disable(adev); - amdgpu_gart_table_vram_unpin(adev); } static int gmc_v9_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 0077e738db31..56da5ab82987 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -180,6 +180,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2)); } + } else if (req == IDH_REQ_GPU_INIT_DATA){ + /* Dummy REQ_GPU_INIT_DATA handling */ + r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY); + /* version set to 0 since dummy */ + adev->virt.req_init_data_ver = 0; } return 0; @@ -381,10 +386,16 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); } +static int xgpu_ai_request_init_data(struct amdgpu_device *adev) +{ + return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); +} + const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, .reset_gpu = xgpu_ai_request_reset, .wait_reset = NULL, .trans_msg = xgpu_ai_mailbox_trans_msg, + .req_init_data = xgpu_ai_request_init_data, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index f9aa4d0bb638..fa7e13e0459e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ 
b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -35,6 +35,7 @@ enum idh_request { IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, IDH_REQ_GPU_RESET_ACCESS, + IDH_REQ_GPU_INIT_DATA, IDH_LOG_VF_ERROR = 200, IDH_READY_TO_RESET = 201, @@ -48,6 +49,7 @@ enum idh_event { IDH_SUCCESS, IDH_FAIL, IDH_QUERY_ALIVE, + IDH_REQ_GPU_INIT_DATA_READY, IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index fe9a7cc8d9eb..6645ebbd2696 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -956,6 +956,10 @@ static int vi_asic_reset(struct amdgpu_device *adev) { int r; + /* APUs don't have full asic reset */ + if (adev->flags & AMD_IS_APU) + return 0; + if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index f187596faf66..9624bbe8b501 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, return -ENODEV; /* same everything but the other direction */ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL); + if (!props2) + return -ENOMEM; + props2->node_from = id_to; props2->node_to = id_from; props2->kobj = NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 127d41d0e4f0..2b65d0acae2c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -68,20 +68,20 @@ static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd) case IP_VERSION(4, 0, 1):/* VEGA12 */ case IP_VERSION(4, 1, 0):/* RAVEN */ case IP_VERSION(4, 1, 1):/* RAVEN */ - case IP_VERSION(4, 1, 2):/* RENIOR */ + case IP_VERSION(4, 1, 2):/* RENOIR */ case IP_VERSION(5, 2, 1):/* VANGOGH */ case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ kfd->device_info.num_sdma_queues_per_engine = 2; break; case IP_VERSION(4, 2, 0):/* VEGA20 */ - case IP_VERSION(4, 2, 2):/* ARCTUTUS */ + case IP_VERSION(4, 2, 2):/* ARCTURUS */ case IP_VERSION(4, 4, 0):/* ALDEBARAN */ case IP_VERSION(5, 0, 0):/* NAVI10 */ case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ case IP_VERSION(5, 0, 2):/* NAVI14 */ case IP_VERSION(5, 0, 5):/* NAVI12 */ case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */ - case IP_VERSION(5, 2, 2):/* NAVY_FLOUDER */ + case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */ case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */ case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */ kfd->device_info.num_sdma_queues_per_engine = 8; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 19890e350107..4b6814949aad 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1004,14 +1004,17 @@ static void uninitialize(struct device_queue_manager *dqm) static int start_nocpsch(struct device_queue_manager *dqm) { + int r = 0; + pr_info("SW scheduler is used"); init_interrupts(dqm); if (dqm->dev->adev->asic_type == CHIP_HAWAII) - return pm_init(&dqm->packet_mgr, dqm); - dqm->sched_running = true; + r = pm_init(&dqm->packet_mgr, dqm); + if (!r) + dqm->sched_running = true; - return 0; + return r; } static int stop_nocpsch(struct device_queue_manager *dqm) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 
b8ac28fb1231..e8bc28009c22 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -197,6 +197,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, */ return source_id == SOC15_INTSRC_CP_END_OF_PIPE || source_id == SOC15_INTSRC_SDMA_TRAP || + source_id == SOC15_INTSRC_SDMA_ECC || source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || ((client_id == SOC15_IH_CLIENTID_VMC || diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index f1930ff2c74a..d1145da5348f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -461,6 +461,7 @@ static struct attribute *procfs_queue_attrs[] = { &attr_queue_gpuid, NULL }; +ATTRIBUTE_GROUPS(procfs_queue); static const struct sysfs_ops procfs_queue_ops = { .show = kfd_procfs_queue_show, @@ -468,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = { static struct kobj_type procfs_queue_type = { .sysfs_ops = &procfs_queue_ops, - .default_attrs = procfs_queue_attrs, + .default_groups = procfs_queue_groups, }; static const struct sysfs_ops procfs_stats_ops = { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index aa5ee91cd595..f2805ba74c80 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -107,7 +107,7 @@ static void svm_range_add_to_svms(struct svm_range *prange) pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, prange->start, prange->last); - list_add_tail(&prange->list, &prange->svms->list); + list_move_tail(&prange->list, &prange->svms->list); prange->it_node.start = prange->start; prange->it_node.last = prange->last; interval_tree_insert(&prange->it_node, &prange->svms->objects); @@ -295,8 +295,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, prange->last = last; INIT_LIST_HEAD(&prange->list); INIT_LIST_HEAD(&prange->update_list); - INIT_LIST_HEAD(&prange->remove_list); - INIT_LIST_HEAD(&prange->insert_list); INIT_LIST_HEAD(&prange->svm_bo_list); INIT_LIST_HEAD(&prange->deferred_list); INIT_LIST_HEAD(&prange->child_list); @@ -1018,7 +1016,7 @@ svm_range_split_tail(struct svm_range *prange, int r = svm_range_split(prange, prange->start, new_last, &tail); if (!r) - list_add(&tail->insert_list, insert_list); + list_add(&tail->list, insert_list); return r; } @@ -1030,7 +1028,7 @@ svm_range_split_head(struct svm_range *prange, int r = svm_range_split(prange, new_start, prange->last, &head); if (!r) - list_add(&head->insert_list, insert_list); + list_add(&head->list, insert_list); return r; } @@ -1898,8 +1896,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, goto out; } - list_add(&old->remove_list, remove_list); - list_add(&prange->insert_list, insert_list); + list_add(&old->update_list, remove_list); + list_add(&prange->list, insert_list); list_add(&prange->update_list, update_list); if (node->start < start) { @@ -1931,7 +1929,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, goto out; } - list_add(&prange->insert_list, insert_list); + list_add(&prange->list, insert_list); list_add(&prange->update_list, update_list); } @@ -1946,13 +1944,13 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, r = -ENOMEM; goto out; } - list_add(&prange->insert_list, insert_list); + list_add(&prange->list, insert_list); list_add(&prange->update_list, update_list); } out: if (r) - 
list_for_each_entry_safe(prange, tmp, insert_list, insert_list) + list_for_each_entry_safe(prange, tmp, insert_list, list) svm_range_free(prange); return r; @@ -3236,7 +3234,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, goto out; } /* Apply changes as a transaction */ - list_for_each_entry_safe(prange, next, &insert_list, insert_list) { + list_for_each_entry_safe(prange, next, &insert_list, list) { svm_range_add_to_svms(prange); svm_range_add_notifier_locked(mm, prange); } @@ -3244,8 +3242,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svm_range_apply_attrs(p, prange, nattr, attrs); /* TODO: unmap ranges from GPU that lost access */ } - list_for_each_entry_safe(prange, next, &remove_list, - remove_list) { + list_for_each_entry_safe(prange, next, &remove_list, update_list) { pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, prange->start, prange->last); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 2f8a95e86dcb..949b477e2f4c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -76,8 +76,6 @@ struct svm_work_list_item { * aligned, page size is (last - start + 1) * @list: link list node, used to scan all ranges of svms * @update_list:link list node used to add to update_list - * @remove_list:link list node used to add to remove list - * @insert_list:link list node used to add to insert list * @mapping: bo_va mapping structure to create and update GPU page table * @npages: number of pages * @dma_addr: dma mapping address on each GPU for system memory physical page @@ -113,8 +111,6 @@ struct svm_range { struct interval_tree_node it_node; struct list_head list; struct list_head update_list; - struct list_head remove_list; - struct list_head insert_list; uint64_t npages; dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; struct ttm_resource *ttm_res; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2f0b14f8f833..7f9773f8dab6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -658,7 +658,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, struct drm_connector_list_iter iter; struct dc_link *link; uint8_t link_index = 0; - struct drm_device *dev = adev->dm.ddev; + struct drm_device *dev; if (adev == NULL) return; @@ -675,6 +675,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, link_index = notify->link_index; link = adev->dm.dc->links[link_index]; + dev = adev->dm.ddev; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -1161,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } +static void dm_dmub_hw_resume(struct amdgpu_device *adev) +{ + struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + enum dmub_status status; + bool init; + + if (!dmub_srv) { + /* DMUB isn't supported on the ASIC. */ + return; + } + + status = dmub_srv_is_hw_init(dmub_srv, &init); + if (status != DMUB_STATUS_OK) + DRM_WARN("DMUB hardware init check failed: %d\n", status); + + if (status == DMUB_STATUS_OK && init) { + /* Wait for firmware load to finish. */ + status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); + if (status != DMUB_STATUS_OK) + DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + } else { + /* Perform the full hardware initialization. 
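
The kfd_svm changes in this region drop the dedicated insert_list/remove_list link nodes and chain pending ranges through prange->list and prange->update_list instead. That is why svm_range_add_to_svms() switched to list_move_tail(): a range arriving there may still be linked into the temporary insert list. A fragment assuming <linux/list.h>:

#include <linux/list.h>

/*
 * list_move_tail() is list_del() followed by list_add_tail(), so it is safe
 * for a node that is already on another list, where a bare list_add_tail()
 * would leave both lists corrupted.
 */
static void add_range_link(struct list_head *range_link,
                           struct list_head *svms_list)
{
        list_move_tail(range_link, svms_list);
}
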
*/ + dm_dmub_hw_init(adev); + } +} + #if defined(CONFIG_DRM_AMD_DC_DCN) static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) { @@ -2637,9 +2664,7 @@ static int dm_resume(void *handle) amdgpu_dm_outbox_init(adev); /* Before powering on DC we need to re-initialize DMUB. */ - r = dm_dmub_hw_init(adev); - if (r) - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + dm_dmub_hw_resume(adev); /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -6073,6 +6098,7 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, struct dsc_dec_dpcd_caps *dsc_caps) { stream->timing.flags.DSC = 0; + dsc_caps->is_dsc_supported = false; if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { @@ -10737,6 +10763,8 @@ static int dm_update_plane_state(struct dc *dc, dm_new_plane_state->dc_state = dc_new_plane_state; + dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); + /* Tell DC to do a full surface update every time there * is a plane change. Inefficient, but works for now. */ @@ -10889,7 +10917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, enum dc_status status; int ret, i; bool lock_and_validation_needed = false; - struct dm_crtc_state *dm_old_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; #if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_mst_fairness_vars vars[MAX_PIPES]; struct drm_dp_mst_topology_state *mst_state; @@ -11071,6 +11099,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->mpo_requested) + DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); + } + /* Check cursor planes scaling */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index c98e402eab0c..b9a69b0cef23 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -626,6 +626,8 @@ struct dm_crtc_state { bool cm_has_degamma; bool cm_is_degamma_srgb; + bool mpo_requested; + int update_type; int active_planes; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 9f35f2e8f971..cac80ba69072 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -38,7 +38,6 @@ #include "clk/clk_11_0_0_offset.h" #include "clk/clk_11_0_0_sh_mask.h" -#include "irq/dcn20/irq_service_dcn20.h" #undef FN #define FN(reg_name, field_name) \ @@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, bool force_reset = false; bool p_state_change_support; int total_plane_count; - int irq_src; - uint32_t hpd_state; if (dc->work_arounds.skip_clock_update) return; @@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->res_pool->pp_smu) pp_smu = &dc->res_pool->pp_smu->nv_funcs; - for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) { - hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src); - if (hpd_state) - break; - } - - if (display_count == 0 && !hpd_state) + if 
(display_count == 0) enter_display_off = true; if (enter_display_off == safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index fbda42313bfe..f4dee0e48a67 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -42,7 +42,6 @@ #include "clk/clk_10_0_2_sh_mask.h" #include "renoir_ip_offset.h" -#include "irq/dcn21/irq_service_dcn21.h" /* Constants */ @@ -129,11 +128,9 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; int display_count; - int irq_src; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; - uint32_t hpd_state; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; @@ -150,14 +147,8 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base, display_count = rn_get_active_display_cnt_wa(dc, context); - for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) { - hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src); - if (hpd_state) - break; - } - /* if we can go lower, go lower */ - if (display_count == 0 && !hpd_state) { + if (display_count == 0) { rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index b7ace235a2d5..a1011f3273f3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -119,6 +119,12 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); + if (result == VBIOSSMC_Result_Failed) { + ASSERT(0); + REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); + return -1; + } + if (IS_SMU_TIMEOUT(result)) { ASSERT(0); dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index dc1380b6c5e0..b5e570d33ca9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3971,102 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) { struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct link_encoder *link_enc = NULL; -#endif + struct cp_psp_stream_config config = {0}; + enum dp_panel_mode panel_mode = + dp_get_panel_mode(pipe_ctx->stream->link); - if (cp_psp && cp_psp->funcs.update_stream_config) { - struct cp_psp_stream_config config = {0}; - enum dp_panel_mode panel_mode = - dp_get_panel_mode(pipe_ctx->stream->link); + if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) + return; - config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; - /*stream_enc_inst*/ - config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; - config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; -#if defined(CONFIG_DRM_AMD_DC_DCN) - config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; - - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY || - pipe_ctx->stream->link->ep_type == 
DISPLAY_ENDPOINT_USB4_DPIA) { - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) - link_enc = pipe_ctx->stream->link->link_enc; - else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_stream( - pipe_ctx->stream->ctx->dc, - pipe_ctx->stream); - } - ASSERT(link_enc); + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) + link_enc = pipe_ctx->stream->link->link_enc; + else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_stream( + pipe_ctx->stream->ctx->dc, + pipe_ctx->stream); + ASSERT(link_enc); + if (link_enc == NULL) + return; - // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 - config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + /* otg instance */ + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; - // Add flag to guard new A0 DIG mapping - if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && - pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) { - config.dig_be = link_enc->preferred_engine; - config.dio_output_type = pipe_ctx->stream->link->ep_type; - config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - } else { - config.dio_output_type = 0; - config.dio_output_idx = 0; - } + /* dig front end */ + config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; - // Add flag to guard B0 implementation - if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && - link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - // enum ID 1-4 maps to DPIA PHY ID 0-3 - config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; - } else { // for non DPIA mode over B0, ABCDE maps to 01564 - - switch (link_enc->transmitter) { - case TRANSMITTER_UNIPHY_A: - config.phy_idx = 0; - break; - case TRANSMITTER_UNIPHY_B: - config.phy_idx = 1; - break; - case TRANSMITTER_UNIPHY_C: - config.phy_idx = 5; - break; - case TRANSMITTER_UNIPHY_D: - config.phy_idx = 6; - break; - case TRANSMITTER_UNIPHY_E: - config.phy_idx = 4; - break; - default: - config.phy_idx = 0; - break; - } + /* stream encoder index */ + config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + config.stream_enc_idx = + pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; +#endif - } - } - } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_stream( - pipe_ctx->stream->ctx->dc, - pipe_ctx->stream); - config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. 
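
This rewrite of update_psp_stream_config() trades the old nested, #ifdef-heavy block for early returns followed by a single linear pass that fills cp_psp_stream_config field by field. A condensed skeleton of the new control flow (find_link_enc() is a hypothetical stand-in for the PHY/DPIA encoder lookup in the patch):

static void update_psp_stream_config_sketch(struct pipe_ctx *pipe_ctx,
                                            bool dpms_off)
{
        struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
        struct cp_psp_stream_config config = {0};
        struct link_encoder *link_enc;

        if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
                return;                         /* no PSP hook registered */

        link_enc = find_link_enc(pipe_ctx);     /* hypothetical helper */
        if (link_enc == NULL)
                return;

        /* fill otg_inst, dig_fe/dig_be, encoder/phy indices, flags ... */
        config.dpms_off = dpms_off;
        cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
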
*/ - } - ASSERT(link_enc); - if (link_enc) - config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - if (is_dp_128b_132b_signal(pipe_ctx)) { - config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; + /* dig back end */ + config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; - config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; - config.dp2_enabled = 1; - } + /* link encoder index */ + config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; #endif - config.dpms_off = dpms_off; - config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; - config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP); - config.mst_enabled = (pipe_ctx->stream->signal == - SIGNAL_TYPE_DISPLAY_PORT_MST); - cp_psp->funcs.update_stream_config(cp_psp->handle, &config); - } + /* dio output index */ + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + /* phy index */ + config.phy_idx = resource_transmitter_to_phy_idx( + pipe_ctx->stream->link->dc, link_enc->transmitter); + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ + config.phy_idx = 0; + + /* stream properties */ + config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; + config.mst_enabled = (pipe_ctx->stream->signal == + SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; +#endif + config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? + 1 : 0; + config.dpms_off = dpms_off; + + /* dm stream context */ + config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); } #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index de5c7d1e0267..d4ff6cc6b8d9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -3216,3 +3216,36 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( return hpo_dp_link_enc; } #endif + +uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter) +{ + /* TODO - get transmitter to phy idx mapping from DMUB */ + uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A; + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc->ctx->dce_version == DCN_VERSION_3_1 && + dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + phy_idx = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_idx = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_idx = 5; + break; + case TRANSMITTER_UNIPHY_D: + phy_idx = 6; + break; + case TRANSMITTER_UNIPHY_E: + phy_idx = 4; + break; + default: + phy_idx = 0; + break; + } + } +#endif + return phy_idx; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index f19015413ce3..530a72e3eefe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1365,7 +1365,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) uint32_t opp_id_src1 = OPP_ID_INVALID; // Step 1: To find out which OPTC is running & OPTC DSC is ON - for (i = 0; i < 
dc->res_pool->res_cap->num_timing_generator; i++) { + // We can't use res_pool->res_cap->num_timing_generator to check, + // because it records the default display-pipe setting built into the driver, + // not the display pipes of the current chip. + // Some ASICs may have fewer display pipes fused on than the default setting. + // In the dcnxx_resource_construct function, the driver obtains the real information. + for (i = 0; i < dc->res_pool->timing_generator_count; i++) { uint32_t optc_dsc_state = 0; struct timing_generator *tg = dc->res_pool->timing_generators[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 71c359f9cdd2..8b9b1a5309ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -100,6 +100,35 @@ static uint8_t phy_id_from_transmitter(enum transmitter t) return phy_id; } +static bool has_query_dp_alt(struct link_encoder *enc) +{ + struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + + /* Supports development firmware and firmware >= 4.0.11 */ + return dc_dmub_srv && + !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) && + dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10)); +} + +static bool query_dp_alt_from_dmub(struct link_encoder *enc, + union dmub_rb_cmd *cmd) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + + memset(cmd, 0, sizeof(*cmd)); + cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd->query_dp_alt.header.sub_type = + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); + cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + + if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd)) + return false; + + return true; +} + void dcn31_link_encoder_set_dio_phy_mux( struct link_encoder *enc, enum encoder_type_select sel, @@ -569,45 +598,90 @@ void dcn31_link_encoder_disable_output( bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; union dmub_rb_cmd cmd; - bool is_usb_c_alt_mode = false; + uint32_t dp_alt_mode_disable; - if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) { - memset(&cmd, 0, sizeof(cmd)); - cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS; - cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; - cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data); - cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + /* Only applicable to USB-C PHY. */ + if (!enc->features.flags.bits.DP_IS_USB_C) + return false; - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) + /* + * Use the new interface from DMCUB if available. + * Avoids hanging the RDCPSPIPE if DMCUB wasn't already running. + */ + if (has_query_dp_alt(enc)) { + if (!query_dp_alt_from_dmub(enc, &cmd)) return false; - is_usb_c_alt_mode = (cmd.query_dp_alt.data.is_dp_alt_disable == 0); + return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); } - return is_usb_c_alt_mode; + /* Legacy path, avoid if possible. */ + if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, + &dp_alt_mode_disable); + } else { + /* + * B0 phys use a new set of registers to check whether alt mode is disabled.
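
has_query_dp_alt() above gates the DMUB query on the firmware version by negating the 4.0.0..4.0.10 window, which admits development builds (they report a lower version) as well as releases from 4.0.11 on. A userspace-compilable sketch of that check; the FW_VERSION packing below is a mock, not the real DMUB_FW_VERSION macro:

#include <stdbool.h>
#include <stdint.h>

#define FW_VERSION(maj, min, rev) \
        (((uint32_t)(maj) << 24) | ((uint32_t)(min) << 16) | (uint32_t)(rev))

static bool supports_query_dp_alt(uint32_t fw_version)
{
        return !(fw_version >= FW_VERSION(4, 0, 0) &&
                 fw_version <= FW_VERSION(4, 0, 10));
}

/*
 * supports_query_dp_alt(0)                    -> true  (development firmware)
 * supports_query_dp_alt(FW_VERSION(4, 0, 5))  -> false (inside the window)
 * supports_query_dp_alt(FW_VERSION(4, 0, 11)) -> true  (new enough release)
 */
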
+ * If the value == 1, alt mode is disabled; otherwise it is enabled. */ + if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, + &dp_alt_mode_disable); + } else { + REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, + &dp_alt_mode_disable); + } + } + + return (dp_alt_mode_disable == 0); } void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; union dmub_rb_cmd cmd; + uint32_t is_in_usb_c_dp4_mode = 0; dcn10_link_encoder_get_max_link_cap(enc, link_settings); - if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) { - memset(&cmd, 0, sizeof(cmd)); - cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS; - cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; - cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data); - cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + /* Take the link cap directly if not USB-C */ + if (!enc->features.flags.bits.DP_IS_USB_C) + return; - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) + /* + * Use the new interface from DMCUB if available. + * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running. + */ + if (has_query_dp_alt(enc)) { + if (!query_dp_alt_from_dmub(enc, &cmd)) return; - if (cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4 == 0) + if (cmd.query_dp_alt.data.is_usb && + cmd.query_dp_alt.data.is_dp4 == 0) link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); + + return; } + + /* Legacy path, avoid if possible. 
*/ + if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, + &is_in_usb_c_dp4_mode); + } else { + if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, + &is_in_usb_c_dp4_mode); + } else { + REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, + &is_in_usb_c_dp4_mode); + } + } + + if (!is_in_usb_c_dp4_mode) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 4d9c64d982d7..42ed47e8133d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1984,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp( pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (dc->config.forced_clocks) { + if (dc->config.forced_clocks || dc->debug.max_disp_clk) { pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; } diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 511f9e1159c7..4229369c57f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -34,12 +34,12 @@ struct cp_psp_stream_config { uint8_t dig_fe; uint8_t link_enc_idx; uint8_t stream_enc_idx; - uint8_t phy_idx; uint8_t dio_output_idx; - uint8_t dio_output_type; + uint8_t phy_idx; uint8_t assr_enabled; uint8_t mst_enabled; uint8_t dp2_enabled; + uint8_t usb4_enabled; void *dm_stream_ctx; bool dpms_off; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index e589cbe67307..4249bf306e09 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -208,4 +208,6 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( const struct dc_link *link); #endif +uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter); + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 9ccafe007b23..c4b067d01895 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -132,31 +132,6 @@ enum dc_irq_source to_dal_irq_source_dcn20( } } -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source) -{ - const struct irq_source_info *info; - uint32_t addr; - uint32_t value; - uint32_t current_status; - - info = find_irq_source_info(irq_service, source); - if (!info) - return 0; - - addr = info->status_reg; - if (!addr) - return 0; - - value = dm_read_reg(irq_service->ctx, addr); - current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE); - - return current_status; -} - static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git 
a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h index 4d69ab24ca25..aee4b37999f1 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h @@ -31,6 +31,4 @@ struct irq_service *dal_irq_service_dcn20_create( struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 235294534c43..0f15bcada4e9 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -134,31 +134,6 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic return DC_IRQ_SOURCE_INVALID; } -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source) -{ - const struct irq_source_info *info; - uint32_t addr; - uint32_t value; - uint32_t current_status; - - info = find_irq_source_info(irq_service, source); - if (!info) - return 0; - - addr = info->status_reg; - if (!addr) - return 0; - - value = dm_read_reg(irq_service->ctx, addr); - current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE); - - return current_status; -} - static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h index 616470e32380..da2bd0e93d7a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h @@ -31,6 +31,4 @@ struct irq_service *dal_irq_service_dcn21_create( struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 4db1133e4466..a2a4fbeb83f8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service) *irq_service = NULL; } -const struct irq_source_info *find_irq_source_info( +static const struct irq_source_info *find_irq_source_info( struct irq_service *irq_service, enum dc_irq_source source) { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index e60b82480093..dbfcb096eedd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -69,10 +69,6 @@ struct irq_service { const struct irq_service_funcs *funcs; }; -const struct irq_source_info *find_irq_source_info( - struct irq_service *irq_service, - enum dc_irq_source source); - void dal_irq_service_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data); diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index 6d648c889866..f7420c3f5672 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -104,6 +104,7 @@ struct mod_hdcp_displayport { uint8_t rev; uint8_t assr_enabled; uint8_t 
mst_enabled; + uint8_t usb4_enabled; }; struct mod_hdcp_hdmi { @@ -249,7 +250,6 @@ struct mod_hdcp_link { uint8_t ddc_line; uint8_t link_enc_idx; uint8_t phy_idx; - uint8_t dio_output_type; uint8_t dio_output_id; uint8_t hdcp_supported_informational; union { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 380811b91350..4885c4ae78b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1625,10 +1625,18 @@ static int aldebaran_set_df_cstate(struct smu_context *smu, static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en) { - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - en ? 0 : 1, - NULL); + struct amdgpu_device *adev = smu->adev; + + /* The message only works on the master die; a NACK will be sent + back for the other dies, so only send it on the master die */ + if (!adev->smuio.funcs->get_socket_id(adev) && + !adev->smuio.funcs->get_die_id(adev)) + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GmiPwrDnControl, + en ? 0 : 1, + NULL); + else + return 0; } static const struct throttling_logging_label { diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index aef2fbd676e5..9603193d2fa1 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -828,8 +828,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, } if (!crtc_state->enable && !can_update_disabled) { - drm_dbg_kms(plane_state->crtc->dev, - "Cannot update plane of a disabled CRTC.\n"); + drm_dbg_kms(plane_state->plane->dev, + "Cannot update plane of a disabled CRTC.\n"); return -EINVAL; } @@ -839,8 +839,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (hscale < 0 || vscale < 0) { - drm_dbg_kms(plane_state->crtc->dev, - "Invalid scaling of plane\n"); + drm_dbg_kms(plane_state->plane->dev, + "Invalid scaling of plane\n"); drm_rect_debug_print("src: ", &plane_state->src, true); drm_rect_debug_print("dst: ", &plane_state->dst, false); return -ERANGE; } @@ -864,8 +864,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, return 0; if (!can_position && !drm_rect_equals(dst, &clip)) { - drm_dbg_kms(plane_state->crtc->dev, - "Plane must cover entire CRTC\n"); + drm_dbg_kms(plane_state->plane->dev, + "Plane must cover entire CRTC\n"); drm_rect_debug_print("dst: ", dst, false); drm_rect_debug_print("clip: ", &clip, false); return -EINVAL; } @@ -1016,7 +1016,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state, * it's in self refresh mode and needs to be fully disabled. 
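The crtc_needs_disable() hunk that follows fixes the self-refresh exit case: a CRTC leaving self refresh must get a full disable when the new state is not active, even if it is still logically enabled. A reduced sketch of the corrected predicate (struct and names abridged for illustration, not the drm types):

#include <stdbool.h>

struct crtc_state_sketch {
        bool active;              /* CRTC is actually scanning out */
        bool enable;              /* CRTC is logically enabled */
        bool self_refresh_active; /* panel self-refresh engaged */
};

/* Disable on active teardown, on self-refresh exit to inactive, or on SR entry. */
static bool needs_disable(const struct crtc_state_sketch *old_st,
                          const struct crtc_state_sketch *new_st)
{
        return old_st->active ||
               (old_st->self_refresh_active && !new_st->active) ||
               new_st->self_refresh_active;
}

The bug being fixed is exactly the enable/active distinction: an enabled-but-inactive new state previously skipped the disable path.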
*/ return old_state->active || - (old_state->self_refresh_active && !new_state->enable) || + (old_state->self_refresh_active && !new_state->active) || new_state->self_refresh_active; } diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index ded8968b3e8a..0327d595e028 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -209,11 +209,11 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) return ret; - src = data[0].vaddr; /* TODO: Use mapping abstraction properly */ ret = drm_gem_fb_vmap(fb, map, data); if (ret) goto out_drm_gem_fb_end_cpu_access; + src = data[0].vaddr; /* TODO: Use mapping abstraction properly */ switch (fb->format->format) { case DRM_FORMAT_RGB565: diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 9c9d574f0b8c..cab505277595 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1298,6 +1298,28 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), DKL_TX_DP20BITMODE, 0); + + if (IS_ALDERLAKE_P(dev_priv)) { + u32 val; + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + if (ln == 0) { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2); + } else { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3); + } + } else { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0); + } + + intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, + val); + } } } diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 1e689d573512..e2dfb93a82bd 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -477,14 +477,14 @@ static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = { static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ - { .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ - { .icl = { 0xC, 0x64, 0x34, 0x00, 0x0B } }, /* 350 700 6.0 */ - { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 350 900 8.2 */ + { .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ + { .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */ + { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ - { .icl = { 0xC, 0x64, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */ + { .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ - { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */ + { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index aaf970c37aa2..1478c02a82cb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object 
*obj) { struct i915_mmap_offset *mmo, *mn; + if (obj->ops->unmap_virtual) + obj->ops->unmap_virtual(obj); + spin_lock(&obj->mmo.lock); rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index f9f7e44099fe..4b4829eb16c2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops { int (*pwrite)(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg); u64 (*mmap_offset)(struct drm_i915_gem_object *obj); + void (*unmap_virtual)(struct drm_i915_gem_object *obj); int (*dmabuf_export)(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 89b70f5cde7a..9f429ed6e78a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -161,7 +161,6 @@ retry: /* Immediately discard the backing storage */ int i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - drm_gem_free_mmap_offset(&obj->base); if (obj->ops->truncate) return obj->ops->truncate(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 923cc7ad8d70..de3fe79b665a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -556,6 +556,20 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); } +static int i915_ttm_truncate(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + int err; + + WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED); + + err = i915_ttm_move_notify(bo); + if (err) + return err; + + return i915_ttm_purge(obj); +} + static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); @@ -883,6 +897,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) if (ret) return ret; + if (obj->mm.madv != I915_MADV_WILLNEED) { + dma_resv_unlock(bo->base.resv); + return VM_FAULT_SIGBUS; + } + if (drm_dev_enter(dev, &idx)) { ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, TTM_BO_VM_NUM_PREFAULT); @@ -945,6 +964,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) return drm_vma_node_offset_addr(&obj->base.vma_node); } +static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj) +{ + ttm_bo_unmap_virtual(i915_gem_to_ttm(obj)); +} + static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .name = "i915_gem_object_ttm", .flags = I915_GEM_OBJECT_IS_SHRINKABLE | @@ -952,7 +976,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .get_pages = i915_ttm_get_pages, .put_pages = i915_ttm_put_pages, - .truncate = i915_ttm_purge, + .truncate = i915_ttm_truncate, .shrinker_release_pages = i915_ttm_shrinker_release_pages, .adjust_lru = i915_ttm_adjust_lru, @@ -960,6 +984,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .migrate = i915_ttm_migrate, .mmap_offset = i915_ttm_mmap_offset, + .unmap_virtual = i915_ttm_unmap_virtual, .mmap_ops = &vm_ops_ttm, }; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 743e6ab2c40b..c6291429b00c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1368,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915, } } - if (!obj->ops->mmap_ops) { - err = check_absent(addr, obj->base.size); - if (err) { - pr_err("%s: was not absent\n", obj->mm.region->name); - goto out_unmap; - } - } else { - /* ttm allows access to evicted regions by design */ - - err = check_present(addr, obj->base.size); - if (err) { - pr_err("%s: was not present\n", obj->mm.region->name); - goto out_unmap; - } + err = check_absent(addr, obj->base.size); + if (err) { + pr_err("%s: was not absent\n", obj->mm.region->name); + goto out_unmap; } out_unmap: diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4c28dadf8d69..971d601fe751 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11166,8 +11166,12 @@ enum skl_power_gate { _DKL_PHY2_BASE) + \ _DKL_TX_DPCNTL1) -#define _DKL_TX_DPCNTL2 0x2C8 -#define DKL_TX_DP20BITMODE (1 << 2) +#define _DKL_TX_DPCNTL2 0x2C8 +#define DKL_TX_DP20BITMODE REG_BIT(2) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK REG_GENMASK(4, 3) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val)) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val)) #define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \ _DKL_PHY1_BASE, \ _DKL_PHY2_BASE) + \ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 195b2323ec00..4b6f5655fab5 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c @@ -107,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev, static void i915_pxp_tee_component_unbind(struct device *i915_kdev, struct device *tee_kdev, void *data) { + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev); + intel_wakeref_t wakeref; - intel_pxp_fini_hw(pxp); + with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref) + intel_pxp_fini_hw(pxp); mutex_lock(&pxp->tee_mutex); pxp->pxp_component = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index e2488559cc9f..11ad210919c8 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -666,18 +666,18 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { r = -ENOMEM; - goto out_suspend; + goto err_suspend; } if (rdev->accel_working) { vm = &fpriv->vm; r = radeon_vm_init(rdev, vm); if (r) - goto out_fpriv; + goto err_fpriv; r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) - goto out_vm_fini; + goto err_vm_fini; /* map the ib pool buffer read only into * virtual address space */ @@ -685,7 +685,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) rdev->ring_tmp_bo.bo); if (!vm->ib_bo_va) { r = -ENOMEM; - goto out_vm_fini; + goto err_vm_fini; } r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, @@ -693,19 +693,21 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); if (r) - goto out_vm_fini; + goto err_vm_fini; } file_priv->driver_priv = fpriv; } - if (!r) - goto out_suspend; + pm_runtime_mark_last_busy(dev->dev); + 
pm_runtime_put_autosuspend(dev->dev); + return 0; -out_vm_fini: +err_vm_fini: radeon_vm_fini(rdev, vm); -out_fpriv: +err_fpriv: kfree(fpriv); -out_suspend: + +err_suspend: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index b64d93da651d..5e2b0175df36 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -658,8 +658,10 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node) return -EPROBE_DEFER; phy = platform_get_drvdata(pdev); - if (!phy) + if (!phy) { + put_device(&pdev->dev); return -EPROBE_DEFER; + } hdmi->phy = phy; diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index 0037eefe3239..a3ad7c9736ec 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -68,9 +68,11 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp) #if defined(__i386__) || defined(__x86_64__) if (caching == ttm_write_combined) tmp = pgprot_writecombine(tmp); +#ifndef CONFIG_UML else if (boot_cpu_data.x86 > 3) tmp = pgprot_noncached(tmp); -#endif +#endif /* CONFIG_UML */ +#endif /* __i386__ || __x86_64__ */ #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ defined(__powerpc__) || defined(__mips__) if (caching == ttm_write_combined) diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 21f410901694..3313b92db531 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -279,7 +279,7 @@ void virtio_gpu_deinit(struct drm_device *dev) flush_work(&vgdev->ctrlq.dequeue_work); flush_work(&vgdev->cursorq.dequeue_work); flush_work(&vgdev->config_changed_work); - vgdev->vdev->config->reset(vgdev->vdev); + virtio_reset_device(vgdev->vdev); vgdev->vdev->config->del_vqs(vgdev->vdev); } diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index ff2b308d8651..11c409cbc88e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -24,6 +24,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma/xilinx_dpdma.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/module.h> @@ -1058,14 +1059,18 @@ static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer, zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt); /* - * Set slave_id for each DMA channel to indicate they're part of a + * Set pconfig for each DMA channel to indicate they're part of a * video group. 
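The zynqmp_disp change below is part of the broader removal of dma_slave_config.slave_id: vendor-specific settings now travel through the generic peripheral_config/peripheral_size fields. A minimal caller-side sketch of the pattern (channel acquisition omitted; only the config plumbing is shown):

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dpdma.h>

/* Hand a vendor-specific blob to the channel through the generic API. */
static int cfg_video_group_chan(struct dma_chan *chan)
{
        struct xilinx_dpdma_peripheral_config pconfig = {
                .video_group = true,
        };
        struct dma_slave_config config = {
                .direction = DMA_MEM_TO_DEV,
                .peripheral_config = &pconfig,
                .peripheral_size = sizeof(pconfig),
        };

        return dmaengine_slave_config(chan, &config);
}

The provider copies the blob during dmaengine_slave_config(), so a stack-allocated pconfig, as in the hunk below, is fine.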
*/ for (i = 0; i < info->num_planes; i++) { struct zynqmp_disp_layer_dma *dma = &layer->dmas[i]; + struct xilinx_dpdma_peripheral_config pconfig = { + .video_group = true, + }; struct dma_slave_config config = { .direction = DMA_MEM_TO_DEV, - .slave_id = 1, + .peripheral_config = &pconfig, + .peripheral_size = sizeof(pconfig), }; dmaengine_slave_config(dma->chan, &config); diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c index 15661c7f3633..e89cca015095 100644 --- a/drivers/greybus/es2.c +++ b/drivers/greybus/es2.c @@ -78,7 +78,7 @@ struct es2_cport_in { * @hd: pointer to our gb_host_device structure * * @cport_in: endpoint, urbs and buffer for cport in messages - * @cport_out_endpoint: endpoint for for cport out messages + * @cport_out_endpoint: endpoint for cport out messages * @cport_out_urb: array of urbs for the CPort out messages * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or * not. diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 26cee452ec44..85975031389b 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -400,6 +400,7 @@ #define USB_DEVICE_ID_HP_X2 0x074d #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 #define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05 +#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817 #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 1ce75e8b49d5..112901d2d8d2 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -330,6 +330,8 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100), + HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15), HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN), diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index 72957a9f7117..efa6140915f4 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -6,16 +6,17 @@ * Author: Sean O'Brien <seobrien@chromium.org> */ +#include <linux/device.h> #include <linux/hid.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/sysfs.h> #define MIN_FN_ROW_KEY 1 #define MAX_FN_ROW_KEY 24 #define HID_VD_FN_ROW_PHYSMAP 0x00000001 #define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP) -static struct hid_driver hid_vivaldi; - struct vivaldi_data { u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1]; int max_function_row_key; @@ -40,7 +41,7 @@ static ssize_t function_row_physmap_show(struct device *dev, return size; } -DEVICE_ATTR_RO(function_row_physmap); +static DEVICE_ATTR_RO(function_row_physmap); static struct attribute *sysfs_attrs[] = { &dev_attr_function_row_physmap.attr, NULL @@ -74,10 +75,11 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, struct hid_usage *usage) { struct vivaldi_data *drvdata = hid_get_drvdata(hdev); + struct hid_report *report = field->report; int fn_key; int ret; u32 report_len; - u8 *buf; + u8 *report_data, *buf; if (field->logical != HID_USAGE_FN_ROW_PHYSMAP || (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL) @@ -89,12 +91,24 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, if (fn_key > 
drvdata->max_function_row_key) drvdata->max_function_row_key = fn_key; - buf = hid_alloc_report_buf(field->report, GFP_KERNEL); - if (!buf) + report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL); + if (!report_data) return; - report_len = hid_report_len(field->report); - ret = hid_hw_raw_request(hdev, field->report->id, buf, + report_len = hid_report_len(report); + if (!report->id) { + /* + * hid_hw_raw_request() will stuff report ID (which will be 0) + * into the first byte of the buffer even for unnumbered + * reports, so we need to account for this to avoid getting + * -EOVERFLOW in return. + * Note that hid_alloc_report_buf() adds 7 bytes to the size + * so we can safely say that we have space for an extra byte. + */ + report_len++; + } + + ret = hid_hw_raw_request(hdev, report->id, report_data, report_len, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { @@ -103,7 +117,16 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, goto out; } - ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf, + if (!report->id) { + /* + * Undo the damage from hid_hw_raw_request() for unnumbered + * reports. + */ + report_data++; + report_len--; + } + + ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data, report_len, 0); if (ret) { dev_warn(&hdev->dev, "failed to report feature %d\n", diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 8fe3efcb8327..614adb510dbd 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -28,11 +28,22 @@ struct uhid_device { struct mutex devlock; + + /* This flag tracks whether the HID device is usable for commands from + * userspace. The flag is already set before hid_add_device(), which + * runs in workqueue context, to allow hid_add_device() to communicate + * with userspace. + * However, if hid_add_device() fails, the flag is cleared without + * holding devlock. + * We guarantee that if @running changes from true to false while you're + * holding @devlock, it's still fine to access @hid. + */ bool running; __u8 *rd_data; uint rd_size; + /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */ struct hid_device *hid; struct uhid_event input_buf; @@ -63,9 +74,18 @@ static void uhid_device_add_worker(struct work_struct *work) if (ret) { hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); - hid_destroy_device(uhid->hid); - uhid->hid = NULL; - uhid->running = false; + /* We used to call hid_destroy_device() here, but that's really + * messy to get right because we have to coordinate with + * concurrent writes from userspace that might be in the middle + * of using uhid->hid. + * Just leave uhid->hid as-is for now, and clean it up when + * userspace tries to close or reinitialize the uhid instance. + * + * However, we do have to clear the ->running flag and do a + * wakeup to make sure userspace knows that the device is gone. 
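The failure handling described above reduces to a publish-and-wake pattern: ->running is written with WRITE_ONCE() and polled with READ_ONCE(), so lockless readers see a consistent flag without taking devlock. Distilled into a sketch (types cut down to the relevant fields; this is not the driver's full locking story):

#include <linux/compiler.h>
#include <linux/wait.h>

struct uhid_sketch {
        bool running;
        wait_queue_head_t report_wait;
};

/* Failure/teardown side: mark the device dead, then wake sleeping waiters. */
static void uhid_mark_dead(struct uhid_sketch *u)
{
        WRITE_ONCE(u->running, false);
        wake_up_interruptible(&u->report_wait);
}

/* Request side: lockless liveness check before queueing a report. */
static bool uhid_is_running(struct uhid_sketch *u)
{
        return READ_ONCE(u->running);
}

The wake-up after the store is what bounds the wait_event_interruptible_timeout() callers seen later in the hunk.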
+ */ + WRITE_ONCE(uhid->running, false); + wake_up_interruptible(&uhid->report_wait); } } @@ -174,9 +194,9 @@ static int __uhid_report_queue_and_wait(struct uhid_device *uhid, spin_unlock_irqrestore(&uhid->qlock, flags); ret = wait_event_interruptible_timeout(uhid->report_wait, - !uhid->report_running || !uhid->running, + !uhid->report_running || !READ_ONCE(uhid->running), 5 * HZ); - if (!ret || !uhid->running || uhid->report_running) + if (!ret || !READ_ONCE(uhid->running) || uhid->report_running) ret = -EIO; else if (ret < 0) ret = -ERESTARTSYS; @@ -217,7 +237,7 @@ static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum, struct uhid_event *ev; int ret; - if (!uhid->running) + if (!READ_ONCE(uhid->running)) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); @@ -259,7 +279,7 @@ static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum, struct uhid_event *ev; int ret; - if (!uhid->running || count > UHID_DATA_MAX) + if (!READ_ONCE(uhid->running) || count > UHID_DATA_MAX) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); @@ -474,7 +494,7 @@ static int uhid_dev_create2(struct uhid_device *uhid, void *rd_data; int ret; - if (uhid->running) + if (uhid->hid) return -EALREADY; rd_size = ev->u.create2.rd_size; @@ -556,15 +576,16 @@ static int uhid_dev_create(struct uhid_device *uhid, static int uhid_dev_destroy(struct uhid_device *uhid) { - if (!uhid->running) + if (!uhid->hid) return -EINVAL; - uhid->running = false; + WRITE_ONCE(uhid->running, false); wake_up_interruptible(&uhid->report_wait); cancel_work_sync(&uhid->worker); hid_destroy_device(uhid->hid); + uhid->hid = NULL; kfree(uhid->rd_data); return 0; @@ -572,7 +593,7 @@ static int uhid_dev_destroy(struct uhid_device *uhid) static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev) { - if (!uhid->running) + if (!READ_ONCE(uhid->running)) return -EINVAL; hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data, @@ -583,7 +604,7 @@ static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev) static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev) { - if (!uhid->running) + if (!READ_ONCE(uhid->running)) return -EINVAL; hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data, @@ -595,7 +616,7 @@ static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev) static int uhid_dev_get_report_reply(struct uhid_device *uhid, struct uhid_event *ev) { - if (!uhid->running) + if (!READ_ONCE(uhid->running)) return -EINVAL; uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev); @@ -605,7 +626,7 @@ static int uhid_dev_get_report_reply(struct uhid_device *uhid, static int uhid_dev_set_report_reply(struct uhid_device *uhid, struct uhid_event *ev) { - if (!uhid->running) + if (!READ_ONCE(uhid->running)) return -EINVAL; uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 2a4cc39962e7..a7176fc0635d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2588,6 +2588,24 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, } } +static bool wacom_wac_slot_is_active(struct input_dev *dev, int key) +{ + struct input_mt *mt = dev->mt; + struct input_mt_slot *s; + + if (!mt) + return false; + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { + if (s->key == key && + input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) { + return true; + } + } + + return false; +} + static void wacom_wac_finger_event(struct hid_device *hdev, struct hid_field 
*field, struct hid_usage *usage, __s32 value) { @@ -2638,9 +2656,14 @@ static void wacom_wac_finger_event(struct hid_device *hdev, } if (usage->usage_index + 1 == field->report_count) { - if (equivalent_usage == wacom_wac->hid_data.last_slot_field && - wacom_wac->hid_data.confidence) - wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); + if (equivalent_usage == wacom_wac->hid_data.last_slot_field) { + bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input, + wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch; + + if (wacom_wac->hid_data.confidence || touch_removed) { + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); + } + } } } @@ -2659,6 +2682,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, hid_data->confidence = true; + hid_data->cc_report = 0; + hid_data->cc_index = -1; + hid_data->cc_value_index = -1; + for (i = 0; i < report->maxfield; i++) { struct hid_field *field = report->field[i]; int j; @@ -2692,11 +2719,14 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, hid_data->cc_index >= 0) { struct hid_field *field = report->field[hid_data->cc_index]; int value = field->value[hid_data->cc_value_index]; - if (value) + if (value) { hid_data->num_expected = value; + hid_data->num_received = 0; + } } else { hid_data->num_expected = wacom_wac->features.touch_max; + hid_data->num_received = 0; } } @@ -2724,6 +2754,7 @@ static void wacom_wac_finger_report(struct hid_device *hdev, input_sync(input); wacom_wac->hid_data.num_received = 0; + wacom_wac->hid_data.num_expected = 0; /* keep touch state for pen event */ wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 2829575fd9b7..60375879612f 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -1554,7 +1554,7 @@ int vmbus_request_offers(void) struct vmbus_channel_msginfo *msginfo; int ret; - msginfo = kmalloc(sizeof(*msginfo) + + msginfo = kzalloc(sizeof(*msginfo) + sizeof(struct vmbus_channel_message_header), GFP_KERNEL); if (!msginfo) diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 7be173a99f27..181d16bbf49d 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -44,10 +44,10 @@ EXPORT_SYMBOL_GPL(hv_vp_index); u32 hv_max_vp_index; EXPORT_SYMBOL_GPL(hv_max_vp_index); -void __percpu **hyperv_pcpu_input_arg; +void * __percpu *hyperv_pcpu_input_arg; EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg); -void __percpu **hyperv_pcpu_output_arg; +void * __percpu *hyperv_pcpu_output_arg; EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg); /* @@ -295,3 +295,14 @@ u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_s return HV_STATUS_INVALID_PARAMETER; } EXPORT_SYMBOL_GPL(hv_ghcb_hypercall); + +void __weak *hv_map_memory(void *addr, unsigned long size) +{ + return NULL; +} +EXPORT_SYMBOL_GPL(hv_map_memory); + +void __weak hv_unmap_memory(void *addr) +{ +} +EXPORT_SYMBOL_GPL(hv_unmap_memory); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 7ae04ccb1043..17bf55fe3169 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -33,6 +33,7 @@ #include <linux/random.h> #include <linux/kernel.h> #include <linux/syscore_ops.h> +#include <linux/dma-map-ops.h> #include <clocksource/hyperv_timer.h> #include "hyperv_vmbus.h" @@ -2078,6 +2079,7 @@ struct hv_device *vmbus_device_create(const guid_t *type, return child_device_obj; } +static u64 vmbus_dma_mask = DMA_BIT_MASK(64); /* * vmbus_device_register - 
Register the child device */ @@ -2118,6 +2120,8 @@ int vmbus_device_register(struct hv_device *child_device_obj) } hv_debug_add_dev_dir(child_device_obj); + child_device_obj->device.dma_mask = &vmbus_dma_mask; + child_device_obj->device.dma_parms = &child_device_obj->dma_parms; return 0; err_kset_unregister: diff --git a/drivers/hwspinlock/stm32_hwspinlock.c b/drivers/hwspinlock/stm32_hwspinlock.c index 3ad0ce0da4d9..5bd11a7fab65 100644 --- a/drivers/hwspinlock/stm32_hwspinlock.c +++ b/drivers/hwspinlock/stm32_hwspinlock.c @@ -54,8 +54,23 @@ static const struct hwspinlock_ops stm32_hwspinlock_ops = { .relax = stm32_hwspinlock_relax, }; +static void stm32_hwspinlock_disable_clk(void *data) +{ + struct platform_device *pdev = data; + struct stm32_hwspinlock *hw = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + pm_runtime_get_sync(dev); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_put_noidle(dev); + + clk_disable_unprepare(hw->clk); +} + static int stm32_hwspinlock_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct stm32_hwspinlock *hw; void __iomem *io_base; size_t array_size; @@ -66,41 +81,43 @@ static int stm32_hwspinlock_probe(struct platform_device *pdev) return PTR_ERR(io_base); array_size = STM32_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock); - hw = devm_kzalloc(&pdev->dev, sizeof(*hw) + array_size, GFP_KERNEL); + hw = devm_kzalloc(dev, sizeof(*hw) + array_size, GFP_KERNEL); if (!hw) return -ENOMEM; - hw->clk = devm_clk_get(&pdev->dev, "hsem"); + hw->clk = devm_clk_get(dev, "hsem"); if (IS_ERR(hw->clk)) return PTR_ERR(hw->clk); - for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++) - hw->bank.lock[i].priv = io_base + i * sizeof(u32); + ret = clk_prepare_enable(hw->clk); + if (ret) { + dev_err(dev, "Failed to prepare_enable clock\n"); + return ret; + } platform_set_drvdata(pdev, hw); - pm_runtime_enable(&pdev->dev); - ret = hwspin_lock_register(&hw->bank, &pdev->dev, &stm32_hwspinlock_ops, - 0, STM32_MUTEX_NUM_LOCKS); + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_put(dev); - if (ret) - pm_runtime_disable(&pdev->dev); + ret = devm_add_action_or_reset(dev, stm32_hwspinlock_disable_clk, pdev); + if (ret) { + dev_err(dev, "Failed to register action\n"); + return ret; + } - return ret; -} + for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++) + hw->bank.lock[i].priv = io_base + i * sizeof(u32); -static int stm32_hwspinlock_remove(struct platform_device *pdev) -{ - struct stm32_hwspinlock *hw = platform_get_drvdata(pdev); - int ret; + ret = devm_hwspin_lock_register(dev, &hw->bank, &stm32_hwspinlock_ops, + 0, STM32_MUTEX_NUM_LOCKS); - ret = hwspin_lock_unregister(&hw->bank); if (ret) - dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret); - - pm_runtime_disable(&pdev->dev); + dev_err(dev, "Failed to register hwspinlock\n"); - return 0; + return ret; } static int __maybe_unused stm32_hwspinlock_runtime_suspend(struct device *dev) @@ -135,7 +152,6 @@ MODULE_DEVICE_TABLE(of, stm32_hwpinlock_ids); static struct platform_driver stm32_hwspinlock_driver = { .probe = stm32_hwspinlock_probe, - .remove = stm32_hwspinlock_remove, .driver = { .name = "stm32_hwspinlock", .of_match_table = stm32_hwpinlock_ids, diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.c b/drivers/hwtracing/coresight/coresight-cfg-preload.c index 751af3710d56..e237a4edfa09 100644 --- a/drivers/hwtracing/coresight/coresight-cfg-preload.c +++ b/drivers/hwtracing/coresight/coresight-cfg-preload.c @@ -24,8 +24,13 
@@ static struct cscfg_config_desc *preload_cfgs[] = { NULL }; +static struct cscfg_load_owner_info preload_owner = { + .type = CSCFG_OWNER_PRELOAD, +}; + /* preload called on initialisation */ -int cscfg_preload(void) +int cscfg_preload(void *owner_handle) { - return cscfg_load_config_sets(preload_cfgs, preload_feats); + preload_owner.owner_handle = owner_handle; + return cscfg_load_config_sets(preload_cfgs, preload_feats, &preload_owner); } diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h index 25eb6c632692..9bd44b940add 100644 --- a/drivers/hwtracing/coresight/coresight-config.h +++ b/drivers/hwtracing/coresight/coresight-config.h @@ -97,6 +97,8 @@ struct cscfg_regval_desc { * @params_desc: array of parameters used. * @nr_regs: number of registers used. * @regs_desc: array of registers used. + * @load_owner: handle to load owner for dynamic load and unload of features. + * @fs_group: reference to configfs group for dynamic unload. */ struct cscfg_feature_desc { const char *name; @@ -107,6 +109,8 @@ struct cscfg_feature_desc { struct cscfg_parameter_desc *params_desc; int nr_regs; struct cscfg_regval_desc *regs_desc; + void *load_owner; + struct config_group *fs_group; }; /** @@ -128,7 +132,8 @@ struct cscfg_feature_desc { * @presets: Array of preset values. * @event_ea: Extended attribute for perf event value * @active_cnt: ref count for activate on this configuration. - * + * @load_owner: handle to load owner for dynamic load and unload of configs. + * @fs_group: reference to configfs group for dynamic unload. */ struct cscfg_config_desc { const char *name; @@ -141,6 +146,8 @@ struct cscfg_config_desc { const u64 *presets; /* nr_presets * nr_total_params */ struct dev_ext_attribute *event_ea; atomic_t active_cnt; + void *load_owner; + struct config_group *fs_group; }; /** diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 8a18c71df37a..88653d1c06a4 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -729,7 +729,7 @@ static inline void coresight_put_ref(struct coresight_device *csdev) * coresight_grab_device - Power up this device and any of the helper * devices connected to it for trace operation. Since the helper devices * don't appear on the trace path, they should be handled along with the - * the master device. + * master device. 
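A pattern worth calling out from the stm32_hwspinlock probe rework a few hunks back: teardown (the runtime-PM rewind plus clk_disable_unprepare()) is registered with devm_add_action_or_reset(), which either queues the action for driver detach or runs it immediately if registration fails, making a .remove callback unnecessary. A minimal sketch of the idiom with a hypothetical driver (my_hw and the "hsem" clock name are placeholders):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/platform_device.h>

struct my_hw { struct clk *clk; };

static void my_disable_clk(void *data)
{
        struct my_hw *hw = data;

        clk_disable_unprepare(hw->clk);
}

static int my_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct my_hw *hw;
        int ret;

        hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return -ENOMEM;

        hw->clk = devm_clk_get(dev, "hsem");
        if (IS_ERR(hw->clk))
                return PTR_ERR(hw->clk);

        ret = clk_prepare_enable(hw->clk);
        if (ret)
                return ret;

        /* Runs on probe failure or driver detach; no .remove needed. */
        return devm_add_action_or_reset(dev, my_disable_clk, hw);
}

This is why the patch is able to delete stm32_hwspinlock_remove() outright.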
*/ static int coresight_grab_device(struct coresight_device *csdev) { diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 86a313857b58..bf18128cf5de 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -722,7 +722,16 @@ static int etm4_enable_sysfs(struct coresight_device *csdev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etm4_enable_arg arg = { }; - int ret; + unsigned long cfg_hash; + int ret, preset; + + /* enable any config activated by configfs */ + cscfg_config_sysfs_get_active_cfg(&cfg_hash, &preset); + if (cfg_hash) { + ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset); + if (ret) + return ret; + } spin_lock(&drvdata->spinlock); diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 58062a5a8238..bb14a3a8a921 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -856,13 +856,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) { int ret; void __iomem *base; - unsigned long *guaranteed; struct device *dev = &adev->dev; struct coresight_platform_data *pdata = NULL; struct stm_drvdata *drvdata; struct resource *res = &adev->res; struct resource ch_res; - size_t bitmap_size; struct coresight_desc desc = { 0 }; desc.name = coresight_alloc_device_name(&stm_devs, dev); @@ -904,12 +902,10 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) else drvdata->numsp = stm_num_stimulus_port(drvdata); - bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long); - - guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); - if (!guaranteed) + drvdata->chs.guaranteed = devm_bitmap_zalloc(dev, drvdata->numsp, + GFP_KERNEL); + if (!drvdata->chs.guaranteed) return -ENOMEM; - drvdata->chs.guaranteed = guaranteed; spin_lock_init(&drvdata->spinlock); diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c index c547816b9000..433ede94dd63 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c +++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c @@ -6,6 +6,7 @@ #include <linux/configfs.h> +#include "coresight-config.h" #include "coresight-syscfg-configfs.h" /* create a default ci_type. */ @@ -87,9 +88,75 @@ static ssize_t cscfg_cfg_values_show(struct config_item *item, char *page) } CONFIGFS_ATTR_RO(cscfg_cfg_, values); +static ssize_t cscfg_cfg_enable_show(struct config_item *item, char *page) +{ + struct cscfg_fs_config *fs_config = container_of(to_config_group(item), + struct cscfg_fs_config, group); + + return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->active); +} + +static ssize_t cscfg_cfg_enable_store(struct config_item *item, + const char *page, size_t count) +{ + struct cscfg_fs_config *fs_config = container_of(to_config_group(item), + struct cscfg_fs_config, group); + int err; + bool val; + + err = kstrtobool(page, &val); + if (!err) + err = cscfg_config_sysfs_activate(fs_config->config_desc, val); + if (!err) { + fs_config->active = val; + if (val) + cscfg_config_sysfs_set_preset(fs_config->preset); + } + return err ? 
err : count; +} +CONFIGFS_ATTR(cscfg_cfg_, enable); + +static ssize_t cscfg_cfg_preset_show(struct config_item *item, char *page) +{ + struct cscfg_fs_config *fs_config = container_of(to_config_group(item), + struct cscfg_fs_config, group); + + return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->preset); +} + +static ssize_t cscfg_cfg_preset_store(struct config_item *item, + const char *page, size_t count) +{ + struct cscfg_fs_config *fs_config = container_of(to_config_group(item), + struct cscfg_fs_config, group); + int preset, err; + + err = kstrtoint(page, 0, &preset); + if (!err) { + /* + * presets start at 1, and go up to max (15), + * but the config may provide fewer. + */ + if ((preset < 1) || (preset > fs_config->config_desc->nr_presets)) + err = -EINVAL; + } + + if (!err) { + /* set new value */ + fs_config->preset = preset; + /* set on system if active */ + if (fs_config->active) + cscfg_config_sysfs_set_preset(fs_config->preset); + } + return err ? err : count; +} +CONFIGFS_ATTR(cscfg_cfg_, preset); + static struct configfs_attribute *cscfg_config_view_attrs[] = { &cscfg_cfg_attr_description, &cscfg_cfg_attr_feature_refs, + &cscfg_cfg_attr_enable, + &cscfg_cfg_attr_preset, NULL, }; @@ -334,9 +401,19 @@ int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc) if (IS_ERR(new_group)) return PTR_ERR(new_group); err = configfs_register_group(&cscfg_configs_grp, new_group); + if (!err) + config_desc->fs_group = new_group; return err; } +void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc) +{ + if (config_desc->fs_group) { + configfs_unregister_group(config_desc->fs_group); + config_desc->fs_group = NULL; + } +} + static struct config_item_type cscfg_features_type = { .ct_owner = THIS_MODULE, }; @@ -358,9 +435,19 @@ int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc) if (IS_ERR(new_group)) return PTR_ERR(new_group); err = configfs_register_group(&cscfg_features_grp, new_group); + if (!err) + feat_desc->fs_group = new_group; return err; } +void cscfg_configfs_del_feature(struct cscfg_feature_desc *feat_desc) +{ + if (feat_desc->fs_group) { + configfs_unregister_group(feat_desc->fs_group); + feat_desc->fs_group = NULL; + } +} + int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr) { struct configfs_subsystem *subsys; diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.h b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h index 7d6ffe35ca4c..373d84d43268 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.h +++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h @@ -15,6 +15,8 @@ struct cscfg_fs_config { struct cscfg_config_desc *config_desc; struct config_group group; + bool active; + int preset; }; /* container for feature view */ @@ -41,5 +43,7 @@ int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr); void cscfg_configfs_release(struct cscfg_manager *cscfg_mgr); int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc); int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc); +void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc); +void cscfg_configfs_del_feature(struct cscfg_feature_desc *feat_desc); #endif /* CORESIGHT_SYSCFG_CONFIGFS_H */ diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c index 43054568430f..098fc34c4829 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg.c +++ b/drivers/hwtracing/coresight/coresight-syscfg.c @@ -250,6 +250,13 @@ static int 
cscfg_check_feat_for_cfg(struct cscfg_config_desc *config_desc) static int cscfg_load_feat(struct cscfg_feature_desc *feat_desc) { int err; + struct cscfg_feature_desc *feat_desc_exist; + + /* new feature must have unique name */ + list_for_each_entry(feat_desc_exist, &cscfg_mgr->feat_desc_list, item) { + if (!strcmp(feat_desc_exist->name, feat_desc->name)) + return -EEXIST; + } /* add feature to any matching registered devices */ err = cscfg_add_feat_to_csdevs(feat_desc); @@ -267,6 +274,13 @@ static int cscfg_load_config(struct cscfg_config_desc *config_desc) { int err; + struct cscfg_config_desc *config_desc_exist; + + /* new configuration must have a unique name */ + list_for_each_entry(config_desc_exist, &cscfg_mgr->config_desc_list, item) { + if (!strcmp(config_desc_exist->name, config_desc->name)) + return -EEXIST; + } /* validate features are present */ err = cscfg_check_feat_for_cfg(config_desc); @@ -354,6 +368,92 @@ unlock_exit: return err; } +/* + * Conditionally take a reference on the owner to prevent unload. + * + * Module-loaded configs need to be pinned to prevent premature unload. + */ +static int cscfg_owner_get(struct cscfg_load_owner_info *owner_info) +{ + if ((owner_info->type == CSCFG_OWNER_MODULE) && + (!try_module_get(owner_info->owner_handle))) + return -EINVAL; + return 0; +} + +/* conditionally lower ref count on an owner */ +static void cscfg_owner_put(struct cscfg_load_owner_info *owner_info) +{ + if (owner_info->type == CSCFG_OWNER_MODULE) + module_put(owner_info->owner_handle); +} + +static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, void *load_owner) +{ + struct cscfg_config_csdev *config_csdev, *tmp; + + if (list_empty(&csdev->config_csdev_list)) + return; + + list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) { + if (config_csdev->config_desc->load_owner == load_owner) + list_del(&config_csdev->node); + } +} + +static void cscfg_remove_owned_csdev_features(struct coresight_device *csdev, void *load_owner) +{ + struct cscfg_feature_csdev *feat_csdev, *tmp; + + if (list_empty(&csdev->feature_csdev_list)) + return; + + list_for_each_entry_safe(feat_csdev, tmp, &csdev->feature_csdev_list, node) { + if (feat_csdev->feat_desc->load_owner == load_owner) + list_del(&feat_csdev->node); + } +} + +/* + * Removal is relatively easy: just remove anything that matches the owner + * from all lists. Memory for the descriptors is managed by the owner; + * memory for the csdev items is devm_ allocated with the individual csdev + * devices. 
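The owner-matched removal just described relies on list_for_each_entry_safe(), which caches the next pointer so entries can be unlinked mid-walk. The core of the pattern, reduced to sketch types (not the driver's structs):

#include <linux/list.h>

struct owned_item {
        struct list_head node;
        void *owner;
};

/* Unlink every entry belonging to @owner; the _safe variant tolerates deletion. */
static void remove_owned(struct list_head *head, void *owner)
{
        struct owned_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, head, node) {
                if (item->owner == owner)
                        list_del(&item->node);
        }
}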
+ */ +static void cscfg_unload_owned_cfgs_feats(void *load_owner) +{ + struct cscfg_config_desc *config_desc, *cfg_tmp; + struct cscfg_feature_desc *feat_desc, *feat_tmp; + struct cscfg_registered_csdev *csdev_item; + + /* remove from each csdev instance feature and config lists */ + list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) { + /* + * for each csdev, check the loaded lists and remove if + * referenced descriptor is owned + */ + cscfg_remove_owned_csdev_configs(csdev_item->csdev, load_owner); + cscfg_remove_owned_csdev_features(csdev_item->csdev, load_owner); + } + + /* remove from the config descriptor lists */ + list_for_each_entry_safe(config_desc, cfg_tmp, &cscfg_mgr->config_desc_list, item) { + if (config_desc->load_owner == load_owner) { + cscfg_configfs_del_config(config_desc); + etm_perf_del_symlink_cscfg(config_desc); + list_del(&config_desc->item); + } + } + + /* remove from the feature descriptor lists */ + list_for_each_entry_safe(feat_desc, feat_tmp, &cscfg_mgr->feat_desc_list, item) { + if (feat_desc->load_owner == load_owner) { + cscfg_configfs_del_feature(feat_desc); + list_del(&feat_desc->item); + } + } +} + /** * cscfg_load_config_sets - API function to load feature and config sets. * @@ -361,13 +461,22 @@ unlock_exit: * descriptors and load into the system. * Features are loaded first to ensure configuration dependencies can be met. * + * To facilitate dynamic loading and unloading, features and configurations + * have a "load_owner", to allow later unload by the same owner. An owner may + * be a loadable module or configuration dynamically created via configfs. + * As later loaded configurations can use earlier loaded features, creating load + * dependencies, a load order list is maintained. Unload is strictly in the + * reverse order to load. + * * @config_descs: 0 terminated array of configuration descriptors. * @feat_descs: 0 terminated array of feature descriptors. + * @owner_info: Information on the owner of this set. */ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs, - struct cscfg_feature_desc **feat_descs) + struct cscfg_feature_desc **feat_descs, + struct cscfg_load_owner_info *owner_info) { - int err, i = 0; + int err = 0, i = 0; mutex_lock(&cscfg_mutex); @@ -380,8 +489,10 @@ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs, if (err) { pr_err("coresight-syscfg: Failed to load feature %s\n", feat_descs[i]->name); + cscfg_unload_owned_cfgs_feats(owner_info); goto exit_unlock; } + feat_descs[i]->load_owner = owner_info; i++; } } @@ -396,18 +507,86 @@ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs, if (err) { pr_err("coresight-syscfg: Failed to load configuration %s\n", config_descs[i]->name); + cscfg_unload_owned_cfgs_feats(owner_info); goto exit_unlock; } + config_descs[i]->load_owner = owner_info; i++; } } + /* add the load owner to the load order list */ + list_add_tail(&owner_info->item, &cscfg_mgr->load_order_list); + if (!list_is_singular(&cscfg_mgr->load_order_list)) { + /* lock previous item in load order list */ + err = cscfg_owner_get(list_prev_entry(owner_info, item)); + if (err) { + cscfg_unload_owned_cfgs_feats(owner_info); + list_del(&owner_info->item); + } + } + exit_unlock: mutex_unlock(&cscfg_mutex); return err; } EXPORT_SYMBOL_GPL(cscfg_load_config_sets); +/** + * cscfg_unload_config_sets - unload a set of configurations by owner. + * + * Dynamic unload of configuration and feature sets is done on the basis of + * the load owner of that set. 
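Taken together, the load/unload API above gives a module-based provider a simple lifecycle: tag the sets with a CSCFG_OWNER_MODULE owner at init and unload by the same owner at exit. A hypothetical provider might look like this (my_configs/my_features are empty placeholder sets, not from the patch; declarations come from the coresight syscfg header):

#include <linux/module.h>

static struct cscfg_config_desc *my_configs[] = { NULL };   /* placeholder */
static struct cscfg_feature_desc *my_features[] = { NULL }; /* placeholder */

static struct cscfg_load_owner_info my_owner_info = {
        .type = CSCFG_OWNER_MODULE,
        .owner_handle = THIS_MODULE,
};

static int __init my_cscfg_init(void)
{
        return cscfg_load_config_sets(my_configs, my_features, &my_owner_info);
}

static void __exit my_cscfg_exit(void)
{
        /* Fails if any config is active or this set was not the last loaded. */
        cscfg_unload_config_sets(&my_owner_info);
}

module_init(my_cscfg_init);
module_exit(my_cscfg_exit);
MODULE_LICENSE("GPL");

The try_module_get() taken on the previous owner in the load-order list is what enforces strict reverse-order unload for module providers.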
Later loaded configurations can depend on + features loaded earlier. + * + * Therefore, unload is only possible if:- + * 1) no configurations are active. + * 2) the set being unloaded was the last to be loaded to maintain dependencies. + * + * @owner_info: Information on owner for set being unloaded. + */ +int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info) +{ + int err = 0; + struct cscfg_load_owner_info *load_list_item = NULL; + + mutex_lock(&cscfg_mutex); + + /* cannot unload if anything is active */ + if (atomic_read(&cscfg_mgr->sys_active_cnt)) { + err = -EBUSY; + goto exit_unlock; + } + + /* cannot unload if not last loaded in load order */ + if (!list_empty(&cscfg_mgr->load_order_list)) { + load_list_item = list_last_entry(&cscfg_mgr->load_order_list, + struct cscfg_load_owner_info, item); + if (load_list_item != owner_info) + load_list_item = NULL; + } + + if (!load_list_item) { + err = -EINVAL; + goto exit_unlock; + } + + /* unload all belonging to load_owner */ + cscfg_unload_owned_cfgs_feats(owner_info); + + /* remove from load order list */ + if (!list_is_singular(&cscfg_mgr->load_order_list)) { + /* unlock previous item in load order list */ + cscfg_owner_put(list_prev_entry(owner_info, item)); + } + list_del(&owner_info->item); + +exit_unlock: + mutex_unlock(&cscfg_mutex); + return err; +} +EXPORT_SYMBOL_GPL(cscfg_unload_config_sets); + /* Handle coresight device registration and add configs and features to devices */ /* iterate through config lists and load matching configs to device */ @@ -566,32 +745,26 @@ unlock_exit: } EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats); -/** - * cscfg_activate_config - Mark a configuration descriptor as active. - * - * This will be seen when csdev devices are enabled in the system. - * Only activated configurations can be enabled on individual devices. - * Activation protects the configuration from alteration or removal while - * active. - * - * Selection by hash value - generated from the configuration name when it - * was loaded and added to the cs_etm/configurations file system for selection - * by perf. +/* + * This activates a configuration for either perf or sysfs. Perf can have + * multiple active configs, selected per event; sysfs is limited to one. * * Increments the configuration descriptor active count and the global active * count. * * @cfg_hash: Hash value of the selected configuration name.
*/ -int cscfg_activate_config(unsigned long cfg_hash) +static int _cscfg_activate_config(unsigned long cfg_hash) { struct cscfg_config_desc *config_desc; int err = -EINVAL; - mutex_lock(&cscfg_mutex); - list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) { if ((unsigned long)config_desc->event_ea->var == cfg_hash) { + /* must ensure that the config cannot be unloaded while in use */ + err = cscfg_owner_get(config_desc->load_owner); + if (err) + break; /* * increment the global active count - control changes to * active configurations @@ -609,6 +782,101 @@ int cscfg_activate_config(unsigned long cfg_hash) break; } } + return err; +} + +static void _cscfg_deactivate_config(unsigned long cfg_hash) +{ + struct cscfg_config_desc *config_desc; + + list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) { + if ((unsigned long)config_desc->event_ea->var == cfg_hash) { + atomic_dec(&config_desc->active_cnt); + atomic_dec(&cscfg_mgr->sys_active_cnt); + cscfg_owner_put(config_desc->load_owner); + dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name); + break; + } + } +} + +/* + * called from configfs to set/clear the active configuration for use when + * using sysfs to control trace. + */ +int cscfg_config_sysfs_activate(struct cscfg_config_desc *config_desc, bool activate) +{ + unsigned long cfg_hash; + int err = 0; + + mutex_lock(&cscfg_mutex); + + cfg_hash = (unsigned long)config_desc->event_ea->var; + + if (activate) { + /* no config may already be active when activating a new one */ + if (cscfg_mgr->sysfs_active_config) { + err = -EBUSY; + goto exit_unlock; + } + err = _cscfg_activate_config(cfg_hash); + if (!err) + cscfg_mgr->sysfs_active_config = cfg_hash; + } else { + /* disable if matching current value */ + if (cscfg_mgr->sysfs_active_config == cfg_hash) { + _cscfg_deactivate_config(cfg_hash); + cscfg_mgr->sysfs_active_config = 0; + } else + err = -EINVAL; + } + +exit_unlock: + mutex_unlock(&cscfg_mutex); + return err; +} + +/* set the sysfs preset value */ +void cscfg_config_sysfs_set_preset(int preset) +{ + mutex_lock(&cscfg_mutex); + cscfg_mgr->sysfs_active_preset = preset; + mutex_unlock(&cscfg_mutex); +} + +/* + * Used by a device to get the config and preset selected as active in configfs, + * when using sysfs to control trace. + */ +void cscfg_config_sysfs_get_active_cfg(unsigned long *cfg_hash, int *preset) +{ + mutex_lock(&cscfg_mutex); + *preset = cscfg_mgr->sysfs_active_preset; + *cfg_hash = cscfg_mgr->sysfs_active_config; + mutex_unlock(&cscfg_mutex); +} +EXPORT_SYMBOL_GPL(cscfg_config_sysfs_get_active_cfg); + +/** + * cscfg_activate_config - Mark a configuration descriptor as active. + * + * This will be seen when csdev devices are enabled in the system. + * Only activated configurations can be enabled on individual devices. + * Activation protects the configuration from alteration or removal while + * active. + * + * Selection by hash value - generated from the configuration name when it + * was loaded and added to the cs_etm/configurations file system for selection + * by perf. + * + * @cfg_hash: Hash value of the selected configuration name.
+ */ +int cscfg_activate_config(unsigned long cfg_hash) +{ + int err = 0; + + mutex_lock(&cscfg_mutex); + err = _cscfg_activate_config(cfg_hash); mutex_unlock(&cscfg_mutex); return err; @@ -624,18 +892,8 @@ EXPORT_SYMBOL_GPL(cscfg_activate_config); */ void cscfg_deactivate_config(unsigned long cfg_hash) { - struct cscfg_config_desc *config_desc; - mutex_lock(&cscfg_mutex); - - list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) { - if ((unsigned long)config_desc->event_ea->var == cfg_hash) { - atomic_dec(&config_desc->active_cnt); - atomic_dec(&cscfg_mgr->sys_active_cnt); - dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name); - break; - } - } + _cscfg_deactivate_config(cfg_hash); mutex_unlock(&cscfg_mutex); } EXPORT_SYMBOL_GPL(cscfg_deactivate_config); @@ -827,10 +1085,11 @@ int __init cscfg_init(void) INIT_LIST_HEAD(&cscfg_mgr->csdev_desc_list); INIT_LIST_HEAD(&cscfg_mgr->feat_desc_list); INIT_LIST_HEAD(&cscfg_mgr->config_desc_list); + INIT_LIST_HEAD(&cscfg_mgr->load_order_list); atomic_set(&cscfg_mgr->sys_active_cnt, 0); /* preload built-in configurations */ - err = cscfg_preload(); + err = cscfg_preload(THIS_MODULE); if (err) goto exit_err; diff --git a/drivers/hwtracing/coresight/coresight-syscfg.h b/drivers/hwtracing/coresight/coresight-syscfg.h index 8d018efd6ead..9106ffab4833 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg.h +++ b/drivers/hwtracing/coresight/coresight-syscfg.h @@ -25,16 +25,22 @@ * @csdev_desc_list: List of coresight devices registered with the configuration manager. * @feat_desc_list: List of feature descriptors to load into registered devices. * @config_desc_list: List of system configuration descriptors to load into registered devices. + * @load_order_list: Ordered list of owners for dynamically loaded configurations. * @sys_active_cnt: Total number of active config descriptor references. * @cfgfs_subsys: configfs subsystem used to manage configurations. + * @sysfs_active_config: Active config hash used if CoreSight is controlled from sysfs. + * @sysfs_active_preset: Active preset index used if CoreSight is controlled from sysfs. */ struct cscfg_manager { struct device dev; struct list_head csdev_desc_list; struct list_head feat_desc_list; struct list_head config_desc_list; + struct list_head load_order_list; atomic_t sys_active_cnt; struct configfs_subsystem cfgfs_subsys; + u32 sysfs_active_config; + int sysfs_active_preset; }; /* get reference to dev in cscfg_manager */ @@ -56,18 +62,44 @@ struct cscfg_registered_csdev { struct list_head item; }; +/* owner types for loading and unloading of config and feature sets */ +enum cscfg_load_owner_type { + CSCFG_OWNER_PRELOAD, + CSCFG_OWNER_MODULE, +}; + +/** + * Load item - item to add to the load order list allowing dynamic load and + * unload of configurations and features. The caller loading a config + * set provides a context handle for unload. The API ensures that + * items are unloaded strictly in reverse order from load so that + * dependencies are respected. + * + * @item: list entry for load order list. + * @type: type of owner - allows interpretation of owner_handle. + * @owner_handle: load context - handle for owner of loaded configs.
+ */ +struct cscfg_load_owner_info { + struct list_head item; + int type; + void *owner_handle; +}; + /* internal core operations for cscfg */ int __init cscfg_init(void); void cscfg_exit(void); -int cscfg_preload(void); +int cscfg_preload(void *owner_handle); const struct cscfg_feature_desc *cscfg_get_named_feat_desc(const char *name); int cscfg_update_feat_param_val(struct cscfg_feature_desc *feat_desc, int param_idx, u64 value); - +int cscfg_config_sysfs_activate(struct cscfg_config_desc *cfg_desc, bool activate); +void cscfg_config_sysfs_set_preset(int preset); /* syscfg manager external API */ int cscfg_load_config_sets(struct cscfg_config_desc **cfg_descs, - struct cscfg_feature_desc **feat_descs); + struct cscfg_feature_desc **feat_descs, + struct cscfg_load_owner_info *owner_info); +int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info); int cscfg_register_csdev(struct coresight_device *csdev, u32 match_flags, struct cscfg_csdev_feat_ops *ops); void cscfg_unregister_csdev(struct coresight_device *csdev); @@ -77,5 +109,6 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev); int cscfg_csdev_enable_active_config(struct coresight_device *csdev, unsigned long cfg_hash, int preset); void cscfg_csdev_disable_active_config(struct coresight_device *csdev); +void cscfg_config_sysfs_get_active_cfg(unsigned long *cfg_hash, int *preset); #endif /* CORESIGHT_SYSCFG_H */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c6b854a9e476..42da31c1ab70 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -617,7 +617,7 @@ config I2C_EXYNOS5 help High-speed I2C controller on Samsung Exynos5 and newer Samsung SoCs: Exynos5250, Exynos5260, Exynos5410, Exynos542x, Exynos5800, - Exynos5433 and Exynos7. + Exynos5433, Exynos7, Exynos850 and ExynosAutoV9. Choose Y here only if you build for such Samsung SoC. config I2C_GPIO @@ -1153,22 +1153,12 @@ config I2C_XILINX This driver can also be built as a module. If so, the module will be called xilinx_i2c. -config I2C_XLR - tristate "Netlogic XLR I2C support" - depends on CPU_XLR || COMPILE_TEST - help - This driver enables support for the on-chip I2C interface of - the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs. - - This driver can also be built as a module. If so, the module - will be called i2c-xlr. - config I2C_XLP9XX - tristate "XLP9XX I2C support" - depends on CPU_XLP || ARCH_THUNDER2 || COMPILE_TEST + tristate "Cavium ThunderX2 I2C support" + depends on ARCH_THUNDER2 || COMPILE_TEST help This driver enables support for the on-chip I2C interface of - the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors. + the Cavium ThunderX2 processors. (Originally on Netlogic XLP SoCs.) This driver can also be built as a module. If so, the module will be called i2c-xlp9xx. 
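Before moving on to the I2C bus driver changes, a usage note on the reworked coresight-syscfg API above: load and unload are tied to an owner, and unload must happen in reverse load order. The sketch below shows how a hypothetical loadable configuration module might drive that API; the descriptor arrays (my_feats, my_cfgs) and module names are illustrative only, not part of this patch.

/* Illustrative sketch only: a module-owned CoreSight configuration set. */
#include <linux/module.h>
#include "coresight-syscfg.h"

/* 0-terminated descriptor arrays, contents assumed to be defined elsewhere */
static struct cscfg_feature_desc *my_feats[] = { NULL };
static struct cscfg_config_desc *my_cfgs[] = { NULL };

/* the owner record ties the loaded set to this module for later unload */
static struct cscfg_load_owner_info my_owner = {
	.type = CSCFG_OWNER_MODULE,
	.owner_handle = THIS_MODULE,
};

static int __init my_cscfg_init(void)
{
	/* features load before configs; duplicate names fail with -EEXIST */
	return cscfg_load_config_sets(my_cfgs, my_feats, &my_owner);
}
module_init(my_cscfg_init);

static void __exit my_cscfg_exit(void)
{
	/* refused (-EBUSY/-EINVAL) if configs are active or not last loaded */
	cscfg_unload_config_sets(&my_owner);
}
module_exit(my_cscfg_exit);

MODULE_LICENSE("GPL");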
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index d85899fef8c7..1d00dce77098 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -119,7 +119,6 @@ obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o -obj-$(CONFIG_I2C_XLR) += i2c-xlr.o obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 67e8b97c0c95..771e53d3d197 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -16,8 +16,6 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> -#include <linux/irqchip/chained_irq.h> -#include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 37443edbf754..dfc534065595 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -402,7 +402,7 @@ static const struct i2c_adapter_quirks bcm2835_i2c_quirks = { static int bcm2835_i2c_probe(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev; - struct resource *mem, *irq; + struct resource *mem; int ret; struct i2c_adapter *adap; struct clk *mclk; @@ -452,12 +452,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) return ret; } - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq) { - dev_err(&pdev->dev, "No IRQ resource\n"); - return -ENODEV; - } - i2c_dev->irq = irq->start; + i2c_dev->irq = platform_get_irq(pdev, 0); + if (i2c_dev->irq < 0) + return i2c_dev->irq; ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED, dev_name(&pdev->dev), i2c_dev); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 60a2e750cee9..4b26cba40139 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -191,23 +191,26 @@ struct reset_control; * @cmd_complete: tx completion indicator * @clk: input reference clock * @pclk: clock required to access the registers + * @rst: optional reset for the controller * @slave: represent an I2C slave device + * @get_clk_rate_khz: callback to retrieve IP specific bus speed * @cmd_err: run time hardware error code * @msgs: points to an array of messages currently being transferred * @msgs_num: the number of elements in msgs - * @msg_write_idx: the element index of the current tx message in the msgs - * array + * @msg_write_idx: the element index of the current tx message in the msgs array * @tx_buf_len: the length of the current tx buffer * @tx_buf: the current tx buffer - * @msg_read_idx: the element index of the current rx message in the msgs - * array + * @msg_read_idx: the element index of the current rx message in the msgs array * @rx_buf_len: the length of the current rx buffer * @rx_buf: the current rx buffer * @msg_err: error status of the current transfer * @status: i2c master status, one of STATUS_* * @abort_source: copy of the TX_ABRT_SOURCE register * @irq: interrupt number for the i2c master + * @flags: platform specific flags like type of IO accessors or model * @adapter: i2c subsystem adapter node + * @functionality: I2C_FUNC_* ORed bits to reflect what the controller supports + * @master_cfg: configuration for the master device * @slave_cfg: configuration for the slave device *
@tx_fifo_depth: depth of the hardware tx fifo * @rx_fifo_depth: depth of the hardware rx fifo @@ -228,7 +231,9 @@ struct reset_control; * @disable: function to disable the controller * @disable_int: function to disable all interrupts * @init: function to initialize the I2C hardware + * @set_sda_hold_time: callback to set the IP specific SDA hold timing * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE + * @rinfo: I²C GPIO recovery information * @suspended: set to true if the controller is suspended * * HCNT and LCNT parameters can be used if the platform knows more accurate diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 9b08bb5df38d..9177463c2cbb 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -701,7 +701,8 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy); if (stat & DW_IC_INTR_ACTIVITY) regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy); - if (stat & DW_IC_INTR_STOP_DET) + if ((stat & DW_IC_INTR_STOP_DET) && + ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL))) regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy); if (stat & DW_IC_INTR_START_DET) regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy); @@ -723,6 +724,7 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev) if (stat & DW_IC_INTR_TX_ABRT) { dev->cmd_err |= DW_IC_ERR_TX_ABRT; dev->status = STATUS_IDLE; + dev->rx_outstanding = 0; /* * Anytime TX_ABRT is set, the contents of the tx/rx @@ -745,7 +747,8 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev) */ tx_aborted: - if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) + if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) && + (dev->rx_outstanding == 0)) complete(&dev->cmd_complete); else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { /* Workaround to trigger pending interrupt */ diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 0f409a4c2da0..ef4250f8852b 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -38,11 +38,18 @@ enum dw_pci_ctl_id_t { navi_amd, }; +/* + * This is a legacy structure to describe the hardware counters + * to configure signal timings on the bus. For Device Tree platforms + * one should use the respective properties and for ACPI there is + * a set of ACPI methods that provide these counters. No new + * platform should use this structure.
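The legacy-counter warning above deserves a concrete contrast. On new platforms the same timing data is expected to come from firmware properties, which the I2C core can parse generically; a minimal sketch follows (the probe context and the example_ prefix are assumptions, not part of this patch):

/* Sketch: fetch bus timings from DT/ACPI instead of a hard-coded table. */
#include <linux/i2c.h>

static void example_get_timings(struct device *dev)
{
	struct i2c_timings t;

	/* use_defaults=true fills standard values for any absent property */
	i2c_parse_fw_timings(dev, &t, true);

	dev_dbg(dev, "bus %u Hz, SDA hold %u ns\n", t.bus_freq_hz, t.sda_hold_ns);
}

The Tegra hunk near the end of this section uses exactly this helper to replace its open-coded clock-frequency parsing.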
+ */ struct dw_scl_sda_cfg { - u32 ss_hcnt; - u32 fs_hcnt; - u32 ss_lcnt; - u32 fs_lcnt; + u16 ss_hcnt; + u16 fs_hcnt; + u16 ss_lcnt; + u16 fs_lcnt; u32 sda_hold; }; @@ -206,8 +213,7 @@ static struct dw_pci_controller dw_pci_controllers[] = { }, }; -#ifdef CONFIG_PM -static int i2c_dw_pci_suspend(struct device *dev) +static int __maybe_unused i2c_dw_pci_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); @@ -217,7 +223,7 @@ static int i2c_dw_pci_suspend(struct device *dev) return 0; } -static int i2c_dw_pci_resume(struct device *dev) +static int __maybe_unused i2c_dw_pci_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); int ret; @@ -227,7 +233,6 @@ static int i2c_dw_pci_resume(struct device *dev) return ret; } -#endif static UNIVERSAL_DEV_PM_OPS(i2c_dw_pm_ops, i2c_dw_pci_suspend, i2c_dw_pci_resume, NULL); @@ -241,28 +246,24 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, struct dw_pci_controller *controller; struct dw_scl_sda_cfg *cfg; - if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) { - dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__, - id->driver_data); - return -EINVAL; - } + if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) + return dev_err_probe(&pdev->dev, -EINVAL, + "Invalid driver data %ld\n", + id->driver_data); controller = &dw_pci_controllers[id->driver_data]; r = pcim_enable_device(pdev); - if (r) { - dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n", - r); - return r; - } + if (r) + return dev_err_probe(&pdev->dev, r, + "Failed to enable I2C PCI device\n"); pci_set_master(pdev); r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); - if (r) { - dev_err(&pdev->dev, "I/O memory remapping failed\n"); - return r; - } + if (r) + return dev_err_probe(&pdev->dev, r, + "I/O memory remapping failed\n"); dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL); if (!dev) @@ -352,9 +353,6 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdev); } -/* work with hotplug and coldplug */ -MODULE_ALIAS("i2c_designware-pci"); - static const struct pci_device_id i2_designware_pci_ids[] = { /* Medfield */ { PCI_VDEVICE(INTEL, 0x0817), medfield }, @@ -411,9 +409,10 @@ static struct pci_driver dw_i2c_driver = { .pm = &i2c_dw_pm_ops, }, }; - module_pci_driver(dw_i2c_driver); +/* Work with hotplug and coldplug */ +MODULE_ALIAS("i2c_designware-pci"); MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>"); MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 21113665ddea..2bd81abc86f6 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -293,6 +293,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) DPM_FLAG_MAY_SKIP_RESUME); } + device_enable_async_suspend(&pdev->dev); + /* The code below assumes runtime PM to be disabled. 
*/ WARN_ON(pm_runtime_enabled(&pdev->dev)); diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index 97d4f3ac0abd..b812d1090c0f 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -169,6 +169,7 @@ enum i2c_type_exynos { I2C_TYPE_EXYNOS5, I2C_TYPE_EXYNOS7, + I2C_TYPE_EXYNOSAUTOV9, }; struct exynos5_i2c { @@ -181,7 +182,8 @@ struct exynos5_i2c { unsigned int irq; void __iomem *regs; - struct clk *clk; + struct clk *clk; /* operating clock */ + struct clk *pclk; /* bus clock */ struct device *dev; int state; @@ -230,6 +232,11 @@ static const struct exynos_hsi2c_variant exynos7_hsi2c_data = { .hw = I2C_TYPE_EXYNOS7, }; +static const struct exynos_hsi2c_variant exynosautov9_hsi2c_data = { + .fifo_depth = 64, + .hw = I2C_TYPE_EXYNOSAUTOV9, +}; + static const struct of_device_id exynos5_i2c_match[] = { { .compatible = "samsung,exynos5-hsi2c", @@ -243,6 +250,9 @@ static const struct of_device_id exynos5_i2c_match[] = { }, { .compatible = "samsung,exynos7-hsi2c", .data = &exynos7_hsi2c_data + }, { + .compatible = "samsung,exynosautov9-hsi2c", + .data = &exynosautov9_hsi2c_data }, {}, }; MODULE_DEVICE_TABLE(of, exynos5_i2c_match); @@ -282,6 +292,31 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings) int div, clk_cycle, temp; /* + * In case of HSI2C controllers in ExynosAutoV9: + * + * FSCL = IPCLK / ((CLK_DIV + 1) * 16) + * T_SCL_LOW = IPCLK * (CLK_DIV + 1) * (N + M) + * [N : number of 0's in the TSCL_H_HS] + * [M : number of 0's in the TSCL_L_HS] + * T_SCL_HIGH = IPCLK * (CLK_DIV + 1) * (N + M) + * [N : number of 1's in the TSCL_H_HS] + * [M : number of 1's in the TSCL_L_HS] + * + * Result of (N + M) is always 8. + * In general case, we don't need to control timing_s1 and timing_s2. + */ + if (i2c->variant->hw == I2C_TYPE_EXYNOSAUTOV9) { + div = ((clkin / (16 * i2c->op_clock)) - 1); + i2c_timing_s3 = div << 16; + if (hs_timings) + writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3); + else + writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3); + + return 0; + } + + /* * In case of HSI2C controller in Exynos5 series * FPCLK / FI2C = * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE @@ -422,7 +457,10 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) writel(int_status, i2c->regs + HSI2C_INT_STATUS); /* handle interrupt related to the transfer status */ - if (i2c->variant->hw == I2C_TYPE_EXYNOS7) { + switch (i2c->variant->hw) { + case I2C_TYPE_EXYNOSAUTOV9: + fallthrough; + case I2C_TYPE_EXYNOS7: if (int_status & HSI2C_INT_TRANS_DONE) { i2c->trans_done = 1; i2c->state = 0; @@ -443,7 +481,12 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) i2c->state = -ETIMEDOUT; goto stop; } - } else if (int_status & HSI2C_INT_I2C) { + + break; + case I2C_TYPE_EXYNOS5: + if (!(int_status & HSI2C_INT_I2C)) + break; + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); if (trans_status & HSI2C_NO_DEV_ACK) { dev_dbg(i2c->dev, "No ACK from device\n"); @@ -465,6 +508,8 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) i2c->trans_done = 1; i2c->state = 0; } + + break; } if ((i2c->msg->flags & I2C_M_RD) && (int_status & @@ -569,13 +614,13 @@ static void exynos5_i2c_bus_check(struct exynos5_i2c *i2c) { unsigned long timeout; - if (i2c->variant->hw != I2C_TYPE_EXYNOS7) + if (i2c->variant->hw == I2C_TYPE_EXYNOS5) return; /* - * HSI2C_MASTER_ST_LOSE state in EXYNOS7 variant before transaction - * indicates that bus is stuck (SDA is low). In such case bus recovery - * can be performed. 
+ * HSI2C_MASTER_ST_LOSE state (in Exynos7 and ExynosAutoV9 variants) + * before transaction indicates that bus is stuck (SDA is low). + * In such case bus recovery can be performed. */ timeout = jiffies + msecs_to_jiffies(100); for (;;) { @@ -611,10 +656,10 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) unsigned long flags; unsigned short trig_lvl; - if (i2c->variant->hw == I2C_TYPE_EXYNOS7) - int_en |= HSI2C_INT_I2C_TRANS; - else + if (i2c->variant->hw == I2C_TYPE_EXYNOS5) int_en |= HSI2C_INT_I2C; + else + int_en |= HSI2C_INT_I2C_TRANS; i2c_ctl = readl(i2c->regs + HSI2C_CTL); i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON); @@ -713,10 +758,14 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap, struct exynos5_i2c *i2c = adap->algo_data; int i, ret; - ret = clk_enable(i2c->clk); + ret = clk_enable(i2c->pclk); if (ret) return ret; + ret = clk_enable(i2c->clk); + if (ret) + goto err_pclk; + for (i = 0; i < num; ++i) { ret = exynos5_i2c_xfer_msg(i2c, msgs + i, i + 1 == num); if (ret) @@ -724,6 +773,8 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap, } clk_disable(i2c->clk); +err_pclk: + clk_disable(i2c->pclk); return ret ?: num; } @@ -763,10 +814,20 @@ static int exynos5_i2c_probe(struct platform_device *pdev) return -ENOENT; } - ret = clk_prepare_enable(i2c->clk); + i2c->pclk = devm_clk_get_optional(&pdev->dev, "hsi2c_pclk"); + if (IS_ERR(i2c->pclk)) { + return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk), + "cannot get pclk"); + } + + ret = clk_prepare_enable(i2c->pclk); if (ret) return ret; + ret = clk_prepare_enable(i2c->clk); + if (ret) + goto err_pclk; + i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) { ret = PTR_ERR(i2c->regs); @@ -809,11 +870,15 @@ static int exynos5_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, i2c); clk_disable(i2c->clk); + clk_disable(i2c->pclk); return 0; err_clk: clk_disable_unprepare(i2c->clk); + + err_pclk: + clk_disable_unprepare(i2c->pclk); return ret; } @@ -824,6 +889,7 @@ static int exynos5_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&i2c->adap); clk_unprepare(i2c->clk); + clk_unprepare(i2c->pclk); return 0; } @@ -835,6 +901,7 @@ static int exynos5_i2c_suspend_noirq(struct device *dev) i2c_mark_adapter_suspended(&i2c->adap); clk_unprepare(i2c->clk); + clk_unprepare(i2c->pclk); return 0; } @@ -844,21 +911,30 @@ static int exynos5_i2c_resume_noirq(struct device *dev) struct exynos5_i2c *i2c = dev_get_drvdata(dev); int ret = 0; - ret = clk_prepare_enable(i2c->clk); + ret = clk_prepare_enable(i2c->pclk); if (ret) return ret; + ret = clk_prepare_enable(i2c->clk); + if (ret) + goto err_pclk; + ret = exynos5_hsi2c_clock_setup(i2c); - if (ret) { - clk_disable_unprepare(i2c->clk); - return ret; - } + if (ret) + goto err_clk; exynos5_i2c_init(i2c); clk_disable(i2c->clk); + clk_disable(i2c->pclk); i2c_mark_adapter_resumed(&i2c->adap); return 0; + +err_clk: + clk_disable_unprepare(i2c->clk); +err_pclk: + clk_disable_unprepare(i2c->pclk); + return ret; } #endif diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 41446f9cc52d..7428cc6af5cc 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -328,22 +328,14 @@ static int i801_check_pre(struct i801_priv *priv) status = inb_p(SMBHSTSTS(priv)); if (status & SMBHSTSTS_HOST_BUSY) { - dev_err(&priv->pci_dev->dev, "SMBus is busy, can't use it!\n"); + pci_err(priv->pci_dev, "SMBus is busy, can't use it!\n"); return -EBUSY; } status &= STATUS_FLAGS; if (status) { - 
dev_dbg(&priv->pci_dev->dev, "Clearing status flags (%02x)\n", - status); + pci_dbg(priv->pci_dev, "Clearing status flags (%02x)\n", status); outb_p(status, SMBHSTSTS(priv)); - status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS; - if (status) { - dev_err(&priv->pci_dev->dev, - "Failed clearing status flags (%02x)\n", - status); - return -EBUSY; - } } /* @@ -356,27 +348,14 @@ static int i801_check_pre(struct i801_priv *priv) if (priv->features & FEATURE_SMBUS_PEC) { status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE; if (status) { - dev_dbg(&priv->pci_dev->dev, - "Clearing aux status flags (%02x)\n", status); + pci_dbg(priv->pci_dev, "Clearing aux status flags (%02x)\n", status); outb_p(status, SMBAUXSTS(priv)); - status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE; - if (status) { - dev_err(&priv->pci_dev->dev, - "Failed clearing aux status flags (%02x)\n", - status); - return -EBUSY; - } } } return 0; } -/* - * Convert the status register to an error code, and clear it. - * Note that status only contains the bits we want to clear, not the - * actual register value. - */ static int i801_check_post(struct i801_priv *priv, int status) { int result = 0; @@ -401,7 +380,6 @@ static int i801_check_post(struct i801_priv *priv, int status) !(status & SMBHSTSTS_FAILED)) dev_err(&priv->pci_dev->dev, "Failed terminating the transaction\n"); - outb_p(STATUS_FLAGS, SMBHSTSTS(priv)); return -ETIMEDOUT; } @@ -440,9 +418,6 @@ static int i801_check_post(struct i801_priv *priv, int status) dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n"); } - /* Clear status flags except BYTE_DONE, to be cleared by caller */ - outb_p(status, SMBHSTSTS(priv)); - return result; } @@ -523,9 +498,11 @@ static int i801_block_transaction_by_block(struct i801_priv *priv, return -EOPNOTSUPP; } + /* Set block buffer mode */ + outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv)); + inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ - /* Use 32-byte buffer to process this transaction */ if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; outb_p(len, SMBHSTDAT0(priv)); @@ -760,14 +737,6 @@ exit: return i801_check_post(priv, status); } -static int i801_set_block_buffer_mode(struct i801_priv *priv) -{ - outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv)); - if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0) - return -EIO; - return 0; -} - /* Block transaction function */ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data, char read_write, int command) @@ -775,6 +744,11 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data * int result = 0; unsigned char hostc; + if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) + return -EPROTO; + if (command == I2C_SMBUS_I2C_BLOCK_DATA) { if (read_write == I2C_SMBUS_WRITE) { /* set I2C_EN bit in configuration register */ @@ -788,22 +762,11 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data * } } - if (read_write == I2C_SMBUS_WRITE - || command == I2C_SMBUS_I2C_BLOCK_DATA) { - if (data->block[0] < 1) - data->block[0] = 1; - if (data->block[0] > I2C_SMBUS_BLOCK_MAX) - data->block[0] = I2C_SMBUS_BLOCK_MAX; - } else { - data->block[0] = 32; /* max for SMBus block reads */ - } - /* Experience has shown that the block buffer can only be used for SMBus (not I2C) block transactions, even though the datasheet doesn't mention this limitation. 
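Alongside that limitation, the reworked i801_block_transaction() above now validates the block length once, up front, for every block command. A standalone sketch of the rule it enforces (helper name is illustrative):

/* Sketch of the SMBus block-length rule enforced before any transfer. */
#include <linux/i2c.h>

static int example_check_block_len(union i2c_smbus_data *data,
				   char read_write, int command)
{
	if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA)
		data->block[0] = I2C_SMBUS_BLOCK_MAX;	/* device supplies length */
	else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
		return -EPROTO;				/* malformed request */
	return 0;
}

Rejecting bad lengths with -EPROTO replaces the old behaviour of silently clamping the caller's value into range.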
*/ - if ((priv->features & FEATURE_BLOCK_BUFFER) - && command != I2C_SMBUS_I2C_BLOCK_DATA - && i801_set_block_buffer_mode(priv) == 0) + if ((priv->features & FEATURE_BLOCK_BUFFER) && + command != I2C_SMBUS_I2C_BLOCK_DATA) result = i801_block_transaction_by_block(priv, data, read_write, command); @@ -951,8 +914,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, } out: - /* Unlock the SMBus device for use by BIOS/ACPI */ - outb_p(SMBHSTSTS_INUSE_STS, SMBHSTSTS(priv)); + /* + * Unlock the SMBus device for use by BIOS/ACPI, + * and clear status flags if not done already. + */ + outb_p(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv)); pm_runtime_mark_last_busy(&priv->pci_dev->dev); pm_runtime_put_autosuspend(&priv->pci_dev->dev); @@ -1009,66 +975,72 @@ static const struct i2c_algorithm smbus_algorithm = { .functionality = i801_func, }; +#define FEATURES_ICH5 (FEATURE_BLOCK_PROC | FEATURE_I2C_BLOCK_READ | \ + FEATURE_IRQ | FEATURE_SMBUS_PEC | \ + FEATURE_BLOCK_BUFFER | FEATURE_HOST_NOTIFY) +#define FEATURES_ICH4 (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER | \ + FEATURE_HOST_NOTIFY) + static const struct pci_device_id i801_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_4) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_16) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_17) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_N_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) }, + { PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) }, + { PCI_DEVICE_DATA(INTEL, 82801AB_3, 0) }, + { PCI_DEVICE_DATA(INTEL, 82801BA_2, 0) }, + { PCI_DEVICE_DATA(INTEL, 82801CA_3, FEATURE_HOST_NOTIFY) }, + { PCI_DEVICE_DATA(INTEL, 82801DB_3, FEATURES_ICH4) }, + { PCI_DEVICE_DATA(INTEL, 82801EB_3, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ESB_4, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ICH6_16, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ICH7_17, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ESB2_17, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ICH8_5, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ICH9_6, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, EP80579_1, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ICH10_4, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ICH10_5, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, 5_3400_SERIES_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, COUGARPOINT_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF0, FEATURES_ICH5 | FEATURE_IDF) }, + { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF1, FEATURES_ICH5 | FEATURE_IDF) }, + { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF2, FEATURES_ICH5 | FEATURE_IDF) }, + { PCI_DEVICE_DATA(INTEL, DH89XXCC_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, PANTHERPOINT_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, LYNXPOINT_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, LYNXPOINT_LP_SMBUS, FEATURES_ICH5) }, + { 
PCI_DEVICE_DATA(INTEL, AVOTON_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS0, FEATURES_ICH5 | FEATURE_IDF) }, + { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS1, FEATURES_ICH5 | FEATURE_IDF) }, + { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS2, FEATURES_ICH5 | FEATURE_IDF) }, + { PCI_DEVICE_DATA(INTEL, COLETOCREEK_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, GEMINILAKE_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_LP_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, BAYTRAIL_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, BRASWELL_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, + { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, + { PCI_DEVICE_DATA(INTEL, CDF_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, DNV_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, + { PCI_DEVICE_DATA(INTEL, EBG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, BROXTON_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, LEWISBURG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, + { PCI_DEVICE_DATA(INTEL, LEWISBURG_SSKU_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, + { PCI_DEVICE_DATA(INTEL, KABYLAKE_PCH_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, + { PCI_DEVICE_DATA(INTEL, CANNONLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, CANNONLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, ICELAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, ICELAKE_N_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, COMETLAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, COMETLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, COMETLAKE_V_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, + { PCI_DEVICE_DATA(INTEL, ELKHART_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, TIGERLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, TIGERLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, JASPER_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_M_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { 0, } }; @@ -1493,15 +1465,14 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv) } #endif -static const struct itco_wdt_platform_data spt_tco_platform_data = { - .name = "Intel PCH", - .version = 4, -}; - static struct platform_device * i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev, struct resource *tco_res) { + static const struct itco_wdt_platform_data pldata = { + .name = "Intel PCH", + .version = 4, + }; struct resource *res; unsigned int devfn; u64 base64_addr; @@ -1544,22 +1515,20 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev, res->flags = IORESOURCE_MEM; return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, - tco_res, 2, &spt_tco_platform_data, - sizeof(spt_tco_platform_data)); + tco_res, 2, &pldata, sizeof(pldata)); } -static const struct itco_wdt_platform_data cnl_tco_platform_data = { - .name = "Intel PCH", - .version = 6, -}; - static struct 
platform_device * i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev, struct resource *tco_res) { - return platform_device_register_resndata(&pci_dev->dev, - "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data, - sizeof(cnl_tco_platform_data)); + static const struct itco_wdt_platform_data pldata = { + .name = "Intel PCH", + .version = 6, + }; + + return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, + tco_res, 1, &pldata, sizeof(pldata)); } static void i801_add_tco(struct i801_priv *priv) @@ -1697,72 +1666,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) mutex_init(&priv->acpi_lock); priv->pci_dev = dev; - switch (dev->device) { - case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS: - case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS: - case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: - case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: - case PCI_DEVICE_ID_INTEL_DNV_SMBUS: - case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: - case PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS: - priv->features |= FEATURE_BLOCK_PROC; - priv->features |= FEATURE_I2C_BLOCK_READ; - priv->features |= FEATURE_IRQ; - priv->features |= FEATURE_SMBUS_PEC; - priv->features |= FEATURE_BLOCK_BUFFER; - priv->features |= FEATURE_TCO_SPT; - priv->features |= FEATURE_HOST_NOTIFY; - break; - - case PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS: - case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: - case PCI_DEVICE_ID_INTEL_CDF_SMBUS: - case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: - case PCI_DEVICE_ID_INTEL_ICELAKE_N_SMBUS: - case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS: - case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS: - case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS: - case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS: - case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS: - case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS: - case PCI_DEVICE_ID_INTEL_EBG_SMBUS: - case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS: - case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS: - case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS: - priv->features |= FEATURE_BLOCK_PROC; - priv->features |= FEATURE_I2C_BLOCK_READ; - priv->features |= FEATURE_IRQ; - priv->features |= FEATURE_SMBUS_PEC; - priv->features |= FEATURE_BLOCK_BUFFER; - priv->features |= FEATURE_TCO_CNL; - priv->features |= FEATURE_HOST_NOTIFY; - break; - - case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0: - case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1: - case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2: - case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0: - case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1: - case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2: - priv->features |= FEATURE_IDF; - fallthrough; - default: - priv->features |= FEATURE_BLOCK_PROC; - priv->features |= FEATURE_I2C_BLOCK_READ; - priv->features |= FEATURE_IRQ; - fallthrough; - case PCI_DEVICE_ID_INTEL_82801DB_3: - priv->features |= FEATURE_SMBUS_PEC; - priv->features |= FEATURE_BLOCK_BUFFER; - fallthrough; - case PCI_DEVICE_ID_INTEL_82801CA_3: - priv->features |= FEATURE_HOST_NOTIFY; - fallthrough; - case PCI_DEVICE_ID_INTEL_82801BA_2: - case PCI_DEVICE_ID_INTEL_82801AB_3: - case PCI_DEVICE_ID_INTEL_82801AA_3: - break; - } + priv->features = id->driver_data; /* Disable features on user request */ for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) { diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 3576b63a6c03..27f969b3dc07 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -37,6 +37,8 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> +#include <linux/spinlock.h> +#include 
<linux/hrtimer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> @@ -51,6 +53,8 @@ /* This will be the driver name the kernel reports */ #define DRIVER_NAME "imx-i2c" +#define I2C_IMX_CHECK_DELAY 30000 /* Time to check for bus idle, in ns */ + /* * Enable DMA if transfer byte size is bigger than this threshold. * As the hardware requires, it must be bigger than 4 bytes. @@ -210,6 +214,10 @@ struct imx_i2c_struct { struct imx_i2c_dma *dma; struct i2c_client *slave; enum i2c_slave_event last_slave_event; + + /* For checking slave events. */ + spinlock_t slave_lock; + struct hrtimer slave_timer; }; static const struct imx_i2c_hwdata imx1_i2c_hwdata = { @@ -680,7 +688,7 @@ static void i2c_imx_slave_event(struct imx_i2c_struct *i2c_imx, static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx) { - u8 val; + u8 val = 0; while (i2c_imx->last_slave_event != I2C_SLAVE_STOP) { switch (i2c_imx->last_slave_event) { @@ -701,10 +709,11 @@ static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx) } } -static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx, - unsigned int status, unsigned int ctl) +/* Handle slave events; called from both the ISR and the bus idle timer. */ +static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx, + unsigned int status, unsigned int ctl) { - u8 value; + u8 value = 0; if (status & I2SR_IAL) { /* Arbitration lost */ i2c_imx_clear_irq(i2c_imx, I2SR_IAL); @@ -712,6 +721,16 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx, return IRQ_HANDLED; } + if (!(status & I2SR_IBB)) { + /* No master on the bus; that could mean a stop condition. */ + i2c_imx_slave_finish_op(i2c_imx); + return IRQ_HANDLED; + } + + if (!(status & I2SR_ICF)) + /* Data transfer still in progress, ignore this. */ + goto out; + if (status & I2SR_IAAS) { /* Addressed as a slave */ i2c_imx_slave_finish_op(i2c_imx); if (status & I2SR_SRW) { /* Master wants to read from us */ @@ -737,16 +756,9 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx, imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); } } else if (!(ctl & I2CR_MTX)) { /* Receive mode */ - if (status & I2SR_IBB) { /* No STOP signal detected */ - value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); - i2c_imx_slave_event(i2c_imx, - I2C_SLAVE_WRITE_RECEIVED, &value); - } else { /* STOP signal is detected */ - dev_dbg(&i2c_imx->adapter.dev, - "STOP signal detected"); - i2c_imx_slave_event(i2c_imx, - I2C_SLAVE_STOP, &value); - } + value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); + i2c_imx_slave_event(i2c_imx, + I2C_SLAVE_WRITE_RECEIVED, &value); } else if (!(status & I2SR_RXAK)) { /* Transmit mode received ACK */ ctl |= I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); @@ -755,15 +767,43 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx, I2C_SLAVE_READ_PROCESSED, &value); imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR); - } else { /* Transmit mode received NAK */ + } else { /* Transmit mode received NAK, operation is done */ ctl &= ~I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); + i2c_imx_slave_finish_op(i2c_imx); + return IRQ_HANDLED; } +out: + /* + * No need to check the return value here. If it returns 0 or + * 1, then everything is fine. If it returns -1, then the + * timer is running in the handler. This will still work, + * though it may be redone (or already have been done) by the + * timer function.
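The comment above concerns re-arming the bus-idle check: because a STOP condition on this controller does not always raise an interrupt, the driver now polls for idle with a short one-shot hrtimer that the ISR keeps pushing out. A reduced sketch of the pattern (names prefixed example_ are placeholders, not the driver's):

/* Sketch: one-shot hrtimer re-armed after each slave event. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define EXAMPLE_CHECK_DELAY_NS	30000	/* mirrors I2C_IMX_CHECK_DELAY */

static struct hrtimer example_timer;

static enum hrtimer_restart example_timeout(struct hrtimer *t)
{
	/* re-read controller status here; finish the op if the bus went idle */
	return HRTIMER_NORESTART;	/* one-shot: the ISR re-arms it */
}

static void example_setup(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	example_timer.function = example_timeout;
}

static void example_rearm(void)
{
	/* best-effort cancel, then schedule the next check relative to now */
	hrtimer_try_to_cancel(&example_timer);
	hrtimer_forward_now(&example_timer, ns_to_ktime(EXAMPLE_CHECK_DELAY_NS));
	hrtimer_restart(&example_timer);
}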
+ */ + hrtimer_try_to_cancel(&i2c_imx->slave_timer); + hrtimer_forward_now(&i2c_imx->slave_timer, I2C_IMX_CHECK_DELAY); + hrtimer_restart(&i2c_imx->slave_timer); return IRQ_HANDLED; } +static enum hrtimer_restart i2c_imx_slave_timeout(struct hrtimer *t) +{ + struct imx_i2c_struct *i2c_imx = container_of(t, struct imx_i2c_struct, + slave_timer); + unsigned int ctl, status; + unsigned long flags; + + spin_lock_irqsave(&i2c_imx->slave_lock, flags); + status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); + ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + i2c_imx_slave_handle(i2c_imx, status, ctl); + spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); + return HRTIMER_NORESTART; +} + static void i2c_imx_slave_init(struct imx_i2c_struct *i2c_imx) { int temp; @@ -843,7 +883,9 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) { struct imx_i2c_struct *i2c_imx = dev_id; unsigned int ctl, status; + unsigned long flags; + spin_lock_irqsave(&i2c_imx->slave_lock, flags); status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); @@ -851,14 +893,20 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) i2c_imx_clear_irq(i2c_imx, I2SR_IIF); if (i2c_imx->slave) { if (!(ctl & I2CR_MSTA)) { - return i2c_imx_slave_isr(i2c_imx, status, ctl); - } else if (i2c_imx->last_slave_event != - I2C_SLAVE_STOP) { - i2c_imx_slave_finish_op(i2c_imx); + irqreturn_t ret; + + ret = i2c_imx_slave_handle(i2c_imx, + status, ctl); + spin_unlock_irqrestore(&i2c_imx->slave_lock, + flags); + return ret; } + i2c_imx_slave_finish_op(i2c_imx); } + spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return i2c_imx_master_isr(i2c_imx, status); } + spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return IRQ_NONE; } @@ -1378,6 +1426,10 @@ static int i2c_imx_probe(struct platform_device *pdev) if (!i2c_imx) return -ENOMEM; + spin_lock_init(&i2c_imx->slave_lock); + hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + i2c_imx->slave_timer.function = i2c_imx_slave_timeout; + match = device_get_match_data(&pdev->dev); if (match) i2c_imx->hwdata = match; @@ -1491,6 +1543,8 @@ static int i2c_imx_remove(struct platform_device *pdev) if (ret < 0) return ret; + hrtimer_cancel(&i2c_imx->slave_timer); + /* remove adapter */ dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); i2c_del_adapter(&i2c_imx->adapter); diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index db26cc36e13f..6c698c10d3cd 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -119,23 +119,30 @@ static inline void writeccr(struct mpc_i2c *i2c, u32 x) /* Sometimes 9th clock pulse isn't generated, and slave doesn't release * the bus, because it wants to send ACK. * Following sequence of enabling/disabling and sending start/stop generates - * the 9 pulses, so it's all OK. + * the 9 pulses, each with a START then ending with STOP, so it's all OK. 
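The rewritten mpc_i2c_fixup() that follows hand-crafts these nine pulses with direct controller writes. For comparison only: drivers that can sample and drive the lines may delegate the same nine-pulse dance to the I2C core's generic recovery. A sketch with placeholder accessors (the ex_ names are assumptions, not from this patch):

/* Sketch: generic SCL-toggling bus recovery via the I2C core. */
#include <linux/i2c.h>

static int ex_get_scl(struct i2c_adapter *adap) { return 1; /* read SCL */ }
static void ex_set_scl(struct i2c_adapter *adap, int val) { /* drive SCL */ }
static int ex_get_sda(struct i2c_adapter *adap) { return 1; /* read SDA */ }

static struct i2c_bus_recovery_info ex_rinfo = {
	.recover_bus = i2c_generic_scl_recovery,	/* toggles SCL up to 9 times */
	.get_scl = ex_get_scl,
	.set_scl = ex_set_scl,
	.get_sda = ex_get_sda,
};

/* assign adap->bus_recovery_info = &ex_rinfo before i2c_add_adapter(); the
 * core then services i2c_recover_bus() whenever the bus is found stuck */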
*/ static void mpc_i2c_fixup(struct mpc_i2c *i2c) { int k; - u32 delay_val = 1000000 / i2c->real_clk + 1; - - if (delay_val < 2) - delay_val = 2; + unsigned long flags; for (k = 9; k; k--) { writeccr(i2c, 0); - writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN); + writeb(0, i2c->base + MPC_I2C_SR); /* clear any status bits */ + writeccr(i2c, CCR_MEN | CCR_MSTA); /* START */ + readb(i2c->base + MPC_I2C_DR); /* init xfer */ + udelay(15); /* let it hit the bus */ + local_irq_save(flags); /* should not be delayed further */ + writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSTA); /* delay SDA */ readb(i2c->base + MPC_I2C_DR); - writeccr(i2c, CCR_MEN); - udelay(delay_val << 1); + if (k != 1) + udelay(5); + local_irq_restore(flags); } + writeccr(i2c, CCR_MEN); /* Initiate STOP */ + readb(i2c->base + MPC_I2C_DR); + udelay(15); /* Let STOP propagate */ + writeccr(i2c, 0); } static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask) diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index fc13511f4562..f71c730f9838 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -367,11 +367,15 @@ static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv) rcar_i2c_prepare_msg(priv); } -static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv) +static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate) { struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE ? priv->dma_rx : priv->dma_tx; + /* only allowed from thread context! */ + if (terminate) + dmaengine_terminate_sync(chan); + dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), sg_dma_len(&priv->sg), priv->dma_direction); @@ -386,25 +390,13 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICDMAER, 0); } -static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv) -{ - if (priv->dma_direction == DMA_NONE) - return; - else if (priv->dma_direction == DMA_FROM_DEVICE) - dmaengine_terminate_all(priv->dma_rx); - else if (priv->dma_direction == DMA_TO_DEVICE) - dmaengine_terminate_all(priv->dma_tx); - - rcar_i2c_dma_unmap(priv); -} - static void rcar_i2c_dma_callback(void *data) { struct rcar_i2c_priv *priv = data; priv->pos += sg_dma_len(&priv->sg); - rcar_i2c_dma_unmap(priv); + rcar_i2c_cleanup_dma(priv, false); } static bool rcar_i2c_dma(struct rcar_i2c_priv *priv) @@ -456,7 +448,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv) DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) { dev_dbg(dev, "dma prep slave sg failed, using PIO\n"); - rcar_i2c_cleanup_dma(priv); + rcar_i2c_cleanup_dma(priv, false); return false; } @@ -466,7 +458,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv) cookie = dmaengine_submit(txdesc); if (dma_submit_error(cookie)) { dev_dbg(dev, "submitting dma failed, using PIO\n"); - rcar_i2c_cleanup_dma(priv); + rcar_i2c_cleanup_dma(priv, false); return false; } @@ -846,7 +838,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, /* cleanup DMA if it couldn't complete properly due to an error */ if (priv->dma_direction != DMA_NONE) - rcar_i2c_cleanup_dma(priv); + rcar_i2c_cleanup_dma(priv, true); if (!time_left) { rcar_i2c_init(priv); diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index 78b84445ee6a..8dfd27dc6149 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -433,12 +433,12 @@ static int riic_i2c_probe(struct platform_device *pdev) } for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) { - res = platform_get_resource(pdev, IORESOURCE_IRQ, riic_irqs[i].res_num); 
- if (!res) - return -ENODEV; + ret = platform_get_irq(pdev, riic_irqs[i].res_num); + if (ret < 0) + return ret; - ret = devm_request_irq(&pdev->dev, res->start, riic_irqs[i].isr, - 0, riic_irqs[i].name, riic); + ret = devm_request_irq(&pdev->dev, ret, riic_irqs[i].isr, + 0, riic_irqs[i].name, riic); if (ret) { dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name); return ret; diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 02ddb237f69a..989040a73626 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -1338,8 +1338,15 @@ static int rk3x_i2c_probe(struct platform_device *pdev) goto err_pclk; } + ret = clk_enable(i2c->clk); + if (ret < 0) { + dev_err(&pdev->dev, "Can't enable bus clk: %d\n", ret); + goto err_clk_notifier; + } + clk_rate = clk_get_rate(i2c->clk); rk3x_i2c_adapt_div(i2c, clk_rate); + clk_disable(i2c->clk); ret = i2c_add_adapter(&i2c->adap); if (ret < 0) diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index db8fa4186814..72f024a0c363 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -442,34 +442,26 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static void sh_mobile_i2c_dma_unmap(struct sh_mobile_i2c_data *pd) +static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd, bool terminate) { struct dma_chan *chan = pd->dma_direction == DMA_FROM_DEVICE ? pd->dma_rx : pd->dma_tx; + /* only allowed from thread context! */ + if (terminate) + dmaengine_terminate_sync(chan); + dma_unmap_single(chan->device->dev, sg_dma_address(&pd->sg), pd->msg->len, pd->dma_direction); pd->dma_direction = DMA_NONE; } -static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd) -{ - if (pd->dma_direction == DMA_NONE) - return; - else if (pd->dma_direction == DMA_FROM_DEVICE) - dmaengine_terminate_sync(pd->dma_rx); - else if (pd->dma_direction == DMA_TO_DEVICE) - dmaengine_terminate_sync(pd->dma_tx); - - sh_mobile_i2c_dma_unmap(pd); -} - static void sh_mobile_i2c_dma_callback(void *data) { struct sh_mobile_i2c_data *pd = data; - sh_mobile_i2c_dma_unmap(pd); + sh_mobile_i2c_cleanup_dma(pd, false); pd->pos = pd->msg->len; pd->stop_after_dma = true; @@ -549,7 +541,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) { dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n"); - sh_mobile_i2c_cleanup_dma(pd); + sh_mobile_i2c_cleanup_dma(pd, false); return; } @@ -559,7 +551,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) cookie = dmaengine_submit(txdesc); if (dma_submit_error(cookie)) { dev_dbg(pd->dev, "submitting dma failed, using PIO\n"); - sh_mobile_i2c_cleanup_dma(pd); + sh_mobile_i2c_cleanup_dma(pd, false); return; } @@ -698,7 +690,7 @@ static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd, if (!time_left) { dev_err(pd->dev, "Transfer request timed out\n"); if (pd->dma_direction != DMA_NONE) - sh_mobile_i2c_cleanup_dma(pd); + sh_mobile_i2c_cleanup_dma(pd, true); err = -ETIMEDOUT; break; @@ -838,20 +830,38 @@ static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd) static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile_i2c_data *pd) { - struct resource *res; - resource_size_t n; + struct device_node *np = dev_of_node(&dev->dev); int k = 0, ret; - while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) { - for (n = res->start; n <= res->end; n++) { - ret = 
devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr, - 0, dev_name(&dev->dev), pd); + if (np) { + int irq; + + while ((irq = platform_get_irq_optional(dev, k)) != -ENXIO) { + if (irq < 0) + return irq; + ret = devm_request_irq(&dev->dev, irq, sh_mobile_i2c_isr, + 0, dev_name(&dev->dev), pd); if (ret) { - dev_err(&dev->dev, "cannot request IRQ %pa\n", &n); + dev_err(&dev->dev, "cannot request IRQ %d\n", irq); return ret; } + k++; + } + } else { + struct resource *res; + resource_size_t n; + + while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) { + for (n = res->start; n <= res->end; n++) { + ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr, + 0, dev_name(&dev->dev), pd); + if (ret) { + dev_err(&dev->dev, "cannot request IRQ %pa\n", &n); + return ret; + } + } + k++; } - k++; } return k > 0 ? 0 : -ENOENT; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 66145d2b9b55..6d4aa64b195d 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -828,18 +828,14 @@ static void stm32f7_i2c_smbus_reload(struct stm32f7_i2c_dev *i2c_dev) writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2); } -static int stm32f7_i2c_release_bus(struct i2c_adapter *i2c_adap) +static void stm32f7_i2c_release_bus(struct i2c_adapter *i2c_adap) { struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); - dev_info(i2c_dev->dev, "Trying to recover bus\n"); - stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_PE); stm32f7_i2c_hw_config(i2c_dev); - - return 0; } static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev) @@ -854,13 +850,7 @@ static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev) if (!ret) return 0; - dev_info(i2c_dev->dev, "bus busy\n"); - - ret = stm32f7_i2c_release_bus(&i2c_dev->adap); - if (ret) { - dev_err(i2c_dev->dev, "Failed to recover the bus (%d)\n", ret); - return ret; - } + stm32f7_i2c_release_bus(&i2c_dev->adap); return -EBUSY; } diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index b3184c422826..03cea102ab76 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -6,6 +6,7 @@ * Author: Colin Cross <ccross@android.com> */ +#include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> @@ -245,7 +246,7 @@ struct tegra_i2c_hw_feature { * @msg_buf: pointer to current message data * @msg_buf_remaining: size of unsent data in the message buffer * @msg_read: indicates that the transfer is a read access - * @bus_clk_rate: current I2C bus clock rate + * @timings: i2c timings information like bus frequency * @multimaster_mode: indicates that I2C controller is in multi-master mode * @tx_dma_chan: DMA transmit channel * @rx_dma_chan: DMA receive channel @@ -272,7 +273,7 @@ struct tegra_i2c_dev { unsigned int nclocks; struct clk *div_clk; - u32 bus_clk_rate; + struct i2c_timings timings; struct completion msg_complete; size_t msg_buf_remaining; @@ -608,6 +609,8 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev) static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) { u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode; + acpi_handle handle = ACPI_HANDLE(i2c_dev->dev); + struct i2c_timings *t = &i2c_dev->timings; int err; /* @@ -618,7 +621,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) * emit a noisy warning on error, which won't stay unnoticed and * won't hose machine entirely. 
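The sh_mobile IRQ-hook rework above keeps the legacy resource walk for non-DT platforms but, for DT, iterates platform_get_irq_optional() until it returns -ENXIO (no more interrupts), treating any other negative value as a fatal error. A condensed sketch of that loop shape, with a hypothetical bar_isr handler:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int bar_hook_irqs(struct platform_device *pdev, irq_handler_t bar_isr,
			 void *data)
{
	int k = 0, irq, ret;

	/* -ENXIO terminates the walk; any other error aborts the probe. */
	while ((irq = platform_get_irq_optional(pdev, k)) != -ENXIO) {
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, bar_isr, 0,
				       dev_name(&pdev->dev), data);
		if (ret)
			return ret;
		k++;
	}

	return k > 0 ? 0 : -ENOENT;
}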
*/ - err = reset_control_reset(i2c_dev->rst); + if (handle) + err = acpi_evaluate_object(handle, "_RST", NULL, NULL); + else + err = reset_control_reset(i2c_dev->rst); + WARN_ON_ONCE(err); if (i2c_dev->is_dvc) @@ -636,14 +643,14 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) if (i2c_dev->is_vi) tegra_i2c_vi_init(i2c_dev); - switch (i2c_dev->bus_clk_rate) { + switch (t->bus_freq_hz) { case I2C_MAX_STANDARD_MODE_FREQ + 1 ... I2C_MAX_FAST_MODE_PLUS_FREQ: default: tlow = i2c_dev->hw->tlow_fast_fastplus_mode; thigh = i2c_dev->hw->thigh_fast_fastplus_mode; tsu_thd = i2c_dev->hw->setup_hold_time_fast_fast_plus_mode; - if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ) + if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) non_hs_mode = i2c_dev->hw->clk_divisor_fast_plus_mode; else non_hs_mode = i2c_dev->hw->clk_divisor_fast_mode; @@ -679,7 +686,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) clk_multiplier = (tlow + thigh + 2) * (non_hs_mode + 1); err = clk_set_rate(i2c_dev->div_clk, - i2c_dev->bus_clk_rate * clk_multiplier); + t->bus_freq_hz * clk_multiplier); if (err) { dev_err(i2c_dev->dev, "failed to set div-clk rate: %d\n", err); return err; @@ -718,7 +725,7 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev) * before disabling the controller so that the STOP condition has * been delivered properly. */ - udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); + udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->timings.bus_freq_hz)); cnfg = i2c_readl(i2c_dev, I2C_CNFG); if (cnfg & I2C_CNFG_PACKET_MODE_EN) @@ -1248,7 +1255,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, * Total bits = 9 bits per byte (including ACK bit) + Start & stop bits */ xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC, - i2c_dev->bus_clk_rate); + i2c_dev->timings.bus_freq_hz); int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; tegra_i2c_unmask_irq(i2c_dev, int_mask); @@ -1625,14 +1632,10 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev) { struct device_node *np = i2c_dev->dev->of_node; bool multi_mode; - int err; - err = of_property_read_u32(np, "clock-frequency", - &i2c_dev->bus_clk_rate); - if (err) - i2c_dev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; + i2c_parse_fw_timings(i2c_dev->dev, &i2c_dev->timings, true); - multi_mode = of_property_read_bool(np, "multi-master"); + multi_mode = device_property_read_bool(i2c_dev->dev, "multi-master"); i2c_dev->multimaster_mode = multi_mode; if (of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc")) @@ -1642,10 +1645,26 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev) i2c_dev->is_vi = true; } +static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev) +{ + if (ACPI_HANDLE(i2c_dev->dev)) + return 0; + + i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c"); + if (IS_ERR(i2c_dev->rst)) + return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst), + "failed to get reset control\n"); + + return 0; +} + static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev) { int err; + if (ACPI_HANDLE(i2c_dev->dev)) + return 0; + i2c_dev->clocks[i2c_dev->nclocks++].id = "div-clk"; if (i2c_dev->hw == &tegra20_i2c_hw || i2c_dev->hw == &tegra30_i2c_hw) @@ -1720,7 +1739,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) init_completion(&i2c_dev->msg_complete); init_completion(&i2c_dev->dma_complete); - i2c_dev->hw = of_device_get_match_data(&pdev->dev); + i2c_dev->hw = device_get_match_data(&pdev->dev); i2c_dev->cont_id = pdev->id; i2c_dev->dev = &pdev->dev; @@ 
-1746,15 +1765,12 @@ static int tegra_i2c_probe(struct platform_device *pdev) if (err) return err; - i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c"); - if (IS_ERR(i2c_dev->rst)) { - dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst), - "failed to get reset control\n"); - return PTR_ERR(i2c_dev->rst); - } - tegra_i2c_parse_dt(i2c_dev); + err = tegra_i2c_init_reset(i2c_dev); + if (err) + return err; + err = tegra_i2c_init_clocks(i2c_dev); if (err) return err; @@ -1923,12 +1939,21 @@ static const struct dev_pm_ops tegra_i2c_pm = { NULL) }; +static const struct acpi_device_id tegra_i2c_acpi_match[] = { + {.id = "NVDA0101", .driver_data = (kernel_ulong_t)&tegra210_i2c_hw}, + {.id = "NVDA0201", .driver_data = (kernel_ulong_t)&tegra186_i2c_hw}, + {.id = "NVDA0301", .driver_data = (kernel_ulong_t)&tegra194_i2c_hw}, + { } +}; +MODULE_DEVICE_TABLE(acpi, tegra_i2c_acpi_match); + static struct platform_driver tegra_i2c_driver = { .probe = tegra_i2c_probe, .remove = tegra_i2c_remove, .driver = { .name = "tegra-i2c", .of_match_table = tegra_i2c_of_match, + .acpi_match_table = tegra_i2c_acpi_match, .pm = &tegra_i2c_pm, }, }; diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index 41eb0dcc3204..4b9536f50800 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -165,7 +165,7 @@ err_free: static void virtio_i2c_del_vqs(struct virtio_device *vdev) { - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); } diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 6d24dc385522..4e3b11c0f732 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -572,12 +572,6 @@ static int xlp9xx_i2c_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id xlp9xx_i2c_of_match[] = { - { .compatible = "netlogic,xlp980-i2c", }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match); - #ifdef CONFIG_ACPI static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = { {"BRCM9007", 0}, @@ -592,7 +586,6 @@ static struct platform_driver xlp9xx_i2c_driver = { .remove = xlp9xx_i2c_remove, .driver = { .name = "xlp9xx-i2c", - .of_match_table = xlp9xx_i2c_of_match, .acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids), }, }; diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c deleted file mode 100644 index 9ce20652d494..000000000000 --- a/drivers/i2c/busses/i2c-xlr.c +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright 2011, Netlogic Microsystems Inc. - * Copyright 2004, Matt Porter <mporter@kernel.crashing.org> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
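The Tegra hunks above replace of_device_get_match_data() with device_get_match_data() and register an ACPI ID table alongside the OF one, so a single probe path serves both firmware interfaces. A sketch of that dual-matching arrangement; "vendor,foo", "VNDR0001" and foo_cfg are placeholder names:

#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_cfg {
	unsigned int max_bus_freq;
};

static const struct foo_cfg foo_v1_cfg = { .max_bus_freq = 400000 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo", .data = &foo_v1_cfg },
	{ }
};

static const struct acpi_device_id foo_acpi_match[] = {
	{ .id = "VNDR0001", .driver_data = (kernel_ulong_t)&foo_v1_cfg },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* Resolves the match data for DT and ACPI enumeration alike. */
	const struct foo_cfg *cfg = device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;

	return 0;
}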
- */ - -#include <linux/err.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/ioport.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/i2c.h> -#include <linux/io.h> -#include <linux/platform_device.h> -#include <linux/of_device.h> -#include <linux/clk.h> -#include <linux/interrupt.h> -#include <linux/wait.h> - -/* XLR I2C REGISTERS */ -#define XLR_I2C_CFG 0x00 -#define XLR_I2C_CLKDIV 0x01 -#define XLR_I2C_DEVADDR 0x02 -#define XLR_I2C_ADDR 0x03 -#define XLR_I2C_DATAOUT 0x04 -#define XLR_I2C_DATAIN 0x05 -#define XLR_I2C_STATUS 0x06 -#define XLR_I2C_STARTXFR 0x07 -#define XLR_I2C_BYTECNT 0x08 -#define XLR_I2C_HDSTATIM 0x09 - -/* Sigma Designs additional registers */ -#define XLR_I2C_INT_EN 0x09 -#define XLR_I2C_INT_STAT 0x0a - -/* XLR I2C REGISTERS FLAGS */ -#define XLR_I2C_BUS_BUSY 0x01 -#define XLR_I2C_SDOEMPTY 0x02 -#define XLR_I2C_RXRDY 0x04 -#define XLR_I2C_ACK_ERR 0x08 -#define XLR_I2C_ARB_STARTERR 0x30 - -/* Register Values */ -#define XLR_I2C_CFG_ADDR 0xF8 -#define XLR_I2C_CFG_NOADDR 0xFA -#define XLR_I2C_STARTXFR_ND 0x02 /* No Data */ -#define XLR_I2C_STARTXFR_RD 0x01 /* Read */ -#define XLR_I2C_STARTXFR_WR 0x00 /* Write */ - -#define XLR_I2C_TIMEOUT 10 /* timeout per byte in msec */ - -/* - * On XLR/XLS, we need to use __raw_ IO to read the I2C registers - * because they are in the big-endian MMIO area on the SoC. - * - * The readl/writel implementation on XLR/XLS byteswaps, because - * those are for its little-endian PCI space (see arch/mips/Kconfig). - */ -static inline void xlr_i2c_wreg(u32 __iomem *base, unsigned int reg, u32 val) -{ - __raw_writel(val, base + reg); -} - -static inline u32 xlr_i2c_rdreg(u32 __iomem *base, unsigned int reg) -{ - return __raw_readl(base + reg); -} - -#define XLR_I2C_FLAG_IRQ 1 - -struct xlr_i2c_config { - u32 flags; /* optional feature support */ - u32 status_busy; /* value of STATUS[0] when busy */ - u32 cfg_extra; /* extra CFG bits to set */ -}; - -struct xlr_i2c_private { - struct i2c_adapter adap; - u32 __iomem *iobase; - int irq; - int pos; - struct i2c_msg *msg; - const struct xlr_i2c_config *cfg; - wait_queue_head_t wait; - struct clk *clk; -}; - -static int xlr_i2c_busy(struct xlr_i2c_private *priv, u32 status) -{ - return (status & XLR_I2C_BUS_BUSY) == priv->cfg->status_busy; -} - -static int xlr_i2c_idle(struct xlr_i2c_private *priv) -{ - return !xlr_i2c_busy(priv, xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS)); -} - -static int xlr_i2c_wait(struct xlr_i2c_private *priv, unsigned long timeout) -{ - int status; - int t; - - t = wait_event_timeout(priv->wait, xlr_i2c_idle(priv), - msecs_to_jiffies(timeout)); - if (!t) - return -ETIMEDOUT; - - status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS); - - return status & XLR_I2C_ACK_ERR ? 
-EIO : 0; -} - -static void xlr_i2c_tx_irq(struct xlr_i2c_private *priv, u32 status) -{ - struct i2c_msg *msg = priv->msg; - - if (status & XLR_I2C_SDOEMPTY) - xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, - msg->buf[priv->pos++]); -} - -static void xlr_i2c_rx_irq(struct xlr_i2c_private *priv, u32 status) -{ - struct i2c_msg *msg = priv->msg; - - if (status & XLR_I2C_RXRDY) - msg->buf[priv->pos++] = - xlr_i2c_rdreg(priv->iobase, XLR_I2C_DATAIN); -} - -static irqreturn_t xlr_i2c_irq(int irq, void *dev_id) -{ - struct xlr_i2c_private *priv = dev_id; - struct i2c_msg *msg = priv->msg; - u32 int_stat, status; - - int_stat = xlr_i2c_rdreg(priv->iobase, XLR_I2C_INT_STAT); - if (!int_stat) - return IRQ_NONE; - - xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_STAT, int_stat); - - if (!msg) - return IRQ_HANDLED; - - status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS); - - if (priv->pos < msg->len) { - if (msg->flags & I2C_M_RD) - xlr_i2c_rx_irq(priv, status); - else - xlr_i2c_tx_irq(priv, status); - } - - if (!xlr_i2c_busy(priv, status)) - wake_up(&priv->wait); - - return IRQ_HANDLED; -} - -static int xlr_i2c_tx(struct xlr_i2c_private *priv, u16 len, - u8 *buf, u16 addr) -{ - struct i2c_adapter *adap = &priv->adap; - unsigned long timeout, stoptime, checktime; - u32 i2c_status; - int pos, timedout; - u8 offset; - u32 xfer; - - offset = buf[0]; - xlr_i2c_wreg(priv->iobase, XLR_I2C_ADDR, offset); - xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr); - xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG, - XLR_I2C_CFG_ADDR | priv->cfg->cfg_extra); - - timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT); - stoptime = jiffies + timeout; - timedout = 0; - - if (len == 1) { - xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1); - xfer = XLR_I2C_STARTXFR_ND; - pos = 1; - } else { - xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 2); - xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, buf[1]); - xfer = XLR_I2C_STARTXFR_WR; - pos = 2; - } - - priv->pos = pos; - -retry: - /* retry can only happen on the first byte */ - xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR, xfer); - - if (priv->irq > 0) - return xlr_i2c_wait(priv, XLR_I2C_TIMEOUT * len); - - while (!timedout) { - checktime = jiffies; - i2c_status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS); - - if ((i2c_status & XLR_I2C_SDOEMPTY) && pos < len) { - xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, buf[pos++]); - - /* reset timeout on successful xmit */ - stoptime = jiffies + timeout; - } - timedout = time_after(checktime, stoptime); - - if (i2c_status & XLR_I2C_ARB_STARTERR) { - if (timedout) - break; - goto retry; - } - - if (i2c_status & XLR_I2C_ACK_ERR) - return -EIO; - - if (!xlr_i2c_busy(priv, i2c_status) && pos >= len) - return 0; - } - dev_err(&adap->dev, "I2C transmit timeout\n"); - return -ETIMEDOUT; -} - -static int xlr_i2c_rx(struct xlr_i2c_private *priv, u16 len, u8 *buf, u16 addr) -{ - struct i2c_adapter *adap = &priv->adap; - u32 i2c_status; - unsigned long timeout, stoptime, checktime; - int nbytes, timedout; - - xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG, - XLR_I2C_CFG_NOADDR | priv->cfg->cfg_extra); - xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1); - xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr); - - priv->pos = 0; - - timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT); - stoptime = jiffies + timeout; - timedout = 0; - nbytes = 0; -retry: - xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR, XLR_I2C_STARTXFR_RD); - - if (priv->irq > 0) - return xlr_i2c_wait(priv, XLR_I2C_TIMEOUT * len); - - while (!timedout) { - checktime = jiffies; - i2c_status = xlr_i2c_rdreg(priv->iobase, 
XLR_I2C_STATUS); - if (i2c_status & XLR_I2C_RXRDY) { - if (nbytes >= len) - return -EIO; /* should not happen */ - - buf[nbytes++] = - xlr_i2c_rdreg(priv->iobase, XLR_I2C_DATAIN); - - /* reset timeout on successful read */ - stoptime = jiffies + timeout; - } - - timedout = time_after(checktime, stoptime); - if (i2c_status & XLR_I2C_ARB_STARTERR) { - if (timedout) - break; - goto retry; - } - - if (i2c_status & XLR_I2C_ACK_ERR) - return -EIO; - - if (!xlr_i2c_busy(priv, i2c_status)) - return 0; - } - - dev_err(&adap->dev, "I2C receive timeout\n"); - return -ETIMEDOUT; -} - -static int xlr_i2c_xfer(struct i2c_adapter *adap, - struct i2c_msg *msgs, int num) -{ - struct i2c_msg *msg; - int i; - int ret = 0; - struct xlr_i2c_private *priv = i2c_get_adapdata(adap); - - ret = clk_enable(priv->clk); - if (ret) - return ret; - - if (priv->irq) - xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0xf); - - - for (i = 0; ret == 0 && i < num; i++) { - msg = &msgs[i]; - priv->msg = msg; - if (msg->flags & I2C_M_RD) - ret = xlr_i2c_rx(priv, msg->len, &msg->buf[0], - msg->addr); - else - ret = xlr_i2c_tx(priv, msg->len, &msg->buf[0], - msg->addr); - } - - if (priv->irq) - xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0); - - clk_disable(priv->clk); - priv->msg = NULL; - - return (ret != 0) ? ret : num; -} - -static u32 xlr_func(struct i2c_adapter *adap) -{ - /* Emulate SMBUS over I2C */ - return (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_I2C; -} - -static const struct i2c_algorithm xlr_i2c_algo = { - .master_xfer = xlr_i2c_xfer, - .functionality = xlr_func, -}; - -static const struct i2c_adapter_quirks xlr_i2c_quirks = { - .flags = I2C_AQ_NO_ZERO_LEN, -}; - -static const struct xlr_i2c_config xlr_i2c_config_default = { - .status_busy = XLR_I2C_BUS_BUSY, - .cfg_extra = 0, -}; - -static const struct xlr_i2c_config xlr_i2c_config_tangox = { - .flags = XLR_I2C_FLAG_IRQ, - .status_busy = 0, - .cfg_extra = 1 << 8, -}; - -static const struct of_device_id xlr_i2c_dt_ids[] = { - { - .compatible = "sigma,smp8642-i2c", - .data = &xlr_i2c_config_tangox, - }, - { } -}; -MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids); - -static int xlr_i2c_probe(struct platform_device *pdev) -{ - const struct of_device_id *match; - struct xlr_i2c_private *priv; - struct clk *clk; - unsigned long clk_rate; - unsigned long clk_div; - u32 busfreq; - int irq; - int ret; - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - match = of_match_device(xlr_i2c_dt_ids, &pdev->dev); - if (match) - priv->cfg = match->data; - else - priv->cfg = &xlr_i2c_config_default; - - priv->iobase = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(priv->iobase)) - return PTR_ERR(priv->iobase); - - irq = platform_get_irq(pdev, 0); - - if (irq > 0 && (priv->cfg->flags & XLR_I2C_FLAG_IRQ)) { - priv->irq = irq; - - xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0); - xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_STAT, 0xf); - - ret = devm_request_irq(&pdev->dev, priv->irq, xlr_i2c_irq, - IRQF_SHARED, dev_name(&pdev->dev), - priv); - if (ret) - return ret; - - init_waitqueue_head(&priv->wait); - } - - if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", - &busfreq)) - busfreq = I2C_MAX_STANDARD_MODE_FREQ; - - clk = devm_clk_get(&pdev->dev, NULL); - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - return ret; - - clk_rate = clk_get_rate(clk); - clk_div = DIV_ROUND_UP(clk_rate, 2 * busfreq); - xlr_i2c_wreg(priv->iobase, XLR_I2C_CLKDIV, clk_div); - - clk_disable(clk); - priv->clk = clk; - } - - 
priv->adap.dev.parent = &pdev->dev; - priv->adap.dev.of_node = pdev->dev.of_node; - priv->adap.owner = THIS_MODULE; - priv->adap.algo_data = priv; - priv->adap.algo = &xlr_i2c_algo; - priv->adap.quirks = &xlr_i2c_quirks; - priv->adap.nr = pdev->id; - priv->adap.class = I2C_CLASS_HWMON; - snprintf(priv->adap.name, sizeof(priv->adap.name), "xlr-i2c"); - - i2c_set_adapdata(&priv->adap, priv); - ret = i2c_add_numbered_adapter(&priv->adap); - if (ret < 0) - goto err_unprepare_clk; - - platform_set_drvdata(pdev, priv); - dev_info(&priv->adap.dev, "Added I2C Bus.\n"); - return 0; - -err_unprepare_clk: - clk_unprepare(clk); - return ret; -} - -static int xlr_i2c_remove(struct platform_device *pdev) -{ - struct xlr_i2c_private *priv; - - priv = platform_get_drvdata(pdev); - i2c_del_adapter(&priv->adap); - clk_unprepare(priv->clk); - - return 0; -} - -static struct platform_driver xlr_i2c_driver = { - .probe = xlr_i2c_probe, - .remove = xlr_i2c_remove, - .driver = { - .name = "xlr-i2cbus", - .of_match_table = xlr_i2c_dt_ids, - }, -}; - -module_platform_driver(xlr_i2c_driver); - -MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@netlogicmicro.com>"); -MODULE_DESCRIPTION("XLR/XLS SoC I2C Controller driver"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:xlr-i2cbus"); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 73253e667de1..2c59dd748a49 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -953,6 +953,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf client->dev.of_node = of_node_get(info->of_node); client->dev.fwnode = info->fwnode; + device_enable_async_suspend(&client->dev); i2c_dev_set_name(adap, client, info); if (info->swnode) { @@ -1482,6 +1483,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap) if (res) goto out_reg; + device_enable_async_suspend(&adap->dev); pm_runtime_no_callbacks(&adap->dev); pm_suspend_ignore_children(&adap->dev, true); pm_runtime_enable(&adap->dev); diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index bac415a52b78..73a23e117ebe 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c @@ -7,6 +7,7 @@ #include <linux/i2c.h> #include <linux/i2c-mux.h> +#include <linux/overflow.h> #include <linux/platform_data/i2c-mux-gpio.h> #include <linux/platform_device.h> #include <linux/module.h> @@ -49,49 +50,11 @@ static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan) return 0; } -#ifdef CONFIG_ACPI - -static int i2c_mux_gpio_get_acpi_adr(struct device *dev, - struct fwnode_handle *fwdev, - unsigned int *adr) - -{ - unsigned long long adr64; - acpi_status status; - - status = acpi_evaluate_integer(ACPI_HANDLE_FWNODE(fwdev), - METHOD_NAME__ADR, - NULL, &adr64); - - if (!ACPI_SUCCESS(status)) { - dev_err(dev, "Cannot get address\n"); - return -EINVAL; - } - - *adr = adr64; - if (*adr != adr64) { - dev_err(dev, "Address out of range\n"); - return -ERANGE; - } - - return 0; -} - -#else - -static int i2c_mux_gpio_get_acpi_adr(struct device *dev, - struct fwnode_handle *fwdev, - unsigned int *adr) -{ - return -EINVAL; -} - -#endif - static int i2c_mux_gpio_probe_fw(struct gpiomux *mux, struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct fwnode_handle *fwnode = dev_fwnode(dev); struct device_node *np = dev->of_node; struct device_node *adapter_np; struct i2c_adapter *adapter = NULL; @@ -99,7 +62,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux, unsigned *values; int rc, i = 0; - if 
(is_of_node(dev->fwnode)) { + if (is_of_node(fwnode)) { if (!np) return -ENODEV; @@ -111,7 +74,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux, adapter = of_find_i2c_adapter_by_node(adapter_np); of_node_put(adapter_np); - } else if (is_acpi_node(dev->fwnode)) { + } else if (is_acpi_node(fwnode)) { /* * In ACPI land the mux should be a direct child of the i2c * bus it muxes. @@ -141,16 +104,16 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux, fwnode_property_read_u32(child, "reg", values + i); } else if (is_acpi_node(child)) { - rc = i2c_mux_gpio_get_acpi_adr(dev, child, values + i); + rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i); if (rc) - return rc; + return dev_err_probe(dev, rc, "Cannot get address\n"); } i++; } mux->data.values = values; - if (fwnode_property_read_u32(dev->fwnode, "idle-state", &mux->data.idle)) + if (device_property_read_u32(dev, "idle-state", &mux->data.idle)) mux->data.idle = I2C_MUX_GPIO_NO_IDLE; return 0; @@ -190,7 +153,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev) return -EPROBE_DEFER; muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, - ngpios * sizeof(*mux->gpios), 0, + array_size(ngpios, sizeof(*mux->gpios)), 0, i2c_mux_gpio_select, NULL); if (!muxc) { ret = -ENOMEM; diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index c3b4c677b442..dfe18dcd008d 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -343,7 +343,8 @@ struct bus_type i3c_bus_type = { static enum i3c_addr_slot_status i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) { - int status, bitpos = addr * 2; + unsigned long status; + int bitpos = addr * 2; if (addr > I2C_MAX_ADDR) return I3C_ADDR_SLOT_RSVD; diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 03a368da51b9..51a8608203de 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -793,6 +793,10 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m) return -ENOMEM; pos = dw_i3c_master_get_free_pos(master); + if (pos < 0) { + dw_i3c_master_free_xfer(xfer); + return pos; + } cmd = &xfer->cmds[0]; cmd->cmd_hi = 0x1; cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) | diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index 1b73647cc3b1..8c01123dc4ed 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -662,7 +662,7 @@ static int i3c_hci_init(struct i3c_hci *hci) /* Make sure our data ordering fits the host's */ regval = reg_read(HC_CONTROL); - if (IS_ENABLED(CONFIG_BIG_ENDIAN)) { + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) { regval |= HC_CONTROL_DATA_BIG_ENDIAN; reg_write(HC_CONTROL, regval); diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c index 783e551a2c85..97bb49ff5b53 100644 --- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c +++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c @@ -160,9 +160,7 @@ static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr) unsigned int dat_idx; u32 dat_w0; - for (dat_idx = find_first_bit(hci->DAT_data, hci->DAT_entries); - dat_idx < hci->DAT_entries; - dat_idx = find_next_bit(hci->DAT_data, hci->DAT_entries, dat_idx)) { + for_each_set_bit(dat_idx, hci->DAT_data, hci->DAT_entries) { dat_w0 = dat_w0_read(dat_idx); if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr) return dat_idx; diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c 
b/drivers/i3c/master/mipi-i3c-hci/dma.c index af873a9be050..2990ac9eaade 100644 --- a/drivers/i3c/master/mipi-i3c-hci/dma.c +++ b/drivers/i3c/master/mipi-i3c-hci/dma.c @@ -223,7 +223,7 @@ static int hci_dma_init(struct i3c_hci *hci) } if (nr_rings > XFER_RINGS) nr_rings = XFER_RINGS; - rings = kzalloc(sizeof(*rings) + nr_rings * sizeof(*rh), GFP_KERNEL); + rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL); if (!rings) return -ENOMEM; hci->io_data = rings; diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h index 80beb1d5be8f..f109923f6c3f 100644 --- a/drivers/i3c/master/mipi-i3c-hci/hci.h +++ b/drivers/i3c/master/mipi-i3c-hci/hci.h @@ -98,7 +98,7 @@ struct hci_xfer { static inline struct hci_xfer *hci_alloc_xfer(unsigned int n) { - return kzalloc(sizeof(struct hci_xfer) * n, GFP_KERNEL); + return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL); } static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n) diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 879e5a64acaf..7550dad64ecf 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -17,7 +17,9 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> /* Master Mode Registers */ #define SVC_I3C_MCONFIG 0x000 @@ -119,6 +121,7 @@ #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x)) #define SVC_I3C_MAX_DEVS 32 +#define SVC_I3C_PM_TIMEOUT_MS 1000 /* This parameter depends on the implementation and may be tuned */ #define SVC_I3C_FIFO_SIZE 16 @@ -236,6 +239,40 @@ static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master) writel(mask, master->regs + SVC_I3C_MINTCLR); } +static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master) +{ + /* Clear pending warnings */ + writel(readl(master->regs + SVC_I3C_MERRWARN), + master->regs + SVC_I3C_MERRWARN); +} + +static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master) +{ + /* Flush FIFOs */ + writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB, + master->regs + SVC_I3C_MDATACTRL); +} + +static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master) +{ + u32 reg; + + /* Set RX and TX tigger levels, flush FIFOs */ + reg = SVC_I3C_MDATACTRL_FLUSHTB | + SVC_I3C_MDATACTRL_FLUSHRB | + SVC_I3C_MDATACTRL_UNLOCK_TRIG | + SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL | + SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY; + writel(reg, master->regs + SVC_I3C_MDATACTRL); +} + +static void svc_i3c_master_reset(struct svc_i3c_master *master) +{ + svc_i3c_master_clear_merrwarn(master); + svc_i3c_master_reset_fifo_trigger(master); + svc_i3c_master_disable_interrupts(master); +} + static inline struct svc_i3c_master * to_svc_i3c_master(struct i3c_master_controller *master) { @@ -279,12 +316,6 @@ static void svc_i3c_master_emit_stop(struct svc_i3c_master *master) udelay(1); } -static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master) -{ - writel(readl(master->regs + SVC_I3C_MERRWARN), - master->regs + SVC_I3C_MERRWARN); -} - static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master, struct i3c_dev_desc *dev) { @@ -449,13 +480,23 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) struct i3c_device_info info = {}; unsigned long fclk_rate, fclk_period_ns; unsigned int high_period_ns, od_low_period_ns; - u32 ppbaud, pplow, odhpp, odbaud, i2cbaud, reg; + u32 
ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg; int ret; + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, + "<%s> cannot resume i3c bus master, err: %d\n", + __func__, ret); + return ret; + } + /* Timings derivation */ fclk_rate = clk_get_rate(master->fclk); - if (!fclk_rate) - return -EINVAL; + if (!fclk_rate) { + ret = -EINVAL; + goto rpm_out; + } fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate); @@ -479,6 +520,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) switch (bus->mode) { case I3C_BUS_MODE_PURE: i2cbaud = 0; + odstop = 0; break; case I3C_BUS_MODE_MIXED_FAST: case I3C_BUS_MODE_MIXED_LIMITED: @@ -487,6 +529,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) * between the high and low period does not really matter. */ i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2; + odstop = 1; break; case I3C_BUS_MODE_MIXED_SLOW: /* @@ -494,15 +537,16 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) * constraints as the FM+ mode. */ i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2; + odstop = 1; break; default: - return -EINVAL; + goto rpm_out; } reg = SVC_I3C_MCONFIG_MASTER_EN | SVC_I3C_MCONFIG_DISTO(0) | SVC_I3C_MCONFIG_HKEEP(0) | - SVC_I3C_MCONFIG_ODSTOP(0) | + SVC_I3C_MCONFIG_ODSTOP(odstop) | SVC_I3C_MCONFIG_PPBAUD(ppbaud) | SVC_I3C_MCONFIG_PPLOW(pplow) | SVC_I3C_MCONFIG_ODBAUD(odbaud) | @@ -514,7 +558,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) /* Master core's registration */ ret = i3c_master_get_free_addr(m, 0); if (ret < 0) - return ret; + goto rpm_out; info.dyn_addr = ret; @@ -523,21 +567,33 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) ret = i3c_master_set_info(&master->base, &info); if (ret) - return ret; + goto rpm_out; - svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); +rpm_out: + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); - return 0; + return ret; } static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m) { struct svc_i3c_master *master = to_svc_i3c_master(m); + int ret; + + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); + return; + } svc_i3c_master_disable_interrupts(master); /* Disable master */ writel(0, master->regs + SVC_I3C_MCONFIG); + + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); } static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master) @@ -656,8 +712,10 @@ static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst, u32 reg; for (i = 0; i < len; i++) { - ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, - SVC_I3C_MSTATUS_RXPEND(reg), 0, 1000); + ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, + reg, + SVC_I3C_MSTATUS_RXPEND(reg), + 0, 1000); if (ret) return ret; @@ -687,10 +745,11 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master, * Either one slave will send its ID, or the assignment process * is done. 
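The svc-i3c polling conversions around this point (svc_i3c_master_readb() above, the DAA waits below) move from readl_poll_timeout() to readl_poll_timeout_atomic(): these waits run with a spinlock held and interrupts disabled, where the sleeping variant must not be used. Shape of the atomic call, with a hypothetical status register and bit:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define FOO_STATUS		0x10	/* hypothetical register offset */
#define FOO_STATUS_RXPEND	BIT(11)	/* hypothetical status bit */

static int foo_wait_rx_atomic(void __iomem *regs)
{
	u32 reg;

	/*
	 * Busy-waits with udelay()/cpu_relax() instead of sleeping, so it
	 * may be called in atomic context: re-check immediately
	 * (delay_us = 0) and give up after 1000 us.
	 */
	return readl_poll_timeout_atomic(regs + FOO_STATUS, reg,
					 reg & FOO_STATUS_RXPEND, 0, 1000);
}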
*/ - ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, - SVC_I3C_MSTATUS_RXPEND(reg) | - SVC_I3C_MSTATUS_MCTRLDONE(reg), - 1, 1000); + ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, + reg, + SVC_I3C_MSTATUS_RXPEND(reg) | + SVC_I3C_MSTATUS_MCTRLDONE(reg), + 1, 1000); if (ret) return ret; @@ -744,11 +803,12 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master, } /* Wait for the slave to be ready to receive its address */ - ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, - SVC_I3C_MSTATUS_MCTRLDONE(reg) && - SVC_I3C_MSTATUS_STATE_DAA(reg) && - SVC_I3C_MSTATUS_BETWEEN(reg), - 0, 1000); + ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, + reg, + SVC_I3C_MSTATUS_MCTRLDONE(reg) && + SVC_I3C_MSTATUS_STATE_DAA(reg) && + SVC_I3C_MSTATUS_BETWEEN(reg), + 0, 1000); if (ret) return ret; @@ -832,31 +892,36 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m) unsigned int dev_nb; int ret, i; + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); + return ret; + } + spin_lock_irqsave(&master->xferqueue.lock, flags); ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb); spin_unlock_irqrestore(&master->xferqueue.lock, flags); - if (ret) - goto emit_stop; + if (ret) { + svc_i3c_master_emit_stop(master); + svc_i3c_master_clear_merrwarn(master); + goto rpm_out; + } /* Register all devices who participated to the core */ for (i = 0; i < dev_nb; i++) { ret = i3c_master_add_i3c_dev_locked(m, addrs[i]); if (ret) - return ret; + goto rpm_out; } /* Configure IBI auto-rules */ ret = svc_i3c_update_ibirules(master); - if (ret) { + if (ret) dev_err(master->dev, "Cannot handle such a list of devices"); - return ret; - } - return 0; - -emit_stop: - svc_i3c_master_emit_stop(master); - svc_i3c_master_clear_merrwarn(master); +rpm_out: + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); return ret; } @@ -864,27 +929,35 @@ emit_stop: static int svc_i3c_master_read(struct svc_i3c_master *master, u8 *in, unsigned int len) { - int offset = 0, i, ret; - u32 mdctrl; + int offset = 0, i; + u32 mdctrl, mstatus; + bool completed = false; + unsigned int count; + unsigned long start = jiffies; - while (offset < len) { - unsigned int count; + while (!completed) { + mstatus = readl(master->regs + SVC_I3C_MSTATUS); + if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0) + completed = true; - ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL, - mdctrl, - !(mdctrl & SVC_I3C_MDATACTRL_RXEMPTY), - 0, 1000); - if (ret) - return ret; + if (time_after(jiffies, start + msecs_to_jiffies(1000))) { + dev_dbg(master->dev, "I3C read timeout\n"); + return -ETIMEDOUT; + } + mdctrl = readl(master->regs + SVC_I3C_MDATACTRL); count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl); + if (offset + count > len) { + dev_err(master->dev, "I3C receive length too long!\n"); + return -EINVAL; + } for (i = 0; i < count; i++) in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB); offset += count; } - return 0; + return offset; } static int svc_i3c_master_write(struct svc_i3c_master *master, @@ -917,7 +990,7 @@ static int svc_i3c_master_write(struct svc_i3c_master *master, static int svc_i3c_master_xfer(struct svc_i3c_master *master, bool rnw, unsigned int xfer_type, u8 addr, u8 *in, const u8 *out, unsigned int xfer_len, - unsigned int read_len, bool continued) + unsigned int *read_len, bool continued) { u32 reg; int ret; @@ -927,7 +1000,7 @@ static int 
svc_i3c_master_xfer(struct svc_i3c_master *master, SVC_I3C_MCTRL_IBIRESP_NACK | SVC_I3C_MCTRL_DIR(rnw) | SVC_I3C_MCTRL_ADDR(addr) | - SVC_I3C_MCTRL_RDTERM(read_len), + SVC_I3C_MCTRL_RDTERM(*read_len), master->regs + SVC_I3C_MCTRL); ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, @@ -939,17 +1012,27 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master, ret = svc_i3c_master_read(master, in, xfer_len); else ret = svc_i3c_master_write(master, out, xfer_len); - if (ret) + if (ret < 0) goto emit_stop; + if (rnw) + *read_len = ret; + ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000); if (ret) goto emit_stop; - if (!continued) + writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS); + + if (!continued) { svc_i3c_master_emit_stop(master); + /* Wait idle if stop is sent. */ + readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, + SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000); + } + return 0; emit_stop: @@ -1007,17 +1090,29 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master) if (!xfer) return; + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); + return; + } + + svc_i3c_master_clear_merrwarn(master); + svc_i3c_master_flush_fifo(master); + for (i = 0; i < xfer->ncmds; i++) { struct svc_i3c_cmd *cmd = &xfer->cmds[i]; ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type, cmd->addr, cmd->in, cmd->out, - cmd->len, cmd->read_len, + cmd->len, &cmd->read_len, cmd->continued); if (ret) break; } + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); + xfer->ret = ret; complete(&xfer->comp); @@ -1141,6 +1236,9 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master, if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) svc_i3c_master_dequeue_xfer(master, xfer); + if (cmd->read_len != xfer_len) + ccc->dests[0].payload.len = cmd->read_len; + ret = xfer->ret; svc_i3c_master_free_xfer(xfer); @@ -1291,6 +1389,16 @@ static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev) static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev) { struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct svc_i3c_master *master = to_svc_i3c_master(m); + int ret; + + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); + return ret; + } + + svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); } @@ -1298,8 +1406,17 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev) static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev) { struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct svc_i3c_master *master = to_svc_i3c_master(m); + int ret; + + svc_i3c_master_disable_interrupts(master); - return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); + ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); + + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); + + return ret; } static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev, @@ -1330,23 +1447,35 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = { .disable_ibi = svc_i3c_master_disable_ibi, }; -static void svc_i3c_master_reset(struct svc_i3c_master *master) +static int svc_i3c_master_prepare_clks(struct svc_i3c_master 
*master) { - u32 reg; + int ret = 0; - /* Clear pending warnings */ - writel(readl(master->regs + SVC_I3C_MERRWARN), - master->regs + SVC_I3C_MERRWARN); + ret = clk_prepare_enable(master->pclk); + if (ret) + return ret; - /* Set RX and TX tigger levels, flush FIFOs */ - reg = SVC_I3C_MDATACTRL_FLUSHTB | - SVC_I3C_MDATACTRL_FLUSHRB | - SVC_I3C_MDATACTRL_UNLOCK_TRIG | - SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL | - SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY; - writel(reg, master->regs + SVC_I3C_MDATACTRL); + ret = clk_prepare_enable(master->fclk); + if (ret) { + clk_disable_unprepare(master->pclk); + return ret; + } - svc_i3c_master_disable_interrupts(master); + ret = clk_prepare_enable(master->sclk); + if (ret) { + clk_disable_unprepare(master->pclk); + clk_disable_unprepare(master->fclk); + return ret; + } + + return 0; +} + +static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master) +{ + clk_disable_unprepare(master->pclk); + clk_disable_unprepare(master->fclk); + clk_disable_unprepare(master->sclk); } static int svc_i3c_master_probe(struct platform_device *pdev) @@ -1381,26 +1510,16 @@ static int svc_i3c_master_probe(struct platform_device *pdev) master->dev = dev; - svc_i3c_master_reset(master); - - ret = clk_prepare_enable(master->pclk); + ret = svc_i3c_master_prepare_clks(master); if (ret) return ret; - ret = clk_prepare_enable(master->fclk); - if (ret) - goto err_disable_pclk; - - ret = clk_prepare_enable(master->sclk); - if (ret) - goto err_disable_fclk; - INIT_WORK(&master->hj_work, svc_i3c_master_hj_work); INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work); ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler, IRQF_NO_SUSPEND, "svc-i3c-irq", master); if (ret) - goto err_disable_sclk; + goto err_disable_clks; master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0); @@ -1414,27 +1533,38 @@ static int svc_i3c_master_probe(struct platform_device *pdev) GFP_KERNEL); if (!master->ibi.slots) { ret = -ENOMEM; - goto err_disable_sclk; + goto err_disable_clks; } platform_set_drvdata(pdev, master); + pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + svc_i3c_master_reset(master); + /* Register the master */ ret = i3c_master_register(&master->base, &pdev->dev, &svc_i3c_master_ops, false); if (ret) - goto err_disable_sclk; + goto rpm_disable; - return 0; + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); -err_disable_sclk: - clk_disable_unprepare(master->sclk); + return 0; -err_disable_fclk: - clk_disable_unprepare(master->fclk); +rpm_disable: + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_disable(&pdev->dev); -err_disable_pclk: - clk_disable_unprepare(master->pclk); +err_disable_clks: + svc_i3c_master_unprepare_clks(master); return ret; } @@ -1448,17 +1578,45 @@ static int svc_i3c_master_remove(struct platform_device *pdev) if (ret) return ret; - clk_disable_unprepare(master->pclk); - clk_disable_unprepare(master->fclk); - clk_disable_unprepare(master->sclk); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); return 0; } +static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev) +{ + struct svc_i3c_master *master = dev_get_drvdata(dev); + + svc_i3c_master_unprepare_clks(master); + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + 
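The probe rework above shows the canonical bring-up order for runtime PM with autosuspend: take a usage reference and mark the device active before enabling runtime PM, then drop the reference only once registration has succeeded, with a full unwind on failure. A condensed sketch (foo_register() is a hypothetical registration step; the 1000 ms delay mirrors SVC_I3C_PM_TIMEOUT_MS):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_register(struct platform_device *pdev)
{
	return 0;	/* stands in for i3c_master_register() etc. */
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);	/* keep it powered during init */
	pm_runtime_set_active(dev);	/* hardware is already up */
	pm_runtime_enable(dev);

	ret = foo_register(pdev);
	if (ret)
		goto rpm_disable;

	/* Idle from now on; let autosuspend power the device down. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_disable(dev);

	return ret;
}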
+static int __maybe_unused svc_i3c_runtime_resume(struct device *dev) +{ + struct svc_i3c_master *master = dev_get_drvdata(dev); + int ret = 0; + + pinctrl_pm_select_default_state(dev); + svc_i3c_master_prepare_clks(master); + + return ret; +} + +static const struct dev_pm_ops svc_i3c_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend, + svc_i3c_runtime_resume, NULL) +}; + static const struct of_device_id svc_i3c_master_of_match_tbl[] = { { .compatible = "silvaco,i3c-master" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl); static struct platform_driver svc_i3c_master = { .probe = svc_i3c_master_probe, @@ -1466,6 +1624,7 @@ static struct platform_driver svc_i3c_master = { .driver = { .name = "silvaco-i3c-master", .of_match_table = svc_i3c_master_of_match_tbl, + .pm = &svc_i3c_pm_ops, }, }; module_platform_driver(svc_i3c_master); diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index 2334ad249b46..b190846c3dc2 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT source "drivers/iio/accel/Kconfig" source "drivers/iio/adc/Kconfig" +source "drivers/iio/addac/Kconfig" source "drivers/iio/afe/Kconfig" source "drivers/iio/amplifiers/Kconfig" source "drivers/iio/cdc/Kconfig" @@ -77,6 +78,7 @@ source "drivers/iio/chemical/Kconfig" source "drivers/iio/common/Kconfig" source "drivers/iio/dac/Kconfig" source "drivers/iio/dummy/Kconfig" +source "drivers/iio/filter/Kconfig" source "drivers/iio/frequency/Kconfig" source "drivers/iio/gyro/Kconfig" source "drivers/iio/health/Kconfig" diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index 65e39bd4f934..3be08cdadd7e 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o obj-y += accel/ obj-y += adc/ +obj-y += addac/ obj-y += afe/ obj-y += amplifiers/ obj-y += buffer/ @@ -24,6 +25,7 @@ obj-y += common/ obj-y += dac/ obj-y += dummy/ obj-y += gyro/ +obj-y += filter/ obj-y += frequency/ obj-y += health/ obj-y += humidity/ diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 2edfcb4819b7..d8a454c266d5 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -658,7 +658,7 @@ static const struct iio_chan_spec_ext_info bma023_ext_info[] = { static const struct iio_chan_spec_ext_info bma180_ext_info[] = { IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE, &bma180_power_mode_enum), - IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum), + IIO_ENUM_AVAILABLE("power_mode", IIO_SHARED_BY_TYPE, &bma180_power_mode_enum), IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix), { } }; @@ -938,7 +938,7 @@ static int bma180_probe(struct i2c_client *client, i2c_set_clientdata(client, indio_dev); data->client = client; if (client->dev.of_node) - chip = (enum chip_ids)of_device_get_match_data(dev); + chip = (uintptr_t)of_device_get_match_data(dev); else chip = id->driver_data; data->part_info = &bma180_part_info[chip]; diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c index bc4c626e454d..74024d7ce5ac 100644 --- a/drivers/iio/accel/bma220_spi.c +++ b/drivers/iio/accel/bma220_spi.c @@ -27,7 +27,6 @@ #define BMA220_CHIP_ID 0xDD #define BMA220_READ_MASK BIT(7) #define BMA220_RANGE_MASK GENMASK(1, 0) -#define BMA220_DATA_SHIFT 2 #define BMA220_SUSPEND_SLEEP 0xFF #define BMA220_SUSPEND_WAKE 0x00 @@ -45,7 +44,7 @@ .sign = 's', \ .realbits 
= 6, \ .storagebits = 8, \ - .shift = BMA220_DATA_SHIFT, \ + .shift = 2, \ .endianness = IIO_CPU, \ }, \ } @@ -125,7 +124,8 @@ static int bma220_read_raw(struct iio_dev *indio_dev, ret = bma220_read_reg(data->spi_device, chan->address); if (ret < 0) return -EINVAL; - *val = sign_extend32(ret >> BMA220_DATA_SHIFT, 5); + *val = sign_extend32(ret >> chan->scan_type.shift, + chan->scan_type.realbits - 1); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE); diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index b0678c351e82..e6081dd0a880 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -170,7 +170,7 @@ static const struct { {1000, 0, 0x0E}, {2000, 0, 0x0F} }; -static const struct { +static __maybe_unused const struct { int bw_bits; int msec; } bmc150_accel_sample_upd_time[] = { {0x08, 64}, diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 24c9387c2968..0fe570316848 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -315,7 +315,7 @@ static const char *const kxtf9_samp_freq_avail = "25 50 100 200 400 800"; /* Refer to section 4 of the specification */ -static const struct { +static __maybe_unused const struct { int odr_bits; int usec; } odr_start_up_times[KX_MAX_CHIPS][12] = { @@ -927,7 +927,8 @@ static int kxcjk1013_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->mutex); return ret; } - *val = sign_extend32(ret >> 4, 11); + *val = sign_extend32(ret >> chan->scan_type.shift, + chan->scan_type.realbits - 1); ret = kxcjk1013_set_power_state(data, false); } mutex_unlock(&data->mutex); diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c index 777c6c384b09..e6739ba74edf 100644 --- a/drivers/iio/accel/mma7455_core.c +++ b/drivers/iio/accel/mma7455_core.c @@ -134,7 +134,8 @@ static int mma7455_read_raw(struct iio_dev *indio_dev, if (ret) return ret; - *val = sign_extend32(le16_to_cpu(data), 9); + *val = sign_extend32(le16_to_cpu(data), + chan->scan_type.realbits - 1); return IIO_VAL_INT; diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c index cd6cdf2c51b0..24b83ccdb950 100644 --- a/drivers/iio/accel/mma7660.c +++ b/drivers/iio/accel/mma7660.c @@ -210,10 +210,16 @@ static int mma7660_probe(struct i2c_client *client, static int mma7660_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); + int ret; iio_device_unregister(indio_dev); - return mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY); + ret = mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY); + if (ret) + dev_warn(&client->dev, "Failed to put device in stand-by mode (%pe), ignoring\n", + ERR_PTR(ret)); + + return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 09c7f10fefb6..64b82b4503ad 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p) { struct iio_dev *indio_dev = p; struct mma8452_data *data = iio_priv(indio_dev); - int ret = IRQ_NONE; + irqreturn_t ret = IRQ_NONE; int src; src = i2c_smbus_read_byte_data(data->client, MMA8452_INT_SRC); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index ba3ecb3b57dc..0570ab1cc064 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -917,7 +917,7 @@ static const struct iio_enum 
mma9553_calibgender_enum = { static const struct iio_chan_spec_ext_info mma9553_ext_info[] = { IIO_ENUM("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum), - IIO_ENUM_AVAILABLE("calibgender", &mma9553_calibgender_enum), + IIO_ENUM_AVAILABLE("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum), {}, }; diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index c6b75308148a..43ecacbdc95a 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -534,6 +534,13 @@ static const struct iio_chan_spec sca3000_channels_with_temp[] = { BIT(IIO_CHAN_INFO_OFFSET), /* No buffer support */ .scan_index = -1, + .scan_type = { + .sign = 'u', + .realbits = 9, + .storagebits = 16, + .shift = 5, + .endianness = IIO_BE, + }, }, { .type = IIO_ACCEL, @@ -730,8 +737,9 @@ static int sca3000_read_raw(struct iio_dev *indio_dev, mutex_unlock(&st->lock); return ret; } - *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF; - *val = sign_extend32(*val, 12); + *val = sign_extend32(be16_to_cpup((__be16 *)st->rx) >> + chan->scan_type.shift, + chan->scan_type.realbits - 1); } else { /* get the temperature when available */ ret = sca3000_read_data_short(st, @@ -741,8 +749,9 @@ static int sca3000_read_raw(struct iio_dev *indio_dev, mutex_unlock(&st->lock); return ret; } - *val = ((st->rx[0] & 0x3F) << 3) | - ((st->rx[1] & 0xE0) >> 5); + *val = (be16_to_cpup((__be16 *)st->rx) >> + chan->scan_type.shift) & + GENMASK(chan->scan_type.realbits - 1, 0); } mutex_unlock(&st->lock); return IIO_VAL_INT; diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c index 43c621d0f11e..de0cdf8c1f94 100644 --- a/drivers/iio/accel/stk8312.c +++ b/drivers/iio/accel/stk8312.c @@ -355,7 +355,7 @@ static int stk8312_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); return ret; } - *val = sign_extend32(ret, 7); + *val = sign_extend32(ret, chan->scan_type.realbits - 1); ret = stk8312_set_mode(data, data->mode & (~STK8312_MODE_ACTIVE)); mutex_unlock(&data->lock); diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c index e137a34b5c9a..517c57ed9e94 100644 --- a/drivers/iio/accel/stk8ba50.c +++ b/drivers/iio/accel/stk8ba50.c @@ -227,7 +227,8 @@ static int stk8ba50_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); return -EINVAL; } - *val = sign_extend32(ret >> STK8BA50_DATA_SHIFT, 9); + *val = sign_extend32(ret >> chan->scan_type.shift, + chan->scan_type.realbits - 1); stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND); mutex_unlock(&data->lock); return IIO_VAL_INT; diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 3363af15a43f..4fdc8bfbb407 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -1146,7 +1146,7 @@ config TI_ADS7950 config TI_ADS8344 tristate "Texas Instruments ADS8344" - depends on SPI && OF + depends on SPI help If you say yes here you get support for Texas Instruments ADS8344 ADC chips @@ -1156,7 +1156,7 @@ config TI_ADS8344 config TI_ADS8688 tristate "Texas Instruments ADS8688" - depends on SPI && OF + depends on SPI help If you say yes here you get support for Texas Instruments ADS8684 and and ADS8688 ADC chips @@ -1166,7 +1166,7 @@ config TI_ADS8688 config TI_ADS124S08 tristate "Texas Instruments ADS124S08" - depends on SPI && OF + depends on SPI help If you say yes here you get support for Texas Instruments ADS124S08 and ADS124S06 ADC chips @@ -1288,4 +1288,19 @@ config XILINX_XADC The driver can also be build as a module. If so, the module will be called xilinx-xadc. 
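Several accel/ADC hunks above (kxcjk-1013, mma7455, sca3000, stk8312, stk8ba50, ad7266) converge on one pattern: instead of hard-coding the shift and sign-bit position, derive both from the channel's scan_type so the raw-value conversion cannot drift out of sync with the channel description. The shared idiom, in a hypothetical helper:

#include <linux/bitops.h>
#include <linux/iio/iio.h>

static int foo_raw_to_val(const struct iio_chan_spec *chan, u32 raw)
{
	/* Drop the padding bits, then sign-extend at the sample's MSB. */
	return sign_extend32(raw >> chan->scan_type.shift,
			     chan->scan_type.realbits - 1);
}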
+config XILINX_AMS + tristate "Xilinx AMS driver" + depends on ARCH_ZYNQMP || COMPILE_TEST + depends on HAS_IOMEM + help + Say yes here to have support for the Xilinx AMS for Ultrascale/Ultrascale+ + System Monitor. With this you can measure and monitor the Voltages and + Temperature values on the SOC. + + The driver supports Voltage and Temperature monitoring on Xilinx Ultrascale + devices. + + The driver can also be built as a module. If so, the module will be called + xilinx-ams. + endmenu diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index d3f53549720c..4a8f1833993b 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -115,4 +115,5 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o +obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 2121a812b0c3..cc990205f306 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -257,7 +257,8 @@ static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = { }, IIO_ENUM("sys_calibration_mode", IIO_SEPARATE, &ad7192_syscalib_mode_enum), - IIO_ENUM_AVAILABLE("sys_calibration_mode", &ad7192_syscalib_mode_enum), + IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE, + &ad7192_syscalib_mode_enum), {} }; diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index a8ec3efd659e..1d345d66742d 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -159,7 +159,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev, *val = (*val >> 2) & 0xfff; if (chan->scan_type.sign == 's') - *val = sign_extend32(*val, 11); + *val = sign_extend32(*val, + chan->scan_type.realbits - 1); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h index 9350ef1f63b5..4f82d7c9acfd 100644 --- a/drivers/iio/adc/ad7606.h +++ b/drivers/iio/adc/ad7606.h @@ -62,7 +62,7 @@ struct ad7606_chip_info { * struct ad7606_state - driver instance specific data * @dev pointer to kernel device * @chip_info entry in the table of chips that describes this device - * @reg regulator info for the the power supply of the device + * @reg regulator info for the power supply of the device * @bops bus operations (SPI or parallel) * @range voltage range selection, selects which scale to apply * @oversampling oversampling selection diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index 1d652d9b2f5c..cd418bd8bd87 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -467,9 +467,6 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig) } EXPORT_SYMBOL_GPL(ad_sd_validate_trigger); -static const struct iio_trigger_ops ad_sd_trigger_ops = { -}; - static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev) { struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev); @@ -486,7 +483,6 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de if (sigma_delta->trig == NULL) return -ENOMEM; - sigma_delta->trig->ops = &ad_sd_trigger_ops; init_completion(&sigma_delta->completion); sigma_delta->irq_dis = true; diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 92a57cf10fba..854b1f81d807 100644 --- 
a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -1662,10 +1662,9 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev, } } -static void at91_adc_dma_init(struct platform_device *pdev) +static void at91_adc_dma_init(struct at91_adc_state *st) { - struct iio_dev *indio_dev = platform_get_drvdata(pdev); - struct at91_adc_state *st = iio_priv(indio_dev); + struct device *dev = &st->indio_dev->dev; struct dma_slave_config config = {0}; /* we have 2 bytes for each channel */ unsigned int sample_size = st->soc_info.platform->nr_channels * 2; @@ -1680,9 +1679,9 @@ static void at91_adc_dma_init(struct platform_device *pdev) if (st->dma_st.dma_chan) return; - st->dma_st.dma_chan = dma_request_chan(&pdev->dev, "rx"); + st->dma_st.dma_chan = dma_request_chan(dev, "rx"); if (IS_ERR(st->dma_st.dma_chan)) { - dev_info(&pdev->dev, "can't get DMA channel\n"); + dev_info(dev, "can't get DMA channel\n"); st->dma_st.dma_chan = NULL; goto dma_exit; } @@ -1692,7 +1691,7 @@ static void at91_adc_dma_init(struct platform_device *pdev) &st->dma_st.rx_dma_buf, GFP_KERNEL); if (!st->dma_st.rx_buf) { - dev_info(&pdev->dev, "can't allocate coherent DMA area\n"); + dev_info(dev, "can't allocate coherent DMA area\n"); goto dma_chan_disable; } @@ -1705,11 +1704,11 @@ static void at91_adc_dma_init(struct platform_device *pdev) config.dst_maxburst = 1; if (dmaengine_slave_config(st->dma_st.dma_chan, &config)) { - dev_info(&pdev->dev, "can't configure DMA slave\n"); + dev_info(dev, "can't configure DMA slave\n"); goto dma_free_area; } - dev_info(&pdev->dev, "using %s for rx DMA transfers\n", + dev_info(dev, "using %s for rx DMA transfers\n", dma_chan_name(st->dma_st.dma_chan)); return; @@ -1721,13 +1720,12 @@ dma_chan_disable: dma_release_channel(st->dma_st.dma_chan); st->dma_st.dma_chan = NULL; dma_exit: - dev_info(&pdev->dev, "continuing without DMA support\n"); + dev_info(dev, "continuing without DMA support\n"); } -static void at91_adc_dma_disable(struct platform_device *pdev) +static void at91_adc_dma_disable(struct at91_adc_state *st) { - struct iio_dev *indio_dev = platform_get_drvdata(pdev); - struct at91_adc_state *st = iio_priv(indio_dev); + struct device *dev = &st->indio_dev->dev; /* we have 2 bytes for each channel */ unsigned int sample_size = st->soc_info.platform->nr_channels * 2; unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE * @@ -1745,7 +1743,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev) dma_release_channel(st->dma_st.dma_chan); st->dma_st.dma_chan = NULL; - dev_info(&pdev->dev, "continuing without DMA support\n"); + dev_info(dev, "continuing without DMA support\n"); } static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) @@ -1771,9 +1769,9 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) */ if (val == 1) - at91_adc_dma_disable(to_platform_device(&indio_dev->dev)); + at91_adc_dma_disable(st); else if (val > 1) - at91_adc_dma_init(to_platform_device(&indio_dev->dev)); + at91_adc_dma_init(st); /* * We can start the DMA only after setting the watermark and @@ -1781,7 +1779,7 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) */ ret = at91_adc_buffer_prepare(indio_dev); if (ret) - at91_adc_dma_disable(to_platform_device(&indio_dev->dev)); + at91_adc_dma_disable(st); return ret; } @@ -1828,7 +1826,7 @@ static void at91_adc_hw_init(struct iio_dev *indio_dev) static ssize_t at91_adc_get_fifo_state(struct device *dev, struct device_attribute *attr, char *buf) 
{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct at91_adc_state *st = iio_priv(indio_dev); return scnprintf(buf, PAGE_SIZE, "%d\n", !!st->dma_st.dma_chan); @@ -1837,7 +1835,7 @@ static ssize_t at91_adc_get_fifo_state(struct device *dev, static ssize_t at91_adc_get_watermark(struct device *dev, struct device_attribute *attr, char *buf) { - struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct at91_adc_state *st = iio_priv(indio_dev); return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark); @@ -2078,7 +2076,7 @@ static int at91_adc_probe(struct platform_device *pdev) return 0; dma_disable: - at91_adc_dma_disable(pdev); + at91_adc_dma_disable(st); per_clk_disable_unprepare: clk_disable_unprepare(st->per_clk); vref_disable: @@ -2095,7 +2093,7 @@ static int at91_adc_remove(struct platform_device *pdev) iio_device_unregister(indio_dev); - at91_adc_dma_disable(pdev); + at91_adc_dma_disable(st); clk_disable_unprepare(st->per_clk); diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c index df99f1365c39..53bf7d4899d2 100644 --- a/drivers/iio/adc/axp20x_adc.c +++ b/drivers/iio/adc/axp20x_adc.c @@ -186,6 +186,8 @@ static const struct iio_chan_spec axp20x_adc_channels[] = { AXP20X_BATT_CHRG_I_H), AXP20X_ADC_CHANNEL(AXP20X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT, AXP20X_BATT_DISCHRG_I_H), + AXP20X_ADC_CHANNEL(AXP20X_TS_IN, "ts_v", IIO_VOLTAGE, + AXP20X_TS_IN_H), }; static const struct iio_chan_spec axp22x_adc_channels[] = { @@ -203,6 +205,8 @@ static const struct iio_chan_spec axp22x_adc_channels[] = { AXP20X_BATT_CHRG_I_H), AXP20X_ADC_CHANNEL(AXP22X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT, AXP20X_BATT_DISCHRG_I_H), + AXP20X_ADC_CHANNEL(AXP22X_TS_IN, "ts_v", IIO_VOLTAGE, + AXP22X_TS_ADC_H), }; static const struct iio_chan_spec axp813_adc_channels[] = { @@ -222,6 +226,8 @@ static const struct iio_chan_spec axp813_adc_channels[] = { AXP20X_BATT_CHRG_I_H), AXP20X_ADC_CHANNEL(AXP22X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT, AXP20X_BATT_DISCHRG_I_H), + AXP20X_ADC_CHANNEL(AXP813_TS_IN, "ts_v", IIO_VOLTAGE, + AXP288_TS_ADC_H), }; static int axp20x_adc_raw(struct iio_dev *indio_dev, @@ -296,11 +302,36 @@ static int axp20x_adc_scale_voltage(int channel, int *val, int *val2) *val2 = 400000; return IIO_VAL_INT_PLUS_MICRO; + case AXP20X_TS_IN: + /* 0.8 mV per LSB */ + *val = 0; + *val2 = 800000; + return IIO_VAL_INT_PLUS_MICRO; + default: return -EINVAL; } } +static int axp22x_adc_scale_voltage(int channel, int *val, int *val2) +{ + switch (channel) { + case AXP22X_BATT_V: + /* 1.1 mV per LSB */ + *val = 1; + *val2 = 100000; + return IIO_VAL_INT_PLUS_MICRO; + + case AXP22X_TS_IN: + /* 0.8 mV per LSB */ + *val = 0; + *val2 = 800000; + return IIO_VAL_INT_PLUS_MICRO; + + default: + return -EINVAL; + } +} static int axp813_adc_scale_voltage(int channel, int *val, int *val2) { switch (channel) { @@ -314,6 +345,12 @@ static int axp813_adc_scale_voltage(int channel, int *val, int *val2) *val2 = 100000; return IIO_VAL_INT_PLUS_MICRO; + case AXP813_TS_IN: + /* 0.8 mV per LSB */ + *val = 0; + *val2 = 800000; + return IIO_VAL_INT_PLUS_MICRO; + default: return -EINVAL; } @@ -367,12 +404,7 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val, { switch (chan->type) { case IIO_VOLTAGE: - if (chan->channel != AXP22X_BATT_V) - return -EINVAL; - - *val = 1; - *val2 = 100000; - return IIO_VAL_INT_PLUS_MICRO; + return 
axp22x_adc_scale_voltage(chan->channel, val, val2); case IIO_CURRENT: *val = 1; @@ -476,6 +508,7 @@ static int axp22x_read_raw(struct iio_dev *indio_dev, { switch (mask) { case IIO_CHAN_INFO_OFFSET: + /* For PMIC temp only */ *val = -2677; return IIO_VAL_INT; diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c index d73eac36153f..e911c25d106d 100644 --- a/drivers/iio/adc/envelope-detector.c +++ b/drivers/iio/adc/envelope-detector.c @@ -31,14 +31,13 @@ #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/mutex.h> #include <linux/iio/consumer.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/interrupt.h> #include <linux/irq.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/workqueue.h> diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c index 8b353e26668e..e665e14c6e54 100644 --- a/drivers/iio/adc/hi8435.c +++ b/drivers/iio/adc/hi8435.c @@ -350,7 +350,7 @@ static const struct iio_enum hi8435_sensing_mode = { static const struct iio_chan_spec_ext_info hi8435_ext_info[] = { IIO_ENUM("sensing_mode", IIO_SEPARATE, &hi8435_sensing_mode), - IIO_ENUM_AVAILABLE("sensing_mode", &hi8435_sensing_mode), + IIO_ENUM_AVAILABLE("sensing_mode", IIO_SHARED_BY_TYPE, &hi8435_sensing_mode), {}, }; diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c index 092f8d296527..12f5b8e34c84 100644 --- a/drivers/iio/adc/imx7d_adc.c +++ b/drivers/iio/adc/imx7d_adc.c @@ -522,12 +522,11 @@ static int imx7d_adc_probe(struct platform_device *pdev) imx7d_adc_feature_config(info); - ret = imx7d_adc_enable(&indio_dev->dev); + ret = imx7d_adc_enable(dev); if (ret) return ret; - ret = devm_add_action_or_reset(dev, __imx7d_adc_disable, - &indio_dev->dev); + ret = devm_add_action_or_reset(dev, __imx7d_adc_disable, dev); if (ret) return ret; diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index a4b2ff9e0dd5..4f9992a51e64 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -550,7 +550,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev, bool val; int ret; - ret = strtobool((const char *) buf, &val); + ret = strtobool(buf, &val); if (ret) return ret; @@ -842,15 +842,13 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev) dev_dbg(&indio_dev->dev, "Async readout mode: %d\n", chip->allow_async_readout); - task = kthread_create(ina2xx_capture_thread, (void *)indio_dev, - "%s:%d-%uus", indio_dev->name, - iio_device_id(indio_dev), - sampling_us); + task = kthread_run(ina2xx_capture_thread, (void *)indio_dev, + "%s:%d-%uus", indio_dev->name, + iio_device_id(indio_dev), + sampling_us); if (IS_ERR(task)) return PTR_ERR(task); - get_task_struct(task); - wake_up_process(task); chip->task = task; return 0; @@ -862,7 +860,6 @@ static int ina2xx_buffer_disable(struct iio_dev *indio_dev) if (chip->task) { kthread_stop(chip->task); - put_task_struct(chip->task); chip->task = NULL; } @@ -974,7 +971,7 @@ static int ina2xx_probe(struct i2c_client *client, } if (client->dev.of_node) - type = (enum ina2xx_ids)of_device_get_match_data(&client->dev); + type = (uintptr_t)of_device_get_match_data(&client->dev); else type = id->driver_data; chip->config = &ina2xx_config[type]; diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c index ceefa4d793cf..ae9c9384f23e 100644 --- a/drivers/iio/adc/lpc18xx_adc.c +++ 
b/drivers/iio/adc/lpc18xx_adc.c @@ -157,9 +157,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(adc->clk), "error getting clock\n"); - rate = clk_get_rate(adc->clk); - clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET); - adc->vref = devm_regulator_get(&pdev->dev, "vref"); if (IS_ERR(adc->vref)) return dev_err_probe(&pdev->dev, PTR_ERR(adc->vref), @@ -192,6 +189,9 @@ static int lpc18xx_adc_probe(struct platform_device *pdev) if (ret) return ret; + rate = clk_get_rate(adc->clk); + clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET); + adc->cr_reg = (clkdiv << LPC18XX_ADC_CR_CLKDIV_SHIFT) | LPC18XX_ADC_CR_PDN; writel(adc->cr_reg, adc->base + LPC18XX_ADC_CR); diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index 052ab23f10b2..01a4275e9c46 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -22,7 +22,8 @@ #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/mod_devicetable.h> +#include <linux/property.h> #define DRIVER_NAME "max9611" @@ -513,11 +514,9 @@ static int max9611_probe(struct i2c_client *client, const struct i2c_device_id *id) { const char * const shunt_res_prop = "shunt-resistor-micro-ohms"; - const struct device_node *of_node = client->dev.of_node; - const struct of_device_id *of_id = - of_match_device(max9611_of_table, &client->dev); struct max9611_dev *max9611; struct iio_dev *indio_dev; + struct device *dev = &client->dev; unsigned int of_shunt; int ret; @@ -528,15 +527,14 @@ static int max9611_probe(struct i2c_client *client, i2c_set_clientdata(client, indio_dev); max9611 = iio_priv(indio_dev); - max9611->dev = &client->dev; + max9611->dev = dev; max9611->i2c_client = client; mutex_init(&max9611->lock); - ret = of_property_read_u32(of_node, shunt_res_prop, &of_shunt); + ret = device_property_read_u32(dev, shunt_res_prop, &of_shunt); if (ret) { - dev_err(&client->dev, - "Missing %s property for %pOF node\n", - shunt_res_prop, of_node); + dev_err(dev, "Missing %s property for %pfw node\n", + shunt_res_prop, dev_fwnode(dev)); return ret; } max9611->shunt_resistor_uohm = of_shunt; @@ -545,13 +543,13 @@ static int max9611_probe(struct i2c_client *client, if (ret) return ret; - indio_dev->name = of_id->data; + indio_dev->name = device_get_match_data(dev); indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &indio_info; indio_dev->channels = max9611_channels; indio_dev->num_channels = ARRAY_SIZE(max9611_channels); - return devm_iio_device_register(&client->dev, indio_dev); + return devm_iio_device_register(dev, indio_dev); } static struct i2c_driver max9611_driver = { diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index e573da5397bb..13535f148c4c 100644 --- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -10,6 +10,8 @@ #include <linux/err.h> #include <linux/iio/iio.h> #include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> @@ -200,12 +202,13 @@ static const struct iio_info mcp3911_info = { .write_raw = mcp3911_write_raw, }; -static int mcp3911_config(struct mcp3911 *adc, struct device_node *of_node) +static int mcp3911_config(struct mcp3911 *adc) { + struct device *dev = &adc->spi->dev; u32 configreg; int ret; - of_property_read_u32(of_node, "device-addr", &adc->dev_addr); + device_property_read_u32(dev, "device-addr", &adc->dev_addr); if (adc->dev_addr > 3) { 
dev_err(&adc->spi->dev, "invalid device address (%i). Must be in range 0-3.\n", @@ -289,7 +292,7 @@ static int mcp3911_probe(struct spi_device *spi) } } - ret = mcp3911_config(adc, spi->dev.of_node); + ret = mcp3911_config(adc); if (ret) goto clk_disable; diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c index a48895046408..727ea6c68049 100644 --- a/drivers/iio/adc/rcar-gyroadc.c +++ b/drivers/iio/adc/rcar-gyroadc.c @@ -511,8 +511,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev) if (ret) return ret; - priv->model = (enum rcar_gyroadc_model) - of_device_get_match_data(&pdev->dev); + priv->model = (uintptr_t)of_device_get_match_data(&pdev->dev); platform_set_drvdata(pdev, indio_dev); diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c index 32fbf57c362f..9d5be52bd948 100644 --- a/drivers/iio/adc/rzg2l_adc.c +++ b/drivers/iio/adc/rzg2l_adc.c @@ -506,10 +506,8 @@ static int rzg2l_adc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "no irq resource\n"); + if (irq < 0) return irq; - } ret = devm_request_irq(dev, irq, rzg2l_adc_isr, 0, dev_name(dev), adc); diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 8cd258cb2682..897166d9e45c 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -2025,7 +2025,8 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev, if (strlen(name) >= STM32_ADC_CH_SZ) { dev_err(&indio_dev->dev, "Label %s exceeds %d characters\n", name, STM32_ADC_CH_SZ); - return -EINVAL; + ret = -EINVAL; + goto err; } strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ); ret = stm32_adc_populate_int_ch(indio_dev, name, val); diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c index fba659bfdb40..d2d405388499 100644 --- a/drivers/iio/adc/stmpe-adc.c +++ b/drivers/iio/adc/stmpe-adc.c @@ -256,6 +256,7 @@ static int stmpe_adc_probe(struct platform_device *pdev) struct stmpe_adc *info; struct device_node *np; u32 norequest_mask = 0; + unsigned long bits; int irq_temp, irq_adc; int num_chan = 0; int i = 0; @@ -309,8 +310,8 @@ static int stmpe_adc_probe(struct platform_device *pdev) of_property_read_u32(np, "st,norequest-mask", &norequest_mask); - for_each_clear_bit(i, (unsigned long *) &norequest_mask, - (STMPE_ADC_LAST_NR + 1)) { + bits = norequest_mask; + for_each_clear_bit(i, &bits, (STMPE_ADC_LAST_NR + 1)) { stmpe_adc_voltage_chan(&info->stmpe_adc_iio_channels[num_chan], i); num_chan++; } diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index 16fc608db36a..bd48b073e720 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -19,6 +19,7 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/mod_devicetable.h> +#include <linux/property.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> @@ -156,13 +157,16 @@ static int adc081c_probe(struct i2c_client *client, { struct iio_dev *iio; struct adc081c *adc; - struct adcxx1c_model *model; + const struct adcxx1c_model *model; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -EOPNOTSUPP; - model = &adcxx1c_models[id->driver_data]; + if (dev_fwnode(&client->dev)) + model = device_get_match_data(&client->dev); + else + model = &adcxx1c_models[id->driver_data]; iio = devm_iio_device_alloc(&client->dev, sizeof(*adc)); if (!iio) @@ -210,10 +214,17 @@ static const struct i2c_device_id adc081c_id[] = { }; MODULE_DEVICE_TABLE(i2c, adc081c_id); +static const 
struct acpi_device_id adc081c_acpi_match[] = { + /* Used on some AAEON boards */ + { "ADC081C", (kernel_ulong_t)&adcxx1c_models[ADC081C] }, + { } +}; +MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match); + static const struct of_device_id adc081c_of_match[] = { - { .compatible = "ti,adc081c" }, - { .compatible = "ti,adc101c" }, - { .compatible = "ti,adc121c" }, + { .compatible = "ti,adc081c", .data = &adcxx1c_models[ADC081C] }, + { .compatible = "ti,adc101c", .data = &adcxx1c_models[ADC101C] }, + { .compatible = "ti,adc121c", .data = &adcxx1c_models[ADC121C] }, { } }; MODULE_DEVICE_TABLE(of, adc081c_of_match); @@ -222,6 +233,7 @@ static struct i2c_driver adc081c_driver = { .driver = { .name = "adc081c", .of_match_table = adc081c_of_match, + .acpi_match_table = adc081c_acpi_match, }, .probe = adc081c_probe, .id_table = adc081c_id, diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c index fcd5d39dd03e..6eb62b564dae 100644 --- a/drivers/iio/adc/ti-adc12138.c +++ b/drivers/iio/adc/ti-adc12138.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/clk.h> +#include <linux/property.h> #include <linux/spi/spi.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> @@ -239,7 +240,8 @@ static int adc12138_read_raw(struct iio_dev *iio, if (ret) return ret; - *value = sign_extend32(be16_to_cpu(data) >> 3, 12); + *value = sign_extend32(be16_to_cpu(data) >> channel->scan_type.shift, + channel->scan_type.realbits - 1); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -429,8 +431,8 @@ static int adc12138_probe(struct spi_device *spi) return -EINVAL; } - ret = of_property_read_u32(spi->dev.of_node, "ti,acquisition-time", - &adc->acquisition_time); + ret = device_property_read_u32(&spi->dev, "ti,acquisition-time", + &adc->acquisition_time); if (ret) adc->acquisition_time = 10; @@ -516,8 +518,6 @@ static int adc12138_remove(struct spi_device *spi) return 0; } -#ifdef CONFIG_OF - static const struct of_device_id adc12138_dt_ids[] = { { .compatible = "ti,adc12130", }, { .compatible = "ti,adc12132", }, @@ -526,8 +526,6 @@ static const struct of_device_id adc12138_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, adc12138_dt_ids); -#endif - static const struct spi_device_id adc12138_id[] = { { "adc12130", adc12130 }, { "adc12132", adc12132 }, @@ -539,7 +537,7 @@ MODULE_DEVICE_TABLE(spi, adc12138_id); static struct spi_driver adc12138_driver = { .driver = { .name = "adc12138", - .of_match_table = of_match_ptr(adc12138_dt_ids), + .of_match_table = adc12138_dt_ids, }, .probe = adc12138_probe, .remove = adc12138_remove, diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index b0352e91ac16..068efbce1710 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -464,9 +464,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev, mutex_lock(&data->lock); switch (mask) { - case IIO_CHAN_INFO_RAW: { - int shift = chan->scan_type.shift; - + case IIO_CHAN_INFO_RAW: ret = iio_device_claim_direct_mode(indio_dev); if (ret) break; @@ -487,7 +485,8 @@ static int ads1015_read_raw(struct iio_dev *indio_dev, goto release_direct; } - *val = sign_extend32(*val >> shift, 15 - shift); + *val = sign_extend32(*val >> chan->scan_type.shift, + chan->scan_type.realbits - 1); ret = ads1015_set_power_state(data, false); if (ret < 0) @@ -497,7 +496,6 @@ static int ads1015_read_raw(struct iio_dev *indio_dev, release_direct: iio_device_release_direct_mode(indio_dev); break; - } case IIO_CHAN_INFO_SCALE: idx = data->channel_data[chan->address].pga; 
*val = ads1015_fullscale_range[idx]; @@ -952,7 +950,7 @@ static int ads1015_probe(struct i2c_client *client, indio_dev->name = ADS1015_DRV_NAME; indio_dev->modes = INDIO_DIRECT_MODE; - chip = (enum chip_ids)device_get_match_data(&client->dev); + chip = (uintptr_t)device_get_match_data(&client->dev); if (chip == ADSXXXX) chip = id->driver_data; switch (chip) { diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c index 17d0da5877a9..767b3b634809 100644 --- a/drivers/iio/adc/ti-ads124s08.c +++ b/drivers/iio/adc/ti-ads124s08.c @@ -8,8 +8,7 @@ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/of_gpio.h> +#include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/sysfs.h> diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 79c803537dc4..2e24717d7f55 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -281,12 +281,10 @@ static int ads8688_write_reg_range(struct iio_dev *indio_dev, enum ads8688_range range) { unsigned int tmp; - int ret; tmp = ADS8688_PROG_REG_RANGE_CH(chan->channel); - ret = ads8688_prog_write(indio_dev, tmp, range); - return ret; + return ads8688_prog_write(indio_dev, tmp, range); } static int ads8688_write_raw(struct iio_dev *indio_dev, diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c new file mode 100644 index 000000000000..8343c5f74121 --- /dev/null +++ b/drivers/iio/adc/xilinx-ams.c @@ -0,0 +1,1451 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xilinx AMS driver + * + * Copyright (C) 2021 Xilinx, Inc. + * + * Manish Narani <mnarani@xilinx.com> + * Rajnikant Bhojani <rajnikant.bhojani@xilinx.com> + */ + +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/overflow.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/slab.h> + +#include <linux/iio/events.h> +#include <linux/iio/iio.h> + +/* AMS registers definitions */ +#define AMS_ISR_0 0x010 +#define AMS_ISR_1 0x014 +#define AMS_IER_0 0x020 +#define AMS_IER_1 0x024 +#define AMS_IDR_0 0x028 +#define AMS_IDR_1 0x02C +#define AMS_PS_CSTS 0x040 +#define AMS_PL_CSTS 0x044 + +#define AMS_VCC_PSPLL0 0x060 +#define AMS_VCC_PSPLL3 0x06C +#define AMS_VCCINT 0x078 +#define AMS_VCCBRAM 0x07C +#define AMS_VCCAUX 0x080 +#define AMS_PSDDRPLL 0x084 +#define AMS_PSINTFPDDR 0x09C + +#define AMS_VCC_PSPLL0_CH 48 +#define AMS_VCC_PSPLL3_CH 51 +#define AMS_VCCINT_CH 54 +#define AMS_VCCBRAM_CH 55 +#define AMS_VCCAUX_CH 56 +#define AMS_PSDDRPLL_CH 57 +#define AMS_PSINTFPDDR_CH 63 + +#define AMS_REG_CONFIG0 0x100 +#define AMS_REG_CONFIG1 0x104 +#define AMS_REG_CONFIG3 0x10C +#define AMS_REG_CONFIG4 0x110 +#define AMS_REG_SEQ_CH0 0x120 +#define AMS_REG_SEQ_CH1 0x124 +#define AMS_REG_SEQ_CH2 0x118 + +#define AMS_VUSER0_MASK BIT(0) +#define AMS_VUSER1_MASK BIT(1) +#define AMS_VUSER2_MASK BIT(2) +#define AMS_VUSER3_MASK BIT(3) + +#define AMS_TEMP 0x000 +#define AMS_SUPPLY1 0x004 +#define AMS_SUPPLY2 0x008 +#define AMS_VP_VN 0x00C +#define AMS_VREFP 0x010 +#define AMS_VREFN 0x014 +#define AMS_SUPPLY3 0x018 +#define AMS_SUPPLY4 0x034 +#define AMS_SUPPLY5 0x038 +#define AMS_SUPPLY6 0x03C +#define AMS_SUPPLY7 0x200 +#define AMS_SUPPLY8 0x204 +#define AMS_SUPPLY9 0x208 +#define AMS_SUPPLY10 0x20C +#define 
AMS_VCCAMS 0x210 +#define AMS_TEMP_REMOTE 0x214 + +#define AMS_REG_VAUX(x) (0x40 + 4 * (x)) + +#define AMS_PS_RESET_VALUE 0xFFFF +#define AMS_PL_RESET_VALUE 0xFFFF + +#define AMS_CONF0_CHANNEL_NUM_MASK GENMASK(6, 0) + +#define AMS_CONF1_SEQ_MASK GENMASK(15, 12) +#define AMS_CONF1_SEQ_DEFAULT FIELD_PREP(AMS_CONF1_SEQ_MASK, 0) +#define AMS_CONF1_SEQ_CONTINUOUS FIELD_PREP(AMS_CONF1_SEQ_MASK, 1) +#define AMS_CONF1_SEQ_SINGLE_CHANNEL FIELD_PREP(AMS_CONF1_SEQ_MASK, 2) + +#define AMS_REG_SEQ0_MASK GENMASK(15, 0) +#define AMS_REG_SEQ2_MASK GENMASK(21, 16) +#define AMS_REG_SEQ1_MASK GENMASK_ULL(37, 22) + +#define AMS_PS_SEQ_MASK GENMASK(21, 0) +#define AMS_PL_SEQ_MASK GENMASK_ULL(59, 22) + +#define AMS_ALARM_TEMP 0x140 +#define AMS_ALARM_SUPPLY1 0x144 +#define AMS_ALARM_SUPPLY2 0x148 +#define AMS_ALARM_SUPPLY3 0x160 +#define AMS_ALARM_SUPPLY4 0x164 +#define AMS_ALARM_SUPPLY5 0x168 +#define AMS_ALARM_SUPPLY6 0x16C +#define AMS_ALARM_SUPPLY7 0x180 +#define AMS_ALARM_SUPPLY8 0x184 +#define AMS_ALARM_SUPPLY9 0x188 +#define AMS_ALARM_SUPPLY10 0x18C +#define AMS_ALARM_VCCAMS 0x190 +#define AMS_ALARM_TEMP_REMOTE 0x194 +#define AMS_ALARM_THRESHOLD_OFF_10 0x10 +#define AMS_ALARM_THRESHOLD_OFF_20 0x20 + +#define AMS_ALARM_THR_DIRECT_MASK BIT(1) +#define AMS_ALARM_THR_MIN 0x0000 +#define AMS_ALARM_THR_MAX (BIT(16) - 1) + +#define AMS_ALARM_MASK GENMASK_ULL(63, 0) +#define AMS_NO_OF_ALARMS 32 +#define AMS_PL_ALARM_START 16 +#define AMS_PL_ALARM_MASK GENMASK(31, 16) +#define AMS_ISR0_ALARM_MASK GENMASK(31, 0) +#define AMS_ISR1_ALARM_MASK (GENMASK(31, 29) | GENMASK(4, 0)) +#define AMS_ISR1_EOC_MASK BIT(3) +#define AMS_ISR1_INTR_MASK GENMASK_ULL(63, 32) +#define AMS_ISR0_ALARM_2_TO_0_MASK GENMASK(2, 0) +#define AMS_ISR0_ALARM_6_TO_3_MASK GENMASK(6, 3) +#define AMS_ISR0_ALARM_12_TO_7_MASK GENMASK(13, 8) +#define AMS_CONF1_ALARM_2_TO_0_MASK GENMASK(3, 1) +#define AMS_CONF1_ALARM_6_TO_3_MASK GENMASK(11, 8) +#define AMS_CONF1_ALARM_12_TO_7_MASK GENMASK(5, 0) +#define AMS_REGCFG1_ALARM_MASK \ + (AMS_CONF1_ALARM_2_TO_0_MASK | AMS_CONF1_ALARM_6_TO_3_MASK | BIT(0)) +#define AMS_REGCFG3_ALARM_MASK AMS_CONF1_ALARM_12_TO_7_MASK + +#define AMS_PS_CSTS_PS_READY (BIT(27) | BIT(16)) +#define AMS_PL_CSTS_ACCESS_MASK BIT(1) + +#define AMS_PL_MAX_FIXED_CHANNEL 10 +#define AMS_PL_MAX_EXT_CHANNEL 20 + +#define AMS_INIT_POLL_TIME_US 200 +#define AMS_INIT_TIMEOUT_US 10000 +#define AMS_UNMASK_TIMEOUT_MS 500 + +/* + * Following scale and offset value is derived from + * UG580 (v1.7) December 20, 2016 + */ +#define AMS_SUPPLY_SCALE_1VOLT_mV 1000 +#define AMS_SUPPLY_SCALE_3VOLT_mV 3000 +#define AMS_SUPPLY_SCALE_6VOLT_mV 6000 +#define AMS_SUPPLY_SCALE_DIV_BIT 16 + +#define AMS_TEMP_SCALE 509314 +#define AMS_TEMP_SCALE_DIV_BIT 16 +#define AMS_TEMP_OFFSET -((280230LL << 16) / 509314) + +enum ams_alarm_bit { + AMS_ALARM_BIT_TEMP = 0, + AMS_ALARM_BIT_SUPPLY1 = 1, + AMS_ALARM_BIT_SUPPLY2 = 2, + AMS_ALARM_BIT_SUPPLY3 = 3, + AMS_ALARM_BIT_SUPPLY4 = 4, + AMS_ALARM_BIT_SUPPLY5 = 5, + AMS_ALARM_BIT_SUPPLY6 = 6, + AMS_ALARM_BIT_RESERVED = 7, + AMS_ALARM_BIT_SUPPLY7 = 8, + AMS_ALARM_BIT_SUPPLY8 = 9, + AMS_ALARM_BIT_SUPPLY9 = 10, + AMS_ALARM_BIT_SUPPLY10 = 11, + AMS_ALARM_BIT_VCCAMS = 12, + AMS_ALARM_BIT_TEMP_REMOTE = 13, +}; + +enum ams_seq { + AMS_SEQ_VCC_PSPLL = 0, + AMS_SEQ_VCC_PSBATT = 1, + AMS_SEQ_VCCINT = 2, + AMS_SEQ_VCCBRAM = 3, + AMS_SEQ_VCCAUX = 4, + AMS_SEQ_PSDDRPLL = 5, + AMS_SEQ_INTDDR = 6, +}; + +enum ams_ps_pl_seq { + AMS_SEQ_CALIB = 0, + AMS_SEQ_RSVD_1 = 1, + AMS_SEQ_RSVD_2 = 2, + AMS_SEQ_TEST = 3, + AMS_SEQ_RSVD_4 = 4, + 
AMS_SEQ_SUPPLY4 = 5, + AMS_SEQ_SUPPLY5 = 6, + AMS_SEQ_SUPPLY6 = 7, + AMS_SEQ_TEMP = 8, + AMS_SEQ_SUPPLY2 = 9, + AMS_SEQ_SUPPLY1 = 10, + AMS_SEQ_VP_VN = 11, + AMS_SEQ_VREFP = 12, + AMS_SEQ_VREFN = 13, + AMS_SEQ_SUPPLY3 = 14, + AMS_SEQ_CURRENT_MON = 15, + AMS_SEQ_SUPPLY7 = 16, + AMS_SEQ_SUPPLY8 = 17, + AMS_SEQ_SUPPLY9 = 18, + AMS_SEQ_SUPPLY10 = 19, + AMS_SEQ_VCCAMS = 20, + AMS_SEQ_TEMP_REMOTE = 21, + AMS_SEQ_MAX = 22 +}; + +#define AMS_PS_SEQ_MAX AMS_SEQ_MAX +#define AMS_SEQ(x) (AMS_SEQ_MAX + (x)) +#define PS_SEQ(x) (x) +#define PL_SEQ(x) (AMS_PS_SEQ_MAX + (x)) +#define AMS_CTRL_SEQ_BASE (AMS_PS_SEQ_MAX * 3) + +#define AMS_CHAN_TEMP(_scan_index, _addr) { \ + .type = IIO_TEMP, \ + .indexed = 1, \ + .address = (_addr), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .event_spec = ams_temp_events, \ + .scan_index = _scan_index, \ + .num_event_specs = ARRAY_SIZE(ams_temp_events), \ +} + +#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _alarm) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .address = (_addr), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE), \ + .event_spec = (_alarm) ? ams_voltage_events : NULL, \ + .scan_index = _scan_index, \ + .num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \ +} + +#define AMS_PS_CHAN_TEMP(_scan_index, _addr) \ + AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr) +#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr) \ + AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, true) + +#define AMS_PL_CHAN_TEMP(_scan_index, _addr) \ + AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr) +#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _alarm) \ + AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _alarm) +#define AMS_PL_AUX_CHAN_VOLTAGE(_auxno) \ + AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(_auxno)), AMS_REG_VAUX(_auxno), false) +#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr) \ + AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(AMS_SEQ(_scan_index))), _addr, false) + +/** + * struct ams - This structure contains necessary state for xilinx-ams to operate + * @base: physical base address of device + * @ps_base: physical base address of PS device + * @pl_base: physical base address of PL device + * @clk: clocks associated with the device + * @dev: pointer to device struct + * @lock: to handle multiple user interaction + * @intr_lock: to protect interrupt mask values + * @alarm_mask: alarm configuration + * @current_masked_alarm: currently masked due to alarm + * @intr_mask: interrupt configuration + * @ams_unmask_work: re-enables event once the event condition disappears + * + */ +struct ams { + void __iomem *base; + void __iomem *ps_base; + void __iomem *pl_base; + struct clk *clk; + struct device *dev; + struct mutex lock; + spinlock_t intr_lock; + unsigned int alarm_mask; + unsigned int current_masked_alarm; + u64 intr_mask; + struct delayed_work ams_unmask_work; +}; + +static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset, + u32 mask, u32 data) +{ + u32 val, regval; + + val = readl(ams->ps_base + offset); + regval = (val & ~mask) | (data & mask); + writel(regval, ams->ps_base + offset); +} + +static inline void ams_pl_update_reg(struct ams *ams, unsigned int offset, + u32 mask, u32 data) +{ + u32 val, regval; + + val = readl(ams->pl_base + offset); + regval = (val & ~mask) | (data & mask); + writel(regval, ams->pl_base + offset); +} + +static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val) +{ + u32 regval; + + ams->intr_mask = (ams->intr_mask & ~mask) | (val & mask); + + regval = 
~(ams->intr_mask | ams->current_masked_alarm); + writel(regval, ams->base + AMS_IER_0); + + regval = ~(FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask)); + writel(regval, ams->base + AMS_IER_1); + + regval = ams->intr_mask | ams->current_masked_alarm; + writel(regval, ams->base + AMS_IDR_0); + + regval = FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask); + writel(regval, ams->base + AMS_IDR_1); +} + +static void ams_disable_all_alarms(struct ams *ams) +{ + /* disable PS module alarm */ + if (ams->ps_base) { + ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, + AMS_REGCFG1_ALARM_MASK); + ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, + AMS_REGCFG3_ALARM_MASK); + } + + /* disable PL module alarm */ + if (ams->pl_base) { + ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, + AMS_REGCFG1_ALARM_MASK); + ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, + AMS_REGCFG3_ALARM_MASK); + } +} + +static void ams_update_ps_alarm(struct ams *ams, unsigned long alarm_mask) +{ + u32 cfg; + u32 val; + + val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, alarm_mask); + cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val)); + + val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, alarm_mask); + cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val)); + + ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg); + + val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, alarm_mask); + cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val)); + ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg); +} + +static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask) +{ + unsigned long pl_alarm_mask; + u32 cfg; + u32 val; + + pl_alarm_mask = FIELD_GET(AMS_PL_ALARM_MASK, alarm_mask); + + val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, pl_alarm_mask); + cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val)); + + val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, pl_alarm_mask); + cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val)); + + ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg); + + val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, pl_alarm_mask); + cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val)); + ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg); +} + +static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask) +{ + unsigned long flags; + + if (ams->ps_base) + ams_update_ps_alarm(ams, alarm_mask); + + if (ams->pl_base) + ams_update_pl_alarm(ams, alarm_mask); + + spin_lock_irqsave(&ams->intr_lock, flags); + ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask); + spin_unlock_irqrestore(&ams->intr_lock, flags); +} + +static void ams_enable_channel_sequence(struct iio_dev *indio_dev) +{ + struct ams *ams = iio_priv(indio_dev); + unsigned long long scan_mask; + int i; + u32 regval; + + /* + * Enable channel sequence. First 22 bits of scan_mask represent + * PS channels, and next remaining bits represent PL channels. 
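+	 * The PS half is consumed directly: bits 15:0 land in AMS_REG_SEQ_CH0
+	 * and bits 21:16 in AMS_REG_SEQ_CH2. The PL half is shifted down by
+	 * AMS_PS_SEQ_MAX first and then split across the PL copies of
+	 * SEQ_CH0 (15:0), SEQ_CH2 (21:16) and SEQ_CH1 (37:22).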
+ */ + + /* Run calibration of PS & PL as part of the sequence */ + scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX); + for (i = 0; i < indio_dev->num_channels; i++) + scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index); + + if (ams->ps_base) { + /* put sysmon in a soft reset to change the sequence */ + ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK, + AMS_CONF1_SEQ_DEFAULT); + + /* configure basic channels */ + regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask); + writel(regval, ams->ps_base + AMS_REG_SEQ_CH0); + + regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask); + writel(regval, ams->ps_base + AMS_REG_SEQ_CH2); + + /* set continuous sequence mode */ + ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK, + AMS_CONF1_SEQ_CONTINUOUS); + } + + if (ams->pl_base) { + /* put sysmon in a soft reset to change the sequence */ + ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK, + AMS_CONF1_SEQ_DEFAULT); + + /* configure basic channels */ + scan_mask = FIELD_GET(AMS_PL_SEQ_MASK, scan_mask); + + regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask); + writel(regval, ams->pl_base + AMS_REG_SEQ_CH0); + + regval = FIELD_GET(AMS_REG_SEQ1_MASK, scan_mask); + writel(regval, ams->pl_base + AMS_REG_SEQ_CH1); + + regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask); + writel(regval, ams->pl_base + AMS_REG_SEQ_CH2); + + /* set continuous sequence mode */ + ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK, + AMS_CONF1_SEQ_CONTINUOUS); + } +} + +static int ams_init_device(struct ams *ams) +{ + u32 expect = AMS_PS_CSTS_PS_READY; + u32 reg, value; + int ret; + + /* reset AMS */ + if (ams->ps_base) { + writel(AMS_PS_RESET_VALUE, ams->ps_base + AMS_VP_VN); + + ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg, (reg & expect), + AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US); + if (ret) + return ret; + + /* put sysmon in a default state */ + ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK, + AMS_CONF1_SEQ_DEFAULT); + } + + if (ams->pl_base) { + value = readl(ams->base + AMS_PL_CSTS); + if (value == 0) + return 0; + + writel(AMS_PL_RESET_VALUE, ams->pl_base + AMS_VP_VN); + + /* put sysmon in a default state */ + ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK, + AMS_CONF1_SEQ_DEFAULT); + } + + ams_disable_all_alarms(ams); + + /* Disable interrupt */ + ams_update_intrmask(ams, AMS_ALARM_MASK, AMS_ALARM_MASK); + + /* Clear any pending interrupt */ + writel(AMS_ISR0_ALARM_MASK, ams->base + AMS_ISR_0); + writel(AMS_ISR1_ALARM_MASK, ams->base + AMS_ISR_1); + + return 0; +} + +static int ams_enable_single_channel(struct ams *ams, unsigned int offset) +{ + u8 channel_num; + + switch (offset) { + case AMS_VCC_PSPLL0: + channel_num = AMS_VCC_PSPLL0_CH; + break; + case AMS_VCC_PSPLL3: + channel_num = AMS_VCC_PSPLL3_CH; + break; + case AMS_VCCINT: + channel_num = AMS_VCCINT_CH; + break; + case AMS_VCCBRAM: + channel_num = AMS_VCCBRAM_CH; + break; + case AMS_VCCAUX: + channel_num = AMS_VCCAUX_CH; + break; + case AMS_PSDDRPLL: + channel_num = AMS_PSDDRPLL_CH; + break; + case AMS_PSINTFPDDR: + channel_num = AMS_PSINTFPDDR_CH; + break; + default: + return -EINVAL; + } + + /* set single channel, sequencer off mode */ + ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK, + AMS_CONF1_SEQ_SINGLE_CHANNEL); + + /* write the channel number */ + ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK, + channel_num); + + return 0; +} + +static int ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data) +{ + u32 expect = AMS_ISR1_EOC_MASK; + u32 reg; + int ret; + + 
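+	/*
+	 * Reading one of these control-domain supply registers means leaving
+	 * continuous sequencing: select the supply in single-channel mode,
+	 * poll ISR_1 for the end-of-conversion bit, then latch the result.
+	 * The caller restores the channel sequence afterwards.
+	 */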
ret = ams_enable_single_channel(ams, offset); + if (ret) + return ret; + + ret = readl_poll_timeout(ams->base + AMS_ISR_1, reg, (reg & expect), + AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US); + if (ret) + return ret; + + *data = readl(ams->base + offset); + + return 0; +} + +static int ams_get_ps_scale(int address) +{ + int val; + + switch (address) { + case AMS_SUPPLY1: + case AMS_SUPPLY2: + case AMS_SUPPLY3: + case AMS_SUPPLY4: + case AMS_SUPPLY9: + case AMS_SUPPLY10: + case AMS_VCCAMS: + val = AMS_SUPPLY_SCALE_3VOLT_mV; + break; + case AMS_SUPPLY5: + case AMS_SUPPLY6: + case AMS_SUPPLY7: + case AMS_SUPPLY8: + val = AMS_SUPPLY_SCALE_6VOLT_mV; + break; + default: + val = AMS_SUPPLY_SCALE_1VOLT_mV; + break; + } + + return val; +} + +static int ams_get_pl_scale(struct ams *ams, int address) +{ + int val, regval; + + switch (address) { + case AMS_SUPPLY1: + case AMS_SUPPLY2: + case AMS_SUPPLY3: + case AMS_SUPPLY4: + case AMS_SUPPLY5: + case AMS_SUPPLY6: + case AMS_VCCAMS: + case AMS_VREFP: + case AMS_VREFN: + val = AMS_SUPPLY_SCALE_3VOLT_mV; + break; + case AMS_SUPPLY7: + regval = readl(ams->pl_base + AMS_REG_CONFIG4); + if (FIELD_GET(AMS_VUSER0_MASK, regval)) + val = AMS_SUPPLY_SCALE_6VOLT_mV; + else + val = AMS_SUPPLY_SCALE_3VOLT_mV; + break; + case AMS_SUPPLY8: + regval = readl(ams->pl_base + AMS_REG_CONFIG4); + if (FIELD_GET(AMS_VUSER1_MASK, regval)) + val = AMS_SUPPLY_SCALE_6VOLT_mV; + else + val = AMS_SUPPLY_SCALE_3VOLT_mV; + break; + case AMS_SUPPLY9: + regval = readl(ams->pl_base + AMS_REG_CONFIG4); + if (FIELD_GET(AMS_VUSER2_MASK, regval)) + val = AMS_SUPPLY_SCALE_6VOLT_mV; + else + val = AMS_SUPPLY_SCALE_3VOLT_mV; + break; + case AMS_SUPPLY10: + regval = readl(ams->pl_base + AMS_REG_CONFIG4); + if (FIELD_GET(AMS_VUSER3_MASK, regval)) + val = AMS_SUPPLY_SCALE_6VOLT_mV; + else + val = AMS_SUPPLY_SCALE_3VOLT_mV; + break; + case AMS_VP_VN: + case AMS_REG_VAUX(0) ... 
AMS_REG_VAUX(15): + val = AMS_SUPPLY_SCALE_1VOLT_mV; + break; + default: + val = AMS_SUPPLY_SCALE_1VOLT_mV; + break; + } + + return val; +} + +static int ams_get_ctrl_scale(int address) +{ + int val; + + switch (address) { + case AMS_VCC_PSPLL0: + case AMS_VCC_PSPLL3: + case AMS_VCCINT: + case AMS_VCCBRAM: + case AMS_VCCAUX: + case AMS_PSDDRPLL: + case AMS_PSINTFPDDR: + val = AMS_SUPPLY_SCALE_3VOLT_mV; + break; + default: + val = AMS_SUPPLY_SCALE_1VOLT_mV; + break; + } + + return val; +} + +static int ams_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct ams *ams = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&ams->lock); + if (chan->scan_index >= AMS_CTRL_SEQ_BASE) { + ret = ams_read_vcc_reg(ams, chan->address, val); + if (ret) + goto unlock_mutex; + ams_enable_channel_sequence(indio_dev); + } else if (chan->scan_index >= AMS_PS_SEQ_MAX) + *val = readl(ams->pl_base + chan->address); + else + *val = readl(ams->ps_base + chan->address); + + ret = IIO_VAL_INT; +unlock_mutex: + mutex_unlock(&ams->lock); + return ret; + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_VOLTAGE: + if (chan->scan_index < AMS_PS_SEQ_MAX) + *val = ams_get_ps_scale(chan->address); + else if (chan->scan_index >= AMS_PS_SEQ_MAX && + chan->scan_index < AMS_CTRL_SEQ_BASE) + *val = ams_get_pl_scale(ams, chan->address); + else + *val = ams_get_ctrl_scale(chan->address); + + *val2 = AMS_SUPPLY_SCALE_DIV_BIT; + return IIO_VAL_FRACTIONAL_LOG2; + case IIO_TEMP: + *val = AMS_TEMP_SCALE; + *val2 = AMS_TEMP_SCALE_DIV_BIT; + return IIO_VAL_FRACTIONAL_LOG2; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OFFSET: + /* Only the temperature channel has an offset */ + *val = AMS_TEMP_OFFSET; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir) +{ + int offset; + + if (scan_index >= AMS_PS_SEQ_MAX) + scan_index -= AMS_PS_SEQ_MAX; + + if (dir == IIO_EV_DIR_FALLING) { + if (scan_index < AMS_SEQ_SUPPLY7) + offset = AMS_ALARM_THRESHOLD_OFF_10; + else + offset = AMS_ALARM_THRESHOLD_OFF_20; + } else { + offset = 0; + } + + switch (scan_index) { + case AMS_SEQ_TEMP: + return AMS_ALARM_TEMP + offset; + case AMS_SEQ_SUPPLY1: + return AMS_ALARM_SUPPLY1 + offset; + case AMS_SEQ_SUPPLY2: + return AMS_ALARM_SUPPLY2 + offset; + case AMS_SEQ_SUPPLY3: + return AMS_ALARM_SUPPLY3 + offset; + case AMS_SEQ_SUPPLY4: + return AMS_ALARM_SUPPLY4 + offset; + case AMS_SEQ_SUPPLY5: + return AMS_ALARM_SUPPLY5 + offset; + case AMS_SEQ_SUPPLY6: + return AMS_ALARM_SUPPLY6 + offset; + case AMS_SEQ_SUPPLY7: + return AMS_ALARM_SUPPLY7 + offset; + case AMS_SEQ_SUPPLY8: + return AMS_ALARM_SUPPLY8 + offset; + case AMS_SEQ_SUPPLY9: + return AMS_ALARM_SUPPLY9 + offset; + case AMS_SEQ_SUPPLY10: + return AMS_ALARM_SUPPLY10 + offset; + case AMS_SEQ_VCCAMS: + return AMS_ALARM_VCCAMS + offset; + case AMS_SEQ_TEMP_REMOTE: + return AMS_ALARM_TEMP_REMOTE + offset; + default: + return 0; + } +} + +static const struct iio_chan_spec *ams_event_to_channel(struct iio_dev *dev, + u32 event) +{ + int scan_index = 0, i; + + if (event >= AMS_PL_ALARM_START) { + event -= AMS_PL_ALARM_START; + scan_index = AMS_PS_SEQ_MAX; + } + + switch (event) { + case AMS_ALARM_BIT_TEMP: + scan_index += AMS_SEQ_TEMP; + break; + case AMS_ALARM_BIT_SUPPLY1: + scan_index += AMS_SEQ_SUPPLY1; + break; + case AMS_ALARM_BIT_SUPPLY2: + scan_index += AMS_SEQ_SUPPLY2; + break; + case 
AMS_ALARM_BIT_SUPPLY3: + scan_index += AMS_SEQ_SUPPLY3; + break; + case AMS_ALARM_BIT_SUPPLY4: + scan_index += AMS_SEQ_SUPPLY4; + break; + case AMS_ALARM_BIT_SUPPLY5: + scan_index += AMS_SEQ_SUPPLY5; + break; + case AMS_ALARM_BIT_SUPPLY6: + scan_index += AMS_SEQ_SUPPLY6; + break; + case AMS_ALARM_BIT_SUPPLY7: + scan_index += AMS_SEQ_SUPPLY7; + break; + case AMS_ALARM_BIT_SUPPLY8: + scan_index += AMS_SEQ_SUPPLY8; + break; + case AMS_ALARM_BIT_SUPPLY9: + scan_index += AMS_SEQ_SUPPLY9; + break; + case AMS_ALARM_BIT_SUPPLY10: + scan_index += AMS_SEQ_SUPPLY10; + break; + case AMS_ALARM_BIT_VCCAMS: + scan_index += AMS_SEQ_VCCAMS; + break; + case AMS_ALARM_BIT_TEMP_REMOTE: + scan_index += AMS_SEQ_TEMP_REMOTE; + break; + default: + break; + } + + for (i = 0; i < dev->num_channels; i++) + if (dev->channels[i].scan_index == scan_index) + break; + + return &dev->channels[i]; +} + +static int ams_get_alarm_mask(int scan_index) +{ + int bit = 0; + + if (scan_index >= AMS_PS_SEQ_MAX) { + bit = AMS_PL_ALARM_START; + scan_index -= AMS_PS_SEQ_MAX; + } + + switch (scan_index) { + case AMS_SEQ_TEMP: + return BIT(AMS_ALARM_BIT_TEMP + bit); + case AMS_SEQ_SUPPLY1: + return BIT(AMS_ALARM_BIT_SUPPLY1 + bit); + case AMS_SEQ_SUPPLY2: + return BIT(AMS_ALARM_BIT_SUPPLY2 + bit); + case AMS_SEQ_SUPPLY3: + return BIT(AMS_ALARM_BIT_SUPPLY3 + bit); + case AMS_SEQ_SUPPLY4: + return BIT(AMS_ALARM_BIT_SUPPLY4 + bit); + case AMS_SEQ_SUPPLY5: + return BIT(AMS_ALARM_BIT_SUPPLY5 + bit); + case AMS_SEQ_SUPPLY6: + return BIT(AMS_ALARM_BIT_SUPPLY6 + bit); + case AMS_SEQ_SUPPLY7: + return BIT(AMS_ALARM_BIT_SUPPLY7 + bit); + case AMS_SEQ_SUPPLY8: + return BIT(AMS_ALARM_BIT_SUPPLY8 + bit); + case AMS_SEQ_SUPPLY9: + return BIT(AMS_ALARM_BIT_SUPPLY9 + bit); + case AMS_SEQ_SUPPLY10: + return BIT(AMS_ALARM_BIT_SUPPLY10 + bit); + case AMS_SEQ_VCCAMS: + return BIT(AMS_ALARM_BIT_VCCAMS + bit); + case AMS_SEQ_TEMP_REMOTE: + return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit); + default: + return 0; + } +} + +static int ams_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct ams *ams = iio_priv(indio_dev); + + return !!(ams->alarm_mask & ams_get_alarm_mask(chan->scan_index)); +} + +static int ams_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state) +{ + struct ams *ams = iio_priv(indio_dev); + unsigned int alarm; + + alarm = ams_get_alarm_mask(chan->scan_index); + + mutex_lock(&ams->lock); + + if (state) + ams->alarm_mask |= alarm; + else + ams->alarm_mask &= ~alarm; + + ams_update_alarm(ams, ams->alarm_mask); + + mutex_unlock(&ams->lock); + + return 0; +} + +static int ams_read_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int *val, int *val2) +{ + struct ams *ams = iio_priv(indio_dev); + unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir); + + mutex_lock(&ams->lock); + + if (chan->scan_index >= AMS_PS_SEQ_MAX) + *val = readl(ams->pl_base + offset); + else + *val = readl(ams->ps_base + offset); + + mutex_unlock(&ams->lock); + + return IIO_VAL_INT; +} + +static int ams_write_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int val, int val2) +{ + struct ams *ams = iio_priv(indio_dev); + unsigned int offset; + + 
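+	/*
+	 * The falling direction selects the lower-threshold register, and for
+	 * the temperature channels that register also carries the bit which
+	 * switches the alarm to direct-threshold mode, so it is forced on
+	 * below before the requested threshold value is written.
+	 */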
mutex_lock(&ams->lock); + + /* Set temperature channel threshold to direct threshold */ + if (chan->type == IIO_TEMP) { + offset = ams_get_alarm_offset(chan->scan_index, IIO_EV_DIR_FALLING); + + if (chan->scan_index >= AMS_PS_SEQ_MAX) + ams_pl_update_reg(ams, offset, + AMS_ALARM_THR_DIRECT_MASK, + AMS_ALARM_THR_DIRECT_MASK); + else + ams_ps_update_reg(ams, offset, + AMS_ALARM_THR_DIRECT_MASK, + AMS_ALARM_THR_DIRECT_MASK); + } + + offset = ams_get_alarm_offset(chan->scan_index, dir); + if (chan->scan_index >= AMS_PS_SEQ_MAX) + writel(val, ams->pl_base + offset); + else + writel(val, ams->ps_base + offset); + + mutex_unlock(&ams->lock); + + return 0; +} + +static void ams_handle_event(struct iio_dev *indio_dev, u32 event) +{ + const struct iio_chan_spec *chan; + + chan = ams_event_to_channel(indio_dev, event); + + if (chan->type == IIO_TEMP) { + /* + * The temperature channel only supports over-temperature + * events. + */ + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(chan->type, chan->channel, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_RISING), + iio_get_time_ns(indio_dev)); + } else { + /* + * For other channels we don't know whether it is an upper or + * lower threshold event. Userspace will have to check the + * channel value if it wants to know. + */ + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(chan->type, chan->channel, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_EITHER), + iio_get_time_ns(indio_dev)); + } +} + +static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events) +{ + unsigned int bit; + + for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS) + ams_handle_event(indio_dev, bit); +} + +/** + * ams_unmask_worker - ams alarm interrupt unmask worker + * @work: work to be done + * + * The ZynqMP threshold interrupts are level sensitive. Since we can't make the + * threshold condition go away from within the interrupt handler, this means as + * soon as a threshold condition is present we would enter the interrupt handler + * again and again. To work around this we mask all active threshold interrupts + * in the interrupt handler and start a timer. In this timer we poll the + * interrupt status and only if the interrupt is inactive we unmask it again.
+ */ +static void ams_unmask_worker(struct work_struct *work) +{ + struct ams *ams = container_of(work, struct ams, ams_unmask_work.work); + unsigned int status, unmask; + + spin_lock_irq(&ams->intr_lock); + + status = readl(ams->base + AMS_ISR_0); + + /* Clear those bits which are not active anymore */ + unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm; + + /* Clear status of disabled alarm */ + unmask |= ams->intr_mask; + + ams->current_masked_alarm &= status; + + /* Also clear those which are masked out anyway */ + ams->current_masked_alarm &= ~ams->intr_mask; + + /* Clear the interrupts before we unmask them */ + writel(unmask, ams->base + AMS_ISR_0); + + ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); + + spin_unlock_irq(&ams->intr_lock); + + /* If still pending some alarm re-trigger the timer */ + if (ams->current_masked_alarm) + schedule_delayed_work(&ams->ams_unmask_work, + msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS)); +} + +static irqreturn_t ams_irq(int irq, void *data) +{ + struct iio_dev *indio_dev = data; + struct ams *ams = iio_priv(indio_dev); + u32 isr0; + + spin_lock(&ams->intr_lock); + + isr0 = readl(ams->base + AMS_ISR_0); + + /* Only process alarms that are not masked */ + isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->current_masked_alarm); + if (!isr0) { + spin_unlock(&ams->intr_lock); + return IRQ_NONE; + } + + /* Clear interrupt */ + writel(isr0, ams->base + AMS_ISR_0); + + /* Mask the alarm interrupts until cleared */ + ams->current_masked_alarm |= isr0; + ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); + + ams_handle_events(indio_dev, isr0); + + schedule_delayed_work(&ams->ams_unmask_work, + msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS)); + + spin_unlock(&ams->intr_lock); + + return IRQ_HANDLED; +} + +static const struct iio_event_spec ams_temp_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE), + }, +}; + +static const struct iio_event_spec ams_voltage_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, +}; + +static const struct iio_chan_spec ams_ps_channels[] = { + AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP), + AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10), + AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS), +}; + +static const struct iio_chan_spec ams_pl_channels[] = { + AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, false), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, false), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, true), + 
AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, false), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, true), + AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, true), + AMS_PL_AUX_CHAN_VOLTAGE(0), + AMS_PL_AUX_CHAN_VOLTAGE(1), + AMS_PL_AUX_CHAN_VOLTAGE(2), + AMS_PL_AUX_CHAN_VOLTAGE(3), + AMS_PL_AUX_CHAN_VOLTAGE(4), + AMS_PL_AUX_CHAN_VOLTAGE(5), + AMS_PL_AUX_CHAN_VOLTAGE(6), + AMS_PL_AUX_CHAN_VOLTAGE(7), + AMS_PL_AUX_CHAN_VOLTAGE(8), + AMS_PL_AUX_CHAN_VOLTAGE(9), + AMS_PL_AUX_CHAN_VOLTAGE(10), + AMS_PL_AUX_CHAN_VOLTAGE(11), + AMS_PL_AUX_CHAN_VOLTAGE(12), + AMS_PL_AUX_CHAN_VOLTAGE(13), + AMS_PL_AUX_CHAN_VOLTAGE(14), + AMS_PL_AUX_CHAN_VOLTAGE(15), +}; + +static const struct iio_chan_spec ams_ctrl_channels[] = { + AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0), + AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3), + AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT), + AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM), + AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX), + AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL), + AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR), +}; + +static int ams_get_ext_chan(struct fwnode_handle *chan_node, + struct iio_chan_spec *channels, int num_channels) +{ + struct iio_chan_spec *chan; + struct fwnode_handle *child; + unsigned int reg, ext_chan; + int ret; + + fwnode_for_each_child_node(chan_node, child) { + ret = fwnode_property_read_u32(child, "reg", &reg); + if (ret || reg > AMS_PL_MAX_EXT_CHANNEL + 30) + continue; + + chan = &channels[num_channels]; + ext_chan = reg + AMS_PL_MAX_FIXED_CHANNEL - 30; + memcpy(chan, &ams_pl_channels[ext_chan], sizeof(*channels)); + + if (fwnode_property_read_bool(child, "xlnx,bipolar")) + chan->scan_type.sign = 's'; + + num_channels++; + } + + return num_channels; +} + +static void ams_iounmap_ps(void *data) +{ + struct ams *ams = data; + + iounmap(ams->ps_base); +} + +static void ams_iounmap_pl(void *data) +{ + struct ams *ams = data; + + iounmap(ams->pl_base); +} + +static int ams_init_module(struct iio_dev *indio_dev, + struct fwnode_handle *fwnode, + struct iio_chan_spec *channels) +{ + struct device *dev = indio_dev->dev.parent; + struct ams *ams = iio_priv(indio_dev); + int num_channels = 0; + int ret; + + if (fwnode_property_match_string(fwnode, "compatible", + "xlnx,zynqmp-ams-ps") == 0) { + ams->ps_base = fwnode_iomap(fwnode, 0); + if (!ams->ps_base) + return -ENXIO; + ret = devm_add_action_or_reset(dev, ams_iounmap_ps, ams); + if (ret < 0) + return ret; + + /* add PS channels to iio device channels */ + memcpy(channels, ams_ps_channels, sizeof(ams_ps_channels)); + } else if (fwnode_property_match_string(fwnode, "compatible", + "xlnx,zynqmp-ams-pl") == 0) { + ams->pl_base = fwnode_iomap(fwnode, 0); + if (!ams->pl_base) + return -ENXIO; + + ret = devm_add_action_or_reset(dev, ams_iounmap_pl, ams); + if (ret < 0) + return ret; + + /* Copy only first 10 fixed channels */ + memcpy(channels, ams_pl_channels, AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels)); + num_channels += AMS_PL_MAX_FIXED_CHANNEL; + num_channels = ams_get_ext_chan(fwnode, channels, + num_channels); + } else if (fwnode_property_match_string(fwnode, "compatible", +
"xlnx,zynqmp-ams") == 0) { + /* add AMS channels to iio device channels */ + memcpy(channels, ams_ctrl_channels, sizeof(ams_ctrl_channels)); + num_channels += ARRAY_SIZE(ams_ctrl_channels); + } else { + return -EINVAL; + } + + return num_channels; +} + +static int ams_parse_firmware(struct iio_dev *indio_dev) +{ + struct ams *ams = iio_priv(indio_dev); + struct iio_chan_spec *ams_channels, *dev_channels; + struct device *dev = indio_dev->dev.parent; + struct fwnode_handle *child = NULL; + struct fwnode_handle *fwnode = dev_fwnode(dev); + size_t ams_size, dev_size; + int ret, ch_cnt = 0, i, rising_off, falling_off; + unsigned int num_channels = 0; + + ams_size = ARRAY_SIZE(ams_ps_channels) + ARRAY_SIZE(ams_pl_channels) + + ARRAY_SIZE(ams_ctrl_channels); + + /* Initialize buffer for channel specification */ + ams_channels = devm_kcalloc(dev, ams_size, sizeof(*ams_channels), GFP_KERNEL); + if (!ams_channels) + return -ENOMEM; + + if (fwnode_device_is_available(fwnode)) { + ret = ams_init_module(indio_dev, fwnode, ams_channels); + if (ret < 0) + return ret; + + num_channels += ret; + } + + fwnode_for_each_child_node(fwnode, child) { + if (fwnode_device_is_available(child)) { + ret = ams_init_module(indio_dev, child, ams_channels + num_channels); + if (ret < 0) { + fwnode_handle_put(child); + return ret; + } + + num_channels += ret; + } + } + + for (i = 0; i < num_channels; i++) { + ams_channels[i].channel = ch_cnt++; + + if (ams_channels[i].scan_index < AMS_CTRL_SEQ_BASE) { + /* set threshold to max and min for each channel */ + falling_off = + ams_get_alarm_offset(ams_channels[i].scan_index, + IIO_EV_DIR_FALLING); + rising_off = + ams_get_alarm_offset(ams_channels[i].scan_index, + IIO_EV_DIR_RISING); + if (ams_channels[i].scan_index >= AMS_PS_SEQ_MAX) { + writel(AMS_ALARM_THR_MIN, + ams->pl_base + falling_off); + writel(AMS_ALARM_THR_MAX, + ams->pl_base + rising_off); + } else { + writel(AMS_ALARM_THR_MIN, + ams->ps_base + falling_off); + writel(AMS_ALARM_THR_MAX, + ams->ps_base + rising_off); + } + } + } + + dev_size = array_size(sizeof(*dev_channels), num_channels); + if (dev_size == SIZE_MAX) + return -ENOMEM; + + dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL); + if (!dev_channels) + ret = -ENOMEM; + + indio_dev->channels = dev_channels; + indio_dev->num_channels = num_channels; + + return 0; +} + +static const struct iio_info iio_ams_info = { + .read_raw = &ams_read_raw, + .read_event_config = &ams_read_event_config, + .write_event_config = &ams_write_event_config, + .read_event_value = &ams_read_event_value, + .write_event_value = &ams_write_event_value, +}; + +static const struct of_device_id ams_of_match_table[] = { + { .compatible = "xlnx,zynqmp-ams" }, + { } +}; +MODULE_DEVICE_TABLE(of, ams_of_match_table); + +static void ams_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static void ams_cancel_delayed_work(void *data) +{ + cancel_delayed_work(data); +} + +static int ams_probe(struct platform_device *pdev) +{ + struct iio_dev *indio_dev; + struct ams *ams; + int ret; + int irq; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams)); + if (!indio_dev) + return -ENOMEM; + + ams = iio_priv(indio_dev); + mutex_init(&ams->lock); + spin_lock_init(&ams->intr_lock); + + indio_dev->name = "xilinx-ams"; + + indio_dev->info = &iio_ams_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + ams->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ams->base)) + return PTR_ERR(ams->base); + + ams->clk = devm_clk_get(&pdev->dev, NULL); + 
if (IS_ERR(ams->clk)) + return PTR_ERR(ams->clk); + + ret = clk_prepare_enable(ams->clk); + if (ret < 0) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, ams_clk_disable_unprepare, ams->clk); + if (ret < 0) + return ret; + + INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker); + ret = devm_add_action_or_reset(&pdev->dev, ams_cancel_delayed_work, + &ams->ams_unmask_work); + if (ret < 0) + return ret; + + ret = ams_parse_firmware(indio_dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "failure in parsing DT\n"); + + ret = ams_init_device(ams); + if (ret) + return dev_err_probe(&pdev->dev, ret, "failed to initialize AMS\n"); + + ams_enable_channel_sequence(indio_dev); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq", + indio_dev); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "failed to register interrupt\n"); + + platform_set_drvdata(pdev, indio_dev); + + return devm_iio_device_register(&pdev->dev, indio_dev); +} + +static int __maybe_unused ams_suspend(struct device *dev) +{ + struct ams *ams = iio_priv(dev_get_drvdata(dev)); + + clk_disable_unprepare(ams->clk); + + return 0; +} + +static int __maybe_unused ams_resume(struct device *dev) +{ + struct ams *ams = iio_priv(dev_get_drvdata(dev)); + + return clk_prepare_enable(ams->clk); +} + +static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume); + +static struct platform_driver ams_driver = { + .probe = ams_probe, + .driver = { + .name = "xilinx-ams", + .pm = &ams_pm_ops, + .of_match_table = ams_of_match_table, + }, +}; +module_platform_driver(ams_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Xilinx, Inc."); diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 83bea5ef765d..823c8e5f9809 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -107,6 +107,7 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500; #define XADC_AXI_INT_ALARM_MASK 0x3c0f #define XADC_FLAGS_BUFFERED BIT(0) +#define XADC_FLAGS_IRQ_OPTIONAL BIT(1) /* * The XADC hardware supports a samplerate of up to 1MSPS.
Unfortunately it does @@ -562,7 +563,7 @@ static const struct xadc_ops xadc_7s_axi_ops = { .get_dclk_rate = xadc_axi_get_dclk, .update_alarm = xadc_axi_update_alarm, .interrupt_handler = xadc_axi_interrupt_handler, - .flags = XADC_FLAGS_BUFFERED, + .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL, .type = XADC_TYPE_S7, }; @@ -573,7 +574,7 @@ static const struct xadc_ops xadc_us_axi_ops = { .get_dclk_rate = xadc_axi_get_dclk, .update_alarm = xadc_axi_update_alarm, .interrupt_handler = xadc_axi_interrupt_handler, - .flags = XADC_FLAGS_BUFFERED, + .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL, .type = XADC_TYPE_US, }; @@ -943,7 +944,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, *val = 1000; break; } - *val2 = chan->scan_type.realbits; + *val2 = bits; return IIO_VAL_FRACTIONAL_LOG2; case IIO_TEMP: /* Temp in C = (val * 503.975) / 2**bits - 273.15 */ @@ -1182,7 +1183,7 @@ static const struct of_device_id xadc_of_match_table[] = { MODULE_DEVICE_TABLE(of, xadc_of_match_table); static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np, - unsigned int *conf) + unsigned int *conf, int irq) { struct device *dev = indio_dev->dev.parent; struct xadc *xadc = iio_priv(indio_dev); @@ -1195,6 +1196,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np, u32 ext_mux_chan; u32 reg; int ret; + int i; *conf = 0; @@ -1273,6 +1275,14 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np, } of_node_put(chan_node); + /* No IRQ => no events */ + if (irq <= 0) { + for (i = 0; i < num_channels; i++) { + channels[i].event_spec = NULL; + channels[i].num_event_specs = 0; + } + } + indio_dev->num_channels = num_channels; indio_dev->channels = devm_krealloc(dev, channels, sizeof(*channels) * num_channels, @@ -1307,6 +1317,7 @@ static int xadc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct of_device_id *id; + const struct xadc_ops *ops; struct iio_dev *indio_dev; unsigned int bipolar_mask; unsigned int conf0; @@ -1322,9 +1333,12 @@ static int xadc_probe(struct platform_device *pdev) if (!id) return -EINVAL; - irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return -ENXIO; + ops = id->data; + + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && + (irq != -ENXIO || !(ops->flags & XADC_FLAGS_IRQ_OPTIONAL))) + return irq; indio_dev = devm_iio_device_alloc(dev, sizeof(*xadc)); if (!indio_dev) @@ -1345,7 +1359,7 @@ static int xadc_probe(struct platform_device *pdev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &xadc_info; - ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0); + ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0, irq); if (ret) return ret; @@ -1357,14 +1371,16 @@ static int xadc_probe(struct platform_device *pdev) if (ret) return ret; - xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst"); - if (IS_ERR(xadc->convst_trigger)) - return PTR_ERR(xadc->convst_trigger); + if (irq > 0) { + xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst"); + if (IS_ERR(xadc->convst_trigger)) + return PTR_ERR(xadc->convst_trigger); - xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev, - "samplerate"); - if (IS_ERR(xadc->samplerate_trigger)) - return PTR_ERR(xadc->samplerate_trigger); + xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev, + "samplerate"); + if (IS_ERR(xadc->samplerate_trigger)) + return PTR_ERR(xadc->samplerate_trigger); + } } xadc->clk = devm_clk_get(dev, NULL); @@ -1396,15 +1412,17 @@ static int xadc_probe(struct platform_device *pdev) } } 
- ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler, 0, - dev_name(dev), indio_dev); - if (ret) - return ret; + if (irq > 0) { + ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler, + 0, dev_name(dev), indio_dev); + if (ret) + return ret; - ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work, - &xadc->zynq_unmask_work); - if (ret) - return ret; + ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work, + &xadc->zynq_unmask_work); + if (ret) + return ret; + } ret = xadc->ops->setup(pdev, indio_dev, irq); if (ret) diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig new file mode 100644 index 000000000000..138492362f20 --- /dev/null +++ b/drivers/iio/addac/Kconfig @@ -0,0 +1,20 @@ +# +# ADC DAC drivers +# +# When adding new entries keep the list in alphabetical order + +menu "Analog to digital and digital to analog converters" + +config AD74413R + tristate "Analog Devices AD74412R/AD74413R driver" + depends on GPIOLIB && SPI + select REGMAP_SPI + select CRC8 + help + Say yes here to build support for Analog Devices AD74412R/AD74413R + quad-channel software configurable input/output solution. + + To compile this driver as a module, choose M here: the + module will be called ad74413r. + +endmenu diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile new file mode 100644 index 000000000000..cfd4bbe64ad3 --- /dev/null +++ b/drivers/iio/addac/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for industrial I/O ADDAC drivers +# + +# When adding new entries keep the list in alphabetical order +obj-$(CONFIG_AD74413R) += ad74413r.o diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c new file mode 100644 index 000000000000..5271073bb74e --- /dev/null +++ b/drivers/iio/addac/ad74413r.c @@ -0,0 +1,1475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Analog Devices, Inc. + * Author: Cosmin Tanislav <cosmin.tanislav@analog.com> + */ + +#include <asm/unaligned.h> +#include <linux/bitfield.h> +#include <linux/crc8.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/driver.h> +#include <linux/iio/buffer.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/iio/trigger.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/interrupt.h> +#include <linux/mod_devicetable.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <dt-bindings/iio/addac/adi,ad74413r.h> + +#define AD74413R_CRC_POLYNOMIAL 0x7 +DECLARE_CRC8_TABLE(ad74413r_crc8_table); + +#define AD74413R_CHANNEL_MAX 4 + +#define AD74413R_FRAME_SIZE 4 + +struct ad74413r_chip_info { + const char *name; + bool hart_support; +}; + +struct ad74413r_channel_config { + u32 func; + bool gpo_comparator; + bool initialized; +}; + +struct ad74413r_channels { + struct iio_chan_spec *channels; + unsigned int num_channels; +}; + +struct ad74413r_state { + struct ad74413r_channel_config channel_configs[AD74413R_CHANNEL_MAX]; + unsigned int gpo_gpio_offsets[AD74413R_CHANNEL_MAX]; + unsigned int comp_gpio_offsets[AD74413R_CHANNEL_MAX]; + struct gpio_chip gpo_gpiochip; + struct gpio_chip comp_gpiochip; + struct completion adc_data_completion; + unsigned int num_gpo_gpios; + unsigned int num_comparator_gpios; + u32 sense_resistor_ohms; + + /* + * Synchronize consecutive operations when doing a one-shot + * conversion and when updating the ADC samples SPI message. 
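+ * + * A plain mutex is sufficient for this: both takers (the one-shot + * read path and update_scan_mode()) run in process context and sleep + * on SPI transfers, so the lock is never needed in atomic context.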
+ */ + struct mutex lock; + + const struct ad74413r_chip_info *chip_info; + struct spi_device *spi; + struct regulator *refin_reg; + struct regmap *regmap; + struct device *dev; + struct iio_trigger *trig; + + size_t adc_active_channels; + struct spi_message adc_samples_msg; + struct spi_transfer adc_samples_xfer[AD74413R_CHANNEL_MAX + 1]; + + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + struct { + u8 rx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX]; + s64 timestamp; + } adc_samples_buf ____cacheline_aligned; + + u8 adc_samples_tx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX]; + u8 reg_tx_buf[AD74413R_FRAME_SIZE]; + u8 reg_rx_buf[AD74413R_FRAME_SIZE]; +}; + +#define AD74413R_REG_NOP 0x00 + +#define AD74413R_REG_CH_FUNC_SETUP_X(x) (0x01 + (x)) +#define AD74413R_CH_FUNC_SETUP_MASK GENMASK(3, 0) + +#define AD74413R_REG_ADC_CONFIG_X(x) (0x05 + (x)) +#define AD74413R_ADC_CONFIG_RANGE_MASK GENMASK(7, 5) +#define AD74413R_ADC_CONFIG_REJECTION_MASK GENMASK(4, 3) +#define AD74413R_ADC_RANGE_10V 0b000 +#define AD74413R_ADC_RANGE_2P5V_EXT_POW 0b001 +#define AD74413R_ADC_RANGE_2P5V_INT_POW 0b010 +#define AD74413R_ADC_RANGE_5V_BI_DIR 0b011 +#define AD74413R_ADC_REJECTION_50_60 0b00 +#define AD74413R_ADC_REJECTION_NONE 0b01 +#define AD74413R_ADC_REJECTION_50_60_HART 0b10 +#define AD74413R_ADC_REJECTION_HART 0b11 + +#define AD74413R_REG_DIN_CONFIG_X(x) (0x09 + (x)) +#define AD74413R_DIN_DEBOUNCE_MASK GENMASK(4, 0) +#define AD74413R_DIN_DEBOUNCE_LEN BIT(5) + +#define AD74413R_REG_DAC_CODE_X(x) (0x16 + (x)) +#define AD74413R_DAC_CODE_MAX GENMASK(12, 0) +#define AD74413R_DAC_VOLTAGE_MAX 11000 + +#define AD74413R_REG_GPO_PAR_DATA 0x0d +#define AD74413R_REG_GPO_CONFIG_X(x) (0x0e + (x)) +#define AD74413R_GPO_CONFIG_DATA_MASK BIT(3) +#define AD74413R_GPO_CONFIG_SELECT_MASK GENMASK(2, 0) +#define AD74413R_GPO_CONFIG_100K_PULL_DOWN 0b000 +#define AD74413R_GPO_CONFIG_LOGIC 0b001 +#define AD74413R_GPO_CONFIG_LOGIC_PARALLEL 0b010 +#define AD74413R_GPO_CONFIG_COMPARATOR 0b011 +#define AD74413R_GPO_CONFIG_HIGH_IMPEDANCE 0b100 + +#define AD74413R_REG_ADC_CONV_CTRL 0x23 +#define AD74413R_CONV_SEQ_MASK GENMASK(9, 8) +#define AD74413R_CONV_SEQ_ON 0b00 +#define AD74413R_CONV_SEQ_SINGLE 0b01 +#define AD74413R_CONV_SEQ_CONTINUOUS 0b10 +#define AD74413R_CONV_SEQ_OFF 0b11 +#define AD74413R_CH_EN_MASK(x) BIT(x) + +#define AD74413R_REG_DIN_COMP_OUT 0x25 +#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x + +#define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x)) +#define AD74413R_ADC_RESULT_MAX GENMASK(15, 0) + +#define AD74413R_REG_READ_SELECT 0x41 + +#define AD74413R_REG_CMD_KEY 0x44 +#define AD74413R_CMD_KEY_LDAC 0x953a +#define AD74413R_CMD_KEY_RESET1 0x15fa +#define AD74413R_CMD_KEY_RESET2 0xaf51 + +static const int ad74413r_adc_sampling_rates[] = { + 20, 4800, +}; + +static const int ad74413r_adc_sampling_rates_hart[] = { + 10, 20, 1200, 4800, +}; + +static int ad74413r_crc(u8 *buf) +{ + return crc8(ad74413r_crc8_table, buf, 3, 0); +} + +static void ad74413r_format_reg_write(u8 reg, u16 val, u8 *buf) +{ + buf[0] = reg; + put_unaligned_be16(val, &buf[1]); + buf[3] = ad74413r_crc(buf); +} + +static int ad74413r_reg_write(void *context, unsigned int reg, unsigned int val) +{ + struct ad74413r_state *st = context; + + ad74413r_format_reg_write(reg, val, st->reg_tx_buf); + + return spi_write(st->spi, st->reg_tx_buf, AD74413R_FRAME_SIZE); +} + +static int ad74413r_crc_check(struct ad74413r_state *st, u8 *buf) +{ + u8 expected_crc = ad74413r_crc(buf); + + if 
(buf[3] != expected_crc) { + dev_err(st->dev, "Bad CRC %02x for %02x%02x%02x\n", + buf[3], buf[0], buf[1], buf[2]); + return -EINVAL; + } + + return 0; +} + +static int ad74413r_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + struct ad74413r_state *st = context; + struct spi_transfer reg_read_xfer[] = { + { + .tx_buf = st->reg_tx_buf, + .len = AD74413R_FRAME_SIZE, + .cs_change = 1, + }, + { + .rx_buf = st->reg_rx_buf, + .len = AD74413R_FRAME_SIZE, + }, + }; + int ret; + + ad74413r_format_reg_write(AD74413R_REG_READ_SELECT, reg, + st->reg_tx_buf); + + ret = spi_sync_transfer(st->spi, reg_read_xfer, + ARRAY_SIZE(reg_read_xfer)); + if (ret) + return ret; + + ret = ad74413r_crc_check(st, st->reg_rx_buf); + if (ret) + return ret; + + *val = get_unaligned_be16(&st->reg_rx_buf[1]); + + return 0; +} + +static const struct regmap_config ad74413r_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .reg_read = ad74413r_reg_read, + .reg_write = ad74413r_reg_write, +}; + +static int ad74413r_set_gpo_config(struct ad74413r_state *st, + unsigned int offset, u8 mode) +{ + return regmap_update_bits(st->regmap, AD74413R_REG_GPO_CONFIG_X(offset), + AD74413R_GPO_CONFIG_SELECT_MASK, mode); +} + +static const unsigned int ad74413r_debounce_map[AD74413R_DIN_DEBOUNCE_LEN] = { + 0, 13, 18, 24, 32, 42, 56, 75, + 100, 130, 180, 240, 320, 420, 560, 750, + 1000, 1300, 1800, 2400, 3200, 4200, 5600, 7500, + 10000, 13000, 18000, 24000, 32000, 42000, 56000, 75000, +}; + +static int ad74413r_set_comp_debounce(struct ad74413r_state *st, + unsigned int offset, + unsigned int debounce) +{ + unsigned int val = AD74413R_DIN_DEBOUNCE_LEN - 1; + unsigned int i; + + for (i = 0; i < AD74413R_DIN_DEBOUNCE_LEN; i++) + if (debounce <= ad74413r_debounce_map[i]) { + val = i; + break; + } + + return regmap_update_bits(st->regmap, + AD74413R_REG_DIN_CONFIG_X(offset), + AD74413R_DIN_DEBOUNCE_MASK, + val); +} + +static void ad74413r_gpio_set(struct gpio_chip *chip, + unsigned int offset, int val) +{ + struct ad74413r_state *st = gpiochip_get_data(chip); + unsigned int real_offset = st->gpo_gpio_offsets[offset]; + int ret; + + ret = ad74413r_set_gpo_config(st, real_offset, + AD74413R_GPO_CONFIG_LOGIC); + if (ret) + return; + + regmap_update_bits(st->regmap, AD74413R_REG_GPO_CONFIG_X(real_offset), + AD74413R_GPO_CONFIG_DATA_MASK, + val ? AD74413R_GPO_CONFIG_DATA_MASK : 0); +} + +static void ad74413r_gpio_set_multiple(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits) +{ + struct ad74413r_state *st = gpiochip_get_data(chip); + unsigned long real_mask = 0; + unsigned long real_bits = 0; + unsigned int offset = 0; + int ret; + + for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) { + unsigned int real_offset = st->gpo_gpio_offsets[offset]; + + ret = ad74413r_set_gpo_config(st, real_offset, + AD74413R_GPO_CONFIG_LOGIC_PARALLEL); + if (ret) + return; + + real_mask |= BIT(real_offset); + if (*bits & BIT(offset)) + real_bits |= BIT(real_offset); + } + + regmap_update_bits(st->regmap, AD74413R_REG_GPO_PAR_DATA, + real_mask, real_bits); +} + +static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct ad74413r_state *st = gpiochip_get_data(chip); + unsigned int real_offset = st->comp_gpio_offsets[offset]; + unsigned int status; + int ret; + + ret = regmap_read(st->regmap, AD74413R_REG_DIN_COMP_OUT, &status); + if (ret) + return ret; + + status &= BIT(real_offset); + + return status ?
1 : 0; +} + +static int ad74413r_gpio_get_multiple(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits) +{ + struct ad74413r_state *st = gpiochip_get_data(chip); + unsigned int offset = 0; + unsigned int val; + int ret; + + ret = regmap_read(st->regmap, AD74413R_REG_DIN_COMP_OUT, &val); + if (ret) + return ret; + + for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) { + unsigned int real_offset = st->comp_gpio_offsets[offset]; + + __assign_bit(offset, bits, val & BIT(real_offset)); + } + + return ret; +} + +static int ad74413r_gpio_get_gpo_direction(struct gpio_chip *chip, + unsigned int offset) +{ + return GPIO_LINE_DIRECTION_OUT; +} + +static int ad74413r_gpio_get_comp_direction(struct gpio_chip *chip, + unsigned int offset) +{ + return GPIO_LINE_DIRECTION_IN; +} + +static int ad74413r_gpio_set_gpo_config(struct gpio_chip *chip, + unsigned int offset, + unsigned long config) +{ + struct ad74413r_state *st = gpiochip_get_data(chip); + unsigned int real_offset = st->gpo_gpio_offsets[offset]; + + switch (pinconf_to_config_param(config)) { + case PIN_CONFIG_BIAS_PULL_DOWN: + return ad74413r_set_gpo_config(st, real_offset, + AD74413R_GPO_CONFIG_100K_PULL_DOWN); + case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: + return ad74413r_set_gpo_config(st, real_offset, + AD74413R_GPO_CONFIG_HIGH_IMPEDANCE); + default: + return -ENOTSUPP; + } +} + +static int ad74413r_gpio_set_comp_config(struct gpio_chip *chip, + unsigned int offset, + unsigned long config) +{ + struct ad74413r_state *st = gpiochip_get_data(chip); + unsigned int real_offset = st->comp_gpio_offsets[offset]; + + switch (pinconf_to_config_param(config)) { + case PIN_CONFIG_INPUT_DEBOUNCE: + return ad74413r_set_comp_debounce(st, real_offset, + pinconf_to_config_argument(config)); + default: + return -ENOTSUPP; + } +} + +static int ad74413r_reset(struct ad74413r_state *st) +{ + int ret; + + ret = regmap_write(st->regmap, AD74413R_REG_CMD_KEY, + AD74413R_CMD_KEY_RESET1); + if (ret) + return ret; + + return regmap_write(st->regmap, AD74413R_REG_CMD_KEY, + AD74413R_CMD_KEY_RESET2); +} + +static int ad74413r_set_channel_dac_code(struct ad74413r_state *st, + unsigned int channel, int dac_code) +{ + struct reg_sequence reg_seq[2] = { + { AD74413R_REG_DAC_CODE_X(channel), dac_code }, + { AD74413R_REG_CMD_KEY, AD74413R_CMD_KEY_LDAC }, + }; + + return regmap_multi_reg_write(st->regmap, reg_seq, 2); +} + +static int ad74413r_set_channel_function(struct ad74413r_state *st, + unsigned int channel, u8 func) +{ + return regmap_update_bits(st->regmap, + AD74413R_REG_CH_FUNC_SETUP_X(channel), + AD74413R_CH_FUNC_SETUP_MASK, func); +} + +static int ad74413r_set_adc_conv_seq(struct ad74413r_state *st, + unsigned int status) +{ + int ret; + + /* + * These bits do not clear when a conversion completes. + * To enable a subsequent conversion, repeat the write. + */ + ret = regmap_write_bits(st->regmap, AD74413R_REG_ADC_CONV_CTRL, + AD74413R_CONV_SEQ_MASK, + FIELD_PREP(AD74413R_CONV_SEQ_MASK, status)); + if (ret) + return ret; + + /* + * Wait 100us before starting conversions. + */ + usleep_range(100, 120); + + return 0; +} + +static int ad74413r_set_adc_channel_enable(struct ad74413r_state *st, + unsigned int channel, + bool status) +{ + return regmap_update_bits(st->regmap, AD74413R_REG_ADC_CONV_CTRL, + AD74413R_CH_EN_MASK(channel), + status ?
AD74413R_CH_EN_MASK(channel) : 0); +} + +static int ad74413r_get_adc_range(struct ad74413r_state *st, + unsigned int channel, + unsigned int *val) +{ + int ret; + + ret = regmap_read(st->regmap, AD74413R_REG_ADC_CONFIG_X(channel), val); + if (ret) + return ret; + + *val = FIELD_GET(AD74413R_ADC_CONFIG_RANGE_MASK, *val); + + return 0; +} + +static int ad74413r_get_adc_rejection(struct ad74413r_state *st, + unsigned int channel, + unsigned int *val) +{ + int ret; + + ret = regmap_read(st->regmap, AD74413R_REG_ADC_CONFIG_X(channel), val); + if (ret) + return ret; + + *val = FIELD_GET(AD74413R_ADC_CONFIG_REJECTION_MASK, *val); + + return 0; +} + +static int ad74413r_set_adc_rejection(struct ad74413r_state *st, + unsigned int channel, + unsigned int val) +{ + return regmap_update_bits(st->regmap, + AD74413R_REG_ADC_CONFIG_X(channel), + AD74413R_ADC_CONFIG_REJECTION_MASK, + FIELD_PREP(AD74413R_ADC_CONFIG_REJECTION_MASK, + val)); +} + +static int ad74413r_rejection_to_rate(struct ad74413r_state *st, + unsigned int rej, int *val) +{ + switch (rej) { + case AD74413R_ADC_REJECTION_50_60: + *val = 20; + return 0; + case AD74413R_ADC_REJECTION_NONE: + *val = 4800; + return 0; + case AD74413R_ADC_REJECTION_50_60_HART: + *val = 10; + return 0; + case AD74413R_ADC_REJECTION_HART: + *val = 1200; + return 0; + default: + dev_err(st->dev, "ADC rejection invalid\n"); + return -EINVAL; + } +} + +static int ad74413r_rate_to_rejection(struct ad74413r_state *st, + int rate, unsigned int *val) +{ + switch (rate) { + case 20: + *val = AD74413R_ADC_REJECTION_50_60; + return 0; + case 4800: + *val = AD74413R_ADC_REJECTION_NONE; + return 0; + case 10: + *val = AD74413R_ADC_REJECTION_50_60_HART; + return 0; + case 1200: + *val = AD74413R_ADC_REJECTION_HART; + return 0; + default: + dev_err(st->dev, "ADC rate invalid\n"); + return -EINVAL; + } +} + +static int ad74413r_range_to_voltage_range(struct ad74413r_state *st, + unsigned int range, int *val) +{ + switch (range) { + case AD74413R_ADC_RANGE_10V: + *val = 10000; + return 0; + case AD74413R_ADC_RANGE_2P5V_EXT_POW: + case AD74413R_ADC_RANGE_2P5V_INT_POW: + *val = 2500; + return 0; + case AD74413R_ADC_RANGE_5V_BI_DIR: + *val = 5000; + return 0; + default: + dev_err(st->dev, "ADC range invalid\n"); + return -EINVAL; + } +} + +static int ad74413r_range_to_voltage_offset(struct ad74413r_state *st, + unsigned int range, int *val) +{ + switch (range) { + case AD74413R_ADC_RANGE_10V: + case AD74413R_ADC_RANGE_2P5V_EXT_POW: + *val = 0; + return 0; + case AD74413R_ADC_RANGE_2P5V_INT_POW: + case AD74413R_ADC_RANGE_5V_BI_DIR: + *val = -2500; + return 0; + default: + dev_err(st->dev, "ADC range invalid\n"); + return -EINVAL; + } +} + +static int ad74413r_range_to_voltage_offset_raw(struct ad74413r_state *st, + unsigned int range, int *val) +{ + switch (range) { + case AD74413R_ADC_RANGE_10V: + case AD74413R_ADC_RANGE_2P5V_EXT_POW: + *val = 0; + return 0; + case AD74413R_ADC_RANGE_2P5V_INT_POW: + *val = -((int)AD74413R_ADC_RESULT_MAX); + return 0; + case AD74413R_ADC_RANGE_5V_BI_DIR: + *val = -((int)AD74413R_ADC_RESULT_MAX / 2); + return 0; + default: + dev_err(st->dev, "ADC range invalid\n"); + return -EINVAL; + } +} + +static int ad74413r_get_output_voltage_scale(struct ad74413r_state *st, + int *val, int *val2) +{ + *val = AD74413R_DAC_VOLTAGE_MAX; + *val2 = AD74413R_DAC_CODE_MAX; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74413r_get_output_current_scale(struct ad74413r_state *st, + int *val, int *val2) +{ + *val = regulator_get_voltage(st->refin_reg); + *val2 = 
st->sense_resistor_ohms * AD74413R_DAC_CODE_MAX * 1000; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74413r_get_input_voltage_scale(struct ad74413r_state *st, + unsigned int channel, + int *val, int *val2) +{ + unsigned int range; + int ret; + + ret = ad74413r_get_adc_range(st, channel, &range); + if (ret) + return ret; + + ret = ad74413r_range_to_voltage_range(st, range, val); + if (ret) + return ret; + + *val2 = AD74413R_ADC_RESULT_MAX; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74413r_get_input_voltage_offset(struct ad74413r_state *st, + unsigned int channel, int *val) +{ + unsigned int range; + int ret; + + ret = ad74413r_get_adc_range(st, channel, &range); + if (ret) + return ret; + + ret = ad74413r_range_to_voltage_offset_raw(st, range, val); + if (ret) + return ret; + + return IIO_VAL_INT; +} + +static int ad74413r_get_input_current_scale(struct ad74413r_state *st, + unsigned int channel, int *val, + int *val2) +{ + unsigned int range; + int ret; + + ret = ad74413r_get_adc_range(st, channel, &range); + if (ret) + return ret; + + ret = ad74413r_range_to_voltage_range(st, range, val); + if (ret) + return ret; + + *val2 = AD74413R_ADC_RESULT_MAX * st->sense_resistor_ohms; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74413_get_input_current_offset(struct ad74413r_state *st, + unsigned int channel, int *val) +{ + unsigned int range; + int voltage_range; + int voltage_offset; + int ret; + + ret = ad74413r_get_adc_range(st, channel, &range); + if (ret) + return ret; + + ret = ad74413r_range_to_voltage_range(st, range, &voltage_range); + if (ret) + return ret; + + ret = ad74413r_range_to_voltage_offset(st, range, &voltage_offset); + if (ret) + return ret; + + *val = voltage_offset * AD74413R_ADC_RESULT_MAX / voltage_range; + + return IIO_VAL_INT; +} + +static int ad74413r_get_adc_rate(struct ad74413r_state *st, + unsigned int channel, int *val) +{ + unsigned int rejection; + int ret; + + ret = ad74413r_get_adc_rejection(st, channel, &rejection); + if (ret) + return ret; + + ret = ad74413r_rejection_to_rate(st, rejection, val); + if (ret) + return ret; + + return IIO_VAL_INT; +} + +static int ad74413r_set_adc_rate(struct ad74413r_state *st, + unsigned int channel, int val) +{ + unsigned int rejection; + int ret; + + ret = ad74413r_rate_to_rejection(st, val, &rejection); + if (ret) + return ret; + + return ad74413r_set_adc_rejection(st, channel, rejection); +} + +static irqreturn_t ad74413r_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ad74413r_state *st = iio_priv(indio_dev); + u8 *rx_buf = st->adc_samples_buf.rx_buf; + unsigned int i; + int ret; + + ret = spi_sync(st->spi, &st->adc_samples_msg); + if (ret) + goto out; + + for (i = 0; i < st->adc_active_channels; i++) + ad74413r_crc_check(st, &rx_buf[i * AD74413R_FRAME_SIZE]); + + iio_push_to_buffers_with_timestamp(indio_dev, &st->adc_samples_buf, + iio_get_time_ns(indio_dev)); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static irqreturn_t ad74413r_adc_data_interrupt(int irq, void *data) +{ + struct iio_dev *indio_dev = data; + struct ad74413r_state *st = iio_priv(indio_dev); + + if (iio_buffer_enabled(indio_dev)) + iio_trigger_poll(st->trig); + else + complete(&st->adc_data_completion); + + return IRQ_HANDLED; +} + +static int _ad74413r_get_single_adc_result(struct ad74413r_state *st, + unsigned int channel, int *val) +{ + unsigned int uval; + int ret; + + reinit_completion(&st->adc_data_completion); + + ret = 
ad74413r_set_adc_channel_enable(st, channel, true); + if (ret) + return ret; + + ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_SINGLE); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&st->adc_data_completion, + msecs_to_jiffies(1000)); + if (!ret) { + ret = -ETIMEDOUT; + return ret; + } + + ret = regmap_read(st->regmap, AD74413R_REG_ADC_RESULT_X(channel), + &uval); + if (ret) + return ret; + + ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF); + if (ret) + return ret; + + ret = ad74413r_set_adc_channel_enable(st, channel, false); + if (ret) + return ret; + + *val = uval; + + return IIO_VAL_INT; +} + +static int ad74413r_get_single_adc_result(struct iio_dev *indio_dev, + unsigned int channel, int *val) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + int ret; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + mutex_lock(&st->lock); + ret = _ad74413r_get_single_adc_result(st, channel, val); + mutex_unlock(&st->lock); + + iio_device_release_direct_mode(indio_dev); + + return ret; +} + +static void ad74413r_adc_to_resistance_result(int adc_result, int *val) +{ + if (adc_result == AD74413R_ADC_RESULT_MAX) + adc_result = AD74413R_ADC_RESULT_MAX - 1; + + *val = DIV_ROUND_CLOSEST(adc_result * 2100, + AD74413R_ADC_RESULT_MAX - adc_result); +} + +static int ad74413r_update_scan_mode(struct iio_dev *indio_dev, + const unsigned long *active_scan_mask) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + struct spi_transfer *xfer = st->adc_samples_xfer; + u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE]; + u8 *tx_buf = st->adc_samples_tx_buf; + unsigned int channel; + int ret = -EINVAL; + + mutex_lock(&st->lock); + + spi_message_init(&st->adc_samples_msg); + st->adc_active_channels = 0; + + for_each_clear_bit(channel, active_scan_mask, AD74413R_CHANNEL_MAX) { + ret = ad74413r_set_adc_channel_enable(st, channel, false); + if (ret) + goto out; + } + + if (*active_scan_mask == 0) + goto out; + + /* + * The read select register is used to select which register's value + * will be sent by the slave on the next SPI frame. + * + * Create an SPI message that, on each step, writes to the read select + * register to select the ADC result of the next enabled channel, and + * reads the ADC result of the previous enabled channel. 
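+ * + * Each of these transfers ends with cs_change set, so chip select is + * released between frames and the read select value just written takes + * effect for the frame that follows; one extra, read-only transfer is + * appended at the end to collect the result of the last enabled + * channel.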
+ * + * Example: + * W: [WCH1] [WCH2] [WCH3] [WCH4] [ ] + * R: [ ] [RCH1] [RCH2] [RCH3] [RCH4] + */ + + for_each_set_bit(channel, active_scan_mask, AD74413R_CHANNEL_MAX) { + ret = ad74413r_set_adc_channel_enable(st, channel, true); + if (ret) + goto out; + + st->adc_active_channels++; + + if (xfer == st->adc_samples_xfer) + xfer->rx_buf = NULL; + else + xfer->rx_buf = rx_buf; + + xfer->tx_buf = tx_buf; + xfer->len = AD74413R_FRAME_SIZE; + xfer->cs_change = 1; + + ad74413r_format_reg_write(AD74413R_REG_READ_SELECT, + AD74413R_REG_ADC_RESULT_X(channel), + tx_buf); + + spi_message_add_tail(xfer, &st->adc_samples_msg); + + xfer++; + tx_buf += AD74413R_FRAME_SIZE; + rx_buf += AD74413R_FRAME_SIZE; + } + + xfer->rx_buf = rx_buf; + xfer->tx_buf = NULL; + xfer->len = AD74413R_FRAME_SIZE; + xfer->cs_change = 0; + + spi_message_add_tail(xfer, &st->adc_samples_msg); + +out: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad74413r_buffer_postenable(struct iio_dev *indio_dev) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + + return ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_CONTINUOUS); +} + +static int ad74413r_buffer_predisable(struct iio_dev *indio_dev) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + + return ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF); +} + +static int ad74413r_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_VOLTAGE: + if (chan->output) + return ad74413r_get_output_voltage_scale(st, + val, val2); + else + return ad74413r_get_input_voltage_scale(st, + chan->channel, val, val2); + case IIO_CURRENT: + if (chan->output) + return ad74413r_get_output_current_scale(st, + val, val2); + else + return ad74413r_get_input_current_scale(st, + chan->channel, val, val2); + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OFFSET: + switch (chan->type) { + case IIO_VOLTAGE: + return ad74413r_get_input_voltage_offset(st, + chan->channel, val); + case IIO_CURRENT: + return ad74413_get_input_current_offset(st, + chan->channel, val); + default: + return -EINVAL; + } + case IIO_CHAN_INFO_RAW: + if (chan->output) + return -EINVAL; + + return ad74413r_get_single_adc_result(indio_dev, chan->channel, + val); + case IIO_CHAN_INFO_PROCESSED: { + int ret; + + ret = ad74413r_get_single_adc_result(indio_dev, chan->channel, + val); + if (ret < 0) + return ret; + + ad74413r_adc_to_resistance_result(*val, val); + + return ret; + } + case IIO_CHAN_INFO_SAMP_FREQ: + return ad74413r_get_adc_rate(st, chan->channel, val); + default: + return -EINVAL; + } +} + +static int ad74413r_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long info) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + if (!chan->output) + return -EINVAL; + + if (val < 0 || val > AD74413R_DAC_CODE_MAX) { + dev_err(st->dev, "Invalid DAC code\n"); + return -EINVAL; + } + + return ad74413r_set_channel_dac_code(st, chan->channel, val); + case IIO_CHAN_INFO_SAMP_FREQ: + return ad74413r_set_adc_rate(st, chan->channel, val); + default: + return -EINVAL; + } +} + +static int ad74413r_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long info) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_SAMP_FREQ: + if
(st->chip_info->hart_support) { + *vals = ad74413r_adc_sampling_rates_hart; + *length = ARRAY_SIZE(ad74413r_adc_sampling_rates_hart); + } else { + *vals = ad74413r_adc_sampling_rates; + *length = ARRAY_SIZE(ad74413r_adc_sampling_rates); + } + *type = IIO_VAL_INT; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static const struct iio_buffer_setup_ops ad74413r_buffer_ops = { + .postenable = &ad74413r_buffer_postenable, + .predisable = &ad74413r_buffer_predisable, +}; + +static const struct iio_trigger_ops ad74413r_trigger_ops = { + .validate_device = iio_trigger_validate_own_device, +}; + +static const struct iio_info ad74413r_info = { + .read_raw = &ad74413r_read_raw, + .write_raw = &ad74413r_write_raw, + .read_avail = &ad74413r_read_avail, + .update_scan_mode = &ad74413r_update_scan_mode, +}; + +#define AD74413R_DAC_CHANNEL(_type, extra_mask_separate) \ + { \ + .type = (_type), \ + .indexed = 1, \ + .output = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \ + | (extra_mask_separate), \ + } + +#define AD74413R_ADC_CHANNEL(_type, extra_mask_separate) \ + { \ + .type = (_type), \ + .indexed = 1, \ + .output = 0, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \ + | BIT(IIO_CHAN_INFO_SAMP_FREQ) \ + | (extra_mask_separate), \ + .info_mask_separate_available = \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 16, \ + .storagebits = 32, \ + .shift = 8, \ + .endianness = IIO_BE, \ + }, \ + } + +#define AD74413R_ADC_VOLTAGE_CHANNEL \ + AD74413R_ADC_CHANNEL(IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE) \ + | BIT(IIO_CHAN_INFO_OFFSET)) + +#define AD74413R_ADC_CURRENT_CHANNEL \ + AD74413R_ADC_CHANNEL(IIO_CURRENT, BIT(IIO_CHAN_INFO_SCALE) \ + | BIT(IIO_CHAN_INFO_OFFSET)) + +static struct iio_chan_spec ad74413r_voltage_output_channels[] = { + AD74413R_DAC_CHANNEL(IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE)), + AD74413R_ADC_CURRENT_CHANNEL, +}; + +static struct iio_chan_spec ad74413r_current_output_channels[] = { + AD74413R_DAC_CHANNEL(IIO_CURRENT, BIT(IIO_CHAN_INFO_SCALE)), + AD74413R_ADC_VOLTAGE_CHANNEL, +}; + +static struct iio_chan_spec ad74413r_voltage_input_channels[] = { + AD74413R_ADC_VOLTAGE_CHANNEL, +}; + +static struct iio_chan_spec ad74413r_current_input_channels[] = { + AD74413R_ADC_CURRENT_CHANNEL, +}; + +static struct iio_chan_spec ad74413r_resistance_input_channels[] = { + AD74413R_ADC_CHANNEL(IIO_RESISTANCE, BIT(IIO_CHAN_INFO_PROCESSED)), +}; + +static struct iio_chan_spec ad74413r_digital_input_channels[] = { + AD74413R_ADC_VOLTAGE_CHANNEL, +}; + +#define _AD74413R_CHANNELS(_channels) \ + { \ + .channels = _channels, \ + .num_channels = ARRAY_SIZE(_channels), \ + } + +#define AD74413R_CHANNELS(name) \ + _AD74413R_CHANNELS(ad74413r_ ## name ## _channels) + +static const struct ad74413r_channels ad74413r_channels_map[] = { + [CH_FUNC_HIGH_IMPEDANCE] = AD74413R_CHANNELS(voltage_input), + [CH_FUNC_VOLTAGE_OUTPUT] = AD74413R_CHANNELS(voltage_output), + [CH_FUNC_CURRENT_OUTPUT] = AD74413R_CHANNELS(current_output), + [CH_FUNC_VOLTAGE_INPUT] = AD74413R_CHANNELS(voltage_input), + [CH_FUNC_CURRENT_INPUT_EXT_POWER] = AD74413R_CHANNELS(current_input), + [CH_FUNC_CURRENT_INPUT_LOOP_POWER] = AD74413R_CHANNELS(current_input), + [CH_FUNC_RESISTANCE_INPUT] = AD74413R_CHANNELS(resistance_input), + [CH_FUNC_DIGITAL_INPUT_LOGIC] = AD74413R_CHANNELS(digital_input), + [CH_FUNC_DIGITAL_INPUT_LOOP_POWER] = AD74413R_CHANNELS(digital_input), + [CH_FUNC_CURRENT_INPUT_EXT_POWER_HART] = AD74413R_CHANNELS(current_input), + [CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART] = 
AD74413R_CHANNELS(current_input), +}; + +static int ad74413r_parse_channel_config(struct iio_dev *indio_dev, + struct fwnode_handle *channel_node) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + struct ad74413r_channel_config *config; + u32 index; + int ret; + + ret = fwnode_property_read_u32(channel_node, "reg", &index); + if (ret) { + dev_err(st->dev, "Failed to read channel reg: %d\n", ret); + return ret; + } + + if (index >= AD74413R_CHANNEL_MAX) { + dev_err(st->dev, "Channel index %u is too large\n", index); + return -EINVAL; + } + + config = &st->channel_configs[index]; + if (config->initialized) { + dev_err(st->dev, "Channel %u already initialized\n", index); + return -EINVAL; + } + + config->func = CH_FUNC_HIGH_IMPEDANCE; + fwnode_property_read_u32(channel_node, "adi,ch-func", &config->func); + + if (config->func < CH_FUNC_MIN || config->func > CH_FUNC_MAX) { + dev_err(st->dev, "Invalid channel function %u\n", config->func); + return -EINVAL; + } + + if (!st->chip_info->hart_support && + (config->func == CH_FUNC_CURRENT_INPUT_EXT_POWER_HART || + config->func == CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART)) { + dev_err(st->dev, "Unsupported HART function %u\n", config->func); + return -EINVAL; + } + + if (config->func == CH_FUNC_DIGITAL_INPUT_LOGIC || + config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER) + st->num_comparator_gpios++; + + config->gpo_comparator = fwnode_property_read_bool(channel_node, + "adi,gpo-comparator"); + + if (!config->gpo_comparator) + st->num_gpo_gpios++; + + indio_dev->num_channels += ad74413r_channels_map[config->func].num_channels; + + config->initialized = true; + + return 0; +} + +static int ad74413r_parse_channel_configs(struct iio_dev *indio_dev) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + struct fwnode_handle *channel_node = NULL; + int ret; + + fwnode_for_each_available_child_node(dev_fwnode(st->dev), channel_node) { + ret = ad74413r_parse_channel_config(indio_dev, channel_node); + if (ret) + goto put_channel_node; + } + + return 0; + +put_channel_node: + fwnode_handle_put(channel_node); + + return ret; +} + +static int ad74413r_setup_channels(struct iio_dev *indio_dev) +{ + struct ad74413r_state *st = iio_priv(indio_dev); + struct ad74413r_channel_config *config; + struct iio_chan_spec *channels, *chans; + unsigned int i, num_chans, chan_i; + int ret; + + channels = devm_kcalloc(st->dev, sizeof(*channels), + indio_dev->num_channels, GFP_KERNEL); + if (!channels) + return -ENOMEM; + + indio_dev->channels = channels; + + for (i = 0; i < AD74413R_CHANNEL_MAX; i++) { + config = &st->channel_configs[i]; + chans = ad74413r_channels_map[config->func].channels; + num_chans = ad74413r_channels_map[config->func].num_channels; + + memcpy(channels, chans, num_chans * sizeof(*chans)); + + for (chan_i = 0; chan_i < num_chans; chan_i++) { + struct iio_chan_spec *chan = &channels[chan_i]; + + chan->channel = i; + if (chan->output) + chan->scan_index = -1; + else + chan->scan_index = i; + } + + ret = ad74413r_set_channel_function(st, i, config->func); + if (ret) + return ret; + + channels += num_chans; + } + + return 0; +} + +static int ad74413r_setup_gpios(struct ad74413r_state *st) +{ + struct ad74413r_channel_config *config; + unsigned int comp_gpio_i = 0; + unsigned int gpo_gpio_i = 0; + unsigned int i; + u8 gpo_config; + int ret; + + for (i = 0; i < AD74413R_CHANNEL_MAX; i++) { + config = &st->channel_configs[i]; + + if (config->gpo_comparator) { + gpo_config = AD74413R_GPO_CONFIG_COMPARATOR; + } else { + gpo_config = AD74413R_GPO_CONFIG_LOGIC; + 
st->gpo_gpio_offsets[gpo_gpio_i++] = i; + } + + if (config->func == CH_FUNC_DIGITAL_INPUT_LOGIC || + config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER) + st->comp_gpio_offsets[comp_gpio_i++] = i; + + ret = ad74413r_set_gpo_config(st, i, gpo_config); + if (ret) + return ret; + } + + return 0; +} + +static void ad74413r_regulator_disable(void *regulator) +{ + regulator_disable(regulator); +} + +static int ad74413r_probe(struct spi_device *spi) +{ + struct ad74413r_state *st; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + + st->spi = spi; + st->dev = &spi->dev; + st->chip_info = device_get_match_data(&spi->dev); + mutex_init(&st->lock); + init_completion(&st->adc_data_completion); + + st->regmap = devm_regmap_init(st->dev, NULL, st, + &ad74413r_regmap_config); + if (IS_ERR(st->regmap)) + return PTR_ERR(st->regmap); + + st->refin_reg = devm_regulator_get(st->dev, "refin"); + if (IS_ERR(st->refin_reg)) + return dev_err_probe(st->dev, PTR_ERR(st->refin_reg), + "Failed to get refin regulator\n"); + + ret = regulator_enable(st->refin_reg); + if (ret) + return ret; + + ret = devm_add_action_or_reset(st->dev, ad74413r_regulator_disable, + st->refin_reg); + if (ret) + return ret; + + st->sense_resistor_ohms = 100000000; + device_property_read_u32(st->dev, "shunt-resistor-micro-ohms", + &st->sense_resistor_ohms); + st->sense_resistor_ohms /= 1000000; + + st->trig = devm_iio_trigger_alloc(st->dev, "%s-dev%d", + st->chip_info->name, iio_device_id(indio_dev)); + if (!st->trig) + return -ENOMEM; + + st->trig->ops = &ad74413r_trigger_ops; + iio_trigger_set_drvdata(st->trig, st); + + ret = devm_iio_trigger_register(st->dev, st->trig); + if (ret) + return ret; + + indio_dev->name = st->chip_info->name; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &ad74413r_info; + indio_dev->trig = iio_trigger_get(st->trig); + + ret = ad74413r_reset(st); + if (ret) + return ret; + + ret = ad74413r_parse_channel_configs(indio_dev); + if (ret) + return ret; + + ret = ad74413r_setup_channels(indio_dev); + if (ret) + return ret; + + ret = ad74413r_setup_gpios(st); + if (ret) + return ret; + + if (st->num_gpo_gpios) { + st->gpo_gpiochip.owner = THIS_MODULE; + st->gpo_gpiochip.label = st->chip_info->name; + st->gpo_gpiochip.base = -1; + st->gpo_gpiochip.ngpio = st->num_gpo_gpios; + st->gpo_gpiochip.parent = st->dev; + st->gpo_gpiochip.can_sleep = true; + st->gpo_gpiochip.set = ad74413r_gpio_set; + st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple; + st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config; + st->gpo_gpiochip.get_direction = + ad74413r_gpio_get_gpo_direction; + + ret = devm_gpiochip_add_data(st->dev, &st->gpo_gpiochip, st); + if (ret) + return ret; + } + + if (st->num_comparator_gpios) { + st->comp_gpiochip.owner = THIS_MODULE; + st->comp_gpiochip.label = st->chip_info->name; + st->comp_gpiochip.base = -1; + st->comp_gpiochip.ngpio = st->num_comparator_gpios; + st->comp_gpiochip.parent = st->dev; + st->comp_gpiochip.can_sleep = true; + st->comp_gpiochip.get = ad74413r_gpio_get; + st->comp_gpiochip.get_multiple = ad74413r_gpio_get_multiple; + st->comp_gpiochip.set_config = ad74413r_gpio_set_comp_config; + st->comp_gpiochip.get_direction = + ad74413r_gpio_get_comp_direction; + + ret = devm_gpiochip_add_data(st->dev, &st->comp_gpiochip, st); + if (ret) + return ret; + } + + ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF); + if (ret) + return ret; + + ret = 
devm_request_irq(st->dev, spi->irq, ad74413r_adc_data_interrupt, + 0, st->chip_info->name, indio_dev); + if (ret) + return dev_err_probe(st->dev, ret, "Failed to request irq\n"); + + ret = devm_iio_triggered_buffer_setup(st->dev, indio_dev, + &iio_pollfunc_store_time, + &ad74413r_trigger_handler, + &ad74413r_buffer_ops); + if (ret) + return ret; + + return devm_iio_device_register(st->dev, indio_dev); +} + +static int ad74413r_unregister_driver(struct spi_driver *spi) +{ + spi_unregister_driver(spi); + + return 0; +} + +static int __init ad74413r_register_driver(struct spi_driver *spi) +{ + crc8_populate_msb(ad74413r_crc8_table, AD74413R_CRC_POLYNOMIAL); + + return spi_register_driver(spi); +} + +static const struct ad74413r_chip_info ad74412r_chip_info_data = { + .hart_support = false, + .name = "ad74412r", +}; + +static const struct ad74413r_chip_info ad74413r_chip_info_data = { + .hart_support = true, + .name = "ad74413r", +}; + +static const struct of_device_id ad74413r_dt_id[] = { + { + .compatible = "adi,ad74412r", + .data = &ad74412r_chip_info_data, + }, + { + .compatible = "adi,ad74413r", + .data = &ad74413r_chip_info_data, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, ad74413r_dt_id); + +static struct spi_driver ad74413r_driver = { + .driver = { + .name = "ad74413r", + .of_match_table = ad74413r_dt_id, + }, + .probe = ad74413r_probe, +}; + +module_driver(ad74413r_driver, + ad74413r_register_driver, + ad74413r_unregister_driver); + +MODULE_AUTHOR("Cosmin Tanislav <cosmin.tanislav@analog.com>"); +MODULE_DESCRIPTION("Analog Devices AD74413R ADDAC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c index 9efa692151f0..16c0a77f6a1c 100644 --- a/drivers/iio/amplifiers/hmc425a.c +++ b/drivers/iio/amplifiers/hmc425a.c @@ -192,7 +192,7 @@ static int hmc425a_probe(struct platform_device *pdev) return -ENOMEM; st = iio_priv(indio_dev); - st->type = (enum hmc425a_type)of_device_get_match_data(&pdev->dev); + st->type = (uintptr_t)of_device_get_match_data(&pdev->dev); st->chip_info = &hmc425a_chip_info_tbl[st->type]; indio_dev->num_channels = st->chip_info->num_channels; diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index 1ac94c4e9792..f8ce26a24c57 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -67,7 +67,7 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue, dma_cookie_t cookie; block->bytes_used = min(block->size, dmaengine_buffer->max_size); - block->bytes_used = rounddown(block->bytes_used, + block->bytes_used = round_down(block->bytes_used, dmaengine_buffer->align); desc = dmaengine_prep_slave_single(dmaengine_buffer->chan, diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c index 9cb99585b6ff..04b44a327614 100644 --- a/drivers/iio/chemical/atlas-sensor.c +++ b/drivers/iio/chemical/atlas-sensor.c @@ -434,9 +434,6 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev) return 0; } -static const struct iio_trigger_ops atlas_interrupt_trigger_ops = { -}; - static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = { .postenable = atlas_buffer_postenable, .predisable = atlas_buffer_predisable, @@ -645,7 +642,6 @@ static int atlas_probe(struct i2c_client *client, data->client = client; data->trig = trig; data->chip = chip; - trig->ops = &atlas_interrupt_trigger_ops; iio_trigger_set_drvdata(trig, indio_dev); 
i2c_set_clientdata(client, indio_dev); diff --git a/drivers/iio/chemical/sunrise_co2.c b/drivers/iio/chemical/sunrise_co2.c index 233bd0f379c9..8440dc0c77cf 100644 --- a/drivers/iio/chemical/sunrise_co2.c +++ b/drivers/iio/chemical/sunrise_co2.c @@ -407,24 +407,24 @@ static int sunrise_read_raw(struct iio_dev *iio_dev, mutex_lock(&sunrise->lock); ret = sunrise_read_word(sunrise, SUNRISE_CO2_FILTERED_COMP_REG, &value); - *val = value; mutex_unlock(&sunrise->lock); if (ret) return ret; + *val = value; return IIO_VAL_INT; case IIO_TEMP: mutex_lock(&sunrise->lock); ret = sunrise_read_word(sunrise, SUNRISE_CHIP_TEMPERATURE_REG, &value); - *val = value; mutex_unlock(&sunrise->lock); if (ret) return ret; + *val = value; return IIO_VAL_INT; default: diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c index 23b22a5f5c1c..e7e1c74a351e 100644 --- a/drivers/iio/chemical/vz89x.c +++ b/drivers/iio/chemical/vz89x.c @@ -242,7 +242,7 @@ static int vz89x_get_resistance_reading(struct vz89x_data *data, struct iio_chan_spec const *chan, int *val) { - u8 *tmp = (u8 *) &data->buffer[chan->address]; + u8 *tmp = &data->buffer[chan->address]; switch (chan->scan_type.endianness) { case IIO_LE: diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c index 7cf2bf282cef..d538bf3ab1ef 100644 --- a/drivers/iio/common/scmi_sensors/scmi_iio.c +++ b/drivers/iio/common/scmi_sensors/scmi_iio.c @@ -279,6 +279,52 @@ static int scmi_iio_get_odr_val(struct iio_dev *iio_dev, int *val, int *val2) return 0; } +static int scmi_iio_read_channel_data(struct iio_dev *iio_dev, + struct iio_chan_spec const *ch, int *val, int *val2) +{ + struct scmi_iio_priv *sensor = iio_priv(iio_dev); + u32 sensor_config; + struct scmi_sensor_reading readings[SCMI_IIO_NUM_OF_AXIS]; + int err; + + sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK, + SCMI_SENS_CFG_SENSOR_ENABLE); + err = sensor->sensor_ops->config_set( + sensor->ph, sensor->sensor_info->id, sensor_config); + if (err) { + dev_err(&iio_dev->dev, + "Error in enabling sensor %s err %d", + sensor->sensor_info->name, err); + return err; + } + + err = sensor->sensor_ops->reading_get_timestamped( + sensor->ph, sensor->sensor_info->id, + sensor->sensor_info->num_axis, readings); + if (err) { + dev_err(&iio_dev->dev, + "Error in reading raw attribute for sensor %s err %d", + sensor->sensor_info->name, err); + return err; + } + + sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK, + SCMI_SENS_CFG_SENSOR_DISABLE); + err = sensor->sensor_ops->config_set( + sensor->ph, sensor->sensor_info->id, sensor_config); + if (err) { + dev_err(&iio_dev->dev, + "Error in disabling sensor %s err %d", + sensor->sensor_info->name, err); + return err; + } + + *val = lower_32_bits(readings[ch->scan_index].value); + *val2 = upper_32_bits(readings[ch->scan_index].value); + + return IIO_VAL_INT_64; +} + static int scmi_iio_read_raw(struct iio_dev *iio_dev, struct iio_chan_spec const *ch, int *val, int *val2, long mask) @@ -300,6 +346,14 @@ static int scmi_iio_read_raw(struct iio_dev *iio_dev, case IIO_CHAN_INFO_SAMP_FREQ: ret = scmi_iio_get_odr_val(iio_dev, val, val2); return ret ? 
ret : IIO_VAL_INT_PLUS_MICRO; + case IIO_CHAN_INFO_RAW: + ret = iio_device_claim_direct_mode(iio_dev); + if (ret) + return ret; + + ret = scmi_iio_read_channel_data(iio_dev, ch, val, val2); + iio_device_release_direct_mode(iio_dev); + return ret; default: return -EINVAL; } @@ -381,7 +435,8 @@ static void scmi_iio_set_data_channel(struct iio_chan_spec *iio_chan, iio_chan->type = type; iio_chan->modified = 1; iio_chan->channel2 = mod; - iio_chan->info_mask_separate = BIT(IIO_CHAN_INFO_SCALE); + iio_chan->info_mask_separate = + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_RAW); iio_chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ); iio_chan->info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 1de395bda03e..eb452d0c423c 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -638,7 +638,7 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf) { int i, len = 0; - struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct st_sensor_data *sdata = iio_priv(indio_dev); mutex_lock(&indio_dev->mlock); @@ -660,7 +660,7 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, struct device_attribute *attr, char *buf) { int i, len = 0, q, r; - struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct st_sensor_data *sdata = iio_priv(indio_dev); mutex_lock(&indio_dev->mlock); diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index 75e1f2b48638..bfcf7568de32 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -6,6 +6,16 @@ menu "Digital to analog converters" +config AD3552R + tristate "Analog Devices AD3552R DAC driver" + depends on SPI_MASTER + help + Say yes here to build support for Analog Devices AD3552R + Digital to Analog Converter. + + To compile this driver as a module, choose M here: the + module will be called ad3552r. + config AD5064 tristate "Analog Devices AD5064 and similar multi-channel DAC driver" depends on (SPI_MASTER && I2C!=m) || I2C @@ -221,6 +231,17 @@ config AD5791 To compile this driver as a module, choose M here: the module will be called ad5791. +config AD7293 + tristate "Analog Devices AD7293 Power Amplifier Current Controller" + depends on SPI + help + Say yes here to build support for Analog Devices AD7293 + Power Amplifier Current Controller with + ADC, DACs, and Temperature and Current Sensors + + To compile this driver as a module, choose M here: the + module will be called ad7293. + config AD7303 tristate "Analog Devices AD7303 DAC driver" depends on SPI @@ -329,7 +350,6 @@ config MAX517 config MAX5821 tristate "Maxim MAX5821 DAC driver" depends on I2C - depends on OF help Say yes here to build support for Maxim MAX5821 10 bits DAC. 
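The two st_sensors hunks above swap dev_get_drvdata() for dev_to_iio_dev() in sysfs handlers. A minimal sketch of the distinction, with a hypothetical attribute (the handler name and output are illustrative, not from the patch): the struct device passed to such a handler is the one embedded in the iio_dev itself, and dev_to_iio_dev() is simply container_of() on it, whereas drvdata on that device is only meaningful if something has explicitly set it.

#include <linux/iio/iio.h>
#include <linux/sysfs.h>

/* Hypothetical show handler for an attribute hanging off the IIO device. */
static ssize_t example_name_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	/* "dev" here is &indio_dev->dev, not the parent spi/i2c device. */
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->name);
}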
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 33e16f14902a..01a50131572f 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -4,6 +4,7 @@ # # When adding new entries keep the list in alphabetical order +obj-$(CONFIG_AD3552R) += ad3552r.o obj-$(CONFIG_AD5360) += ad5360.o obj-$(CONFIG_AD5380) += ad5380.o obj-$(CONFIG_AD5421) += ad5421.o @@ -25,6 +26,7 @@ obj-$(CONFIG_AD5791) += ad5791.o obj-$(CONFIG_AD5686) += ad5686.o obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o obj-$(CONFIG_AD5696_I2C) += ad5696-i2c.o +obj-$(CONFIG_AD7293) += ad7293.o obj-$(CONFIG_AD7303) += ad7303.o obj-$(CONFIG_AD8801) += ad8801.o obj-$(CONFIG_CIO_DAC) += cio-dac.o diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c new file mode 100644 index 000000000000..97f13c0b9631 --- /dev/null +++ b/drivers/iio/dac/ad3552r.c @@ -0,0 +1,1138 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Analog Devices AD3552R + * Digital to Analog converter driver + * + * Copyright 2021 Analog Devices Inc. + */ +#include <asm/unaligned.h> +#include <linux/device.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +/* Register addresses */ +/* Primary address space */ +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A 0x00 +#define AD3552R_MASK_SOFTWARE_RESET (BIT(7) | BIT(0)) +#define AD3552R_MASK_ADDR_ASCENSION BIT(5) +#define AD3552R_MASK_SDO_ACTIVE BIT(4) +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B 0x01 +#define AD3552R_MASK_SINGLE_INST BIT(7) +#define AD3552R_MASK_SHORT_INSTRUCTION BIT(3) +#define AD3552R_REG_ADDR_DEVICE_CONFIG 0x02 +#define AD3552R_MASK_DEVICE_STATUS(n) BIT(4 + (n)) +#define AD3552R_MASK_CUSTOM_MODES GENMASK(3, 2) +#define AD3552R_MASK_OPERATING_MODES GENMASK(1, 0) +#define AD3552R_REG_ADDR_CHIP_TYPE 0x03 +#define AD3552R_MASK_CLASS GENMASK(7, 0) +#define AD3552R_REG_ADDR_PRODUCT_ID_L 0x04 +#define AD3552R_REG_ADDR_PRODUCT_ID_H 0x05 +#define AD3552R_REG_ADDR_CHIP_GRADE 0x06 +#define AD3552R_MASK_GRADE GENMASK(7, 4) +#define AD3552R_MASK_DEVICE_REVISION GENMASK(3, 0) +#define AD3552R_REG_ADDR_SCRATCH_PAD 0x0A +#define AD3552R_REG_ADDR_SPI_REVISION 0x0B +#define AD3552R_REG_ADDR_VENDOR_L 0x0C +#define AD3552R_REG_ADDR_VENDOR_H 0x0D +#define AD3552R_REG_ADDR_STREAM_MODE 0x0E +#define AD3552R_MASK_LENGTH GENMASK(7, 0) +#define AD3552R_REG_ADDR_TRANSFER_REGISTER 0x0F +#define AD3552R_MASK_MULTI_IO_MODE GENMASK(7, 6) +#define AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE BIT(2) +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C 0x10 +#define AD3552R_MASK_CRC_ENABLE (GENMASK(7, 6) |\ + GENMASK(1, 0)) +#define AD3552R_MASK_STRICT_REGISTER_ACCESS BIT(5) +#define AD3552R_REG_ADDR_INTERFACE_STATUS_A 0x11 +#define AD3552R_MASK_INTERFACE_NOT_READY BIT(7) +#define AD3552R_MASK_CLOCK_COUNTING_ERROR BIT(5) +#define AD3552R_MASK_INVALID_OR_NO_CRC BIT(3) +#define AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER BIT(2) +#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS BIT(1) +#define AD3552R_MASK_REGISTER_ADDRESS_INVALID BIT(0) +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D 0x14 +#define AD3552R_MASK_ALERT_ENABLE_PULLUP BIT(6) +#define AD3552R_MASK_MEM_CRC_EN BIT(4) +#define AD3552R_MASK_SDO_DRIVE_STRENGTH GENMASK(3, 2) +#define AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN BIT(1) +#define AD3552R_MASK_SPI_CONFIG_DDR BIT(0) +#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG 0x15 +#define AD3552R_MASK_IDUMP_FAST_MODE BIT(6) +#define AD3552R_MASK_SAMPLE_HOLD_DIFFERENTIAL_USER_EN 
BIT(5) +#define AD3552R_MASK_SAMPLE_HOLD_USER_TRIM GENMASK(4, 3) +#define AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE BIT(2) +#define AD3552R_MASK_REFERENCE_VOLTAGE_SEL GENMASK(1, 0) +#define AD3552R_REG_ADDR_ERR_ALARM_MASK 0x16 +#define AD3552R_MASK_REF_RANGE_ALARM BIT(6) +#define AD3552R_MASK_CLOCK_COUNT_ERR_ALARM BIT(5) +#define AD3552R_MASK_MEM_CRC_ERR_ALARM BIT(4) +#define AD3552R_MASK_SPI_CRC_ERR_ALARM BIT(3) +#define AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM BIT(2) +#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM BIT(1) +#define AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM BIT(0) +#define AD3552R_REG_ADDR_ERR_STATUS 0x17 +#define AD3552R_MASK_REF_RANGE_ERR_STATUS BIT(6) +#define AD3552R_MASK_DUAL_SPI_STREAM_EXCEEDS_DAC_ERR_STATUS BIT(5) +#define AD3552R_MASK_MEM_CRC_ERR_STATUS BIT(4) +#define AD3552R_MASK_RESET_STATUS BIT(0) +#define AD3552R_REG_ADDR_POWERDOWN_CONFIG 0x18 +#define AD3552R_MASK_CH_DAC_POWERDOWN(ch) BIT(4 + (ch)) +#define AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch) BIT(ch) +#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE 0x19 +#define AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) ((ch) ? GENMASK(7, 4) :\ + GENMASK(3, 0)) +#define AD3552R_REG_ADDR_CH_OFFSET(ch) (0x1B + (ch) * 2) +#define AD3552R_MASK_CH_OFFSET_BITS_0_7 GENMASK(7, 0) +#define AD3552R_REG_ADDR_CH_GAIN(ch) (0x1C + (ch) * 2) +#define AD3552R_MASK_CH_RANGE_OVERRIDE BIT(7) +#define AD3552R_MASK_CH_GAIN_SCALING_N GENMASK(6, 5) +#define AD3552R_MASK_CH_GAIN_SCALING_P GENMASK(4, 3) +#define AD3552R_MASK_CH_OFFSET_POLARITY BIT(2) +#define AD3552R_MASK_CH_OFFSET_BIT_8 BIT(0) +/* + * Secondary region + * For multibyte registers specify the highest address because the access is + * done in descending order + */ +#define AD3552R_SECONDARY_REGION_START 0x28 +#define AD3552R_REG_ADDR_HW_LDAC_16B 0x28 +#define AD3552R_REG_ADDR_CH_DAC_16B(ch) (0x2C - (1 - ch) * 2) +#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B 0x2E +#define AD3552R_REG_ADDR_CH_SELECT_16B 0x2F +#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B 0x31 +#define AD3552R_REG_ADDR_SW_LDAC_16B 0x32 +#define AD3552R_REG_ADDR_CH_INPUT_16B(ch) (0x36 - (1 - ch) * 2) +/* 3 bytes registers */ +#define AD3552R_REG_START_24B 0x37 +#define AD3552R_REG_ADDR_HW_LDAC_24B 0x37 +#define AD3552R_REG_ADDR_CH_DAC_24B(ch) (0x3D - (1 - ch) * 3) +#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B 0x40 +#define AD3552R_REG_ADDR_CH_SELECT_24B 0x41 +#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B 0x44 +#define AD3552R_REG_ADDR_SW_LDAC_24B 0x45 +#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - ch) * 3) + +/* Useful defines */ +#define AD3552R_NUM_CH 2 +#define AD3552R_MASK_CH(ch) BIT(ch) +#define AD3552R_MASK_ALL_CH GENMASK(1, 0) +#define AD3552R_MAX_REG_SIZE 3 +#define AD3552R_READ_BIT BIT(7) +#define AD3552R_ADDR_MASK GENMASK(6, 0) +#define AD3552R_MASK_DAC_12B 0xFFF0 +#define AD3552R_DEFAULT_CONFIG_B_VALUE 0x8 +#define AD3552R_SCRATCH_PAD_TEST_VAL1 0x34 +#define AD3552R_SCRATCH_PAD_TEST_VAL2 0xB2 +#define AD3552R_GAIN_SCALE 1000 +#define AD3552R_LDAC_PULSE_US 100 + +enum ad3552r_ch_vref_select { + /* Internal source with Vref I/O floating */ + AD3552R_INTERNAL_VREF_PIN_FLOATING, + /* Internal source with Vref I/O at 2.5V */ + AD3552R_INTERNAL_VREF_PIN_2P5V, + /* External source with Vref I/O as input */ + AD3552R_EXTERNAL_VREF_PIN_INPUT +}; + +enum ad3542r_id { + AD3542R_ID = 0x4008, + AD3552R_ID = 0x4009, +}; + +enum ad3552r_ch_output_range { + /* Range from 0 V to 2.5 V. Requires Rfb1x connection */ + AD3552R_CH_OUTPUT_RANGE_0__2P5V, + /* Range from 0 V to 5 V. 
Requires Rfb1x connection */ + AD3552R_CH_OUTPUT_RANGE_0__5V, + /* Range from 0 V to 10 V. Requires Rfb2x connection */ + AD3552R_CH_OUTPUT_RANGE_0__10V, + /* Range from -5 V to 5 V. Requires Rfb2x connection */ + AD3552R_CH_OUTPUT_RANGE_NEG_5__5V, + /* Range from -10 V to 10 V. Requires Rfb4x connection */ + AD3552R_CH_OUTPUT_RANGE_NEG_10__10V, +}; + +static const s32 ad3552r_ch_ranges[][2] = { + [AD3552R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500}, + [AD3552R_CH_OUTPUT_RANGE_0__5V] = {0, 5000}, + [AD3552R_CH_OUTPUT_RANGE_0__10V] = {0, 10000}, + [AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000}, + [AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = {-10000, 10000} +}; + +enum ad3542r_ch_output_range { + /* Range from 0 V to 2.5 V. Requires Rfb1x connection */ + AD3542R_CH_OUTPUT_RANGE_0__2P5V, + /* Range from 0 V to 3 V. Requires Rfb1x connection */ + AD3542R_CH_OUTPUT_RANGE_0__3V, + /* Range from 0 V to 5 V. Requires Rfb1x connection */ + AD3542R_CH_OUTPUT_RANGE_0__5V, + /* Range from 0 V to 10 V. Requires Rfb2x connection */ + AD3542R_CH_OUTPUT_RANGE_0__10V, + /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */ + AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V, + /* Range from -5 V to 5 V. Requires Rfb2x connection */ + AD3542R_CH_OUTPUT_RANGE_NEG_5__5V, +}; + +static const s32 ad3542r_ch_ranges[][2] = { + [AD3542R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500}, + [AD3542R_CH_OUTPUT_RANGE_0__3V] = {0, 3000}, + [AD3542R_CH_OUTPUT_RANGE_0__5V] = {0, 5000}, + [AD3542R_CH_OUTPUT_RANGE_0__10V] = {0, 10000}, + [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = {-2500, 7500}, + [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000} +}; + +enum ad3552r_ch_gain_scaling { + /* Gain scaling of 1 */ + AD3552R_CH_GAIN_SCALING_1, + /* Gain scaling of 0.5 */ + AD3552R_CH_GAIN_SCALING_0_5, + /* Gain scaling of 0.25 */ + AD3552R_CH_GAIN_SCALING_0_25, + /* Gain scaling of 0.125 */ + AD3552R_CH_GAIN_SCALING_0_125, +}; + +/* Gain * AD3552R_GAIN_SCALE */ +static const s32 gains_scaling_table[] = { + [AD3552R_CH_GAIN_SCALING_1] = 1000, + [AD3552R_CH_GAIN_SCALING_0_5] = 500, + [AD3552R_CH_GAIN_SCALING_0_25] = 250, + [AD3552R_CH_GAIN_SCALING_0_125] = 125 +}; + +enum ad3552r_dev_attributes { + /* - Direct register values */ + /* From 0-3 */ + AD3552R_SDO_DRIVE_STRENGTH, + /* + * 0 -> Internal Vref, vref_io pin floating (default) + * 1 -> Internal Vref, vref_io driven by internal vref + * 2 or 3 -> External Vref + */ + AD3552R_VREF_SELECT, + /* Read registers in ascending order if set. Else descending */ + AD3552R_ADDR_ASCENSION, +}; + +enum ad3552r_ch_attributes { + /* DAC powerdown */ + AD3552R_CH_DAC_POWERDOWN, + /* DAC amplifier powerdown */ + AD3552R_CH_AMPLIFIER_POWERDOWN, + /* Select the output range. Select from enum ad3552r_ch_output_range */ + AD3552R_CH_OUTPUT_RANGE_SEL, + /* + * Override the range selector in order to manually set the output + * voltage range + */ + AD3552R_CH_RANGE_OVERRIDE, + /* Manually set the offset voltage */ + AD3552R_CH_GAIN_OFFSET, + /* Sets the polarity of the offset. */ + AD3552R_CH_GAIN_OFFSET_POLARITY, + /* PDAC gain scaling */ + AD3552R_CH_GAIN_SCALING_P, + /* NDAC gain scaling */ + AD3552R_CH_GAIN_SCALING_N, + /* Rfb value */ + AD3552R_CH_RFB, + /* Channel select. 
When set, allows Input -> DAC and Mask -> DAC */ + AD3552R_CH_SELECT, +}; + +struct ad3552r_ch_data { + s32 scale_int; + s32 scale_dec; + s32 offset_int; + s32 offset_dec; + s16 gain_offset; + u16 rfb; + u8 n; + u8 p; + u8 range; + bool range_override; +}; + +struct ad3552r_desc { + /* Used to lock the SPI bus for atomic operations where needed */ + struct mutex lock; + struct gpio_desc *gpio_reset; + struct gpio_desc *gpio_ldac; + struct spi_device *spi; + struct ad3552r_ch_data ch_data[AD3552R_NUM_CH]; + struct iio_chan_spec channels[AD3552R_NUM_CH + 1]; + unsigned long enabled_ch; + unsigned int num_ch; + enum ad3542r_id chip_id; +}; + +static const u16 addr_mask_map[][2] = { + [AD3552R_ADDR_ASCENSION] = { + AD3552R_REG_ADDR_INTERFACE_CONFIG_A, + AD3552R_MASK_ADDR_ASCENSION + }, + [AD3552R_SDO_DRIVE_STRENGTH] = { + AD3552R_REG_ADDR_INTERFACE_CONFIG_D, + AD3552R_MASK_SDO_DRIVE_STRENGTH + }, + [AD3552R_VREF_SELECT] = { + AD3552R_REG_ADDR_SH_REFERENCE_CONFIG, + AD3552R_MASK_REFERENCE_VOLTAGE_SEL + }, +}; + +/* 0 -> reg addr, 1->ch0 mask, 2->ch1 mask */ +static const u16 addr_mask_map_ch[][3] = { + [AD3552R_CH_DAC_POWERDOWN] = { + AD3552R_REG_ADDR_POWERDOWN_CONFIG, + AD3552R_MASK_CH_DAC_POWERDOWN(0), + AD3552R_MASK_CH_DAC_POWERDOWN(1) + }, + [AD3552R_CH_AMPLIFIER_POWERDOWN] = { + AD3552R_REG_ADDR_POWERDOWN_CONFIG, + AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0), + AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1) + }, + [AD3552R_CH_OUTPUT_RANGE_SEL] = { + AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE, + AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0), + AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1) + }, + [AD3552R_CH_SELECT] = { + AD3552R_REG_ADDR_CH_SELECT_16B, + AD3552R_MASK_CH(0), + AD3552R_MASK_CH(1) + } +}; + +static u8 _ad3552r_reg_len(u8 addr) +{ + switch (addr) { + case AD3552R_REG_ADDR_HW_LDAC_16B: + case AD3552R_REG_ADDR_CH_SELECT_16B: + case AD3552R_REG_ADDR_SW_LDAC_16B: + case AD3552R_REG_ADDR_HW_LDAC_24B: + case AD3552R_REG_ADDR_CH_SELECT_24B: + case AD3552R_REG_ADDR_SW_LDAC_24B: + return 1; + default: + break; + } + + if (addr > AD3552R_REG_ADDR_HW_LDAC_24B) + return 3; + if (addr > AD3552R_REG_ADDR_HW_LDAC_16B) + return 2; + + return 1; +} + +/* SPI transfer to device */ +static int ad3552r_transfer(struct ad3552r_desc *dac, u8 addr, u32 len, + u8 *data, bool is_read) +{ + /* Maximum transfer: Addr (1B) + 2 * (Data Reg (3B)) + SW LDAC(1B) */ + u8 buf[8]; + + buf[0] = addr & AD3552R_ADDR_MASK; + buf[0] |= is_read ? 
AD3552R_READ_BIT : 0; + if (is_read) + return spi_write_then_read(dac->spi, buf, 1, data, len); + + memcpy(buf + 1, data, len); + return spi_write_then_read(dac->spi, buf, len + 1, NULL, 0); +} + +static int ad3552r_write_reg(struct ad3552r_desc *dac, u8 addr, u16 val) +{ + u8 reg_len; + u8 buf[AD3552R_MAX_REG_SIZE] = { 0 }; + + reg_len = _ad3552r_reg_len(addr); + if (reg_len == 2) + /* Only DAC registers are 2 bytes wide */ + val &= AD3552R_MASK_DAC_12B; + if (reg_len == 1) + buf[0] = val & 0xFF; + else + /* reg_len can be 2 or 3, but the 3rd byte needs to be set to 0 */ + put_unaligned_be16(val, buf); + + return ad3552r_transfer(dac, addr, reg_len, buf, false); +} + +static int ad3552r_read_reg(struct ad3552r_desc *dac, u8 addr, u16 *val) +{ + int err; + u8 reg_len, buf[AD3552R_MAX_REG_SIZE] = { 0 }; + + reg_len = _ad3552r_reg_len(addr); + err = ad3552r_transfer(dac, addr, reg_len, buf, true); + if (err) + return err; + + if (reg_len == 1) + *val = buf[0]; + else + /* reg_len can be 2 or 3, but only the first 2 bytes are relevant */ + *val = get_unaligned_be16(buf); + + return 0; +} + +static u16 ad3552r_field_prep(u16 val, u16 mask) +{ + return (val << __ffs(mask)) & mask; +} + +/* Update field of a register, shift val if needed */ +static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask, + u16 val) +{ + int ret; + u16 reg; + + ret = ad3552r_read_reg(dac, addr, &reg); + if (ret < 0) + return ret; + + reg &= ~mask; + reg |= ad3552r_field_prep(val, mask); + + return ad3552r_write_reg(dac, addr, reg); +} + +static int ad3552r_set_ch_value(struct ad3552r_desc *dac, + enum ad3552r_ch_attributes attr, + u8 ch, + u16 val) +{ + /* Update register related to attributes in chip */ + return ad3552r_update_reg_field(dac, addr_mask_map_ch[attr][0], + addr_mask_map_ch[attr][ch + 1], val); +} + +#define AD3552R_CH_DAC(_idx) ((struct iio_chan_spec) { \ + .type = IIO_VOLTAGE, \ + .output = true, \ + .indexed = true, \ + .channel = _idx, \ + .scan_index = _idx, \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_ENABLE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ +}) + +static int ad3552r_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, + int *val2, + long mask) +{ + struct ad3552r_desc *dac = iio_priv(indio_dev); + u16 tmp_val; + int err; + u8 ch = chan->channel; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&dac->lock); + err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_CH_DAC_24B(ch), + &tmp_val); + mutex_unlock(&dac->lock); + if (err < 0) + return err; + *val = tmp_val; + return IIO_VAL_INT; + case IIO_CHAN_INFO_ENABLE: + mutex_lock(&dac->lock); + err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_POWERDOWN_CONFIG, + &tmp_val); + mutex_unlock(&dac->lock); + if (err < 0) + return err; + *val = !((tmp_val & AD3552R_MASK_CH_DAC_POWERDOWN(ch)) >> + __ffs(AD3552R_MASK_CH_DAC_POWERDOWN(ch))); + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = dac->ch_data[ch].scale_int; + *val2 = dac->ch_data[ch].scale_dec; + return IIO_VAL_INT_PLUS_MICRO; + case IIO_CHAN_INFO_OFFSET: + *val = dac->ch_data[ch].offset_int; + *val2 = dac->ch_data[ch].offset_dec; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +} + +static int ad3552r_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, + int val2, + long mask) +{ + struct ad3552r_desc *dac = iio_priv(indio_dev); + 
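/* Writing a raw value updates the addressed channel's DAC register in the 24-bit region directly; the ENABLE attribute maps onto the inverted per-channel DAC powerdown bit. */ +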
int err; + + mutex_lock(&dac->lock); + switch (mask) { + case IIO_CHAN_INFO_RAW: + err = ad3552r_write_reg(dac, + AD3552R_REG_ADDR_CH_DAC_24B(chan->channel), + val); + break; + case IIO_CHAN_INFO_ENABLE: + err = ad3552r_set_ch_value(dac, AD3552R_CH_DAC_POWERDOWN, + chan->channel, !val); + break; + default: + err = -EINVAL; + break; + } + mutex_unlock(&dac->lock); + + return err; +} + +static const struct iio_info ad3552r_iio_info = { + .read_raw = ad3552r_read_raw, + .write_raw = ad3552r_write_raw +}; + +static int32_t ad3552r_trigger_hw_ldac(struct gpio_desc *ldac) +{ + gpiod_set_value_cansleep(ldac, 0); + usleep_range(AD3552R_LDAC_PULSE_US, AD3552R_LDAC_PULSE_US + 10); + gpiod_set_value_cansleep(ldac, 1); + + return 0; +} + +static int ad3552r_write_all_channels(struct ad3552r_desc *dac, u8 *data) +{ + int err, len; + u8 addr, buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE + 1]; + + addr = AD3552R_REG_ADDR_CH_INPUT_24B(1); + /* CH1 */ + memcpy(buff, data + 2, 2); + buff[2] = 0; + /* CH0 */ + memcpy(buff + 3, data, 2); + buff[5] = 0; + len = 6; + if (!dac->gpio_ldac) { + /* Software LDAC */ + buff[6] = AD3552R_MASK_ALL_CH; + ++len; + } + err = ad3552r_transfer(dac, addr, len, buff, false); + if (err) + return err; + + if (dac->gpio_ldac) + return ad3552r_trigger_hw_ldac(dac->gpio_ldac); + + return 0; +} + +static int ad3552r_write_codes(struct ad3552r_desc *dac, u32 mask, u8 *data) +{ + int err; + u8 addr, buff[AD3552R_MAX_REG_SIZE]; + + if (mask == AD3552R_MASK_ALL_CH) { + if (memcmp(data, data + 2, 2) != 0) + return ad3552r_write_all_channels(dac, data); + + addr = AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B; + } else { + addr = AD3552R_REG_ADDR_CH_INPUT_24B(__ffs(mask)); + } + + memcpy(buff, data, 2); + buff[2] = 0; + err = ad3552r_transfer(dac, addr, 3, buff, false); + if (err) + return err; + + if (dac->gpio_ldac) + return ad3552r_trigger_hw_ldac(dac->gpio_ldac); + + return ad3552r_write_reg(dac, AD3552R_REG_ADDR_SW_LDAC_24B, mask); +} + +static irqreturn_t ad3552r_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct iio_buffer *buf = indio_dev->buffer; + struct ad3552r_desc *dac = iio_priv(indio_dev); + /* Maximum size of a scan */ + u8 buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE]; + int err; + + memset(buff, 0, sizeof(buff)); + err = iio_pop_from_buffer(buf, buff); + if (err) + goto end; + + mutex_lock(&dac->lock); + ad3552r_write_codes(dac, *indio_dev->active_scan_mask, buff); + mutex_unlock(&dac->lock); +end: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int ad3552r_check_scratch_pad(struct ad3552r_desc *dac) +{ + const u16 val1 = AD3552R_SCRATCH_PAD_TEST_VAL1; + const u16 val2 = AD3552R_SCRATCH_PAD_TEST_VAL2; + u16 val; + int err; + + err = ad3552r_write_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, val1); + if (err < 0) + return err; + + err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, &val); + if (err < 0) + return err; + + if (val1 != val) + return -ENODEV; + + err = ad3552r_write_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, val2); + if (err < 0) + return err; + + err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, &val); + if (err < 0) + return err; + + if (val2 != val) + return -ENODEV; + + return 0; +} + +struct reg_addr_pool { + struct ad3552r_desc *dac; + u8 addr; +}; + +static int ad3552r_read_reg_wrapper(struct reg_addr_pool *addr) +{ + int err; + u16 val; + + err = ad3552r_read_reg(addr->dac, addr->addr, &val); + if (err) + return err; + + return val; +} + +static int 
ad3552r_reset(struct ad3552r_desc *dac) +{ + struct reg_addr_pool addr; + int ret; + int val; + + dac->gpio_reset = devm_gpiod_get_optional(&dac->spi->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(dac->gpio_reset)) + return dev_err_probe(&dac->spi->dev, PTR_ERR(dac->gpio_reset), + "Error while getting gpio reset\n"); + + if (dac->gpio_reset) { + /* Perform hardware reset */ + usleep_range(10, 20); + gpiod_set_value_cansleep(dac->gpio_reset, 1); + } else { + /* Perform software reset if no GPIO provided */ + ret = ad3552r_update_reg_field(dac, + AD3552R_REG_ADDR_INTERFACE_CONFIG_A, + AD3552R_MASK_SOFTWARE_RESET, + AD3552R_MASK_SOFTWARE_RESET); + if (ret < 0) + return ret; + + } + + addr.dac = dac; + addr.addr = AD3552R_REG_ADDR_INTERFACE_CONFIG_B; + ret = readx_poll_timeout(ad3552r_read_reg_wrapper, &addr, val, + val == AD3552R_DEFAULT_CONFIG_B_VALUE || + val < 0, + 5000, 50000); + if (val < 0) + ret = val; + if (ret) { + dev_err(&dac->spi->dev, "Error while resetting\n"); + return ret; + } + + ret = readx_poll_timeout(ad3552r_read_reg_wrapper, &addr, val, + !(val & AD3552R_MASK_INTERFACE_NOT_READY) || + val < 0, + 5000, 50000); + if (val < 0) + ret = val; + if (ret) { + dev_err(&dac->spi->dev, "Error while resetting\n"); + return ret; + } + + return ad3552r_update_reg_field(dac, + addr_mask_map[AD3552R_ADDR_ASCENSION][0], + addr_mask_map[AD3552R_ADDR_ASCENSION][1], + val); +} + +static void ad3552r_get_custom_range(struct ad3552r_desc *dac, s32 i, s32 *v_min, + s32 *v_max) +{ + s64 vref, tmp, common, offset, gn, gp; + /* + * From datasheet formula (In Volts): + * Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03] + * Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03] + * Calculations are converted to millivolts + */ + vref = 2500; + /* 2.5 * 1.03 * 1000 (To mV) */ + common = 2575 * dac->ch_data[i].rfb; + offset = dac->ch_data[i].gain_offset; + + gn = gains_scaling_table[dac->ch_data[i].n]; + tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common; + tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE); + *v_max = vref + tmp; + + gp = gains_scaling_table[dac->ch_data[i].p]; + tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common; + tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE); + *v_min = vref - tmp; +} + +static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch) +{ + s32 idx, v_max, v_min, span, rem; + s64 tmp; + + if (dac->ch_data[ch].range_override) { + ad3552r_get_custom_range(dac, ch, &v_min, &v_max); + } else { + /* Normal range */ + idx = dac->ch_data[ch].range; + if (dac->chip_id == AD3542R_ID) { + v_min = ad3542r_ch_ranges[idx][0]; + v_max = ad3542r_ch_ranges[idx][1]; + } else { + v_min = ad3552r_ch_ranges[idx][0]; + v_max = ad3552r_ch_ranges[idx][1]; + } + } + + /* + * From datasheet formula: + * Vout = Span * (D / 65536) + Vmin + * Converted to scale and offset: + * Scale = Span / 65536 + * Offset = 65536 * Vmin / Span + * + * Remainders are in micros in order to be printed as + * IIO_VAL_INT_PLUS_MICRO + */ + span = v_max - v_min; + dac->ch_data[ch].scale_int = div_s64_rem(span, 65536, &rem); + /* Do operations in microvolts */ + dac->ch_data[ch].scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000, + 65536); + + dac->ch_data[ch].offset_int = div_s64_rem(v_min * 65536, span, &rem); + tmp = (s64)rem * 1000000; + dac->ch_data[ch].offset_dec = div_s64(tmp, span); +} + +static int ad3552r_find_range(u16 id, s32 *vals) +{ + int i, len; + const s32 (*ranges)[2]; + + if (id == AD3542R_ID) { + len = ARRAY_SIZE(ad3542r_ch_ranges); + ranges = ad3542r_ch_ranges; + } else { + len 
= ARRAY_SIZE(ad3552r_ch_ranges); + ranges = ad3552r_ch_ranges; + } + + for (i = 0; i < len; i++) + if (vals[0] == ranges[i][0] * 1000 && + vals[1] == ranges[i][1] * 1000) + return i; + + return -EINVAL; +} + +static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac, + struct fwnode_handle *child, + u32 ch) +{ + struct device *dev = &dac->spi->dev; + struct fwnode_handle *gain_child; + u32 val; + int err; + u8 addr; + u16 reg = 0, offset; + + gain_child = fwnode_get_named_child_node(child, + "custom-output-range-config"); + if (!gain_child) { + dev_err(dev, + "mandatory custom-output-range-config property missing\n"); + return -EINVAL; + } + + dac->ch_data[ch].range_override = 1; + reg |= ad3552r_field_prep(1, AD3552R_MASK_CH_RANGE_OVERRIDE); + + err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val); + if (err) { + dev_err(dev, "mandatory adi,gain-scaling-p property missing\n"); + goto put_child; + } + reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_P); + dac->ch_data[ch].p = val; + + err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val); + if (err) { + dev_err(dev, "mandatory adi,gain-scaling-n property missing\n"); + goto put_child; + } + reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_N); + dac->ch_data[ch].n = val; + + err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val); + if (err) { + dev_err(dev, "mandatory adi,rfb-ohms property missing\n"); + goto put_child; + } + dac->ch_data[ch].rfb = val; + + err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val); + if (err) { + dev_err(dev, "mandatory adi,gain-offset property missing\n"); + goto put_child; + } + dac->ch_data[ch].gain_offset = val; + + offset = abs((s32)val); + reg |= ad3552r_field_prep((offset >> 8), AD3552R_MASK_CH_OFFSET_BIT_8); + + reg |= ad3552r_field_prep((s32)val < 0, AD3552R_MASK_CH_OFFSET_POLARITY); + addr = AD3552R_REG_ADDR_CH_GAIN(ch); + err = ad3552r_write_reg(dac, addr, + offset & AD3552R_MASK_CH_OFFSET_BITS_0_7); + if (err) { + dev_err(dev, "Error writing register\n"); + goto put_child; + } + + err = ad3552r_write_reg(dac, addr, reg); + if (err) { + dev_err(dev, "Error writing register\n"); + goto put_child; + } + +put_child: + fwnode_handle_put(gain_child); + + return err; +} + +static void ad3552r_reg_disable(void *reg) +{ + regulator_disable(reg); +} + +static int ad3552r_configure_device(struct ad3552r_desc *dac) +{ + struct device *dev = &dac->spi->dev; + struct fwnode_handle *child; + struct regulator *vref; + int err, cnt = 0, voltage, delta = 100000; + u32 vals[2], val, ch; + + dac->gpio_ldac = devm_gpiod_get_optional(dev, "ldac", GPIOD_OUT_HIGH); + if (IS_ERR(dac->gpio_ldac)) + return dev_err_probe(dev, PTR_ERR(dac->gpio_ldac), + "Error getting gpio ldac"); + + vref = devm_regulator_get_optional(dev, "vref"); + if (IS_ERR(vref)) { + if (PTR_ERR(vref) != -ENODEV) + return dev_err_probe(dev, PTR_ERR(vref), + "Error getting vref"); + + if (device_property_read_bool(dev, "adi,vref-out-en")) + val = AD3552R_INTERNAL_VREF_PIN_2P5V; + else + val = AD3552R_INTERNAL_VREF_PIN_FLOATING; + } else { + err = regulator_enable(vref); + if (err) { + dev_err(dev, "Failed to enable external vref supply\n"); + return err; + } + + err = devm_add_action_or_reset(dev, ad3552r_reg_disable, vref); + if (err) { + regulator_disable(vref); + return err; + } + + voltage = regulator_get_voltage(vref); + if (voltage > 2500000 + delta || voltage < 2500000 - delta) { + dev_warn(dev, "vref-supply must be 2.5V\n"); + return -EINVAL; + 
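/* An external reference is accepted only within 100 mV of the nominal 2.5 V; anything else is rejected rather than compensated for. */ +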
} + val = AD3552R_EXTERNAL_VREF_PIN_INPUT; + } + + err = ad3552r_update_reg_field(dac, + addr_mask_map[AD3552R_VREF_SELECT][0], + addr_mask_map[AD3552R_VREF_SELECT][1], + val); + if (err) + return err; + + err = device_property_read_u32(dev, "adi,sdo-drive-strength", &val); + if (!err) { + if (val > 3) { + dev_err(dev, "adi,sdo-drive-strength must be less than 4\n"); + return -EINVAL; + } + + err = ad3552r_update_reg_field(dac, + addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][0], + addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][1], + val); + if (err) + return err; + } + + dac->num_ch = device_get_child_node_count(dev); + if (!dac->num_ch) { + dev_err(dev, "No channels defined\n"); + return -ENODEV; + } + + device_for_each_child_node(dev, child) { + err = fwnode_property_read_u32(child, "reg", &ch); + if (err) { + dev_err(dev, "mandatory reg property missing\n"); + goto put_child; + } + if (ch >= AD3552R_NUM_CH) { + dev_err(dev, "reg must be less than %d\n", + AD3552R_NUM_CH); + err = -EINVAL; + goto put_child; + } + + if (fwnode_property_present(child, "adi,output-range-microvolt")) { + err = fwnode_property_read_u32_array(child, + "adi,output-range-microvolt", + vals, + 2); + if (err) { + dev_err(dev, + "adi,output-range-microvolt property could not be parsed\n"); + goto put_child; + } + + err = ad3552r_find_range(dac->chip_id, vals); + if (err < 0) { + dev_err(dev, + "Invalid adi,output-range-microvolt value\n"); + goto put_child; + } + val = err; + err = ad3552r_set_ch_value(dac, + AD3552R_CH_OUTPUT_RANGE_SEL, + ch, val); + if (err) + goto put_child; + + dac->ch_data[ch].range = val; + } else if (dac->chip_id == AD3542R_ID) { + dev_err(dev, + "adi,output-range-microvolt is required for ad3542r\n"); + err = -EINVAL; + goto put_child; + } else { + err = ad3552r_configure_custom_gain(dac, child, ch); + if (err) + goto put_child; + } + + ad3552r_calc_gain_and_offset(dac, ch); + dac->enabled_ch |= BIT(ch); + + err = ad3552r_set_ch_value(dac, AD3552R_CH_SELECT, ch, 1); + if (err < 0) + goto put_child; + + dac->channels[cnt] = AD3552R_CH_DAC(ch); + ++cnt; + + } + + /* Disable unused channels */ + for_each_clear_bit(ch, &dac->enabled_ch, AD3552R_NUM_CH) { + err = ad3552r_set_ch_value(dac, AD3552R_CH_AMPLIFIER_POWERDOWN, + ch, 1); + if (err) + return err; + } + + dac->num_ch = cnt; + + return 0; +put_child: + fwnode_handle_put(child); + + return err; +} + +static int ad3552r_init(struct ad3552r_desc *dac) +{ + int err; + u16 val, id; + + err = ad3552r_reset(dac); + if (err) { + dev_err(&dac->spi->dev, "Reset failed\n"); + return err; + } + + err = ad3552r_check_scratch_pad(dac); + if (err) { + dev_err(&dac->spi->dev, "Scratch pad test failed\n"); + return err; + } + + err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_PRODUCT_ID_L, &val); + if (err) { + dev_err(&dac->spi->dev, "Fail read PRODUCT_ID_L\n"); + return err; + } + + id = val; + err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_PRODUCT_ID_H, &val); + if (err) { + dev_err(&dac->spi->dev, "Fail read PRODUCT_ID_H\n"); + return err; + } + + id |= val << 8; + if (id != dac->chip_id) { + dev_err(&dac->spi->dev, "Product id not matching\n"); + return -ENODEV; + } + + return ad3552r_configure_device(dac); +} + +static int ad3552r_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + struct ad3552r_desc *dac; + struct iio_dev *indio_dev; + int err; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*dac)); + if (!indio_dev) + return -ENOMEM; + + dac = iio_priv(indio_dev); + dac->spi = spi; + dac->chip_id = 
id->driver_data; + + mutex_init(&dac->lock); + + err = ad3552r_init(dac); + if (err) + return err; + + /* Config triggered buffer device */ + if (dac->chip_id == AD3552R_ID) + indio_dev->name = "ad3552r"; + else + indio_dev->name = "ad3542r"; + indio_dev->dev.parent = &spi->dev; + indio_dev->info = &ad3552r_iio_info; + indio_dev->num_channels = dac->num_ch; + indio_dev->channels = dac->channels; + indio_dev->modes = INDIO_DIRECT_MODE; + + err = devm_iio_triggered_buffer_setup_ext(&indio_dev->dev, indio_dev, NULL, + &ad3552r_trigger_handler, + IIO_BUFFER_DIRECTION_OUT, + NULL, + NULL); + if (err) + return err; + + return devm_iio_device_register(&spi->dev, indio_dev); +} + +static const struct spi_device_id ad3552r_id[] = { + { "ad3542r", AD3542R_ID }, + { "ad3552r", AD3552R_ID }, + { } +}; +MODULE_DEVICE_TABLE(spi, ad3552r_id); + +static const struct of_device_id ad3552r_of_match[] = { + { .compatible = "adi,ad3542r"}, + { .compatible = "adi,ad3552r"}, + { } +}; +MODULE_DEVICE_TABLE(of, ad3552r_of_match); + +static struct spi_driver ad3552r_driver = { + .driver = { + .name = "ad3552r", + .of_match_table = ad3552r_of_match, + }, + .probe = ad3552r_probe, + .id_table = ad3552r_id +}; +module_spi_driver(ad3552r_driver); + +MODULE_AUTHOR("Mihail Chindris <mihail.chindris@analog.com>"); +MODULE_DESCRIPTION("Analog Devices AD3552R DAC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index fd9cac4f6321..27ee2c63c5d4 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -377,7 +377,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { .shared = IIO_SEPARATE, }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5064_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ad5064_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5064_powerdown_mode_enum), { }, }; @@ -389,7 +389,7 @@ static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = { .shared = IIO_SEPARATE, }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ltc2617_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ltc2617_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c index 8ca26bb4b62f..e38860a6a9f3 100644 --- a/drivers/iio/dac/ad5380.c +++ b/drivers/iio/dac/ad5380.c @@ -249,7 +249,7 @@ static const struct iio_chan_spec_ext_info ad5380_ext_info[] = { }, IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5380_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ad5380_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5380_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 3cc5513a6cbf..1c9b54c012a7 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -142,7 +142,7 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = { .shared = IIO_SEPARATE, }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5446_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ad5446_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5446_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index 19cdf9890d02..b631261efa97 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -241,7 +241,7 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = { }, IIO_ENUM("powerdown_mode", 
IIO_SHARED_BY_TYPE, &ad5504_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ad5504_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5504_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 530529feebb5..3c98941b9f99 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -159,7 +159,7 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = { }, IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5624r_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ad5624r_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5624r_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index 8f001db775f4..e592a995f404 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c @@ -184,7 +184,7 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = { .shared = IIO_SEPARATE, }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5686_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ad5686_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5686_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c index cabc38d54085..7a62e6e1d5f1 100644 --- a/drivers/iio/dac/ad5755.c +++ b/drivers/iio/dac/ad5755.c @@ -13,10 +13,10 @@ #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/delay.h> -#include <linux/of.h> +#include <linux/property.h> + #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> -#include <linux/platform_data/ad5755.h> #define AD5755_NUM_CHANNELS 4 @@ -63,6 +63,101 @@ #define AD5755_SLEW_RATE_SHIFT 3 #define AD5755_SLEW_ENABLE BIT(12) +enum ad5755_mode { + AD5755_MODE_VOLTAGE_0V_5V = 0, + AD5755_MODE_VOLTAGE_0V_10V = 1, + AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2, + AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3, + AD5755_MODE_CURRENT_4mA_20mA = 4, + AD5755_MODE_CURRENT_0mA_20mA = 5, + AD5755_MODE_CURRENT_0mA_24mA = 6, +}; + +enum ad5755_dc_dc_phase { + AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0, + AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1, + AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2, + AD5755_DC_DC_PHASE_90_DEGREE = 3, +}; + +enum ad5755_dc_dc_freq { + AD5755_DC_DC_FREQ_250kHZ = 0, + AD5755_DC_DC_FREQ_410kHZ = 1, + AD5755_DC_DC_FREQ_650kHZ = 2, +}; + +enum ad5755_dc_dc_maxv { + AD5755_DC_DC_MAXV_23V = 0, + AD5755_DC_DC_MAXV_24V5 = 1, + AD5755_DC_DC_MAXV_27V = 2, + AD5755_DC_DC_MAXV_29V5 = 3, +}; + +enum ad5755_slew_rate { + AD5755_SLEW_RATE_64k = 0, + AD5755_SLEW_RATE_32k = 1, + AD5755_SLEW_RATE_16k = 2, + AD5755_SLEW_RATE_8k = 3, + AD5755_SLEW_RATE_4k = 4, + AD5755_SLEW_RATE_2k = 5, + AD5755_SLEW_RATE_1k = 6, + AD5755_SLEW_RATE_500 = 7, + AD5755_SLEW_RATE_250 = 8, + AD5755_SLEW_RATE_125 = 9, + AD5755_SLEW_RATE_64 = 10, + AD5755_SLEW_RATE_32 = 11, + AD5755_SLEW_RATE_16 = 12, + AD5755_SLEW_RATE_8 = 13, + AD5755_SLEW_RATE_4 = 14, + AD5755_SLEW_RATE_0_5 = 15, +}; + +enum ad5755_slew_step_size { + AD5755_SLEW_STEP_SIZE_1 = 0, + AD5755_SLEW_STEP_SIZE_2 = 1, + AD5755_SLEW_STEP_SIZE_4 = 2, + AD5755_SLEW_STEP_SIZE_8 = 3, + AD5755_SLEW_STEP_SIZE_16 = 4, + AD5755_SLEW_STEP_SIZE_32 = 5, + AD5755_SLEW_STEP_SIZE_64 = 6, + AD5755_SLEW_STEP_SIZE_128 = 7, + AD5755_SLEW_STEP_SIZE_256 = 8, +}; + +/** + * struct ad5755_platform_data - AD5755 DAC driver platform data + * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter + * compensation resistor is used. 
+ * @dc_dc_phase: DC-DC converter phase. + * @dc_dc_freq: DC-DC converter frequency. + * @dc_dc_maxv: DC-DC maximum allowed boost voltage. + * @dac: Per DAC instance parameters. + * @dac.mode: The mode to be used for the DAC output. + * @dac.ext_current_sense_resistor: Whether an external current sense resistor + * is used. + * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange. + * @dac.slew.enable: Whether to enable digital slew. + * @dac.slew.rate: Slew rate of the digital slew. + * @dac.slew.step_size: Slew step size of the digital slew. + **/ +struct ad5755_platform_data { + bool ext_dc_dc_compenstation_resistor; + enum ad5755_dc_dc_phase dc_dc_phase; + enum ad5755_dc_dc_freq dc_dc_freq; + enum ad5755_dc_dc_maxv dc_dc_maxv; + + struct { + enum ad5755_mode mode; + bool ext_current_sense_resistor; + bool enable_voltage_overrange; + struct { + bool enable; + enum ad5755_slew_rate rate; + enum ad5755_slew_step_size step_size; + } slew; + } dac[4]; +}; + /** * struct ad5755_chip_info - chip specific information * @channel_template: channel specification @@ -111,7 +206,6 @@ enum ad5755_type { ID_AD5737, }; -#ifdef CONFIG_OF static const int ad5755_dcdc_freq_table[][2] = { { 250000, AD5755_DC_DC_FREQ_250kHZ }, { 410000, AD5755_DC_DC_FREQ_410kHZ }, @@ -154,7 +248,6 @@ static const int ad5755_slew_step_table[][2] = { { 2, AD5755_SLEW_STEP_SIZE_2 }, { 1, AD5755_SLEW_STEP_SIZE_1 }, }; -#endif static int ad5755_write_unlocked(struct iio_dev *indio_dev, unsigned int reg, unsigned int val) @@ -604,30 +697,29 @@ static const struct ad5755_platform_data ad5755_default_pdata = { }, }; -#ifdef CONFIG_OF -static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) +static struct ad5755_platform_data *ad5755_parse_fw(struct device *dev) { - struct device_node *np = dev->of_node; - struct device_node *pp; + struct fwnode_handle *pp; struct ad5755_platform_data *pdata; unsigned int tmp; unsigned int tmparray[3]; int devnr, i; + if (!dev_fwnode(dev)) + return NULL; + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; pdata->ext_dc_dc_compenstation_resistor = - of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor"); + device_property_read_bool(dev, "adi,ext-dc-dc-compenstation-resistor"); - if (!of_property_read_u32(np, "adi,dc-dc-phase", &tmp)) - pdata->dc_dc_phase = tmp; - else - pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE; + pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE; + device_property_read_u32(dev, "adi,dc-dc-phase", &pdata->dc_dc_phase); pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ; - if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) { + if (!device_property_read_u32(dev, "adi,dc-dc-freq-hz", &tmp)) { for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) { if (tmp == ad5755_dcdc_freq_table[i][0]) { pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1]; @@ -641,7 +733,7 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) } pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V; - if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) { + if (!device_property_read_u32(dev, "adi,dc-dc-max-microvolt", &tmp)) { for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) { if (tmp == ad5755_dcdc_maxv_table[i][0]) { pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1]; @@ -654,25 +746,23 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) } devnr = 0; - for_each_child_of_node(np, pp) { + device_for_each_child_node(dev, pp) { if (devnr >= AD5755_NUM_CHANNELS) { 
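/* More child nodes than the four physical channels indicates a broken firmware description, so bail out instead of silently clipping. */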
dev_err(dev, "There are too many channels defined in DT\n"); goto error_out; } - if (!of_property_read_u32(pp, "adi,mode", &tmp)) - pdata->dac[devnr].mode = tmp; - else - pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA; + pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA; + fwnode_property_read_u32(pp, "adi,mode", &pdata->dac[devnr].mode); pdata->dac[devnr].ext_current_sense_resistor = - of_property_read_bool(pp, "adi,ext-current-sense-resistor"); + fwnode_property_read_bool(pp, "adi,ext-current-sense-resistor"); pdata->dac[devnr].enable_voltage_overrange = - of_property_read_bool(pp, "adi,enable-voltage-overrange"); + fwnode_property_read_bool(pp, "adi,enable-voltage-overrange"); - if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) { + if (!fwnode_property_read_u32_array(pp, "adi,slew", tmparray, 3)) { pdata->dac[devnr].slew.enable = tmparray[0]; pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k; @@ -715,18 +805,11 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) devm_kfree(dev, pdata); return NULL; } -#else -static -struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) -{ - return NULL; -} -#endif static int ad5755_probe(struct spi_device *spi) { enum ad5755_type type = spi_get_device_id(spi)->driver_data; - const struct ad5755_platform_data *pdata = dev_get_platdata(&spi->dev); + const struct ad5755_platform_data *pdata; struct iio_dev *indio_dev; struct ad5755_state *st; int ret; @@ -751,13 +834,10 @@ static int ad5755_probe(struct spi_device *spi) mutex_init(&st->lock); - if (spi->dev.of_node) - pdata = ad5755_parse_dt(&spi->dev); - else - pdata = spi->dev.platform_data; + pdata = ad5755_parse_fw(&spi->dev); if (!pdata) { - dev_warn(&spi->dev, "no platform data? using default\n"); + dev_warn(&spi->dev, "no firmware provided parameters? 
using default\n"); pdata = &ad5755_default_pdata; } diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c index 0572ef518101..98771e37a7b5 100644 --- a/drivers/iio/dac/ad5758.c +++ b/drivers/iio/dac/ad5758.c @@ -10,9 +10,8 @@ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/property.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/spi/spi.h> #include <linux/gpio/consumer.h> diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c index b0d220c3a126..43189af2fb1f 100644 --- a/drivers/iio/dac/ad5766.c +++ b/drivers/iio/dac/ad5766.c @@ -426,14 +426,6 @@ static ssize_t ad5766_write_ext(struct iio_dev *indio_dev, .shared = _shared, \ } -#define IIO_ENUM_AVAILABLE_SHARED(_name, _shared, _e) \ -{ \ - .name = (_name "_available"), \ - .shared = _shared, \ - .read = iio_enum_available_read, \ - .private = (uintptr_t)(_e), \ -} - static const struct iio_chan_spec_ext_info ad5766_ext_info[] = { _AD5766_CHAN_EXT_INFO("dither_enable", AD5766_DITHER_ENABLE, @@ -443,9 +435,8 @@ static const struct iio_chan_spec_ext_info ad5766_ext_info[] = { _AD5766_CHAN_EXT_INFO("dither_source", AD5766_DITHER_SOURCE, IIO_SEPARATE), IIO_ENUM("dither_scale", IIO_SEPARATE, &ad5766_dither_scale_enum), - IIO_ENUM_AVAILABLE_SHARED("dither_scale", - IIO_SEPARATE, - &ad5766_dither_scale_enum), + IIO_ENUM_AVAILABLE("dither_scale", IIO_SEPARATE, + &ad5766_dither_scale_enum), {} }; diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c index a0923b76e8b6..7b4579d73d18 100644 --- a/drivers/iio/dac/ad5791.c +++ b/drivers/iio/dac/ad5791.c @@ -285,7 +285,7 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = { }, IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5791_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &ad5791_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5791_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c new file mode 100644 index 000000000000..59a38ca4c3c7 --- /dev/null +++ b/drivers/iio/dac/ad7293.c @@ -0,0 +1,934 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AD7293 driver + * + * Copyright 2021 Analog Devices Inc. 
+ */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/iio/iio.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <asm/unaligned.h> + +#define AD7293_R1B BIT(16) +#define AD7293_R2B BIT(17) +#define AD7293_PAGE_ADDR_MSK GENMASK(15, 8) +#define AD7293_PAGE(x) FIELD_PREP(AD7293_PAGE_ADDR_MSK, x) + +/* AD7293 Register Map Common */ +#define AD7293_REG_NO_OP (AD7293_R1B | AD7293_PAGE(0x0) | 0x0) +#define AD7293_REG_PAGE_SELECT (AD7293_R1B | AD7293_PAGE(0x0) | 0x1) +#define AD7293_REG_CONV_CMD (AD7293_R2B | AD7293_PAGE(0x0) | 0x2) +#define AD7293_REG_RESULT (AD7293_R1B | AD7293_PAGE(0x0) | 0x3) +#define AD7293_REG_DAC_EN (AD7293_R1B | AD7293_PAGE(0x0) | 0x4) +#define AD7293_REG_DEVICE_ID (AD7293_R2B | AD7293_PAGE(0x0) | 0xC) +#define AD7293_REG_SOFT_RESET (AD7293_R2B | AD7293_PAGE(0x0) | 0xF) + +/* AD7293 Register Map Page 0x0 */ +#define AD7293_REG_VIN0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x10) +#define AD7293_REG_VIN1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x11) +#define AD7293_REG_VIN2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x12) +#define AD7293_REG_VIN3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x13) +#define AD7293_REG_TSENSE_INT (AD7293_R2B | AD7293_PAGE(0x0) | 0x20) +#define AD7293_REG_TSENSE_D0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x21) +#define AD7293_REG_TSENSE_D1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x22) +#define AD7293_REG_ISENSE_0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x28) +#define AD7293_REG_ISENSE_1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x29) +#define AD7293_REG_ISENSE_2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x2A) +#define AD7293_REG_ISENSE_3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x2B) +#define AD7293_REG_UNI_VOUT0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x30) +#define AD7293_REG_UNI_VOUT1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x31) +#define AD7293_REG_UNI_VOUT2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x32) +#define AD7293_REG_UNI_VOUT3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x33) +#define AD7293_REG_BI_VOUT0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x34) +#define AD7293_REG_BI_VOUT1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x35) +#define AD7293_REG_BI_VOUT2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x36) +#define AD7293_REG_BI_VOUT3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x37) + +/* AD7293 Register Map Page 0x2 */ +#define AD7293_REG_DIGITAL_OUT_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x11) +#define AD7293_REG_DIGITAL_INOUT_FUNC (AD7293_R2B | AD7293_PAGE(0x2) | 0x12) +#define AD7293_REG_DIGITAL_FUNC_POL (AD7293_R2B | AD7293_PAGE(0x2) | 0x13) +#define AD7293_REG_GENERAL (AD7293_R2B | AD7293_PAGE(0x2) | 0x14) +#define AD7293_REG_VINX_RANGE0 (AD7293_R2B | AD7293_PAGE(0x2) | 0x15) +#define AD7293_REG_VINX_RANGE1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x16) +#define AD7293_REG_VINX_DIFF_SE (AD7293_R2B | AD7293_PAGE(0x2) | 0x17) +#define AD7293_REG_VINX_FILTER (AD7293_R2B | AD7293_PAGE(0x2) | 0x18) +#define AD7293_REG_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x19) +#define AD7293_REG_CONV_DELAY (AD7293_R2B | AD7293_PAGE(0x2) | 0x1A) +#define AD7293_REG_TSENSE_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1B) +#define AD7293_REG_ISENSE_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1C) +#define AD7293_REG_ISENSE_GAIN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1D) +#define AD7293_REG_DAC_SNOOZE_O (AD7293_R2B | AD7293_PAGE(0x2) | 0x1F) +#define AD7293_REG_DAC_SNOOZE_1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x20) +#define AD7293_REG_RSX_MON_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x23) +#define AD7293_REG_INTEGR_CL (AD7293_R2B | 
AD7293_PAGE(0x2) | 0x28) +#define AD7293_REG_PA_ON_CTRL (AD7293_R2B | AD7293_PAGE(0x2) | 0x29) +#define AD7293_REG_RAMP_TIME_0 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2A) +#define AD7293_REG_RAMP_TIME_1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2B) +#define AD7293_REG_RAMP_TIME_2 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2C) +#define AD7293_REG_RAMP_TIME_3 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2D) +#define AD7293_REG_CL_FR_IT (AD7293_R2B | AD7293_PAGE(0x2) | 0x2E) +#define AD7293_REG_INTX_AVSS_AVDD (AD7293_R2B | AD7293_PAGE(0x2) | 0x2F) + +/* AD7293 Register Map Page 0x3 */ +#define AD7293_REG_VINX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x10) +#define AD7293_REG_ISENSEX_TSENSEX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x11) +#define AD7293_REG_RSX_MON_BI_VOUTX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x12) + +/* AD7293 Register Map Page 0xE */ +#define AD7293_REG_VIN0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x10) +#define AD7293_REG_VIN1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x11) +#define AD7293_REG_VIN2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x12) +#define AD7293_REG_VIN3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x13) +#define AD7293_REG_TSENSE_INT_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x20) +#define AD7293_REG_TSENSE_D0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x21) +#define AD7293_REG_TSENSE_D1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x22) +#define AD7293_REG_ISENSE0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x28) +#define AD7293_REG_ISENSE1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x29) +#define AD7293_REG_ISENSE2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x2A) +#define AD7293_REG_ISENSE3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x2B) +#define AD7293_REG_UNI_VOUT0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x30) +#define AD7293_REG_UNI_VOUT1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x31) +#define AD7293_REG_UNI_VOUT2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x32) +#define AD7293_REG_UNI_VOUT3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x33) +#define AD7293_REG_BI_VOUT0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x34) +#define AD7293_REG_BI_VOUT1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x35) +#define AD7293_REG_BI_VOUT2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x36) +#define AD7293_REG_BI_VOUT3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x37) + +/* AD7293 Miscellaneous Definitions */ +#define AD7293_READ BIT(7) +#define AD7293_TRANSF_LEN_MSK GENMASK(17, 16) + +#define AD7293_REG_ADDR_MSK GENMASK(7, 0) +#define AD7293_REG_VOUT_OFFSET_MSK GENMASK(5, 4) +#define AD7293_REG_DATA_RAW_MSK GENMASK(15, 4) +#define AD7293_REG_VINX_RANGE_GET_CH_MSK(x, ch) (((x) >> (ch)) & 0x1) +#define AD7293_REG_VINX_RANGE_SET_CH_MSK(x, ch) (((x) & 0x1) << (ch)) +#define AD7293_CHIP_ID 0x18 + +enum ad7293_ch_type { + AD7293_ADC_VINX, + AD7293_ADC_TSENSE, + AD7293_ADC_ISENSE, + AD7293_DAC, +}; + +enum ad7293_max_offset { + AD7293_TSENSE_MIN_OFFSET_CH = 4, + AD7293_ISENSE_MIN_OFFSET_CH = 7, + AD7293_VOUT_MIN_OFFSET_CH = 11, + AD7293_VOUT_MAX_OFFSET_CH = 18, +}; + +static const int dac_offset_table[] = {0, 1, 2}; + +static const int isense_gain_table[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + +static const int adc_range_table[] = {0, 1, 2, 3}; + +struct ad7293_state { + struct spi_device *spi; + /* Protect against concurrent accesses to the device, page selection and data content */ + struct mutex lock; + struct gpio_desc *gpio_reset; + struct regulator *reg_avdd; + struct regulator *reg_vdrive; + u8 page_select; + u8 data[3] ____cacheline_aligned; +}; + +static int ad7293_page_select(struct ad7293_state *st, unsigned int reg) +{ + int ret; + + if (st->page_select 
!= FIELD_GET(AD7293_PAGE_ADDR_MSK, reg)) { + st->data[0] = FIELD_GET(AD7293_REG_ADDR_MSK, AD7293_REG_PAGE_SELECT); + st->data[1] = FIELD_GET(AD7293_PAGE_ADDR_MSK, reg); + + ret = spi_write(st->spi, &st->data[0], 2); + if (ret) + return ret; + + st->page_select = FIELD_GET(AD7293_PAGE_ADDR_MSK, reg); + } + + return 0; +} + +static int __ad7293_spi_read(struct ad7293_state *st, unsigned int reg, + u16 *val) +{ + int ret; + unsigned int length; + struct spi_transfer t = {0}; + + length = FIELD_GET(AD7293_TRANSF_LEN_MSK, reg); + + ret = ad7293_page_select(st, reg); + if (ret) + return ret; + + st->data[0] = AD7293_READ | FIELD_GET(AD7293_REG_ADDR_MSK, reg); + st->data[1] = 0x0; + st->data[2] = 0x0; + + t.tx_buf = &st->data[0]; + t.rx_buf = &st->data[0]; + t.len = length + 1; + + ret = spi_sync_transfer(st->spi, &t, 1); + if (ret) + return ret; + + if (length == 1) + *val = st->data[1]; + else + *val = get_unaligned_be16(&st->data[1]); + + return 0; +} + +static int ad7293_spi_read(struct ad7293_state *st, unsigned int reg, + u16 *val) +{ + int ret; + + mutex_lock(&st->lock); + ret = __ad7293_spi_read(st, reg, val); + mutex_unlock(&st->lock); + + return ret; +} + +static int __ad7293_spi_write(struct ad7293_state *st, unsigned int reg, + u16 val) +{ + int ret; + unsigned int length; + + length = FIELD_GET(AD7293_TRANSF_LEN_MSK, reg); + + ret = ad7293_page_select(st, reg); + if (ret) + return ret; + + st->data[0] = FIELD_GET(AD7293_REG_ADDR_MSK, reg); + + if (length == 1) + st->data[1] = val; + else + put_unaligned_be16(val, &st->data[1]); + + return spi_write(st->spi, &st->data[0], length + 1); +} + +static int ad7293_spi_write(struct ad7293_state *st, unsigned int reg, + u16 val) +{ + int ret; + + mutex_lock(&st->lock); + ret = __ad7293_spi_write(st, reg, val); + mutex_unlock(&st->lock); + + return ret; +} + +static int __ad7293_spi_update_bits(struct ad7293_state *st, unsigned int reg, + u16 mask, u16 val) +{ + int ret; + u16 data, temp; + + ret = __ad7293_spi_read(st, reg, &data); + if (ret) + return ret; + + temp = (data & ~mask) | (val & mask); + + return __ad7293_spi_write(st, reg, temp); +} + +static int ad7293_spi_update_bits(struct ad7293_state *st, unsigned int reg, + u16 mask, u16 val) +{ + int ret; + + mutex_lock(&st->lock); + ret = __ad7293_spi_update_bits(st, reg, mask, val); + mutex_unlock(&st->lock); + + return ret; +} + +static int ad7293_adc_get_scale(struct ad7293_state *st, unsigned int ch, + u16 *range) +{ + int ret; + u16 data; + + mutex_lock(&st->lock); + + ret = __ad7293_spi_read(st, AD7293_REG_VINX_RANGE1, &data); + if (ret) + goto exit; + + *range = AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch); + + ret = __ad7293_spi_read(st, AD7293_REG_VINX_RANGE0, &data); + if (ret) + goto exit; + + *range |= AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch) << 1; + +exit: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad7293_adc_set_scale(struct ad7293_state *st, unsigned int ch, + u16 range) +{ + int ret; + unsigned int ch_msk = BIT(ch); + + mutex_lock(&st->lock); + ret = __ad7293_spi_update_bits(st, AD7293_REG_VINX_RANGE1, ch_msk, + AD7293_REG_VINX_RANGE_SET_CH_MSK(range, ch)); + if (ret) + goto exit; + + ret = __ad7293_spi_update_bits(st, AD7293_REG_VINX_RANGE0, ch_msk, + AD7293_REG_VINX_RANGE_SET_CH_MSK((range >> 1), ch)); + +exit: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad7293_get_offset(struct ad7293_state *st, unsigned int ch, + u16 *offset) +{ + if (ch < AD7293_TSENSE_MIN_OFFSET_CH) + return ad7293_spi_read(st, AD7293_REG_VIN0_OFFSET + ch, offset); + 
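/* Offset registers are addressed through one flat channel index packed back to back: 0-3 VINx, 4-6 TSENSE, 7-10 ISENSEx, 11-18 VOUTx. */ +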
else if (ch < AD7293_ISENSE_MIN_OFFSET_CH) + return ad7293_spi_read(st, AD7293_REG_TSENSE_INT_OFFSET + (ch - 4), offset); + else if (ch < AD7293_VOUT_MIN_OFFSET_CH) + return ad7293_spi_read(st, AD7293_REG_ISENSE0_OFFSET + (ch - 7), offset); + else if (ch <= AD7293_VOUT_MAX_OFFSET_CH) + return ad7293_spi_read(st, AD7293_REG_UNI_VOUT0_OFFSET + (ch - 11), offset); + + return -EINVAL; +} + +static int ad7293_set_offset(struct ad7293_state *st, unsigned int ch, + u16 offset) +{ + if (ch < AD7293_TSENSE_MIN_OFFSET_CH) + return ad7293_spi_write(st, AD7293_REG_VIN0_OFFSET + ch, + offset); + else if (ch < AD7293_ISENSE_MIN_OFFSET_CH) + return ad7293_spi_write(st, + AD7293_REG_TSENSE_INT_OFFSET + + (ch - AD7293_TSENSE_MIN_OFFSET_CH), + offset); + else if (ch < AD7293_VOUT_MIN_OFFSET_CH) + return ad7293_spi_write(st, + AD7293_REG_ISENSE0_OFFSET + + (ch - AD7293_ISENSE_MIN_OFFSET_CH), + offset); + else if (ch <= AD7293_VOUT_MAX_OFFSET_CH) + return ad7293_spi_update_bits(st, + AD7293_REG_UNI_VOUT0_OFFSET + + (ch - AD7293_VOUT_MIN_OFFSET_CH), + AD7293_REG_VOUT_OFFSET_MSK, + FIELD_PREP(AD7293_REG_VOUT_OFFSET_MSK, offset)); + + return -EINVAL; +} + +static int ad7293_isense_set_scale(struct ad7293_state *st, unsigned int ch, + u16 gain) +{ + unsigned int ch_msk = (0xf << (4 * ch)); + + return ad7293_spi_update_bits(st, AD7293_REG_ISENSE_GAIN, ch_msk, + gain << (4 * ch)); +} + +static int ad7293_isense_get_scale(struct ad7293_state *st, unsigned int ch, + u16 *gain) +{ + int ret; + + ret = ad7293_spi_read(st, AD7293_REG_ISENSE_GAIN, gain); + if (ret) + return ret; + + *gain = (*gain >> (4 * ch)) & 0xf; + + return ret; +} + +static int ad7293_dac_write_raw(struct ad7293_state *st, unsigned int ch, + u16 raw) +{ + int ret; + + mutex_lock(&st->lock); + + ret = __ad7293_spi_update_bits(st, AD7293_REG_DAC_EN, BIT(ch), BIT(ch)); + if (ret) + goto exit; + + ret = __ad7293_spi_write(st, AD7293_REG_UNI_VOUT0 + ch, + FIELD_PREP(AD7293_REG_DATA_RAW_MSK, raw)); + +exit: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad7293_ch_read_raw(struct ad7293_state *st, enum ad7293_ch_type type, + unsigned int ch, u16 *raw) +{ + int ret; + unsigned int reg_wr, reg_rd, data_wr; + + switch (type) { + case AD7293_ADC_VINX: + reg_wr = AD7293_REG_VINX_SEQ; + reg_rd = AD7293_REG_VIN0 + ch; + data_wr = BIT(ch); + + break; + case AD7293_ADC_TSENSE: + reg_wr = AD7293_REG_ISENSEX_TSENSEX_SEQ; + reg_rd = AD7293_REG_TSENSE_INT + ch; + data_wr = BIT(ch); + + break; + case AD7293_ADC_ISENSE: + reg_wr = AD7293_REG_ISENSEX_TSENSEX_SEQ; + reg_rd = AD7293_REG_ISENSE_0 + ch; + data_wr = BIT(ch) << 8; + + break; + case AD7293_DAC: + reg_rd = AD7293_REG_UNI_VOUT0 + ch; + + break; + default: + return -EINVAL; + } + + mutex_lock(&st->lock); + + if (type != AD7293_DAC) { + if (type == AD7293_ADC_TSENSE) { + ret = __ad7293_spi_write(st, AD7293_REG_TSENSE_BG_EN, + BIT(ch)); + if (ret) + goto exit; + + usleep_range(9000, 9900); + } else if (type == AD7293_ADC_ISENSE) { + ret = __ad7293_spi_write(st, AD7293_REG_ISENSE_BG_EN, + BIT(ch)); + if (ret) + goto exit; + + usleep_range(2000, 7000); + } + + ret = __ad7293_spi_write(st, reg_wr, data_wr); + if (ret) + goto exit; + + ret = __ad7293_spi_write(st, AD7293_REG_CONV_CMD, 0x82); + if (ret) + goto exit; + } + + ret = __ad7293_spi_read(st, reg_rd, raw); + + *raw = FIELD_GET(AD7293_REG_DATA_RAW_MSK, *raw); + +exit: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad7293_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct 
ad7293_state *st = iio_priv(indio_dev); + int ret; + u16 data; + + switch (info) { + case IIO_CHAN_INFO_RAW: + switch (chan->type) { + case IIO_VOLTAGE: + if (chan->output) + ret = ad7293_ch_read_raw(st, AD7293_DAC, + chan->channel, &data); + else + ret = ad7293_ch_read_raw(st, AD7293_ADC_VINX, + chan->channel, &data); + + break; + case IIO_CURRENT: + ret = ad7293_ch_read_raw(st, AD7293_ADC_ISENSE, + chan->channel, &data); + + break; + case IIO_TEMP: + ret = ad7293_ch_read_raw(st, AD7293_ADC_TSENSE, + chan->channel, &data); + + break; + default: + return -EINVAL; + } + + if (ret) + return ret; + + *val = data; + + return IIO_VAL_INT; + case IIO_CHAN_INFO_OFFSET: + switch (chan->type) { + case IIO_VOLTAGE: + if (chan->output) { + ret = ad7293_get_offset(st, + chan->channel + AD7293_VOUT_MIN_OFFSET_CH, + &data); + + data = FIELD_GET(AD7293_REG_VOUT_OFFSET_MSK, data); + } else { + ret = ad7293_get_offset(st, chan->channel, &data); + } + + break; + case IIO_CURRENT: + ret = ad7293_get_offset(st, + chan->channel + AD7293_ISENSE_MIN_OFFSET_CH, + &data); + + break; + case IIO_TEMP: + ret = ad7293_get_offset(st, + chan->channel + AD7293_TSENSE_MIN_OFFSET_CH, + &data); + + break; + default: + return -EINVAL; + } + if (ret) + return ret; + + *val = data; + + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_VOLTAGE: + ret = ad7293_adc_get_scale(st, chan->channel, &data); + if (ret) + return ret; + + *val = data; + + return IIO_VAL_INT; + case IIO_CURRENT: + ret = ad7293_isense_get_scale(st, chan->channel, &data); + if (ret) + return ret; + + *val = data; + + return IIO_VAL_INT; + case IIO_TEMP: + *val = 1; + *val2 = 8; + + return IIO_VAL_FRACTIONAL; + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int ad7293_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long info) +{ + struct ad7293_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + switch (chan->type) { + case IIO_VOLTAGE: + if (!chan->output) + return -EINVAL; + + return ad7293_dac_write_raw(st, chan->channel, val); + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OFFSET: + switch (chan->type) { + case IIO_VOLTAGE: + if (chan->output) + return ad7293_set_offset(st, + chan->channel + + AD7293_VOUT_MIN_OFFSET_CH, + val); + else + return ad7293_set_offset(st, chan->channel, val); + case IIO_CURRENT: + return ad7293_set_offset(st, + chan->channel + + AD7293_ISENSE_MIN_OFFSET_CH, + val); + case IIO_TEMP: + return ad7293_set_offset(st, + chan->channel + + AD7293_TSENSE_MIN_OFFSET_CH, + val); + default: + return -EINVAL; + } + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_VOLTAGE: + return ad7293_adc_set_scale(st, chan->channel, val); + case IIO_CURRENT: + return ad7293_isense_set_scale(st, chan->channel, val); + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int ad7293_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int write_val, + unsigned int *read_val) +{ + struct ad7293_state *st = iio_priv(indio_dev); + int ret; + + if (read_val) { + u16 temp; + ret = ad7293_spi_read(st, reg, &temp); + *read_val = temp; + } else { + ret = ad7293_spi_write(st, reg, (u16)write_val); + } + + return ret; +} + +static int ad7293_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long info) +{ + switch (info) { + case IIO_CHAN_INFO_OFFSET: + *vals = dac_offset_table; + *type = IIO_VAL_INT; + 
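/* dac_offset_table lists the three selectable DAC output offset codes. */ +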
*length = ARRAY_SIZE(dac_offset_table); + + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SCALE: + *type = IIO_VAL_INT; + + switch (chan->type) { + case IIO_VOLTAGE: + *vals = adc_range_table; + *length = ARRAY_SIZE(adc_range_table); + return IIO_AVAIL_LIST; + case IIO_CURRENT: + *vals = isense_gain_table; + *length = ARRAY_SIZE(isense_gain_table); + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +#define AD7293_CHAN_ADC(_channel) { \ + .type = IIO_VOLTAGE, \ + .output = 0, \ + .indexed = 1, \ + .channel = _channel, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) \ +} + +#define AD7293_CHAN_DAC(_channel) { \ + .type = IIO_VOLTAGE, \ + .output = 1, \ + .indexed = 1, \ + .channel = _channel, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_OFFSET) \ +} + +#define AD7293_CHAN_ISENSE(_channel) { \ + .type = IIO_CURRENT, \ + .output = 0, \ + .indexed = 1, \ + .channel = _channel, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_OFFSET) | \ + BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) \ +} + +#define AD7293_CHAN_TEMP(_channel) { \ + .type = IIO_TEMP, \ + .output = 0, \ + .indexed = 1, \ + .channel = _channel, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \ +} + +static const struct iio_chan_spec ad7293_channels[] = { + AD7293_CHAN_ADC(0), + AD7293_CHAN_ADC(1), + AD7293_CHAN_ADC(2), + AD7293_CHAN_ADC(3), + AD7293_CHAN_ISENSE(0), + AD7293_CHAN_ISENSE(1), + AD7293_CHAN_ISENSE(2), + AD7293_CHAN_ISENSE(3), + AD7293_CHAN_TEMP(0), + AD7293_CHAN_TEMP(1), + AD7293_CHAN_TEMP(2), + AD7293_CHAN_DAC(0), + AD7293_CHAN_DAC(1), + AD7293_CHAN_DAC(2), + AD7293_CHAN_DAC(3), + AD7293_CHAN_DAC(4), + AD7293_CHAN_DAC(5), + AD7293_CHAN_DAC(6), + AD7293_CHAN_DAC(7) +}; + +static int ad7293_soft_reset(struct ad7293_state *st) +{ + int ret; + + ret = __ad7293_spi_write(st, AD7293_REG_SOFT_RESET, 0x7293); + if (ret) + return ret; + + return __ad7293_spi_write(st, AD7293_REG_SOFT_RESET, 0x0000); +} + +static int ad7293_reset(struct ad7293_state *st) +{ + if (st->gpio_reset) { + gpiod_set_value(st->gpio_reset, 0); + usleep_range(100, 1000); + gpiod_set_value(st->gpio_reset, 1); + usleep_range(100, 1000); + + return 0; + } + + /* Perform a software reset */ + return ad7293_soft_reset(st); +} + +static int ad7293_properties_parse(struct ad7293_state *st) +{ + struct spi_device *spi = st->spi; + + st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(st->gpio_reset)) + return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_reset), + "failed to get the reset GPIO\n"); + + st->reg_avdd = devm_regulator_get(&spi->dev, "avdd"); + if (IS_ERR(st->reg_avdd)) + return dev_err_probe(&spi->dev, PTR_ERR(st->reg_avdd), + "failed to get the AVDD voltage\n"); + + st->reg_vdrive = devm_regulator_get(&spi->dev, "vdrive"); + if (IS_ERR(st->reg_vdrive)) + return dev_err_probe(&spi->dev, PTR_ERR(st->reg_vdrive), + "failed to get the VDRIVE voltage\n"); + + return 0; +} + +static void ad7293_reg_disable(void *data) +{ + regulator_disable(data); +} + +static int ad7293_init(struct ad7293_state *st) +{ + int ret; + u16 chip_id; + struct spi_device *spi = st->spi; + + ret = 
ad7293_properties_parse(st); + if (ret) + return ret; + + ret = ad7293_reset(st); + if (ret) + return ret; + + ret = regulator_enable(st->reg_avdd); + if (ret) { + dev_err(&spi->dev, + "Failed to enable specified AVDD Voltage!\n"); + return ret; + } + + ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable, + st->reg_avdd); + if (ret) + return ret; + + ret = regulator_enable(st->reg_vdrive); + if (ret) { + dev_err(&spi->dev, + "Failed to enable specified VDRIVE Voltage!\n"); + return ret; + } + + ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable, + st->reg_vdrive); + if (ret) + return ret; + + ret = regulator_get_voltage(st->reg_avdd); + if (ret < 0) { + dev_err(&spi->dev, "Failed to read avdd regulator: %d\n", ret); + return ret; + } + + if (ret > 5500000 || ret < 4500000) + return -EINVAL; + + ret = regulator_get_voltage(st->reg_vdrive); + if (ret < 0) { + dev_err(&spi->dev, + "Failed to read vdrive regulator: %d\n", ret); + return ret; + } + if (ret > 5500000 || ret < 1700000) + return -EINVAL; + + /* Check Chip ID */ + ret = __ad7293_spi_read(st, AD7293_REG_DEVICE_ID, &chip_id); + if (ret) + return ret; + + if (chip_id != AD7293_CHIP_ID) { + dev_err(&spi->dev, "Invalid Chip ID.\n"); + return -EINVAL; + } + + return 0; +} + +static const struct iio_info ad7293_info = { + .read_raw = ad7293_read_raw, + .write_raw = ad7293_write_raw, + .read_avail = &ad7293_read_avail, + .debugfs_reg_access = &ad7293_reg_access, +}; + +static int ad7293_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct ad7293_state *st; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + + indio_dev->info = &ad7293_info; + indio_dev->name = "ad7293"; + indio_dev->channels = ad7293_channels; + indio_dev->num_channels = ARRAY_SIZE(ad7293_channels); + + st->spi = spi; + st->page_select = 0; + + mutex_init(&st->lock); + + ret = ad7293_init(st); + if (ret) + return ret; + + return devm_iio_device_register(&spi->dev, indio_dev); +} + +static const struct spi_device_id ad7293_id[] = { + { "ad7293", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, ad7293_id); + +static const struct of_device_id ad7293_of_match[] = { + { .compatible = "adi,ad7293" }, + {} +}; +MODULE_DEVICE_TABLE(of, ad7293_of_match); + +static struct spi_driver ad7293_driver = { + .driver = { + .name = "ad7293", + .of_match_table = ad7293_of_match, + }, + .probe = ad7293_probe, + .id_table = ad7293_id, +}; +module_spi_driver(ad7293_driver); + +MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>"); +MODULE_DESCRIPTION("Analog Devices AD7293"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c index 5d1819448102..83ce9489259c 100644 --- a/drivers/iio/dac/dpot-dac.c +++ b/drivers/iio/dac/dpot-dac.c @@ -30,7 +30,7 @@ #include <linux/iio/consumer.h> #include <linux/iio/iio.h> #include <linux/module.h> -#include <linux/of.h> +#include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c index 5502e4f62f0d..60467c6f2c6e 100644 --- a/drivers/iio/dac/lpc18xx_dac.c +++ b/drivers/iio/dac/lpc18xx_dac.c @@ -16,9 +16,8 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/mutex.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> diff --git
a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c index 7da4710a6408..fce640b7f1c8 100644 --- a/drivers/iio/dac/max5821.c +++ b/drivers/iio/dac/max5821.c @@ -137,7 +137,7 @@ static const struct iio_chan_spec_ext_info max5821_ext_info[] = { .shared = IIO_SEPARATE, }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &max5821_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &max5821_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &max5821_powerdown_mode_enum), { }, }; diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 34b14aafb630..842bad57cb88 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -221,8 +221,8 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = { }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp472x_powerdown_mode_enum[MCP4725]), - IIO_ENUM_AVAILABLE("powerdown_mode", - &mcp472x_powerdown_mode_enum[MCP4725]), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, + &mcp472x_powerdown_mode_enum[MCP4725]), { }, }; @@ -235,8 +235,8 @@ static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = { }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp472x_powerdown_mode_enum[MCP4726]), - IIO_ENUM_AVAILABLE("powerdown_mode", - &mcp472x_powerdown_mode_enum[MCP4726]), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, + &mcp472x_powerdown_mode_enum[MCP4726]), { }, }; @@ -386,7 +386,7 @@ static int mcp4725_probe(struct i2c_client *client, i2c_set_clientdata(client, indio_dev); data->client = client; if (dev_fwnode(&client->dev)) - data->id = (enum chip_id)device_get_match_data(&client->dev); + data->id = (uintptr_t)device_get_match_data(&client->dev); else data->id = id->driver_data; pdata = dev_get_platdata(&client->dev); diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c index dd2e306824e7..cd71cc4553a7 100644 --- a/drivers/iio/dac/stm32-dac.c +++ b/drivers/iio/dac/stm32-dac.c @@ -246,7 +246,7 @@ static const struct iio_chan_spec_ext_info stm32_dac_ext_info[] = { .shared = IIO_SEPARATE, }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &stm32_dac_powerdown_mode_en), - IIO_ENUM_AVAILABLE("powerdown_mode", &stm32_dac_powerdown_mode_en), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &stm32_dac_powerdown_mode_en), {}, }; diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c index 5c14bfb16521..6beda2193683 100644 --- a/drivers/iio/dac/ti-dac082s085.c +++ b/drivers/iio/dac/ti-dac082s085.c @@ -160,7 +160,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = { .shared = IIO_SHARED_BY_TYPE, }, IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode), - IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode), { }, }; diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c index 546a4cf6c5ef..4a3b8d875518 100644 --- a/drivers/iio/dac/ti-dac5571.c +++ b/drivers/iio/dac/ti-dac5571.c @@ -212,7 +212,7 @@ static const struct iio_chan_spec_ext_info dac5571_ext_info[] = { .shared = IIO_SEPARATE, }, IIO_ENUM("powerdown_mode", IIO_SEPARATE, &dac5571_powerdown_mode), - IIO_ENUM_AVAILABLE("powerdown_mode", &dac5571_powerdown_mode), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &dac5571_powerdown_mode), {}, }; diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c index 09218c3029f0..99f275829ec2 100644 --- a/drivers/iio/dac/ti-dac7311.c +++ b/drivers/iio/dac/ti-dac7311.c @@ -146,7 
+146,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = { .shared = IIO_SHARED_BY_TYPE, }, IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode), - IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode), + IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode), { }, }; diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c index 59aa60d4ca37..d81c2b2dad82 100644 --- a/drivers/iio/dummy/iio_simple_dummy_buffer.c +++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c @@ -45,7 +45,6 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; - int len = 0; u16 *data; data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); @@ -79,7 +78,6 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p) indio_dev->masklength, j); /* random access read from the 'device' */ data[i] = fakedata[j]; - len += 2; } } diff --git a/drivers/iio/filter/Kconfig b/drivers/iio/filter/Kconfig new file mode 100644 index 000000000000..3ae35817ad82 --- /dev/null +++ b/drivers/iio/filter/Kconfig @@ -0,0 +1,18 @@ +# +# Filter drivers +# +# When adding new entries keep the list in alphabetical order + +menu "Filters" + +config ADMV8818 + tristate "Analog Devices ADMV8818 High-Pass and Low-Pass Filter" + depends on SPI && COMMON_CLK && 64BIT + help + Say yes here to build support for Analog Devices ADMV8818 + 2 GHz to 18 GHz, Digitally Tunable, High-Pass and Low-Pass Filter. + + To compile this driver as a module, choose M here: the + module will be called admv8818. + +endmenu diff --git a/drivers/iio/filter/Makefile b/drivers/iio/filter/Makefile new file mode 100644 index 000000000000..55e228c0dd20 --- /dev/null +++ b/drivers/iio/filter/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for industrial I/O Filter drivers +# + +# When adding new entries keep the list in alphabetical order +obj-$(CONFIG_ADMV8818) += admv8818.o diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c new file mode 100644 index 000000000000..68de45fe21b4 --- /dev/null +++ b/drivers/iio/filter/admv8818.c @@ -0,0 +1,665 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ADMV8818 driver + * + * Copyright 2021 Analog Devices Inc.
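+ * + * 2 GHz to 18 GHz digitally tunable, high-pass and low-pass filter.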
+ */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/regmap.h> +#include <linux/spi/spi.h> +#include <linux/units.h> + +/* ADMV8818 Register Map */ +#define ADMV8818_REG_SPI_CONFIG_A 0x0 +#define ADMV8818_REG_SPI_CONFIG_B 0x1 +#define ADMV8818_REG_CHIPTYPE 0x3 +#define ADMV8818_REG_PRODUCT_ID_L 0x4 +#define ADMV8818_REG_PRODUCT_ID_H 0x5 +#define ADMV8818_REG_FAST_LATCH_POINTER 0x10 +#define ADMV8818_REG_FAST_LATCH_STOP 0x11 +#define ADMV8818_REG_FAST_LATCH_START 0x12 +#define ADMV8818_REG_FAST_LATCH_DIRECTION 0x13 +#define ADMV8818_REG_FAST_LATCH_STATE 0x14 +#define ADMV8818_REG_WR0_SW 0x20 +#define ADMV8818_REG_WR0_FILTER 0x21 +#define ADMV8818_REG_WR1_SW 0x22 +#define ADMV8818_REG_WR1_FILTER 0x23 +#define ADMV8818_REG_WR2_SW 0x24 +#define ADMV8818_REG_WR2_FILTER 0x25 +#define ADMV8818_REG_WR3_SW 0x26 +#define ADMV8818_REG_WR3_FILTER 0x27 +#define ADMV8818_REG_WR4_SW 0x28 +#define ADMV8818_REG_WR4_FILTER 0x29 +#define ADMV8818_REG_LUT0_SW 0x100 +#define ADMV8818_REG_LUT0_FILTER 0x101 +#define ADMV8818_REG_LUT127_SW 0x1FE +#define ADMV8818_REG_LUT127_FILTER 0x1FF + +/* ADMV8818_REG_SPI_CONFIG_A Map */ +#define ADMV8818_SOFTRESET_N_MSK BIT(7) +#define ADMV8818_LSB_FIRST_N_MSK BIT(6) +#define ADMV8818_ENDIAN_N_MSK BIT(5) +#define ADMV8818_SDOACTIVE_N_MSK BIT(4) +#define ADMV8818_SDOACTIVE_MSK BIT(3) +#define ADMV8818_ENDIAN_MSK BIT(2) +#define ADMV8818_LSBFIRST_MSK BIT(1) +#define ADMV8818_SOFTRESET_MSK BIT(0) + +/* ADMV8818_REG_SPI_CONFIG_B Map */ +#define ADMV8818_SINGLE_INSTRUCTION_MSK BIT(7) +#define ADMV8818_CSB_STALL_MSK BIT(6) +#define ADMV8818_MASTER_SLAVE_RB_MSK BIT(5) +#define ADMV8818_MASTER_SLAVE_TRANSFER_MSK BIT(0) + +/* ADMV8818_REG_WR0_SW Map */ +#define ADMV8818_SW_IN_SET_WR0_MSK BIT(7) +#define ADMV8818_SW_OUT_SET_WR0_MSK BIT(6) +#define ADMV8818_SW_IN_WR0_MSK GENMASK(5, 3) +#define ADMV8818_SW_OUT_WR0_MSK GENMASK(2, 0) + +/* ADMV8818_REG_WR0_FILTER Map */ +#define ADMV8818_HPF_WR0_MSK GENMASK(7, 4) +#define ADMV8818_LPF_WR0_MSK GENMASK(3, 0) + +enum { + ADMV8818_BW_FREQ, + ADMV8818_CENTER_FREQ +}; + +enum { + ADMV8818_AUTO_MODE, + ADMV8818_MANUAL_MODE, +}; + +struct admv8818_state { + struct spi_device *spi; + struct regmap *regmap; + struct clk *clkin; + struct notifier_block nb; + /* Protect against concurrent accesses to the device and data content */ + struct mutex lock; + unsigned int filter_mode; + u64 cf_hz; +}; + +static const unsigned long long freq_range_hpf[4][2] = { + {1750000000ULL, 3550000000ULL}, + {3400000000ULL, 7250000000ULL}, + {6600000000ULL, 12000000000ULL}, + {12500000000ULL, 19900000000ULL} +}; + +static const unsigned long long freq_range_lpf[4][2] = { + {2050000000ULL, 3850000000ULL}, + {3350000000ULL, 7250000000ULL}, + {7000000000ULL, 13000000000ULL}, + {12550000000ULL, 18500000000ULL} +}; + +static const struct regmap_config admv8818_regmap_config = { + .reg_bits = 16, + .val_bits = 8, + .read_flag_mask = 0x80, + .max_register = 0x1FF, +}; + +static const char * const admv8818_modes[] = { + [0] = "auto", + [1] = "manual" +}; + +static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq) +{ + unsigned int hpf_step = 0, hpf_band = 0, i, j; + u64 freq_step; + int ret; + + if (freq < freq_range_hpf[0][0]) + goto hpf_write; + + if (freq > freq_range_hpf[3][1]) { + hpf_step = 15; + hpf_band = 4; + + goto hpf_write; + } + + for (i = 0; i < 4; i++) { +
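/* each band spans its frequency range in 16 steps of (range / 15) */ +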
freq_step = div_u64((freq_range_hpf[i][1] - + freq_range_hpf[i][0]), 15); + + if (freq > freq_range_hpf[i][0] && + (freq < freq_range_hpf[i][1] + freq_step)) { + hpf_band = i + 1; + + for (j = 1; j <= 16; j++) { + if (freq < (freq_range_hpf[i][0] + (freq_step * j))) { + hpf_step = j - 1; + break; + } + } + break; + } + } + + /* Close HPF frequency gap between 12 and 12.5 GHz */ + if (freq >= 12000 * HZ_PER_MHZ && freq <= 12500 * HZ_PER_MHZ) { + hpf_band = 3; + hpf_step = 15; + } + +hpf_write: + ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW, + ADMV8818_SW_IN_SET_WR0_MSK | + ADMV8818_SW_IN_WR0_MSK, + FIELD_PREP(ADMV8818_SW_IN_SET_WR0_MSK, 1) | + FIELD_PREP(ADMV8818_SW_IN_WR0_MSK, hpf_band)); + if (ret) + return ret; + + return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER, + ADMV8818_HPF_WR0_MSK, + FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_step)); +} + +static int admv8818_hpf_select(struct admv8818_state *st, u64 freq) +{ + int ret; + + mutex_lock(&st->lock); + ret = __admv8818_hpf_select(st, freq); + mutex_unlock(&st->lock); + + return ret; +} + +static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq) +{ + unsigned int lpf_step = 0, lpf_band = 0, i, j; + u64 freq_step; + int ret; + + if (freq > freq_range_lpf[3][1]) + goto lpf_write; + + if (freq < freq_range_lpf[0][0]) { + lpf_band = 1; + + goto lpf_write; + } + + for (i = 0; i < 4; i++) { + if (freq > freq_range_lpf[i][0] && freq < freq_range_lpf[i][1]) { + lpf_band = i + 1; + freq_step = div_u64((freq_range_lpf[i][1] - freq_range_lpf[i][0]), 15); + + for (j = 0; j <= 15; j++) { + if (freq < (freq_range_lpf[i][0] + (freq_step * j))) { + lpf_step = j; + break; + } + } + break; + } + } + +lpf_write: + ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW, + ADMV8818_SW_OUT_SET_WR0_MSK | + ADMV8818_SW_OUT_WR0_MSK, + FIELD_PREP(ADMV8818_SW_OUT_SET_WR0_MSK, 1) | + FIELD_PREP(ADMV8818_SW_OUT_WR0_MSK, lpf_band)); + if (ret) + return ret; + + return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER, + ADMV8818_LPF_WR0_MSK, + FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_step)); +} + +static int admv8818_lpf_select(struct admv8818_state *st, u64 freq) +{ + int ret; + + mutex_lock(&st->lock); + ret = __admv8818_lpf_select(st, freq); + mutex_unlock(&st->lock); + + return ret; +} + +static int admv8818_rfin_band_select(struct admv8818_state *st) +{ + int ret; + + st->cf_hz = clk_get_rate(st->clkin); + + mutex_lock(&st->lock); + + ret = __admv8818_hpf_select(st, st->cf_hz); + if (ret) + goto exit; + + ret = __admv8818_lpf_select(st, st->cf_hz); +exit: + mutex_unlock(&st->lock); + return ret; +} + +static int __admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq) +{ + unsigned int data, hpf_band, hpf_state; + int ret; + + ret = regmap_read(st->regmap, ADMV8818_REG_WR0_SW, &data); + if (ret) + return ret; + + hpf_band = FIELD_GET(ADMV8818_SW_IN_WR0_MSK, data); + if (!hpf_band) { + *hpf_freq = 0; + return ret; + } + + ret = regmap_read(st->regmap, ADMV8818_REG_WR0_FILTER, &data); + if (ret) + return ret; + + hpf_state = FIELD_GET(ADMV8818_HPF_WR0_MSK, data); + + *hpf_freq = div_u64(freq_range_hpf[hpf_band - 1][1] - freq_range_hpf[hpf_band - 1][0], 15); + *hpf_freq = freq_range_hpf[hpf_band - 1][0] + (*hpf_freq * hpf_state); + + return ret; +} + +static int admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq) +{ + int ret; + + mutex_lock(&st->lock); + ret = __admv8818_read_hpf_freq(st, hpf_freq); + mutex_unlock(&st->lock); + + return ret; +} + +static int __admv8818_read_lpf_freq(struct admv8818_state 
*st, u64 *lpf_freq) +{ + unsigned int data, lpf_band, lpf_state; + int ret; + + ret = regmap_read(st->regmap, ADMV8818_REG_WR0_SW, &data); + if (ret) + return ret; + + lpf_band = FIELD_GET(ADMV8818_SW_OUT_WR0_MSK, data); + if (!lpf_band) { + *lpf_freq = 0; + return ret; + } + + ret = regmap_read(st->regmap, ADMV8818_REG_WR0_FILTER, &data); + if (ret) + return ret; + + lpf_state = FIELD_GET(ADMV8818_LPF_WR0_MSK, data); + + *lpf_freq = div_u64(freq_range_lpf[lpf_band - 1][1] - freq_range_lpf[lpf_band - 1][0], 15); + *lpf_freq = freq_range_lpf[lpf_band - 1][0] + (*lpf_freq * lpf_state); + + return ret; +} + +static int admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq) +{ + int ret; + + mutex_lock(&st->lock); + ret = __admv8818_read_lpf_freq(st, lpf_freq); + mutex_unlock(&st->lock); + + return ret; +} + +static int admv8818_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long info) +{ + struct admv8818_state *st = iio_priv(indio_dev); + + u64 freq = ((u64)val2 << 32 | (u32)val); + + switch (info) { + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + return admv8818_lpf_select(st, freq); + case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY: + return admv8818_hpf_select(st, freq); + default: + return -EINVAL; + } +} + +static int admv8818_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct admv8818_state *st = iio_priv(indio_dev); + int ret; + u64 freq; + + switch (info) { + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + ret = admv8818_read_lpf_freq(st, &freq); + if (ret) + return ret; + + *val = (u32)freq; + *val2 = (u32)(freq >> 32); + + return IIO_VAL_INT_64; + case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY: + ret = admv8818_read_hpf_freq(st, &freq); + if (ret) + return ret; + + *val = (u32)freq; + *val2 = (u32)(freq >> 32); + + return IIO_VAL_INT_64; + default: + return -EINVAL; + } +} + +static int admv8818_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int write_val, + unsigned int *read_val) +{ + struct admv8818_state *st = iio_priv(indio_dev); + + if (read_val) + return regmap_read(st->regmap, reg, read_val); + else + return regmap_write(st->regmap, reg, write_val); +} + +static int admv8818_get_mode(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + struct admv8818_state *st = iio_priv(indio_dev); + + return st->filter_mode; +} + +static int admv8818_set_mode(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + unsigned int mode) +{ + struct admv8818_state *st = iio_priv(indio_dev); + int ret = 0; + + if (!st->clkin) { + if (mode == ADMV8818_MANUAL_MODE) + return 0; + + return -EINVAL; + } + + switch (mode) { + case ADMV8818_AUTO_MODE: + if (!st->filter_mode) + return 0; + + ret = clk_prepare_enable(st->clkin); + if (ret) + return ret; + + ret = clk_notifier_register(st->clkin, &st->nb); + if (ret) { + clk_disable_unprepare(st->clkin); + + return ret; + } + + break; + case ADMV8818_MANUAL_MODE: + if (st->filter_mode) + return 0; + + clk_disable_unprepare(st->clkin); + + ret = clk_notifier_unregister(st->clkin, &st->nb); + if (ret) + return ret; + + break; + default: + return -EINVAL; + } + + st->filter_mode = mode; + + return ret; +} + +static const struct iio_info admv8818_info = { + .write_raw = admv8818_write_raw, + .read_raw = admv8818_read_raw, + .debugfs_reg_access = &admv8818_reg_access, +}; + +static const struct iio_enum admv8818_mode_enum = { + .items = admv8818_modes, + .num_items = 
ARRAY_SIZE(admv8818_modes), + .get = admv8818_get_mode, + .set = admv8818_set_mode, +}; + +static const struct iio_chan_spec_ext_info admv8818_ext_info[] = { + IIO_ENUM("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum), + IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum), + { }, +}; + +#define ADMV8818_CHAN(_channel) { \ + .type = IIO_ALTVOLTAGE, \ + .output = 1, \ + .indexed = 1, \ + .channel = _channel, \ + .info_mask_separate = \ + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \ + BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) \ +} + +#define ADMV8818_CHAN_BW_CF(_channel, _admv8818_ext_info) { \ + .type = IIO_ALTVOLTAGE, \ + .output = 1, \ + .indexed = 1, \ + .channel = _channel, \ + .ext_info = _admv8818_ext_info, \ +} + +static const struct iio_chan_spec admv8818_channels[] = { + ADMV8818_CHAN(0), + ADMV8818_CHAN_BW_CF(0, admv8818_ext_info), +}; + +static int admv8818_freq_change(struct notifier_block *nb, unsigned long action, void *data) +{ + struct admv8818_state *st = container_of(nb, struct admv8818_state, nb); + + if (action == POST_RATE_CHANGE) + return notifier_from_errno(admv8818_rfin_band_select(st)); + + return NOTIFY_OK; +} + +static void admv8818_clk_notifier_unreg(void *data) +{ + struct admv8818_state *st = data; + + if (st->filter_mode == 0) + clk_notifier_unregister(st->clkin, &st->nb); +} + +static void admv8818_clk_disable(void *data) +{ + struct admv8818_state *st = data; + + if (st->filter_mode == 0) + clk_disable_unprepare(st->clkin); +} + +static int admv8818_init(struct admv8818_state *st) +{ + int ret; + struct spi_device *spi = st->spi; + unsigned int chip_id; + + ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SOFTRESET_N_MSK | + ADMV8818_SOFTRESET_MSK, + FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) | + FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1)); + if (ret) { + dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n"); + return ret; + } + + ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SDOACTIVE_N_MSK | + ADMV8818_SDOACTIVE_MSK, + FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) | + FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1)); + if (ret) { + dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n"); + return ret; + } + + ret = regmap_read(st->regmap, ADMV8818_REG_CHIPTYPE, &chip_id); + if (ret) { + dev_err(&spi->dev, "ADMV8818 Chip ID read failed.\n"); + return ret; + } + + if (chip_id != 0x1) { + dev_err(&spi->dev, "ADMV8818 Invalid Chip ID.\n"); + return -EINVAL; + } + + ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_B, + ADMV8818_SINGLE_INSTRUCTION_MSK, + FIELD_PREP(ADMV8818_SINGLE_INSTRUCTION_MSK, 1)); + if (ret) { + dev_err(&spi->dev, "ADMV8818 Single Instruction failed.\n"); + return ret; + } + + if (st->clkin) + return admv8818_rfin_band_select(st); + else + return 0; +} + +static int admv8818_clk_setup(struct admv8818_state *st) +{ + struct spi_device *spi = st->spi; + int ret; + + st->clkin = devm_clk_get_optional(&spi->dev, "rf_in"); + if (IS_ERR(st->clkin)) + return dev_err_probe(&spi->dev, PTR_ERR(st->clkin), + "failed to get the input clock\n"); + else if (!st->clkin) + return 0; + + ret = clk_prepare_enable(st->clkin); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&spi->dev, admv8818_clk_disable, st); + if (ret) + return ret; + + st->nb.notifier_call = admv8818_freq_change; + ret = clk_notifier_register(st->clkin, &st->nb); + if (ret < 0) + return ret; + + return devm_add_action_or_reset(&spi->dev, admv8818_clk_notifier_unreg, st); +} + +static int 
admv8818_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct regmap *regmap; + struct admv8818_state *st; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + regmap = devm_regmap_init_spi(spi, &admv8818_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + st = iio_priv(indio_dev); + st->regmap = regmap; + + indio_dev->info = &admv8818_info; + indio_dev->name = "admv8818"; + indio_dev->channels = admv8818_channels; + indio_dev->num_channels = ARRAY_SIZE(admv8818_channels); + + st->spi = spi; + + ret = admv8818_clk_setup(st); + if (ret) + return ret; + + mutex_init(&st->lock); + + ret = admv8818_init(st); + if (ret) + return ret; + + return devm_iio_device_register(&spi->dev, indio_dev); +} + +static const struct spi_device_id admv8818_id[] = { + { "admv8818", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, admv8818_id); + +static const struct of_device_id admv8818_of_match[] = { + { .compatible = "adi,admv8818" }, + {} +}; +MODULE_DEVICE_TABLE(of, admv8818_of_match); + +static struct spi_driver admv8818_driver = { + .driver = { + .name = "admv8818", + .of_match_table = admv8818_of_match, + }, + .probe = admv8818_probe, + .id_table = admv8818_id, +}; +module_spi_driver(admv8818_driver); + +MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>"); +MODULE_DESCRIPTION("Analog Devices ADMV8818"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig index 2c9e0559e8a4..b44036f843af 100644 --- a/drivers/iio/frequency/Kconfig +++ b/drivers/iio/frequency/Kconfig @@ -50,6 +50,16 @@ config ADF4371 To compile this driver as a module, choose M here: the module will be called adf4371. +config ADMV1013 + tristate "Analog Devices ADMV1013 Microwave Upconverter" + depends on SPI && COMMON_CLK + help + Say yes here to build support for Analog Devices ADMV1013 + 24 GHz to 44 GHz, Wideband, Microwave Upconverter. + + To compile this driver as a module, choose M here: the + module will be called admv1013. + config ADRF6780 tristate "Analog Devices ADRF6780 Microwave Upconverter" depends on SPI diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile index ae3136c79202..ae6899856c99 100644 --- a/drivers/iio/frequency/Makefile +++ b/drivers/iio/frequency/Makefile @@ -7,4 +7,5 @@ obj-$(CONFIG_AD9523) += ad9523.o obj-$(CONFIG_ADF4350) += adf4350.o obj-$(CONFIG_ADF4371) += adf4371.o +obj-$(CONFIG_ADMV1013) += admv1013.o obj-$(CONFIG_ADRF6780) += adrf6780.o diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c new file mode 100644 index 000000000000..6cdeb50143af --- /dev/null +++ b/drivers/iio/frequency/admv1013.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ADMV1013 driver + * + * Copyright 2021 Analog Devices Inc.
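+ * + * 24 GHz to 44 GHz wideband microwave upconverter.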
+ */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/notifier.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> +#include <linux/units.h> + +#include <asm/unaligned.h> + +/* ADMV1013 Register Map */ +#define ADMV1013_REG_SPI_CONTROL 0x00 +#define ADMV1013_REG_ALARM 0x01 +#define ADMV1013_REG_ALARM_MASKS 0x02 +#define ADMV1013_REG_ENABLE 0x03 +#define ADMV1013_REG_LO_AMP_I 0x05 +#define ADMV1013_REG_LO_AMP_Q 0x06 +#define ADMV1013_REG_OFFSET_ADJUST_I 0x07 +#define ADMV1013_REG_OFFSET_ADJUST_Q 0x08 +#define ADMV1013_REG_QUAD 0x09 +#define ADMV1013_REG_VVA_TEMP_COMP 0x0A + +/* ADMV1013_REG_SPI_CONTROL Map */ +#define ADMV1013_PARITY_EN_MSK BIT(15) +#define ADMV1013_SPI_SOFT_RESET_MSK BIT(14) +#define ADMV1013_CHIP_ID_MSK GENMASK(11, 4) +#define ADMV1013_CHIP_ID 0xA +#define ADMV1013_REVISION_ID_MSK GENMASK(3, 0) + +/* ADMV1013_REG_ALARM Map */ +#define ADMV1013_PARITY_ERROR_MSK BIT(15) +#define ADMV1013_TOO_FEW_ERRORS_MSK BIT(14) +#define ADMV1013_TOO_MANY_ERRORS_MSK BIT(13) +#define ADMV1013_ADDRESS_RANGE_ERROR_MSK BIT(12) + +/* ADMV1013_REG_ENABLE Map */ +#define ADMV1013_VGA_PD_MSK BIT(15) +#define ADMV1013_MIXER_PD_MSK BIT(14) +#define ADMV1013_QUAD_PD_MSK GENMASK(13, 11) +#define ADMV1013_BG_PD_MSK BIT(10) +#define ADMV1013_MIXER_IF_EN_MSK BIT(7) +#define ADMV1013_DET_EN_MSK BIT(5) + +/* ADMV1013_REG_LO_AMP Map */ +#define ADMV1013_LOAMP_PH_ADJ_FINE_MSK GENMASK(13, 7) +#define ADMV1013_MIXER_VGATE_MSK GENMASK(6, 0) + +/* ADMV1013_REG_OFFSET_ADJUST Map */ +#define ADMV1013_MIXER_OFF_ADJ_P_MSK GENMASK(15, 9) +#define ADMV1013_MIXER_OFF_ADJ_N_MSK GENMASK(8, 2) + +/* ADMV1013_REG_QUAD Map */ +#define ADMV1013_QUAD_SE_MODE_MSK GENMASK(9, 6) +#define ADMV1013_QUAD_FILTERS_MSK GENMASK(3, 0) + +/* ADMV1013_REG_VVA_TEMP_COMP Map */ +#define ADMV1013_VVA_TEMP_COMP_MSK GENMASK(15, 0) + +/* ADMV1013 Miscellaneous Defines */ +#define ADMV1013_READ BIT(7) +#define ADMV1013_REG_ADDR_READ_MSK GENMASK(6, 1) +#define ADMV1013_REG_ADDR_WRITE_MSK GENMASK(22, 17) +#define ADMV1013_REG_DATA_MSK GENMASK(16, 1) + +enum { + ADMV1013_IQ_MODE, + ADMV1013_IF_MODE +}; + +enum { + ADMV1013_RFMOD_I_CALIBPHASE, + ADMV1013_RFMOD_Q_CALIBPHASE, +}; + +enum { + ADMV1013_SE_MODE_POS = 6, + ADMV1013_SE_MODE_NEG = 9, + ADMV1013_SE_MODE_DIFF = 12 +}; + +struct admv1013_state { + struct spi_device *spi; + struct clk *clkin; + /* Protect against concurrent accesses to the device and to data */ + struct mutex lock; + struct regulator *reg; + struct notifier_block nb; + unsigned int input_mode; + unsigned int quad_se_mode; + bool det_en; + u8 data[3] ____cacheline_aligned; +}; + +static int __admv1013_spi_read(struct admv1013_state *st, unsigned int reg, + unsigned int *val) +{ + int ret; + struct spi_transfer t = {0}; + + st->data[0] = ADMV1013_READ | FIELD_PREP(ADMV1013_REG_ADDR_READ_MSK, reg); + st->data[1] = 0x0; + st->data[2] = 0x0; + + t.rx_buf = &st->data[0]; + t.tx_buf = &st->data[0]; + t.len = 3; + + ret = spi_sync_transfer(st->spi, &t, 1); + if (ret) + return ret; + + *val = FIELD_GET(ADMV1013_REG_DATA_MSK, get_unaligned_be24(&st->data[0])); + + return ret; +} + +static int admv1013_spi_read(struct admv1013_state *st, unsigned int reg, + unsigned int *val) +{ + int ret; + + mutex_lock(&st->lock); + ret = __admv1013_spi_read(st, reg, val); + mutex_unlock(&st->lock); + + return ret; +} + +static int 
__admv1013_spi_write(struct admv1013_state *st, + unsigned int reg, + unsigned int val) +{ + put_unaligned_be24(FIELD_PREP(ADMV1013_REG_DATA_MSK, val) | + FIELD_PREP(ADMV1013_REG_ADDR_WRITE_MSK, reg), &st->data[0]); + + return spi_write(st->spi, &st->data[0], 3); +} + +static int admv1013_spi_write(struct admv1013_state *st, unsigned int reg, + unsigned int val) +{ + int ret; + + mutex_lock(&st->lock); + ret = __admv1013_spi_write(st, reg, val); + mutex_unlock(&st->lock); + + return ret; +} + +static int __admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg, + unsigned int mask, unsigned int val) +{ + int ret; + unsigned int data, temp; + + ret = __admv1013_spi_read(st, reg, &data); + if (ret) + return ret; + + temp = (data & ~mask) | (val & mask); + + return __admv1013_spi_write(st, reg, temp); +} + +static int admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg, + unsigned int mask, unsigned int val) +{ + int ret; + + mutex_lock(&st->lock); + ret = __admv1013_spi_update_bits(st, reg, mask, val); + mutex_unlock(&st->lock); + + return ret; +} + +static int admv1013_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct admv1013_state *st = iio_priv(indio_dev); + unsigned int data, addr; + int ret; + + switch (info) { + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->channel2) { + case IIO_MOD_I: + addr = ADMV1013_REG_OFFSET_ADJUST_I; + break; + case IIO_MOD_Q: + addr = ADMV1013_REG_OFFSET_ADJUST_Q; + break; + default: + return -EINVAL; + } + + ret = admv1013_spi_read(st, addr, &data); + if (ret) + return ret; + + if (!chan->channel) + *val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_P_MSK, data); + else + *val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_N_MSK, data); + + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int admv1013_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long info) +{ + struct admv1013_state *st = iio_priv(indio_dev); + unsigned int addr, data, msk; + + switch (info) { + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->channel2) { + case IIO_MOD_I: + addr = ADMV1013_REG_OFFSET_ADJUST_I; + break; + case IIO_MOD_Q: + addr = ADMV1013_REG_OFFSET_ADJUST_Q; + break; + default: + return -EINVAL; + } + + if (!chan->channel) { + msk = ADMV1013_MIXER_OFF_ADJ_P_MSK; + data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_P_MSK, val); + } else { + msk = ADMV1013_MIXER_OFF_ADJ_N_MSK; + data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_N_MSK, val); + } + + return admv1013_spi_update_bits(st, addr, msk, data); + default: + return -EINVAL; + } +} + +static ssize_t admv1013_read(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + char *buf) +{ + struct admv1013_state *st = iio_priv(indio_dev); + unsigned int data, addr; + int ret; + + switch ((u32)private) { + case ADMV1013_RFMOD_I_CALIBPHASE: + addr = ADMV1013_REG_LO_AMP_I; + break; + case ADMV1013_RFMOD_Q_CALIBPHASE: + addr = ADMV1013_REG_LO_AMP_Q; + break; + default: + return -EINVAL; + } + + ret = admv1013_spi_read(st, addr, &data); + if (ret) + return ret; + + data = FIELD_GET(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data); + + return sysfs_emit(buf, "%u\n", data); +} + +static ssize_t admv1013_write(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len) +{ + struct admv1013_state *st = iio_priv(indio_dev); + unsigned int data; + int ret; + + ret = kstrtou32(buf, 10, &data); + if (ret) + return ret; + + data =
FIELD_PREP(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data); + + switch ((u32)private) { + case ADMV1013_RFMOD_I_CALIBPHASE: + ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I, + ADMV1013_LOAMP_PH_ADJ_FINE_MSK, + data); + if (ret) + return ret; + break; + case ADMV1013_RFMOD_Q_CALIBPHASE: + ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_Q, + ADMV1013_LOAMP_PH_ADJ_FINE_MSK, + data); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + + return ret ? ret : len; +} + +static int admv1013_update_quad_filters(struct admv1013_state *st) +{ + unsigned int filt_raw; + u64 rate = clk_get_rate(st->clkin); + + if (rate >= (5400 * HZ_PER_MHZ) && rate <= (7000 * HZ_PER_MHZ)) + filt_raw = 15; + else if (rate >= (5400 * HZ_PER_MHZ) && rate <= (8000 * HZ_PER_MHZ)) + filt_raw = 10; + else if (rate >= (6600 * HZ_PER_MHZ) && rate <= (9200 * HZ_PER_MHZ)) + filt_raw = 5; + else + filt_raw = 0; + + return __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD, + ADMV1013_QUAD_FILTERS_MSK, + FIELD_PREP(ADMV1013_QUAD_FILTERS_MSK, filt_raw)); +} + +static int admv1013_update_mixer_vgate(struct admv1013_state *st) +{ + unsigned int vcm, mixer_vgate; + + vcm = regulator_get_voltage(st->reg); + + if (vcm >= 0 && vcm < 1800000) + mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100; + else if (vcm > 1800000 && vcm < 2600000) + mixer_vgate = (2375 * vcm / 1000000 + 125) / 100; + else + return -EINVAL; + + return __admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I, + ADMV1013_MIXER_VGATE_MSK, + FIELD_PREP(ADMV1013_MIXER_VGATE_MSK, mixer_vgate)); +} + +static int admv1013_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int write_val, + unsigned int *read_val) +{ + struct admv1013_state *st = iio_priv(indio_dev); + + if (read_val) + return admv1013_spi_read(st, reg, read_val); + else + return admv1013_spi_write(st, reg, write_val); +} + +static const struct iio_info admv1013_info = { + .read_raw = admv1013_read_raw, + .write_raw = admv1013_write_raw, + .debugfs_reg_access = &admv1013_reg_access, +}; + +static int admv1013_freq_change(struct notifier_block *nb, unsigned long action, void *data) +{ + struct admv1013_state *st = container_of(nb, struct admv1013_state, nb); + int ret; + + if (action == POST_RATE_CHANGE) { + mutex_lock(&st->lock); + ret = notifier_from_errno(admv1013_update_quad_filters(st)); + mutex_unlock(&st->lock); + return ret; + } + + return NOTIFY_OK; +} + +#define _ADMV1013_EXT_INFO(_name, _shared, _ident) { \ + .name = _name, \ + .read = admv1013_read, \ + .write = admv1013_write, \ + .private = _ident, \ + .shared = _shared, \ +} + +static const struct iio_chan_spec_ext_info admv1013_ext_info[] = { + _ADMV1013_EXT_INFO("i_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_I_CALIBPHASE), + _ADMV1013_EXT_INFO("q_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_Q_CALIBPHASE), + { }, +}; + +#define ADMV1013_CHAN_PHASE(_channel, _channel2, _admv1013_ext_info) { \ + .type = IIO_ALTVOLTAGE, \ + .output = 0, \ + .indexed = 1, \ + .channel2 = _channel2, \ + .channel = _channel, \ + .differential = 1, \ + .ext_info = _admv1013_ext_info, \ + } + +#define ADMV1013_CHAN_CALIB(_channel, rf_comp) { \ + .type = IIO_ALTVOLTAGE, \ + .output = 0, \ + .indexed = 1, \ + .channel = _channel, \ + .channel2 = IIO_MOD_##rf_comp, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBBIAS), \ + } + +static const struct iio_chan_spec admv1013_channels[] = { + ADMV1013_CHAN_PHASE(0, 1, admv1013_ext_info), + ADMV1013_CHAN_CALIB(0, I), + ADMV1013_CHAN_CALIB(0, Q), + ADMV1013_CHAN_CALIB(1, I), + ADMV1013_CHAN_CALIB(1, 
Q), +}; + +static int admv1013_init(struct admv1013_state *st) +{ + int ret; + unsigned int data; + struct spi_device *spi = st->spi; + + /* Perform a software reset */ + ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL, + ADMV1013_SPI_SOFT_RESET_MSK, + FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 1)); + if (ret) + return ret; + + ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL, + ADMV1013_SPI_SOFT_RESET_MSK, + FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 0)); + if (ret) + return ret; + + ret = __admv1013_spi_read(st, ADMV1013_REG_SPI_CONTROL, &data); + if (ret) + return ret; + + data = FIELD_GET(ADMV1013_CHIP_ID_MSK, data); + if (data != ADMV1013_CHIP_ID) { + dev_err(&spi->dev, "Invalid Chip ID.\n"); + return -EINVAL; + } + + ret = __admv1013_spi_write(st, ADMV1013_REG_VVA_TEMP_COMP, 0xE700); + if (ret) + return ret; + + data = FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, st->quad_se_mode); + + ret = __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD, + ADMV1013_QUAD_SE_MODE_MSK, data); + if (ret) + return ret; + + ret = admv1013_update_mixer_vgate(st); + if (ret) + return ret; + + ret = admv1013_update_quad_filters(st); + if (ret) + return ret; + + return __admv1013_spi_update_bits(st, ADMV1013_REG_ENABLE, + ADMV1013_DET_EN_MSK | + ADMV1013_MIXER_IF_EN_MSK, + st->det_en | + st->input_mode); +} + +static void admv1013_clk_disable(void *data) +{ + clk_disable_unprepare(data); +} + +static void admv1013_reg_disable(void *data) +{ + regulator_disable(data); +} + +static void admv1013_powerdown(void *data) +{ + unsigned int enable_reg, enable_reg_msk; + + /* Disable all components in the Enable Register */ + enable_reg_msk = ADMV1013_VGA_PD_MSK | + ADMV1013_MIXER_PD_MSK | + ADMV1013_QUAD_PD_MSK | + ADMV1013_BG_PD_MSK | + ADMV1013_MIXER_IF_EN_MSK | + ADMV1013_DET_EN_MSK; + + enable_reg = FIELD_PREP(ADMV1013_VGA_PD_MSK, 1) | + FIELD_PREP(ADMV1013_MIXER_PD_MSK, 1) | + FIELD_PREP(ADMV1013_QUAD_PD_MSK, 7) | + FIELD_PREP(ADMV1013_BG_PD_MSK, 1) | + FIELD_PREP(ADMV1013_MIXER_IF_EN_MSK, 0) | + FIELD_PREP(ADMV1013_DET_EN_MSK, 0); + + admv1013_spi_update_bits(data, ADMV1013_REG_ENABLE, enable_reg_msk, enable_reg); +} + +static int admv1013_properties_parse(struct admv1013_state *st) +{ + int ret; + const char *str; + struct spi_device *spi = st->spi; + + st->det_en = device_property_read_bool(&spi->dev, "adi,detector-enable"); + + ret = device_property_read_string(&spi->dev, "adi,input-mode", &str); + if (ret) + str = "iq"; + + if (!strcmp(str, "iq")) + st->input_mode = ADMV1013_IQ_MODE; + else if (!strcmp(str, "if")) + st->input_mode = ADMV1013_IF_MODE; + else + return -EINVAL; + + ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str); + if (ret) + str = "diff"; + + if (!strcmp(str, "diff")) + st->quad_se_mode = ADMV1013_SE_MODE_DIFF; + else if (!strcmp(str, "se-pos")) + st->quad_se_mode = ADMV1013_SE_MODE_POS; + else if (!strcmp(str, "se-neg")) + st->quad_se_mode = ADMV1013_SE_MODE_NEG; + else + return -EINVAL; + + st->reg = devm_regulator_get(&spi->dev, "vcm"); + if (IS_ERR(st->reg)) + return dev_err_probe(&spi->dev, PTR_ERR(st->reg), + "failed to get the common-mode voltage\n"); + + st->clkin = devm_clk_get(&spi->dev, "lo_in"); + if (IS_ERR(st->clkin)) + return dev_err_probe(&spi->dev, PTR_ERR(st->clkin), + "failed to get the LO input clock\n"); + + return 0; +} + +static int admv1013_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct admv1013_state *st; + int ret; + + indio_dev =
devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + + indio_dev->info = &admv1013_info; + indio_dev->name = "admv1013"; + indio_dev->channels = admv1013_channels; + indio_dev->num_channels = ARRAY_SIZE(admv1013_channels); + + st->spi = spi; + + ret = admv1013_properties_parse(st); + if (ret) + return ret; + + ret = regulator_enable(st->reg); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified Common-Mode Voltage!\n"); + return ret; + } + + ret = devm_add_action_or_reset(&spi->dev, admv1013_reg_disable, + st->reg); + if (ret) + return ret; + + ret = clk_prepare_enable(st->clkin); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&spi->dev, admv1013_clk_disable, st->clkin); + if (ret) + return ret; + + st->nb.notifier_call = admv1013_freq_change; + ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb); + if (ret) + return ret; + + mutex_init(&st->lock); + + ret = admv1013_init(st); + if (ret) { + dev_err(&spi->dev, "admv1013 init failed\n"); + return ret; + } + + ret = devm_add_action_or_reset(&spi->dev, admv1013_powerdown, st); + if (ret) + return ret; + + return devm_iio_device_register(&spi->dev, indio_dev); +} + +static const struct spi_device_id admv1013_id[] = { + { "admv1013", 0}, + {} +}; +MODULE_DEVICE_TABLE(spi, admv1013_id); + +static const struct of_device_id admv1013_of_match[] = { + { .compatible = "adi,admv1013" }, + {}, +}; +MODULE_DEVICE_TABLE(of, admv1013_of_match); + +static struct spi_driver admv1013_driver = { + .driver = { + .name = "admv1013", + .of_match_table = admv1013_of_match, + }, + .probe = admv1013_probe, + .id_table = admv1013_id, +}; +module_spi_driver(admv1013_driver); + +MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>"); +MODULE_DESCRIPTION("Analog Devices ADMV1013"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 97b82f9a8e45..273f16dcaff8 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -345,9 +345,6 @@ err: return IRQ_HANDLED; } -static const struct iio_trigger_ops afe4403_trigger_ops = { -}; - #define AFE4403_TIMING_PAIRS \ { AFE440X_LED2STC, 0x000050 }, \ { AFE440X_LED2ENDC, 0x0003e7 }, \ @@ -530,8 +527,6 @@ static int afe4403_probe(struct spi_device *spi) iio_trigger_set_drvdata(afe->trig, indio_dev); - afe->trig->ops = &afe4403_trigger_ops; - ret = iio_trigger_register(afe->trig); if (ret) { dev_err(afe->dev, "Unable to register IIO trigger\n"); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 7ef3f5e34de5..aa9311e1e655 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -347,9 +347,6 @@ err: return IRQ_HANDLED; } -static const struct iio_trigger_ops afe4404_trigger_ops = { -}; - /* Default timings from data-sheet */ #define AFE4404_TIMING_PAIRS \ { AFE440X_PRPCOUNT, 39999 }, \ @@ -537,8 +534,6 @@ static int afe4404_probe(struct i2c_client *client, iio_trigger_set_drvdata(afe->trig, indio_dev); - afe->trig->ops = &afe4404_trigger_ops; - ret = iio_trigger_register(afe->trig); if (ret) { dev_err(afe->dev, "Unable to register IIO trigger\n"); diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h index 61e318431de9..501e286702ef 100644 --- a/drivers/iio/iio_core.h +++ b/drivers/iio/iio_core.h @@ -16,7 +16,7 @@ struct iio_buffer; struct iio_chan_spec; struct iio_dev; -extern struct device_type iio_device_type; +extern const struct device_type iio_device_type; struct iio_dev_buffer_pair { struct
iio_dev *indio_dev; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c index 85b1934cec60..33d9afb1ba91 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c @@ -58,7 +58,7 @@ static int inv_icm42600_probe(struct i2c_client *client) match = device_get_match_data(&client->dev); if (!match) return -EINVAL; - chip = (enum inv_icm42600_chip)match; + chip = (uintptr_t)match; regmap = devm_regmap_init_i2c(client, &inv_icm42600_regmap_config); if (IS_ERR(regmap)) diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c index 323789697a08..e6305e5fa975 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c @@ -57,7 +57,7 @@ static int inv_icm42600_probe(struct spi_device *spi) match = device_get_match_data(&spi->dev); if (!match) return -EINVAL; - chip = (enum inv_icm42600_chip)match; + chip = (uintptr_t)match; regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config); if (IS_ERR(regmap)) diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c index 3ef17e3f50e2..fe03707ec2d3 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -110,7 +110,7 @@ static int inv_mpu_probe(struct i2c_client *client, match = device_get_match_data(&client->dev); if (match) { - chip_type = (enum inv_devices)match; + chip_type = (uintptr_t)match; name = client->name; } else if (id) { chip_type = (enum inv_devices) diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c index b056f3fe2561..6800356b25fb 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c @@ -45,7 +45,7 @@ static int inv_mpu_probe(struct spi_device *spi) chip_type = (enum inv_devices)spi_id->driver_data; name = spi_id->name; } else if ((match = device_get_match_data(&spi->dev))) { - chip_type = (enum inv_devices)match; + chip_type = (uintptr_t)match; name = dev_name(&spi->dev); } else { return -ENODEV; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index f2cbbc756459..727b4b6ac696 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -2244,7 +2244,9 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, return err; hub_settings = &hw->settings->shub_settings; - if (hub_settings->master_en.addr) { + if (hub_settings->master_en.addr && + (!dev_fwnode(dev) || + !device_property_read_bool(dev, "st,disable-sensor-hub"))) { err = st_lsm6dsx_shub_probe(hw, name); if (err < 0) return err; diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index e180728914c0..94eb9f6cf128 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -1727,8 +1727,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev) struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); const struct iio_chan_spec *channels; struct iio_buffer *buffer; - int unwind_idx; - int ret, i; + int ret, i, idx; size_t sz; channels = indio_dev->channels; @@ -1743,15 +1742,12 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev) if (!iio_dev_opaque->attached_buffers_cnt) return 0; - for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { - buffer = 
iio_dev_opaque->attached_buffers[i]; - ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i); - if (ret) { - unwind_idx = i - 1; + for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) { + buffer = iio_dev_opaque->attached_buffers[idx]; + ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx); + if (ret) goto error_unwind_sysfs_and_mask; - } } - unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1; sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler)); iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL); @@ -1767,9 +1763,9 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev) return 0; error_unwind_sysfs_and_mask: - for (; unwind_idx >= 0; unwind_idx--) { - buffer = iio_dev_opaque->attached_buffers[unwind_idx]; - __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, unwind_idx); + while (idx--) { + buffer = iio_dev_opaque->attached_buffers[idx]; + __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx); } return ret; } diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 463a63d5bf56..409c278a4c2c 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -702,6 +702,9 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type, } case IIO_VAL_CHAR: return sysfs_emit_at(buf, offset, "%c", (char)vals[0]); + case IIO_VAL_INT_64: + tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]); + return sysfs_emit_at(buf, offset, "%lld", tmp2); default: return 0; } @@ -1619,7 +1622,7 @@ static void iio_dev_release(struct device *device) kfree(iio_dev_opaque); } -struct device_type iio_device_type = { +const struct device_type iio_device_type = { .name = "iio_device", .release = iio_dev_release, }; @@ -1653,7 +1656,6 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) indio_dev->dev.type = &iio_device_type; indio_dev->dev.bus = &iio_bus_type; device_initialize(&indio_dev->dev); - iio_device_set_drvdata(indio_dev, (void *)indio_dev); mutex_init(&indio_dev->mlock); mutex_init(&iio_dev_opaque->info_exist_lock); INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 93990ff1dfe3..f504ed351b3e 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -162,6 +162,39 @@ static struct iio_trigger *iio_trigger_acquire_by_name(const char *name) return trig; } +static void iio_reenable_work_fn(struct work_struct *work) +{ + struct iio_trigger *trig = container_of(work, struct iio_trigger, + reenable_work); + + /* + * This 'might' occur after the trigger state is set to disabled - + * in that case the driver should skip reenabling. + */ + trig->ops->reenable(trig); +} + +/* + * In general, reenable callbacks may need to sleep and this path is + * not performance sensitive, so just queue up a work item + * to reenable the trigger for us. + * + * Races that can cause this: + * 1) A handler runs entirely in interrupt context, so the final decrement + * of the use counter still happens in this interrupt. + * 2) The trigger has been removed, but one last interrupt gets through. + * + * For (1) we must call reenable, but not in atomic context. + * For (2) it should be safe to call reenable, if drivers never blindly + * reenable after state is off.
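+ * + * Note that schedule_work() is safe to call from hard interrupt context, + * which is what allows the sleeping reenable callback to be deferred here.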
+ */ +static void iio_trigger_notify_done_atomic(struct iio_trigger *trig) +{ + if (atomic_dec_and_test(&trig->use_count) && trig->ops && + trig->ops->reenable) + schedule_work(&trig->reenable_work); +} + void iio_trigger_poll(struct iio_trigger *trig) { int i; @@ -173,7 +206,7 @@ void iio_trigger_poll(struct iio_trigger *trig) if (trig->subirqs[i].enabled) generic_handle_irq(trig->subirq_base + i); else - iio_trigger_notify_done(trig); + iio_trigger_notify_done_atomic(trig); } } } @@ -535,6 +568,7 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent, trig->dev.type = &iio_trig_type; trig->dev.bus = &iio_bus_type; device_initialize(&trig->dev); + INIT_WORK(&trig->reenable_work, iio_reenable_work_fn); mutex_init(&trig->pool_lock); trig->subirq_base = irq_alloc_descs(-1, 0, diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c index 3e7fb16ab1f6..50d34a98839c 100644 --- a/drivers/iio/light/cm3605.c +++ b/drivers/iio/light/cm3605.c @@ -10,6 +10,7 @@ */ #include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/events.h> @@ -18,7 +19,7 @@ #include <linux/init.h> #include <linux/leds.h> #include <linux/platform_device.h> -#include <linux/of.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> @@ -156,7 +157,6 @@ static int cm3605_probe(struct platform_device *pdev) struct cm3605 *cm3605; struct iio_dev *indio_dev; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; enum iio_chan_type ch_type; u32 rset; int irq; @@ -171,7 +171,7 @@ static int cm3605_probe(struct platform_device *pdev) cm3605->dev = dev; cm3605->dir = IIO_EV_DIR_FALLING; - ret = of_property_read_u32(np, "capella,aset-resistance-ohms", &rset); + ret = device_property_read_u32(dev, "capella,aset-resistance-ohms", &rset); if (ret) { dev_info(dev, "no RSET specified, assuming 100K\n"); rset = 100000; diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c index d1d9f2d319e4..b820041159f7 100644 --- a/drivers/iio/light/gp2ap020a00f.c +++ b/drivers/iio/light/gp2ap020a00f.c @@ -1467,9 +1467,6 @@ static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = { .predisable = &gp2ap020a00f_buffer_predisable, }; -static const struct iio_trigger_ops gp2ap020a00f_trigger_ops = { -}; - static int gp2ap020a00f_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1550,8 +1547,6 @@ static int gp2ap020a00f_probe(struct i2c_client *client, goto error_uninit_buffer; } - data->trig->ops = &gp2ap020a00f_trigger_ops; - init_irq_work(&data->work, gp2ap020a00f_iio_trigger_work); err = iio_trigger_register(data->trig); diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index b2983b1a9ed1..47d61ec2bb50 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * ltr501.c - Support for Lite-On LTR501 ambient light and proximity sensor + * Support for Lite-On LTR501 and similar ambient light and proximity sensors. 
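+ * Covers the LTR501, LTR559, LTR301 and LTR303 parts.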
* * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net> * @@ -98,6 +98,7 @@ enum { ltr501 = 0, ltr559, ltr301, + ltr303, }; struct ltr501_gain { @@ -165,6 +166,7 @@ struct ltr501_data { struct regmap_field *reg_ps_rate; struct regmap_field *reg_als_prst; struct regmap_field *reg_ps_prst; + uint32_t near_level; }; static const struct ltr501_samp_table ltr501_als_samp_table[] = { @@ -524,6 +526,25 @@ static int ltr501_write_intr_prst(struct ltr501_data *data, return -EINVAL; } +static ssize_t ltr501_read_near_level(struct iio_dev *indio_dev, + uintptr_t priv, + const struct iio_chan_spec *chan, + char *buf) +{ + struct ltr501_data *data = iio_priv(indio_dev); + + return sprintf(buf, "%u\n", data->near_level); +} + +static const struct iio_chan_spec_ext_info ltr501_ext_info[] = { + { + .name = "nearlevel", + .shared = IIO_SEPARATE, + .read = ltr501_read_near_level, + }, + { /* sentinel */ } +}; + static const struct iio_event_spec ltr501_als_event_spec[] = { { .type = IIO_EV_TYPE_THRESH, @@ -608,6 +629,7 @@ static const struct iio_chan_spec ltr501_channels[] = { }, .event_spec = ltr501_pxs_event_spec, .num_event_specs = ARRAY_SIZE(ltr501_pxs_event_spec), + .ext_info = ltr501_ext_info, }, IIO_CHAN_SOFT_TIMESTAMP(3), }; @@ -1231,6 +1253,18 @@ static const struct ltr501_chip_info ltr501_chip_info_tbl[] = { .channels = ltr301_channels, .no_channels = ARRAY_SIZE(ltr301_channels), }, + [ltr303] = { + .partid = 0x0A, + .als_gain = ltr559_als_gain_tbl, + .als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl), + .als_mode_active = BIT(0), + .als_gain_mask = BIT(2) | BIT(3) | BIT(4), + .als_gain_shift = 2, + .info = <r301_info, + .info_no_irq = <r301_info_no_irq, + .channels = ltr301_channels, + .no_channels = ARRAY_SIZE(ltr301_channels), + }, }; static int ltr501_write_contr(struct ltr501_data *data, u8 als_val, u8 ps_val) @@ -1518,6 +1552,10 @@ static int ltr501_probe(struct i2c_client *client, if ((partid >> 4) != data->chip_info->partid) return -ENODEV; + if (device_property_read_u32(&client->dev, "proximity-near-level", + &data->near_level)) + data->near_level = 0; + indio_dev->info = data->chip_info->info; indio_dev->channels = data->chip_info->channels; indio_dev->num_channels = data->chip_info->no_channels; @@ -1605,6 +1643,7 @@ static const struct i2c_device_id ltr501_id[] = { { "ltr501", ltr501}, { "ltr559", ltr559}, { "ltr301", ltr301}, + { "ltr303", ltr303}, { } }; MODULE_DEVICE_TABLE(i2c, ltr501_id); @@ -1613,6 +1652,7 @@ static const struct of_device_id ltr501_of_match[] = { { .compatible = "liteon,ltr501", }, { .compatible = "liteon,ltr559", }, { .compatible = "liteon,ltr301", }, + { .compatible = "liteon,ltr303", }, {} }; MODULE_DEVICE_TABLE(of, ltr501_of_match); diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 6e82dc54a417..55879a20ae52 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -929,7 +929,7 @@ static int ak8975_probe(struct i2c_client *client, /* id will be NULL when enumerated via ACPI */ match = device_get_match_data(&client->dev); if (match) { - chipset = (enum asahi_compass_chipset)(match); + chipset = (uintptr_t)match; name = dev_name(&client->dev); } else if (id) { chipset = (enum asahi_compass_chipset)(id->driver_data); diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c index f08726bf5ec3..5a730d9bdbb0 100644 --- a/drivers/iio/magnetometer/hmc5843_core.c +++ b/drivers/iio/magnetometer/hmc5843_core.c @@ -246,7 +246,7 @@ static const struct iio_enum 
hmc5843_meas_conf_enum = { static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = { IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5843_meas_conf_enum), - IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum), + IIO_ENUM_AVAILABLE("meas_conf", IIO_SHARED_BY_TYPE, &hmc5843_meas_conf_enum), IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix), { } }; @@ -260,7 +260,7 @@ static const struct iio_enum hmc5983_meas_conf_enum = { static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = { IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5983_meas_conf_enum), - IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum), + IIO_ENUM_AVAILABLE("meas_conf", IIO_SHARED_BY_TYPE, &hmc5983_meas_conf_enum), IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix), { } }; diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index c96415a1aead..17c62d806218 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c @@ -291,7 +291,8 @@ static int mag3110_read_raw(struct iio_dev *indio_dev, if (ret < 0) goto release; *val = sign_extend32( - be16_to_cpu(buffer[chan->scan_index]), 15); + be16_to_cpu(buffer[chan->scan_index]), + chan->scan_type.realbits - 1); ret = IIO_VAL_INT; break; case IIO_TEMP: /* in 1 C / LSB */ @@ -306,7 +307,8 @@ static int mag3110_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); if (ret < 0) goto release; - *val = sign_extend32(ret, 7); + *val = sign_extend32(ret, + chan->scan_type.realbits - 1); ret = IIO_VAL_INT; break; default: diff --git a/drivers/iio/potentiometer/mcp41010.c b/drivers/iio/potentiometer/mcp41010.c index 79ccac6d4be0..30a4594d4e11 100644 --- a/drivers/iio/potentiometer/mcp41010.c +++ b/drivers/iio/potentiometer/mcp41010.c @@ -21,9 +21,9 @@ #include <linux/iio/iio.h> #include <linux/iio/types.h> #include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/mutex.h> -#include <linux/of.h> -#include <linux/of_device.h> +#include <linux/property.h> #include <linux/spi/spi.h> #define MCP41010_MAX_WIPERS 2 @@ -146,7 +146,7 @@ static int mcp41010_probe(struct spi_device *spi) data = iio_priv(indio_dev); spi_set_drvdata(spi, indio_dev); data->spi = spi; - data->cfg = of_device_get_match_data(&spi->dev); + data->cfg = device_get_match_data(&spi->dev); if (!data->cfg) data->cfg = &mcp41010_cfg[spi_get_device_id(spi)->driver_data]; diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c index ed30bdaa10ec..fe514f0b5506 100644 --- a/drivers/iio/potentiostat/lmp91000.c +++ b/drivers/iio/potentiostat/lmp91000.c @@ -271,9 +271,6 @@ static int lmp91000_buffer_cb(const void *val, void *private) return 0; } -static const struct iio_trigger_ops lmp91000_trigger_ops = { -}; - static int lmp91000_buffer_postenable(struct iio_dev *indio_dev) { struct lmp91000_data *data = iio_priv(indio_dev); @@ -330,7 +327,6 @@ static int lmp91000_probe(struct i2c_client *client, return -ENOMEM; } - data->trig->ops = &lmp91000_trigger_ops; init_completion(&data->completion); ret = lmp91000_read_config(data); diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 6b7da40f99c8..bf8167f43c56 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -1138,7 +1138,6 @@ int bmp280_common_probe(struct device *dev, } EXPORT_SYMBOL(bmp280_common_probe); -#ifdef CONFIG_PM static int bmp280_runtime_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); @@ -1159,15 +1158,9 @@ 
static int bmp280_runtime_resume(struct device *dev) usleep_range(data->start_up_time, data->start_up_time + 100); return data->chip_info->chip_config(data); } -#endif /* CONFIG_PM */ -const struct dev_pm_ops bmp280_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) - SET_RUNTIME_PM_OPS(bmp280_runtime_suspend, - bmp280_runtime_resume, NULL) -}; -EXPORT_SYMBOL(bmp280_dev_pm_ops); +EXPORT_RUNTIME_DEV_PM_OPS(bmp280_dev_pm_ops, bmp280_runtime_suspend, + bmp280_runtime_resume, NULL); MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>"); MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor"); diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c index 8b03ea15c0d0..35045bd92846 100644 --- a/drivers/iio/pressure/bmp280-i2c.c +++ b/drivers/iio/pressure/bmp280-i2c.c @@ -58,7 +58,7 @@ static struct i2c_driver bmp280_i2c_driver = { .driver = { .name = "bmp280", .of_match_table = bmp280_of_i2c_match, - .pm = &bmp280_dev_pm_ops, + .pm = pm_ptr(&bmp280_dev_pm_ops), }, .probe = bmp280_i2c_probe, .id_table = bmp280_i2c_id, diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c index 625b86878ad8..41f6cc56d229 100644 --- a/drivers/iio/pressure/bmp280-spi.c +++ b/drivers/iio/pressure/bmp280-spi.c @@ -109,7 +109,7 @@ static struct spi_driver bmp280_spi_driver = { .driver = { .name = "bmp280", .of_match_table = bmp280_of_spi_match, - .pm = &bmp280_dev_pm_ops, + .pm = pm_ptr(&bmp280_dev_pm_ops), }, .id_table = bmp280_spi_id, .probe = bmp280_spi_probe, diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c index 1eb9e7b29e05..e95b9a5475b4 100644 --- a/drivers/iio/pressure/mpl3115.c +++ b/drivers/iio/pressure/mpl3115.c @@ -74,7 +74,6 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct mpl3115_data *data = iio_priv(indio_dev); - __be32 tmp = 0; int ret; switch (mask) { @@ -84,7 +83,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev, return ret; switch (chan->type) { - case IIO_PRESSURE: /* in 0.25 pascal / LSB */ + case IIO_PRESSURE: { /* in 0.25 pascal / LSB */ + __be32 tmp = 0; + mutex_lock(&data->lock); ret = mpl3115_request(data); if (ret < 0) { @@ -96,10 +97,13 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); if (ret < 0) break; - *val = be32_to_cpu(tmp) >> 12; + *val = be32_to_cpu(tmp) >> chan->scan_type.shift; ret = IIO_VAL_INT; break; - case IIO_TEMP: /* in 0.0625 celsius / LSB */ + } + case IIO_TEMP: { /* in 0.0625 celsius / LSB */ + __be16 tmp; + mutex_lock(&data->lock); ret = mpl3115_request(data); if (ret < 0) { @@ -111,9 +115,11 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); if (ret < 0) break; - *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11); + *val = sign_extend32(be16_to_cpu(tmp) >> chan->scan_type.shift, + chan->scan_type.realbits - 1); ret = IIO_VAL_INT; break; + } default: ret = -EINVAL; break; diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h index 86b1c4b1820d..cbc9349c342a 100644 --- a/drivers/iio/pressure/ms5611.h +++ b/drivers/iio/pressure/ms5611.h @@ -50,9 +50,9 @@ struct ms5611_state { const struct ms5611_osr *pressure_osr; const struct ms5611_osr *temp_osr; - int (*reset)(struct device *dev); - int (*read_prom_word)(struct device *dev, int index, u16 *word); - int (*read_adc_temp_and_pressure)(struct device *dev, + int (*reset)(struct ms5611_state *st); + int 
(*read_prom_word)(struct ms5611_state *st, int index, u16 *word); + int (*read_adc_temp_and_pressure)(struct ms5611_state *st, s32 *temp, s32 *pressure); struct ms5611_chip_info *chip_info; diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index ee75f08655c9..a4d0b54cde9b 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -85,8 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev) struct ms5611_state *st = iio_priv(indio_dev); for (i = 0; i < MS5611_PROM_WORDS_NB; i++) { - ret = st->read_prom_word(&indio_dev->dev, - i, &st->chip_info->prom[i]); + ret = st->read_prom_word(st, i, &st->chip_info->prom[i]); if (ret < 0) { dev_err(&indio_dev->dev, "failed to read prom at %d\n", i); @@ -108,7 +107,7 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev, int ret; struct ms5611_state *st = iio_priv(indio_dev); - ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure); + ret = st->read_adc_temp_and_pressure(st, temp, pressure); if (ret < 0) { dev_err(&indio_dev->dev, "failed to read temperature and pressure\n"); @@ -196,7 +195,7 @@ static int ms5611_reset(struct iio_dev *indio_dev) int ret; struct ms5611_state *st = iio_priv(indio_dev); - ret = st->reset(&indio_dev->dev); + ret = st->reset(st); if (ret < 0) { dev_err(&indio_dev->dev, "failed to reset device\n"); return ret; diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c index 5c82d80f85b6..1047a85527a9 100644 --- a/drivers/iio/pressure/ms5611_i2c.c +++ b/drivers/iio/pressure/ms5611_i2c.c @@ -20,17 +20,15 @@ #include "ms5611.h" -static int ms5611_i2c_reset(struct device *dev) +static int ms5611_i2c_reset(struct ms5611_state *st) { - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); - return i2c_smbus_write_byte(st->client, MS5611_RESET); } -static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word) +static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index, + u16 *word) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = i2c_smbus_read_word_swapped(st->client, MS5611_READ_PROM_WORD + (index << 1)); @@ -57,11 +55,10 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val) return 0; } -static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev, +static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st, s32 *temp, s32 *pressure) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); const struct ms5611_osr *osr = st->temp_osr; ret = i2c_smbus_write_byte(st->client, osr->cmd); diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c index 79bed64c9b68..9fa2dcd71760 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -15,18 +15,17 @@ #include "ms5611.h" -static int ms5611_spi_reset(struct device *dev) +static int ms5611_spi_reset(struct ms5611_state *st) { u8 cmd = MS5611_RESET; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); return spi_write_then_read(st->client, &cmd, 1, NULL, 0); } -static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word) +static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index, + u16 *word) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1)); if (ret < 0) @@ -37,11 +36,10 @@ static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word) return 0; } -static 
int ms5611_spi_read_adc(struct device *dev, s32 *val) +static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val) { int ret; u8 buf[3] = { MS5611_READ_ADC }; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = spi_write_then_read(st->client, buf, 1, buf, 3); if (ret < 0) @@ -52,11 +50,10 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val) return 0; } -static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, +static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st, s32 *temp, s32 *pressure) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); const struct ms5611_osr *osr = st->temp_osr; /* @@ -68,7 +65,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, return ret; usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL)); - ret = ms5611_spi_read_adc(dev, temp); + ret = ms5611_spi_read_adc(st, temp); if (ret < 0) return ret; @@ -78,7 +75,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, return ret; usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL)); - return ms5611_spi_read_adc(dev, pressure); + return ms5611_spi_read_adc(st, pressure); } static int ms5611_spi_probe(struct spi_device *spi) diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index 3797a8f54276..51f4f92ae84a 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c @@ -133,7 +133,7 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev, unsigned long val; int ret; - ret = kstrtoul((const char *) buf, 10, &val); + ret = kstrtoul(buf, 10, &val); if (ret) return -EINVAL; @@ -238,9 +238,6 @@ err_read: return IRQ_HANDLED; } -static const struct iio_trigger_ops iio_interrupt_trigger_ops = { -}; - static void as3935_event_work(struct work_struct *work) { struct as3935_state *st; @@ -417,7 +414,6 @@ static int as3935_probe(struct spi_device *spi) st->trig = trig; st->noise_tripped = jiffies - HZ; iio_trigger_set_drvdata(trig, indio_dev); - trig->ops = &iio_interrupt_trigger_ops; ret = devm_iio_trigger_register(dev, trig); if (ret) { diff --git a/drivers/iio/test/iio-test-format.c b/drivers/iio/test/iio-test-format.c index f1e951eddb43..237321436b83 100644 --- a/drivers/iio/test/iio-test-format.c +++ b/drivers/iio/test/iio-test-format.c @@ -14,10 +14,13 @@ static void iio_test_iio_format_value_integer(struct kunit *test) { - char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); + char *buf; int val; int ret; + buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + val = 42; ret = iio_format_value(buf, IIO_VAL_INT, 1, &val); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "42\n"); @@ -41,153 +44,219 @@ static void iio_test_iio_format_value_integer(struct kunit *test) static void iio_test_iio_format_value_fixedpoint(struct kunit *test) { - char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); int values[2]; + char *buf; int ret; + buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + /* positive >= 1 */ values[0] = 1; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010 
dB\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000000010\n"); /* positive < 1 */ values[0] = 0; values[1] = 12; - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012 dB\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000012\n"); /* negative <= -1 */ values[0] = -1; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010 dB\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000000010\n"); /* negative > -1 */ values[0] = 0; values[1] = -123; - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123 dB\n"); - ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values); + ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000123\n"); } static void iio_test_iio_format_value_fractional(struct kunit *test) { - char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); int values[2]; + char *buf; int ret; + buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + /* positive < 1 */ values[0] = 1; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.100000000\n"); /* positive >= 1 */ values[0] = 100; values[1] = 3; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "33.333333333\n"); /* negative > -1 */ values[0] = -1; values[1] = 1000000000; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000001\n"); /* negative <= -1 */ values[0] = -200; values[1] = 3; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-66.666666666\n"); /* 
Zero */ values[0] = 0; values[1] = -10; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n"); } static void iio_test_iio_format_value_fractional_log2(struct kunit *test) { - char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); int values[2]; + char *buf; int ret; + buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + /* positive < 1 */ values[0] = 123; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.120117187\n"); /* positive >= 1 */ values[0] = 1234567; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1205.631835937\n"); /* negative > -1 */ values[0] = -123; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.120117187\n"); /* negative <= -1 */ values[0] = -1234567; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1205.631835937\n"); /* Zero */ values[0] = 0; values[1] = 10; - ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values); + ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n"); } static void iio_test_iio_format_value_multiple(struct kunit *test) { - char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); int values[] = {1, -2, 3, -4, 5}; + char *buf; int ret; + buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + ret = iio_format_value(buf, IIO_VAL_INT_MULTIPLE, ARRAY_SIZE(values), values); IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1 -2 3 -4 5 \n"); } +static void iio_test_iio_format_value_integer_64(struct kunit *test) +{ + int values[2]; + s64 value; + char *buf; + int ret; + + buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + + value = 24; + values[0] = lower_32_bits(value); + values[1] = upper_32_bits(value); + ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values); + IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "24\n"); + + value = -24; + values[0] = lower_32_bits(value); + values[1] = upper_32_bits(value); + ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values); + IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-24\n"); + + value = 0; + values[0] = lower_32_bits(value); + values[1] = upper_32_bits(value); + ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values); + IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0\n"); + + value = UINT_MAX; + values[0] = lower_32_bits(value); + values[1] = upper_32_bits(value); + ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values); + IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "4294967295\n"); + + value = -((s64)UINT_MAX); + values[0] = lower_32_bits(value); + values[1] = upper_32_bits(value); + ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values); + 
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-4294967295\n"); + + value = LLONG_MAX; + values[0] = lower_32_bits(value); + values[1] = upper_32_bits(value); + ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values); + IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "9223372036854775807\n"); + + value = LLONG_MIN; + values[0] = lower_32_bits(value); + values[1] = upper_32_bits(value); + ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values); + IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-9223372036854775808\n"); +} + static struct kunit_case iio_format_test_cases[] = { KUNIT_CASE(iio_test_iio_format_value_integer), KUNIT_CASE(iio_test_iio_format_value_fixedpoint), KUNIT_CASE(iio_test_iio_format_value_fractional), KUNIT_CASE(iio_test_iio_format_value_fractional_log2), KUNIT_CASE(iio_test_iio_format_value_multiple), + KUNIT_CASE(iio_test_iio_format_value_integer_64), {} }; diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c index f746c460bf2a..5f49cd105fae 100644 --- a/drivers/iio/trigger/iio-trig-interrupt.c +++ b/drivers/iio/trigger/iio-trig-interrupt.c @@ -25,9 +25,6 @@ static irqreturn_t iio_interrupt_trigger_poll(int irq, void *private) return IRQ_HANDLED; } -static const struct iio_trigger_ops iio_interrupt_trigger_ops = { -}; - static int iio_interrupt_trigger_probe(struct platform_device *pdev) { struct iio_interrupt_trigger_info *trig_info; @@ -58,7 +55,6 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev) } iio_trigger_set_drvdata(trig, trig_info); trig_info->irq = irq; - trig->ops = &iio_interrupt_trigger_ops; ret = request_irq(irq, iio_interrupt_trigger_poll, irqflags, trig->name, trig); if (ret) { diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index e9adfff45b39..2a4b75897910 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -124,9 +124,6 @@ static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = { NULL }; -static const struct iio_trigger_ops iio_sysfs_trigger_ops = { -}; - static int iio_sysfs_trigger_probe(int id) { struct iio_sysfs_trig *t; @@ -156,7 +153,6 @@ static int iio_sysfs_trigger_probe(int id) } t->trig->dev.groups = iio_sysfs_trigger_attr_groups; - t->trig->ops = &iio_sysfs_trigger_ops; iio_trigger_set_drvdata(t->trig, t); t->work = IRQ_WORK_INIT_HARD(iio_sysfs_trigger_work); diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index 4353b749ecef..4f9461e1412c 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c @@ -696,9 +696,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = { .write = stm32_count_set_preset }, IIO_ENUM("enable_mode", IIO_SEPARATE, &stm32_enable_mode_enum), - IIO_ENUM_AVAILABLE("enable_mode", &stm32_enable_mode_enum), + IIO_ENUM_AVAILABLE("enable_mode", IIO_SHARED_BY_TYPE, &stm32_enable_mode_enum), IIO_ENUM("trigger_mode", IIO_SEPARATE, &stm32_trigger_mode_enum), - IIO_ENUM_AVAILABLE("trigger_mode", &stm32_trigger_mode_enum), + IIO_ENUM_AVAILABLE("trigger_mode", IIO_SHARED_BY_TYPE, &stm32_trigger_mode_enum), {} }; diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index b79f816a7203..f6aa1a964573 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -956,7 +956,7 @@ int rdma_query_gid(struct ib_device *device, u32 port_num, { struct ib_gid_table *table; unsigned long flags; - int 
res = -EINVAL; + int res; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; @@ -964,9 +964,15 @@ int rdma_query_gid(struct ib_device *device, u32 port_num, table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); - if (index < 0 || index >= table->sz || - !is_gid_entry_valid(table->data_vec[index])) + if (index < 0 || index >= table->sz) { + res = -EINVAL; goto done; + } + + if (!is_gid_entry_valid(table->data_vec[index])) { + res = -ENOENT; + goto done; + } memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid)); res = 0; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 835ac54d4a24..27a00ce2e101 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -766,6 +766,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) unsigned int p; u16 pkey, index; enum ib_port_state port_state; + int ret; int i; cma_dev = NULL; @@ -784,9 +785,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) continue; - for (i = 0; !rdma_query_gid(cur_dev->device, - p, i, &gid); - i++) { + + for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; + ++i) { + ret = rdma_query_gid(cur_dev->device, p, i, + &gid); + if (ret) + continue; + if (!memcmp(&gid, dgid, sizeof(gid))) { cma_dev = cur_dev; sgid = gid; @@ -4033,8 +4039,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); - req.private_data_len = offset + conn_param->private_data_len; - if (req.private_data_len < conn_param->private_data_len) + if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { @@ -4093,8 +4098,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); - req.private_data_len = offset + conn_param->private_data_len; - if (req.private_data_len < conn_param->private_data_len) + if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 22a4adda7981..a311df07b1bd 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2461,7 +2461,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, ++i) { ret = rdma_query_gid(device, port, i, &tmp_gid); if (ret) - return ret; + continue; + if (!memcmp(&tmp_gid, gid, sizeof *gid)) { *port_num = port; if (index) diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index a3f84b50c46a..84c53bd2a52d 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -433,6 +433,7 @@ static struct attribute *port_default_attrs[] = { &ib_port_attr_link_layer.attr, NULL }; +ATTRIBUTE_GROUPS(port_default); static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf) { @@ -774,7 +775,7 @@ static void ib_port_gid_attr_release(struct kobject *kobj) static struct kobj_type port_type = { .release = ib_port_release, .sysfs_ops = &port_sysfs_ops, - .default_attrs = port_default_attrs + .default_groups = port_default_groups, }; static struct kobj_type gid_attr_type = { diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 7a47343d11f9..aead24c1a682 100644 --- a/drivers/infiniband/core/umem_odp.c +++ 
b/drivers/infiniband/core/umem_odp.c @@ -227,7 +227,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, const struct mmu_interval_notifier_ops *ops) { struct ib_umem_odp *umem_odp; - struct mm_struct *mm; int ret; if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND))) @@ -241,7 +240,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, umem_odp->umem.length = size; umem_odp->umem.address = addr; umem_odp->umem.writable = ib_access_writable(access); - umem_odp->umem.owning_mm = mm = current->mm; + umem_odp->umem.owning_mm = current->mm; umem_odp->notifier.ops = ops; umem_odp->page_shift = PAGE_SHIFT; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index d1345d76d9b1..6b6393176b3c 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1399,7 +1399,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs, attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; attr.qp_type = cmd->qp_type; - attr.create_flags = 0; attr.cap.max_send_wr = cmd->max_send_wr; attr.cap.max_recv_wr = cmd->max_recv_wr; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 29cc0d14399a..3224f18a66e5 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -262,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str) int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index, u16 *pkey) { - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + if (index > 0) + return -EINVAL; - /* Ignore port_num */ + *pkey = IB_DEFAULT_PKEY_FULL; - memset(pkey, 0, sizeof(*pkey)); - return bnxt_qplib_get_pkey(&rdev->qplib_res, - &rdev->qplib_res.pkey_tbl, index, pkey); + return 0; } int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index b44944fb9b24..3d6834d3d4fb 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -893,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq, qplib_srq); struct ib_event ib_event; - int rc = 0; ib_event.device = &srq->rdev->ibdev; ib_event.element.srq = &srq->ib_srq; @@ -907,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, (*srq->ib_srq.event_handler)(&ib_event, srq->ib_srq.srq_context); } - return rc; + return 0; } static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index ca88849559bf..96e581ced50e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -46,6 +46,7 @@ #include <linux/delay.h> #include <linux/prefetch.h> #include <linux/if_ether.h> +#include <rdma/ib_mad.h> #include "roce_hsi.h" @@ -1232,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; struct creq_modify_qp_resp resp; - u16 cmd_flags = 0, pkey; + u16 cmd_flags = 0; u32 temp32[4]; u32 bmask; int rc; @@ -1255,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) req.access = qp->access; - if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) { - if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl, - qp->pkey_index, 
&pkey)) - req.pkey = cpu_to_le16(pkey); - } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) + req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) req.qkey = cpu_to_le32(qp->qkey); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 3de854727460..061b2895dd9b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -555,7 +555,7 @@ skip_ctx_setup: void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - kfree(rcfw->cmdq.cmdq_bitmap); + bitmap_free(rcfw->cmdq.cmdq_bitmap); kfree(rcfw->qp_tbl); kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); @@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_sg_info sginfo = {}; struct bnxt_qplib_cmdq_ctx *cmdq; struct bnxt_qplib_creq_ctx *creq; - u32 bmap_size = 0; rcfw->pdev = res->pdev; cmdq = &rcfw->cmdq; @@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, if (!rcfw->crsqe_tbl) goto fail; - bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); - cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); + cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL); if (!cmdq->cmdq_bitmap) goto fail; - cmdq->bmap_size = bmap_size; - /* Allocate one extra to hold the QP1 entries */ rcfw->qp_tbl_size = qp_tbl_sz + 1; rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), @@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) iounmap(cmdq->cmdq_mbox.reg.bar_reg); iounmap(creq->creq_db.reg.bar_reg); - indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); - if (indx != cmdq->bmap_size) + indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); + if (indx != rcfw->cmdq_depth) dev_err(&rcfw->pdev->dev, "disabling RCFW with pending cmd-bit %lx\n", indx); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 82faa4e4cda8..0a3d8e7da3d4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx { wait_queue_head_t waitq; unsigned long flags; unsigned long *cmdq_bitmap; - u32 bmap_size; u32 seq_num; }; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index bc1ba4b51ba4..126d4f26f75a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -649,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); } -static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - if (!pkey_tbl->tbl) - dev_dbg(&res->pdev->dev, "PKEY tbl not present\n"); - else - kfree(pkey_tbl->tbl); - - pkey_tbl->tbl = NULL; - pkey_tbl->max = 0; - pkey_tbl->active = 0; -} - -static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, - u16 max) -{ - pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL); - if (!pkey_tbl->tbl) - return -ENOMEM; - - pkey_tbl->max = max; - return 0; -}; - /* PDs */ int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) { @@ -843,24 +818,6 @@ unmap_io: return -ENOMEM; } -/* PKEYs */ -static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - memset(pkey_tbl->tbl, 0, 
sizeof(u16) * pkey_tbl->max); - pkey_tbl->active = 0; -} - -static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - u16 pkey = 0xFFFF; - - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); - - /* pkey default = 0xFFFF */ - bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false); -} - /* Stats */ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, struct bnxt_qplib_stats *stats) @@ -891,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) { - bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl); bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); } int bnxt_qplib_init_res(struct bnxt_qplib_res *res) { bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); - bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl); return 0; } void bnxt_qplib_free_res(struct bnxt_qplib_res *res) { - bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl); bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); bnxt_qplib_free_pd_tbl(&res->pd_tbl); bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); @@ -924,10 +878,6 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, if (rc) goto fail; - rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey); - if (rc) - goto fail; - rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd); if (rc) goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index e1411a2352a7..982e2c96dac2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl { u8 *vlan; }; -struct bnxt_qplib_pkey_tbl { - u16 *tbl; - u16 max; - u16 active; -}; - struct bnxt_qplib_dpi { u32 dpi; void __iomem *dbr; @@ -258,7 +252,6 @@ struct bnxt_qplib_res { struct bnxt_qplib_rcfw *rcfw; struct bnxt_qplib_pd_tbl pd_tbl; struct bnxt_qplib_sgid_tbl sgid_tbl; - struct bnxt_qplib_pkey_tbl pkey_tbl; struct bnxt_qplib_dpi_tbl dpi_tbl; bool prio; bool is_vf; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 379e715ebd30..b802981b7171 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -146,17 +146,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_srq = le16_to_cpu(sb->max_srq); attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1; attr->max_srq_sges = sb->max_srq_sge; - attr->max_pkey = le32_to_cpu(sb->max_pkeys); - /* - * Some versions of FW reports more than 0xFFFF. 
- * Restrict it for now to 0xFFFF to avoid - * reporting trucated value - */ - if (attr->max_pkey > 0xFFFF) { - /* ib_port_attr::pkey_tbl_len is u16 */ - attr->max_pkey = 0xFFFF; - } - + attr->max_pkey = 1; attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); @@ -414,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return rc; } -/* pkeys */ -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey) -{ - if (index == 0xFFFF) { - *pkey = 0xFFFF; - return 0; - } - if (index >= pkey_tbl->max) { - dev_err(&res->pdev->dev, - "Index %d exceeded PKEY table max (%d)\n", - index, pkey_tbl->max); - return -EINVAL; - } - memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey)); - return 0; -} - -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? */ - if (!pkey_tbl->active) { - dev_err(&res->pdev->dev, "PKEY table has no active entries\n"); - return -ENOMEM; - } - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - break; - } - if (i == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY 0x%04x not found in the pkey table\n", *pkey); - return -ENOMEM; - } - memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey)); - pkey_tbl->active--; - - /* unlock */ - return rc; -} - -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, free_idx, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? 
*/ - if (pkey_tbl->active == pkey_tbl->max) { - dev_err(&res->pdev->dev, "PKEY table is full\n"); - return -ENOMEM; - } - free_idx = pkey_tbl->max; - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - return -EALREADY; - else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max) - free_idx = i; - } - if (free_idx == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY table is FULL but count is not MAX??\n"); - return -ENOMEM; - } - /* Add PKEY to the pkey_tbl */ - memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey)); - pkey_tbl->active++; - - /* unlock */ - return rc; -} - /* AH */ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, bool block) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index a18f568cb23e..5939e8fc8353 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -255,15 +255,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac); -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey); -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr, bool vf); int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res, diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 913f39ee4416..c16017f6e8db 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2471,7 +2471,8 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, skb_get(skb); rpl = cplhdr(skb); if (!is_t4(adapter_type)) { - skb_trim(skb, roundup(sizeof(*rpl5), 16)); + BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16)); + skb_trim(skb, sizeof(*rpl5)); rpl5 = (void *)rpl; INIT_TP_WR(rpl5, ep->hwtid); } else { @@ -2487,7 +2488,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); opt2 |= T5_ISS_F; rpl5 = (void *)rpl; - memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); + memset_after(rpl5, 0, iss); if (peer2peer) isn += 4; rpl5->iss = cpu_to_be32(isn); diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c index 724d23297b35..f64e7e02b129 100644 --- a/drivers/infiniband/hw/cxgb4/id_table.c +++ b/drivers/infiniband/hw/cxgb4/id_table.c @@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc) alloc->last = obj + 1; if (alloc->last >= alloc->max) alloc->last = 0; - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj += alloc->start; } else obj = -1; @@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj) obj -= alloc->start; spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); spin_unlock_irqrestore(&alloc->lock, flags); } int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, u32 reserved, u32 flags) { - int i; - alloc->start = start; alloc->flags = flags; if (flags & C4IW_ID_TABLE_F_RANDOM) alloc->last = prandom_u32() % RANDOM_SKIP; else alloc->last = 0; - alloc->max = num; + alloc->max = num; 
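/*
 * A minimal sketch of the bitmap API relied on below, assuming
 * <linux/bitmap.h>: bitmap_zalloc(nbits, gfp) is equivalent to
 * kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), gfp)
 * followed by bitmap_zero(map, nbits); bitmap_set(map, start, nbits)
 * sets nbits consecutive bits starting at bit start (replacing an
 * open-coded set_bit() loop); and a bitmap obtained from
 * bitmap_zalloc() must be released with bitmap_free(), not kfree().
 */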
spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY)) - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void c4iw_id_table_free(struct c4iw_id_table *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 0c8fd5a85fcb..89f36a3a9af0 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -41,6 +41,7 @@ #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/inetdevice.h> +#include <net/addrconf.h> #include <linux/io.h> #include <asm/irq.h> @@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro return -EINVAL; dev = to_c4iw_dev(ibdev); - memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + dev->rdev.lldi.ports[0]->dev_addr); props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); props->fw_ver = dev->rdev.lldi.fw_vers; props->device_cap_flags = dev->device_cap_flags; @@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work) struct c4iw_dev *dev = ctx->dev; pr_debug("c4iw_dev %p\n", dev); - memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); - memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->rdev.lldi.ports[0]->dev_addr); dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; if (fastreg_support) dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index d20b4ef2c853..ffbd9a89981e 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2460,6 +2460,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, memset(attr, 0, sizeof(*attr)); memset(init_attr, 0, sizeof(*init_attr)); attr->qp_state = to_ib_qp_state(qhp->attr.state); + attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 5b11c8282744..a71c5a36ceba 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, if (!pq->reqs) goto pq_reqs_nomem; - pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), - sizeof(*pq->req_in_use), - GFP_KERNEL); + pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL); if (!pq->req_in_use) goto pq_reqs_no_in_use; @@ -210,7 +208,7 @@ cq_comps_nomem: cq_nomem: kmem_cache_destroy(pq->txreq_cache); pq_txreq_nomem: - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); pq_reqs_no_in_use: kfree(pq->reqs); pq_reqs_nomem: @@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, pq->wait, !atomic_read(&pq->n_reqs)); kfree(pq->reqs); - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); flush_pq_iowait(pq); kfree(pq); diff --git 
a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 18d10ebf900b..ab3fbba70789 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -5,22 +5,9 @@ config INFINIBAND_HNS depends on ARM64 || (COMPILE_TEST && 64BIT) depends on (HNS_DSAF && HNS_ENET) || HNS3 help - This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine - is used in Hisilicon Hip06 and more further ICT SoC based on - platform device. + This is a RoCE/RDMA driver for the Hisilicon RoCE engine. - To compile HIP06 or HIP08 driver as module, choose M here. - -config INFINIBAND_HNS_HIP06 - bool "Hisilicon Hip06 Family RoCE support" - depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET - depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y) - help - RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and - Hip07 SoC. These RoCE engines are platform devices. - - To compile this driver, choose Y here: if INFINIBAND_HNS is m, this - module will be called hns-roce-hw-v1 + To compile HIP08 driver as module, choose M here. config INFINIBAND_HNS_HIP08 bool "Hisilicon Hip08 Family RoCE support" diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index e105945b94a1..9f04f25d9631 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -9,11 +9,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o -ifdef CONFIG_INFINIBAND_HNS_HIP06 -hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o -endif - ifdef CONFIG_INFINIBAND_HNS_HIP08 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index cc258edec331..492b122d0521 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -30,7 +30,6 @@ * SOFTWARE. */ -#include <linux/platform_device.h> #include <linux/pci.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -61,7 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct hns_roce_ah *ah = to_hr_ah(ibah); int ret = 0; - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata) + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) return -EOPNOTSUPP; ah->av.port = rdma_ah_get_port_num(ah_attr); @@ -80,7 +79,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); /* HIP08 needs to record vlan info in Address Vector */ - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, &ah->av.vlan_id, NULL); if (ret) diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index d4fa0fd52294..11a78ceae568 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -31,10 +31,9 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <linux/vmalloc.h> -#include "hns_roce_device.h" #include <rdma/ib_umem.h> +#include "hns_roce_device.h" void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf) { diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 84f3f2b5f097..4b693d542ace 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -31,7 +31,6 @@ */ #include <linux/dmapool.h> -#include <linux/platform_device.h> #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_cmd.h" @@ -61,7 +60,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, CMD_POLL_TOKEN, 0); if (ret) { dev_err_ratelimited(hr_dev->dev, - "failed to post mailbox %x in poll mode, ret = %d.\n", + "failed to post mailbox 0x%x in poll mode, ret = %d.\n", op, ret); return ret; } @@ -91,7 +90,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, if (unlikely(token != context->token)) { dev_err_ratelimited(hr_dev->dev, - "[cmd] invalid ae token %x,context token is %x!\n", + "[cmd] invalid ae token 0x%x, context token is 0x%x.\n", token, context->token); return; } @@ -130,14 +129,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, context->token, 1); if (ret) { dev_err_ratelimited(dev, - "failed to post mailbox %x in event mode, ret = %d.\n", + "failed to post mailbox 0x%x in event mode, ret = %d.\n", op, ret); goto out; } if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", context->token, op); ret = -EBUSY; goto out; @@ -145,7 +144,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ret = context->result; if (ret) - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", context->token, op, ret); out: diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h index b73e55de83ac..465d1f914b6c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_common.h +++ b/drivers/infiniband/hw/hns/hns_roce_common.h @@ -104,208 +104,6 @@ #define hr_reg_read(ptr, field) _hr_reg_read(ptr, field) -#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3 -#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4 - -#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5 - -#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6 - -#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10 -#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \ - (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S) - -#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16 - -#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0 -#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \ - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S) - -#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24 -#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \ - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S) - -#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0 -#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \ - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S) - -#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24 -#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \ - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S) - -#define 
ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0 -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \ - (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S) - -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16 -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \ - (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S) - -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0 -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \ - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S) - -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16 -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \ - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S) - -#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0 -#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \ - (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0 -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \ - (((1UL << 15) - 1) << \ - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16 -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \ - (((1UL << 4) - 1) << \ - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20 - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21 - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S) - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S) - -#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0 -#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S) - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S) - -#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0 -#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S) - -#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8 -#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \ - (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19 - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \ - (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31 - -#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0 -#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \ - (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S) - -#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0 -#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \ - (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S) - -#define ROCEE_MB6_ROCEE_MB_CMD_S 0 -#define ROCEE_MB6_ROCEE_MB_CMD_M \ - (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S) - -#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8 -#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \ - (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S) - -#define ROCEE_MB6_ROCEE_MB_EVENT_S 14 - -#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15 - -#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16 -#define ROCEE_MB6_ROCEE_MB_TOKEN_M \ - 
(((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \ - (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \ - (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \ - (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31 - -#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0 -#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \ - (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S) - -#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16 -#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \ - (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0 -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \ - (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8 -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \ - (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17 - -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0 -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \ - (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S) - -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16 -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \ - (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S) - -#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0 -#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \ - (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S) - -#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16 -#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1 -#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0 - -#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0 -#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1 - -#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0 - -#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0 -#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \ - (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) - -#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0 -#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \ - (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) - -#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0 -#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \ - (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) - -#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0 -#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \ - (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S) - -#define ROCEE_SDB_CNT_CMP_BITS 16 - -#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20 - -#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0 - /*************ROCEE_REG DEFINITION****************/ #define ROCEE_VENDOR_ID_REG 0x0 #define ROCEE_VENDOR_PART_ID_REG 0x4 diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index d763f097599f..55057dcbb2dc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -30,7 +30,6 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <rdma/ib_umem.h> #include <rdma/uverbs_ioctl.h> #include "hns_roce_device.h" @@ -406,15 +405,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, goto err_cqn; } - /* - * For the QP created by kernel space, tptr value should be initialized - * to zero; For the QP created by user space, it will cause synchronous - * problems if tptr is set to zero here, so we initialize it in user - * space. - */ - if (!udata && hr_cq->tptr_addr) - *hr_cq->tptr_addr = 0; - if (udata) { resp.cqn = hr_cq->cqn; ret = ib_copy_to_udata(udata, &resp, @@ -441,9 +431,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); - if (hr_dev->hw->destroy_cq) - hr_dev->hw->destroy_cq(ib_cq, udata); - free_cqc(hr_dev, hr_cq); free_cqn(hr_dev, hr_cq->cqn); free_cq_db(hr_dev, hr_cq, udata); diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 751470c7a2ce..5c4c0480832b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -4,7 +4,6 @@ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. */ -#include <linux/platform_device.h> #include <rdma/ib_umem.h> #include "hns_roce_device.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 43e17d61cb63..1e0bae136997 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -36,36 +36,18 @@ #include <rdma/ib_verbs.h> #include <rdma/hns-abi.h> -#define DRV_NAME "hns_roce" - #define PCI_REVISION_ID_HIP08 0x21 #define PCI_REVISION_ID_HIP09 0x30 -#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6') - #define HNS_ROCE_MAX_MSG_LEN 0x80000000 #define HNS_ROCE_IB_MIN_SQ_STRIDE 6 #define BA_BYTE_LEN 8 -/* Hardware specification only for v1 engine */ #define HNS_ROCE_MIN_CQE_NUM 0x40 -#define HNS_ROCE_MIN_WQE_NUM 0x20 #define HNS_ROCE_MIN_SRQ_WQE_NUM 1 -/* Hardware specification only for v1 engine */ -#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7 -#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000 - -#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20 -#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \ - (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS) -#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 -#define HNS_ROCE_MIN_CQE_CNT 16 - -#define HNS_ROCE_RESERVED_SGE 1 - #define HNS_ROCE_MAX_IRQ_NUM 128 #define HNS_ROCE_SGE_IN_WQE 2 @@ -102,18 +84,12 @@ #define HNS_ROCE_FRMR_MAX_PA 512 #define PKEY_ID 0xffff -#define GUID_LEN 8 #define NODE_DESC_SIZE 64 #define DB_REG_OFFSET 0x1000 /* Configure to HW for PAGE_SIZE larger than 4KB */ #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12) -#define PAGES_SHIFT_8 8 -#define PAGES_SHIFT_16 16 -#define PAGES_SHIFT_24 24 -#define PAGES_SHIFT_32 32 - #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 #define SRQ_DB_REG 0x230 @@ -122,11 +98,6 @@ #define CQ_BANKID_SHIFT 2 -/* The chip implementation of the consumer index is calculated - * according to twice the actual EQ depth - */ -#define EQ_DEPTH_COEFF 2 - enum { SERV_TYPE_RC, SERV_TYPE_UC, @@ -182,6 +153,7 @@ enum { HNS_ROCE_CAP_FLAG_FRMR = BIT(8), HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), + HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12), HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14), HNS_ROCE_CAP_FLAG_STASH = BIT(17), }; @@ -227,7 +199,7 @@ struct hns_roce_uar { enum hns_roce_mmap_type { HNS_ROCE_MMAP_TYPE_DB = 1, - 
HNS_ROCE_MMAP_TYPE_TPTR, + HNS_ROCE_MMAP_TYPE_DWQE, }; struct hns_user_mmap_entry { @@ -242,7 +214,6 @@ struct hns_roce_ucontext { struct list_head page_list; struct mutex page_mutex; struct hns_user_mmap_entry *db_mmap_entry; - struct hns_user_mmap_entry *tptr_mmap_entry; }; struct hns_roce_pd { @@ -345,19 +316,16 @@ struct hns_roce_mw { u32 pbl_buf_pg_sz; }; -/* Only support 4K page size for mr register */ -#define MR_SIZE_4K 0 - struct hns_roce_mr { struct ib_mr ibmr; u64 iova; /* MR's virtual original addr */ u64 size; /* Address range of MR */ u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ - u32 access; /* Access permission of MR */ + u32 access; /* Access permission of MR */ int enabled; /* MR's active status */ - int type; /* MR's register type */ - u32 pbl_hop_num; /* multi-hop number */ + int type; /* MR's register type */ + u32 pbl_hop_num; /* multi-hop number */ struct hns_roce_mtr pbl_mtr; u32 npages; dma_addr_t *page_list; @@ -374,17 +342,17 @@ struct hns_roce_wq { u32 wqe_cnt; /* WQE num */ u32 max_gs; u32 rsv_sge; - int offset; - int wqe_shift; /* WQE size */ + u32 offset; + u32 wqe_shift; /* WQE size */ u32 head; u32 tail; void __iomem *db_reg; }; struct hns_roce_sge { - unsigned int sge_cnt; /* SGE num */ - int offset; - int sge_shift; /* SGE size */ + unsigned int sge_cnt; /* SGE num */ + u32 offset; + u32 sge_shift; /* SGE size */ }; struct hns_roce_buf_list { @@ -453,7 +421,6 @@ struct hns_roce_cq { u32 cons_index; u32 *set_ci_db; void __iomem *db_reg; - u16 *tptr_addr; int arm_sn; int cqe_size; unsigned long cqn; @@ -468,7 +435,7 @@ struct hns_roce_cq { struct hns_roce_idx_que { struct hns_roce_mtr mtr; - int entry_shift; + u32 entry_shift; unsigned long *bitmap; u32 head; u32 tail; @@ -480,7 +447,7 @@ struct hns_roce_srq { u32 wqe_cnt; int max_gs; u32 rsv_sge; - int wqe_shift; + u32 wqe_shift; u32 cqn; u32 xrcdn; void __iomem *db_reg; @@ -539,10 +506,6 @@ struct hns_roce_srq_table { struct hns_roce_hem_table table; }; -struct hns_roce_raq_table { - struct hns_roce_buf_list *e_raq_buf; -}; - struct hns_roce_av { u8 port; u8 gid_index; @@ -627,10 +590,6 @@ struct hns_roce_work { u32 queue_num; }; -enum { - HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5), -}; - struct hns_roce_qp { struct ib_qp ibqp; struct hns_roce_wq rq; @@ -650,9 +609,7 @@ struct hns_roce_qp { u8 sl; u8 resp_depth; u8 state; - u32 access_flags; u32 atomic_rd_en; - u32 pkey_index; u32 qkey; void (*event)(struct hns_roce_qp *qp, enum hns_roce_event event_type); @@ -672,9 +629,10 @@ struct hns_roce_qp { unsigned long flush_flag; struct hns_roce_work flush_work; struct hns_roce_rinl_buf rq_inl_buf; - struct list_head node; /* all qps are on a list */ - struct list_head rq_node; /* all recv qps are on a list */ - struct list_head sq_node; /* all send qps are on a list */ + struct list_head node; /* all qps are on a list */ + struct list_head rq_node; /* all recv qps are on a list */ + struct list_head sq_node; /* all send qps are on a list */ + struct hns_user_mmap_entry *dwqe_mmap_entry; }; struct hns_roce_ib_iboe { @@ -684,11 +642,6 @@ struct hns_roce_ib_iboe { u8 phy_port[HNS_ROCE_MAX_PORTS]; }; -enum { - HNS_ROCE_EQ_STAT_INVALID = 0, - HNS_ROCE_EQ_STAT_VALID = 2, -}; - struct hns_roce_ceqe { __le32 comp; __le32 rsv[15]; @@ -720,12 +673,9 @@ struct hns_roce_eq { int type_flag; /* Aeq:1 ceq:0 */ int eqn; u32 entries; - u32 log_entries; int eqe_size; int irq; - int log_page_size; u32 cons_index; - struct hns_roce_buf_list *buf_list; int over_ignore; int coalesce; int arm_st; @@ -740,7 +690,6 @@ struct 
hns_roce_eq { struct hns_roce_eq_table { struct hns_roce_eq *eq; - void __iomem **eqc_base; /* only for hw v1 */ }; enum cong_type { @@ -767,7 +716,7 @@ struct hns_roce_caps { u32 reserved_qps; int num_qpc_timer; int num_cqc_timer; - int num_srqs; + u32 num_srqs; u32 max_wqes; u32 max_srq_wrs; u32 max_srq_sges; @@ -781,7 +730,7 @@ struct hns_roce_caps { u32 min_cqes; u32 min_wqes; u32 reserved_cqs; - int reserved_srqs; + u32 reserved_srqs; int num_aeq_vectors; int num_comp_vectors; int num_other_vectors; @@ -855,7 +804,7 @@ struct hns_roce_caps { u32 cqc_timer_ba_pg_sz; u32 cqc_timer_buf_pg_sz; u32 cqc_timer_hop_num; - u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ u32 cqe_buf_pg_sz; u32 cqe_hop_num; u32 srqwqe_ba_pg_sz; @@ -874,7 +823,7 @@ struct hns_roce_caps { u32 gmv_hop_num; u32 sl_num; u32 llm_buf_pg_sz; - u32 chunk_sz; /* chunk size in non multihop mode */ + u32 chunk_sz; /* chunk size in non multihop mode */ u64 flags; u16 default_ceq_max_cnt; u16 default_ceq_period; @@ -897,7 +846,6 @@ enum hns_roce_device_state { }; struct hns_roce_hw { - int (*reset)(struct hns_roce_dev *hr_dev, bool enable); int (*cmq_init)(struct hns_roce_dev *hr_dev); void (*cmq_exit)(struct hns_roce_dev *hr_dev); int (*hw_profile)(struct hns_roce_dev *hr_dev); @@ -909,14 +857,12 @@ struct hns_roce_hw { int (*poll_mbox_done)(struct hns_roce_dev *hr_dev, unsigned int timeout); bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy); - int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index, + int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr); int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, const u8 *addr); - void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, - enum ib_mtu mtu); int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, - struct hns_roce_mr *mr, unsigned long mtpt_idx); + struct hns_roce_mr *mr); int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, int flags, void *mb_buf); @@ -936,9 +882,6 @@ struct hns_roce_hw { enum ib_qp_state new_state); int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); - int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, - struct ib_udata *udata); - int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata); int (*init_eq)(struct hns_roce_dev *hr_dev); void (*cleanup_eq)(struct hns_roce_dev *hr_dev); int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf); @@ -948,13 +891,11 @@ struct hns_roce_hw { struct hns_roce_dev { struct ib_device ib_dev; - struct platform_device *pdev; struct pci_dev *pci_dev; struct device *dev; struct hns_roce_uar priv_uar; const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; spinlock_t sm_lock; - spinlock_t bt_cmd_lock; bool active; bool is_reset; bool dis_db; @@ -1001,8 +942,6 @@ struct hns_roce_dev { int loop_idc; u32 sdb_offset; u32 odb_offset; - dma_addr_t tptr_dma_addr; /* only for hw v1 */ - u32 tptr_size; /* only for hw v1 */ const struct hns_roce_hw *hw; void *priv; struct workqueue_struct *irq_workq; @@ -1010,6 +949,7 @@ struct hns_roce_dev { u32 func_num; u32 is_vf; u32 cong_algo_tmpl_id; + u64 dwqe_page; }; static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) @@ -1158,7 +1098,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev); /* hns roce hw need current block and next block addr from mtt */ #define MTT_MIN_COUNT 2 int hns_roce_mtr_find(struct hns_roce_dev 
*hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct hns_roce_buf_attr *buf_attr, unsigned int page_shift, struct ib_udata *udata, diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index fa15d79eabb3..8917365cc6b8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -31,7 +31,6 @@ * SOFTWARE. */ -#include <linux/platform_device.h> #include "hns_roce_device.h" #include "hns_roce_hem.h" #include "hns_roce_common.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c deleted file mode 100644 index f4af3992ba95..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ /dev/null @@ -1,4675 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/platform_device.h> -#include <linux/acpi.h> -#include <linux/etherdevice.h> -#include <linux/interrupt.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <rdma/ib_umem.h> -#include "hns_roce_common.h" -#include "hns_roce_device.h" -#include "hns_roce_cmd.h" -#include "hns_roce_hem.h" -#include "hns_roce_hw_v1.h" - -/** - * hns_get_gid_index - Get gid index. - * @hr_dev: pointer to structure hns_roce_dev. 
- * @port: port, value range: 0 ~ MAX - * @gid_index: gid_index, value range: 0 ~ MAX - * Description: - * N ports shared gids, allocation method as follow: - * GID[0][0], GID[1][0],.....GID[N - 1][0], - * GID[0][0], GID[1][0],.....GID[N - 1][0], - * And so on - */ -u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index) -{ - return gid_index * hr_dev->caps.num_ports + port; -} - -static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg) -{ - dseg->lkey = cpu_to_le32(sg->lkey); - dseg->addr = cpu_to_le64(sg->addr); - dseg->len = cpu_to_le32(sg->length); -} - -static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr, - u32 rkey) -{ - rseg->raddr = cpu_to_le64(remote_addr); - rseg->rkey = cpu_to_le32(rkey); - rseg->len = 0; -} - -static int hns_roce_v1_post_send(struct ib_qp *ibqp, - const struct ib_send_wr *wr, - const struct ib_send_wr **bad_wr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); - struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL; - struct hns_roce_wqe_ctrl_seg *ctrl = NULL; - struct hns_roce_wqe_data_seg *dseg = NULL; - struct hns_roce_qp *qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_sq_db sq_db = {}; - int ps_opcode, i; - unsigned long flags = 0; - void *wqe = NULL; - __le32 doorbell[2]; - const u8 *smac; - int ret = 0; - int loopback; - u32 wqe_idx; - int nreq; - - if (unlikely(ibqp->qp_type != IB_QPT_GSI && - ibqp->qp_type != IB_QPT_RC)) { - dev_err(dev, "un-supported QP type\n"); - *bad_wr = NULL; - return -EOPNOTSUPP; - } - - spin_lock_irqsave(&qp->sq.lock, flags); - - for (nreq = 0; wr; ++nreq, wr = wr->next) { - if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { - ret = -ENOMEM; - *bad_wr = wr; - goto out; - } - - wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); - - if (unlikely(wr->num_sge > qp->sq.max_gs)) { - dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, qp->sq.max_gs); - ret = -EINVAL; - *bad_wr = wr; - goto out; - } - - wqe = hns_roce_get_send_wqe(qp, wqe_idx); - qp->sq.wrid[wqe_idx] = wr->wr_id; - - /* Corresponding to the RC and RD type wqe process separately */ - if (ibqp->qp_type == IB_QPT_GSI) { - ud_sq_wqe = wqe; - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_0_M, - UD_SEND_WQE_U32_4_DMAC_0_S, - ah->av.mac[0]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_1_M, - UD_SEND_WQE_U32_4_DMAC_1_S, - ah->av.mac[1]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_2_M, - UD_SEND_WQE_U32_4_DMAC_2_S, - ah->av.mac[2]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_3_M, - UD_SEND_WQE_U32_4_DMAC_3_S, - ah->av.mac[3]); - - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_DMAC_4_M, - UD_SEND_WQE_U32_8_DMAC_4_S, - ah->av.mac[4]); - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_DMAC_5_M, - UD_SEND_WQE_U32_8_DMAC_5_S, - ah->av.mac[5]); - - smac = (const u8 *)hr_dev->dev_addr[qp->port]; - loopback = ether_addr_equal_unaligned(ah->av.mac, - smac) ? 
1 : 0; - roce_set_bit(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S, - loopback); - - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_OPERATION_TYPE_M, - UD_SEND_WQE_U32_8_OPERATION_TYPE_S, - HNS_ROCE_WQE_OPCODE_SEND); - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M, - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S, - 2); - roce_set_bit(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S, - 1); - - ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ? - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | - (wr->send_flags & IB_SEND_SOLICITED ? - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | - ((wr->opcode == IB_WR_SEND_WITH_IMM) ? - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0); - - roce_set_field(ud_sq_wqe->u32_16, - UD_SEND_WQE_U32_16_DEST_QP_M, - UD_SEND_WQE_U32_16_DEST_QP_S, - ud_wr(wr)->remote_qpn); - roce_set_field(ud_sq_wqe->u32_16, - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M, - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S, - ah->av.stat_rate); - - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_FLOW_LABEL_M, - UD_SEND_WQE_U32_36_FLOW_LABEL_S, - ah->av.flowlabel); - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_PRIORITY_M, - UD_SEND_WQE_U32_36_PRIORITY_S, - ah->av.sl); - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_SGID_INDEX_M, - UD_SEND_WQE_U32_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, qp->phy_port, - ah->av.gid_index)); - - roce_set_field(ud_sq_wqe->u32_40, - UD_SEND_WQE_U32_40_HOP_LIMIT_M, - UD_SEND_WQE_U32_40_HOP_LIMIT_S, - ah->av.hop_limit); - roce_set_field(ud_sq_wqe->u32_40, - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, - ah->av.tclass); - - memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); - - ud_sq_wqe->va0_l = - cpu_to_le32((u32)wr->sg_list[0].addr); - ud_sq_wqe->va0_h = - cpu_to_le32((wr->sg_list[0].addr) >> 32); - ud_sq_wqe->l_key0 = - cpu_to_le32(wr->sg_list[0].lkey); - - ud_sq_wqe->va1_l = - cpu_to_le32((u32)wr->sg_list[1].addr); - ud_sq_wqe->va1_h = - cpu_to_le32((wr->sg_list[1].addr) >> 32); - ud_sq_wqe->l_key1 = - cpu_to_le32(wr->sg_list[1].lkey); - } else if (ibqp->qp_type == IB_QPT_RC) { - u32 tmp_len = 0; - - ctrl = wqe; - memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg)); - for (i = 0; i < wr->num_sge; i++) - tmp_len += wr->sg_list[i].length; - - ctrl->msg_length = - cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len); - - ctrl->sgl_pa_h = 0; - ctrl->flag = 0; - - switch (wr->opcode) { - case IB_WR_SEND_WITH_IMM: - case IB_WR_RDMA_WRITE_WITH_IMM: - ctrl->imm_data = wr->ex.imm_data; - break; - case IB_WR_SEND_WITH_INV: - ctrl->inv_key = - cpu_to_le32(wr->ex.invalidate_rkey); - break; - default: - ctrl->imm_data = 0; - break; - } - - /* Ctrl field, ctrl set type: sig, solic, imm, fence */ - /* SO wait for conforming application scenarios */ - ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ? - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | - (wr->send_flags & IB_SEND_SOLICITED ? - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | - ((wr->opcode == IB_WR_SEND_WITH_IMM || - wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ? - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) | - (wr->send_flags & IB_SEND_FENCE ? 
- (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0); - - wqe += sizeof(struct hns_roce_wqe_ctrl_seg); - - switch (wr->opcode) { - case IB_WR_RDMA_READ: - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, - rdma_wr(wr)->rkey); - break; - case IB_WR_RDMA_WRITE: - case IB_WR_RDMA_WRITE_WITH_IMM: - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, - rdma_wr(wr)->rkey); - break; - case IB_WR_SEND: - case IB_WR_SEND_WITH_INV: - case IB_WR_SEND_WITH_IMM: - ps_opcode = HNS_ROCE_WQE_OPCODE_SEND; - break; - case IB_WR_LOCAL_INV: - case IB_WR_ATOMIC_CMP_AND_SWP: - case IB_WR_ATOMIC_FETCH_AND_ADD: - case IB_WR_LSO: - default: - ps_opcode = HNS_ROCE_WQE_OPCODE_MASK; - break; - } - ctrl->flag |= cpu_to_le32(ps_opcode); - wqe += sizeof(struct hns_roce_wqe_raddr_seg); - - dseg = wqe; - if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { - if (le32_to_cpu(ctrl->msg_length) > - hr_dev->caps.max_sq_inline) { - ret = -EINVAL; - *bad_wr = wr; - dev_err(dev, "inline len(1-%d)=%d, illegal", - le32_to_cpu(ctrl->msg_length), - hr_dev->caps.max_sq_inline); - goto out; - } - for (i = 0; i < wr->num_sge; i++) { - memcpy(wqe, ((void *) (uintptr_t) - wr->sg_list[i].addr), - wr->sg_list[i].length); - wqe += wr->sg_list[i].length; - } - ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE); - } else { - /* sqe num is two */ - for (i = 0; i < wr->num_sge; i++) - set_data_seg(dseg + i, wr->sg_list + i); - - ctrl->flag |= cpu_to_le32(wr->num_sge << - HNS_ROCE_WQE_SGE_NUM_BIT); - } - } - } - -out: - /* Set DB return */ - if (likely(nreq)) { - qp->sq.head += nreq; - - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M, - SQ_DOORBELL_U32_4_SQ_HEAD_S, - (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M, - SQ_DOORBELL_U32_4_SL_S, qp->sl); - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M, - SQ_DOORBELL_U32_4_PORT_S, qp->phy_port); - roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M, - SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); - roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); - - doorbell[0] = sq_db.u32_4; - doorbell[1] = sq_db.u32_8; - - hns_roce_write64_k(doorbell, qp->sq.db_reg); - } - - spin_unlock_irqrestore(&qp->sq.lock, flags); - - return ret; -} - -static int hns_roce_v1_post_recv(struct ib_qp *ibqp, - const struct ib_recv_wr *wr, - const struct ib_recv_wr **bad_wr) -{ - struct hns_roce_rq_wqe_ctrl *ctrl = NULL; - struct hns_roce_wqe_data_seg *scat = NULL; - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_rq_db rq_db = {}; - __le32 doorbell[2] = {0}; - unsigned long flags = 0; - unsigned int wqe_idx; - int ret = 0; - int nreq; - int i; - u32 reg_val; - - spin_lock_irqsave(&hr_qp->rq.lock, flags); - - for (nreq = 0; wr; ++nreq, wr = wr->next) { - if (hns_roce_wq_overflow(&hr_qp->rq, nreq, - hr_qp->ibqp.recv_cq)) { - ret = -ENOMEM; - *bad_wr = wr; - goto out; - } - - wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); - - if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { - dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, hr_qp->rq.max_gs); - ret = -EINVAL; - *bad_wr = wr; - goto out; - } - - ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx); - - roce_set_field(ctrl->rwqe_byte_12, - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M, - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S, - wr->num_sge); - - scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1); - - for (i = 0; i < 
wr->num_sge; i++) - set_data_seg(scat + i, wr->sg_list + i); - - hr_qp->rq.wrid[wqe_idx] = wr->wr_id; - } - -out: - if (likely(nreq)) { - hr_qp->rq.head += nreq; - - if (ibqp->qp_type == IB_QPT_GSI) { - __le32 tmp; - - /* SW update GSI rq header */ - reg_val = roce_read(to_hr_dev(ibqp->device), - ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port); - tmp = cpu_to_le32(reg_val); - roce_set_field(tmp, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, - hr_qp->rq.head); - reg_val = le32_to_cpu(tmp); - roce_write(to_hr_dev(ibqp->device), - ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); - } else { - roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, - 1); - - doorbell[0] = rq_db.u32_4; - doorbell[1] = rq_db.u32_8; - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg); - } - } - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); - - return ret; -} - -static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev, - int sdb_mode, int odb_mode) -{ - __le32 tmp; - u32 val; - - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode); - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); -} - -static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) -{ - spinlock_t *lock = &hr_dev->bt_cmd_lock; - struct device *dev = hr_dev->dev; - struct hns_roce_hem_iter iter; - void __iomem *bt_cmd; - __le32 bt_cmd_val[2]; - __le32 bt_cmd_h = 0; - unsigned long flags; - __le32 bt_cmd_l; - int ret = 0; - u64 bt_ba; - long end; - - /* Find the HEM(Hardware Entry Memory) entry */ - unsigned long i = obj / (table->table_chunk_size / table->obj_size); - - switch (table->type) { - case HEM_TYPE_QPC: - case HEM_TYPE_MTPT: - case HEM_TYPE_CQC: - case HEM_TYPE_SRQC: - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); - break; - default: - return ret; - } - - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - - /* Currently iter only a chunk */ - for (hns_roce_hem_first(table->hem[i], &iter); - !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { - bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT; - - spin_lock_irqsave(lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - end = HW_SYNC_TIMEOUT_MSECS; - while (end > 0) { - if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) - break; - - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); - end -= HW_SYNC_SLEEP_TIME_INTERVAL; - } - - if (end <= 0) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(lock, flags); - return -EBUSY; - } - - bt_cmd_l = cpu_to_le32(bt_ba); - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, - upper_32_bits(bt_ba)); - - bt_cmd_val[0] = bt_cmd_l; - bt_cmd_val[1] = bt_cmd_h; - hns_roce_write64_k(bt_cmd_val, - hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - 
spin_unlock_irqrestore(lock, flags); - } - - return ret; -} - -static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode, - u32 odb_mode) -{ - __le32 tmp; - u32 val; - - /* Configure SDB/ODB extend mode */ - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode); - roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); -} - -static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept, - u32 sdb_alful) -{ - __le32 tmp; - u32 val; - - /* Configure SDB */ - val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M, - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful); - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M, - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val); -} - -static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept, - u32 odb_alful) -{ - __le32 tmp; - u32 val; - - /* Configure ODB */ - val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M, - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful); - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M, - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val); -} - -static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept, - u32 ext_sdb_alful) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t sdb_dma_addr; - __le32 tmp; - u32 val; - - /* Configure extend SDB threshold */ - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept); - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful); - - /* Configure extend SDB base addr */ - sdb_dma_addr = db->ext_db->sdb_buf_list->map; - roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12)); - - /* Configure extend SDB depth */ - val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S, - db->ext_db->esdb_dep); - /* - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. 
- */ - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val); - - dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep); - dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n", - ext_sdb_alept, ext_sdb_alful); -} - -static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept, - u32 ext_odb_alful) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t odb_dma_addr; - __le32 tmp; - u32 val; - - /* Configure extend ODB threshold */ - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept); - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful); - - /* Configure extend ODB base addr */ - odb_dma_addr = db->ext_db->odb_buf_list->map; - roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12)); - - /* Configure extend ODB depth */ - val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M, - ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S, - db->ext_db->eodb_dep); - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S, - db->ext_db->eodb_dep); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val); - - dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep); - dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n", - ext_odb_alept, ext_odb_alful); -} - -static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod, - u32 odb_ext_mod) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t sdb_dma_addr; - dma_addr_t odb_dma_addr; - int ret = 0; - - db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL); - if (!db->ext_db) - return -ENOMEM; - - if (sdb_ext_mod) { - db->ext_db->sdb_buf_list = kmalloc( - sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL); - if (!db->ext_db->sdb_buf_list) { - ret = -ENOMEM; - goto ext_sdb_buf_fail_out; - } - - db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev, - HNS_ROCE_V1_EXT_SDB_SIZE, - &sdb_dma_addr, GFP_KERNEL); - if (!db->ext_db->sdb_buf_list->buf) { - ret = -ENOMEM; - goto alloc_sq_db_buf_fail; - } - db->ext_db->sdb_buf_list->map = sdb_dma_addr; - - db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH); - hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT, - HNS_ROCE_V1_EXT_SDB_ALFUL); - } else - hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT, - HNS_ROCE_V1_SDB_ALFUL); - - if (odb_ext_mod) { - db->ext_db->odb_buf_list = kmalloc( - sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL); - if (!db->ext_db->odb_buf_list) { - ret = -ENOMEM; - goto ext_odb_buf_fail_out; - } - - db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev, - HNS_ROCE_V1_EXT_ODB_SIZE, - &odb_dma_addr, GFP_KERNEL); - if (!db->ext_db->odb_buf_list->buf) { - ret = -ENOMEM; - goto alloc_otr_db_buf_fail; - } - db->ext_db->odb_buf_list->map = odb_dma_addr; - - db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH); - hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT, - HNS_ROCE_V1_EXT_ODB_ALFUL); - } else - hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT, - HNS_ROCE_V1_ODB_ALFUL); - - hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod); - - return 0; - -alloc_otr_db_buf_fail: - kfree(db->ext_db->odb_buf_list); - -ext_odb_buf_fail_out: - if 
(sdb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, - db->ext_db->sdb_buf_list->buf, - db->ext_db->sdb_buf_list->map); - } - -alloc_sq_db_buf_fail: - if (sdb_ext_mod) - kfree(db->ext_db->sdb_buf_list); - -ext_sdb_buf_fail_out: - kfree(db->ext_db); - return ret; -} - -static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev, - struct ib_pd *pd) -{ - struct device *dev = &hr_dev->pdev->dev; - struct ib_qp_init_attr init_attr; - struct ib_qp *qp; - - memset(&init_attr, 0, sizeof(struct ib_qp_init_attr)); - init_attr.qp_type = IB_QPT_RC; - init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; - init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM; - init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM; - - qp = ib_create_qp(pd, &init_attr); - if (IS_ERR(qp)) { - dev_err(dev, "Create loop qp for mr free failed!"); - return NULL; - } - - return to_hr_qp(qp); -} - -static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_caps *caps = &hr_dev->caps; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct device *dev = &hr_dev->pdev->dev; - struct ib_cq_init_attr cq_init_attr; - struct ib_qp_attr attr = { 0 }; - struct hns_roce_qp *hr_qp; - struct ib_cq *cq; - struct ib_pd *pd; - union ib_gid dgid; - __be64 subnet_prefix; - int attr_mask = 0; - int ret; - int i, j; - u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; - u8 phy_port; - u32 port = 0; - u8 sl; - - /* Reserved cq for loop qp */ - cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2; - cq_init_attr.comp_vector = 0; - - cq = rdma_zalloc_drv_obj(ibdev, ib_cq); - if (!cq) - return -ENOMEM; - - ret = hns_roce_create_cq(cq, &cq_init_attr, NULL); - if (ret) { - dev_err(dev, "Create cq for reserved loop qp failed!"); - goto alloc_cq_failed; - } - free_mr->mr_free_cq = to_hr_cq(cq); - free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev; - free_mr->mr_free_cq->ib_cq.uobject = NULL; - free_mr->mr_free_cq->ib_cq.comp_handler = NULL; - free_mr->mr_free_cq->ib_cq.event_handler = NULL; - free_mr->mr_free_cq->ib_cq.cq_context = NULL; - atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); - - pd = rdma_zalloc_drv_obj(ibdev, ib_pd); - if (!pd) { - ret = -ENOMEM; - goto alloc_mem_failed; - } - - pd->device = ibdev; - ret = hns_roce_alloc_pd(pd, NULL); - if (ret) - goto alloc_pd_failed; - - free_mr->mr_free_pd = to_hr_pd(pd); - free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; - free_mr->mr_free_pd->ibpd.uobject = NULL; - free_mr->mr_free_pd->ibpd.__internal_mr = NULL; - atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0); - - attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE; - attr.pkey_index = 0; - attr.min_rnr_timer = 0; - /* Disable read ability */ - attr.max_dest_rd_atomic = 0; - attr.max_rd_atomic = 0; - /* Use arbitrary values as rq_psn and sq_psn */ - attr.rq_psn = 0x0808; - attr.sq_psn = 0x0808; - attr.retry_cnt = 7; - attr.rnr_retry = 7; - attr.timeout = 0x12; - attr.path_mtu = IB_MTU_256; - attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); - rdma_ah_set_static_rate(&attr.ah_attr, 3); - - subnet_prefix = cpu_to_be64(0xfe80000000000000LL); - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - phy_port = (i >= HNS_ROCE_MAX_PORTS) ? 
(i - 2) : - (i % HNS_ROCE_MAX_PORTS); - sl = i / HNS_ROCE_MAX_PORTS; - - for (j = 0; j < caps->num_ports; j++) { - if (hr_dev->iboe.phy_port[j] == phy_port) { - queue_en[i] = 1; - port = j; - break; - } - } - - if (!queue_en[i]) - continue; - - free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); - if (!free_mr->mr_free_qp[i]) { - dev_err(dev, "Create loop qp failed!\n"); - ret = -ENOMEM; - goto create_lp_qp_failed; - } - hr_qp = free_mr->mr_free_qp[i]; - - hr_qp->port = port; - hr_qp->phy_port = phy_port; - hr_qp->ibqp.qp_type = IB_QPT_RC; - hr_qp->ibqp.device = &hr_dev->ib_dev; - hr_qp->ibqp.uobject = NULL; - atomic_set(&hr_qp->ibqp.usecnt, 0); - hr_qp->ibqp.pd = pd; - hr_qp->ibqp.recv_cq = cq; - hr_qp->ibqp.send_cq = cq; - - rdma_ah_set_port_num(&attr.ah_attr, port + 1); - rdma_ah_set_sl(&attr.ah_attr, sl); - attr.port_num = port + 1; - - attr.dest_qp_num = hr_qp->qpn; - memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), - hr_dev->dev_addr[port], - ETH_ALEN); - - memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); - memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); - memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3); - dgid.raw[11] = 0xff; - dgid.raw[12] = 0xfe; - dgid.raw[8] ^= 2; - rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw); - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, - IB_QPS_RESET, IB_QPS_INIT); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN, - IB_QPS_INIT, IB_QPS_RTR); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, - IB_QPS_RTR, IB_QPS_RTS); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - } - - return 0; - -create_lp_qp_failed: - for (i -= 1; i >= 0; i--) { - hr_qp = free_mr->mr_free_qp[i]; - if (ib_destroy_qp(&hr_qp->ibqp)) - dev_err(dev, "Destroy qp %d for mr free failed!\n", i); - } - - hns_roce_dealloc_pd(pd, NULL); - -alloc_pd_failed: - kfree(pd); - -alloc_mem_failed: - hns_roce_destroy_cq(cq, NULL); -alloc_cq_failed: - kfree(cq); - return ret; -} - -static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp *hr_qp; - int ret; - int i; - - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - hr_qp = free_mr->mr_free_qp[i]; - if (!hr_qp) - continue; - - ret = ib_destroy_qp(&hr_qp->ibqp); - if (ret) - dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", - i, ret); - } - - hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL); - kfree(&free_mr->mr_free_cq->ib_cq); - hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); - kfree(&free_mr->mr_free_pd->ibpd); -} - -static int hns_roce_db_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - u32 sdb_ext_mod; - u32 odb_ext_mod; - u32 sdb_evt_mod; - u32 odb_evt_mod; - int ret; - - memset(db, 0, sizeof(*db)); - - /* Default DB mode */ - sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE; - odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE; - sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE; - odb_evt_mod = HNS_ROCE_ODB_POLL_MODE; - - db->sdb_ext_mod = sdb_ext_mod; - db->odb_ext_mod = odb_ext_mod; - - /* Init extend DB */ - ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod); - 
if (ret) { - dev_err(dev, "Failed in extend DB configuration.\n"); - return ret; - } - - hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod); - - return 0; -} - -static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work) -{ - struct hns_roce_recreate_lp_qp_work *lp_qp_work; - struct hns_roce_dev *hr_dev; - - lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work, - work); - hr_dev = to_hr_dev(lp_qp_work->ib_dev); - - hns_roce_v1_release_lp_qp(hr_dev); - - if (hns_roce_v1_rsv_lp_qp(hr_dev)) - dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n"); - - if (lp_qp_work->comp_flag) - complete(lp_qp_work->comp); - - kfree(lp_qp_work); -} - -static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) -{ - long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_recreate_lp_qp_work *lp_qp_work; - struct device *dev = &hr_dev->pdev->dev; - struct completion comp; - - lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work), - GFP_KERNEL); - if (!lp_qp_work) - return -ENOMEM; - - INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn); - - lp_qp_work->ib_dev = &(hr_dev->ib_dev); - lp_qp_work->comp = &comp; - lp_qp_work->comp_flag = 1; - - init_completion(lp_qp_work->comp); - - queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); - - while (end > 0) { - if (try_wait_for_completion(&comp)) - return 0; - msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); - end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE; - } - - lp_qp_work->comp_flag = 0; - if (try_wait_for_completion(&comp)) - return 0; - - dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n"); - return -ETIMEDOUT; -} - -static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); - struct device *dev = &hr_dev->pdev->dev; - struct ib_send_wr send_wr; - const struct ib_send_wr *bad_wr; - int ret; - - memset(&send_wr, 0, sizeof(send_wr)); - send_wr.next = NULL; - send_wr.num_sge = 0; - send_wr.send_flags = 0; - send_wr.sg_list = NULL; - send_wr.wr_id = (unsigned long long)&send_wr; - send_wr.opcode = IB_WR_RDMA_WRITE; - - ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr); - if (ret) { - dev_err(dev, "Post write wqe for mr free failed(%d)!", ret); - return ret; - } - - return 0; -} - -static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) -{ - unsigned long end = - msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; - struct hns_roce_mr_free_work *mr_work = - container_of(work, struct hns_roce_mr_free_work, work); - struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev); - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq; - struct hns_roce_mr *hr_mr = mr_work->mr; - struct device *dev = &hr_dev->pdev->dev; - struct ib_wc wc[HNS_ROCE_V1_RESV_QP]; - struct hns_roce_qp *hr_qp; - int ne = 0; - int ret; - int i; - - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - hr_qp = free_mr->mr_free_qp[i]; - if (!hr_qp) - continue; - ne++; - - ret = hns_roce_v1_send_lp_wqe(hr_qp); - if (ret) { - dev_err(dev, - "Send wqe (qp:0x%lx) for mr free failed(%d)!\n", - hr_qp->qpn, ret); - goto free_work; - } - } - - if (!ne) { - dev_err(dev, "Reserved loop qp is absent!\n"); - goto free_work; - } - - do { - ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); - if (ret < 0 && hr_qp) { -
dev_err(dev, - "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n", - hr_qp->qpn, ret, hr_mr->key, ne); - goto free_work; - } - ne -= ret; - usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000, - (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000); - } while (ne && time_before_eq(jiffies, end)); - - if (ne != 0) - dev_err(dev, - "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n", - hr_mr->key, ne); - -free_work: - if (mr_work->comp_flag) - complete(mr_work->comp); - kfree(mr_work); -} - -static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, - struct hns_roce_mr *mr, struct ib_udata *udata) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_mr_free_work *mr_work; - unsigned long start = jiffies; - struct completion comp; - int ret = 0; - - if (mr->enabled) { - if (hns_roce_hw_destroy_mpt(hr_dev, NULL, - key_to_hw_index(mr->key) & - (hr_dev->caps.num_mtpts - 1))) - dev_warn(dev, "DESTROY_MPT failed!\n"); - } - - mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL); - if (!mr_work) { - ret = -ENOMEM; - goto free_mr; - } - - INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn); - - mr_work->ib_dev = &(hr_dev->ib_dev); - mr_work->comp = &comp; - mr_work->comp_flag = 1; - mr_work->mr = (void *)mr; - init_completion(mr_work->comp); - - queue_work(free_mr->free_mr_wq, &(mr_work->work)); - - while (end > 0) { - if (try_wait_for_completion(&comp)) - goto free_mr; - msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); - end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE; - } - - mr_work->comp_flag = 0; - if (try_wait_for_completion(&comp)) - goto free_mr; - - dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key); - ret = -ETIMEDOUT; - -free_mr: - dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n", - mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); - - ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key)); - hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); - kfree(mr); - - return ret; -} - -static void hns_roce_db_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - - if (db->sdb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, - db->ext_db->sdb_buf_list->buf, - db->ext_db->sdb_buf_list->map); - kfree(db->ext_db->sdb_buf_list); - } - - if (db->odb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE, - db->ext_db->odb_buf_list->buf, - db->ext_db->odb_buf_list->map); - kfree(db->ext_db->odb_buf_list); - } - - kfree(db->ext_db); -} - -static int hns_roce_raq_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_raq_table *raq = &priv->raq_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t addr; - int raq_shift; - __le32 tmp; - u32 val; - int ret; - - raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL); - if (!raq->e_raq_buf) - return -ENOMEM; - - raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, - &addr, GFP_KERNEL); - if (!raq->e_raq_buf->buf) { - ret = -ENOMEM; - goto err_dma_alloc_raq; - } - raq->e_raq_buf->map = addr; - - /* Configure raq extended address.
48bit 4K align */ - roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12); - - /* Configure raq_shift */ - raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY); - val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M, - ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift); - /* - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M, - ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S, - raq->e_raq_buf->map >> 44); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val); - dev_dbg(dev, "Configure raq_shift 0x%x.\n", val); - - /* Configure raq threshold */ - val = roce_read(hr_dev, ROCEE_RAQ_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M, - ROCEE_RAQ_WL_ROCEE_RAQ_WL_S, - HNS_ROCE_V1_EXT_RAQ_WF); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_RAQ_WL_REG, val); - dev_dbg(dev, "Configure raq_wl 0x%x.\n", val); - - /* Enable extend raq */ - val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S, - POL_TIME_INTERVAL_VAL); - roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1); - roce_set_field(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S, - 2); - roce_set_bit(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val); - dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val); - - /* Enable raq drop */ - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val); - - return 0; - -err_dma_alloc_raq: - kfree(raq->e_raq_buf); - return ret; -} - -static void hns_roce_raq_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_raq_table *raq = &priv->raq_table; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf, - raq->e_raq_buf->map); - kfree(raq->e_raq_buf); -} - -static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) -{ - __le32 tmp; - u32 val; - - if (enable_flag) { - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - /* Open all ports */ - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, - ALL_PORT_VAL_OPEN); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - } else { - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - /* Close all ports */ - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - } -} - -static int hns_roce_bt_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, - GFP_KERNEL); - if 
(!priv->bt_table.qpc_buf.buf) - return -ENOMEM; - - priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map, - GFP_KERNEL); - if (!priv->bt_table.mtpt_buf.buf) { - ret = -ENOMEM; - goto err_failed_alloc_mtpt_buf; - } - - priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, - GFP_KERNEL); - if (!priv->bt_table.cqc_buf.buf) { - ret = -ENOMEM; - goto err_failed_alloc_cqc_buf; - } - - return 0; - -err_failed_alloc_cqc_buf: - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); - -err_failed_alloc_mtpt_buf: - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); - - return ret; -} - -static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); -} - -static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct device *dev = &hr_dev->pdev->dev; - - /* - * This buffer will be used for CQ's tptr(tail pointer), also - * named ci(customer index). Every CQ will use 2 bytes to save - * cqe ci in hip06. Hardware will read this area to get new ci - * when the queue is almost full. - */ - tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, - &tptr_buf->map, GFP_KERNEL); - if (!tptr_buf->buf) - return -ENOMEM; - - hr_dev->tptr_dma_addr = tptr_buf->map; - hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE; - - return 0; -} - -static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, - tptr_buf->buf, tptr_buf->map); -} - -static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr"); - if (!free_mr->free_mr_wq) { - dev_err(dev, "Create free mr workqueue failed!\n"); - return -ENOMEM; - } - - ret = hns_roce_v1_rsv_lp_qp(hr_dev); - if (ret) { - dev_err(dev, "Reserved loop qp failed(%d)!\n", ret); - destroy_workqueue(free_mr->free_mr_wq); - } - - return ret; -} - -static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - - destroy_workqueue(free_mr->free_mr_wq); - - hns_roce_v1_release_lp_qp(hr_dev); -} - -/** - * hns_roce_v1_reset - reset RoCE - * @hr_dev: RoCE device struct pointer - * @dereset: true -- drop reset, false -- reset - * return 0 - success , negative --fail - */ -static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) -{ - struct device_node *dsaf_node; - struct device *dev = &hr_dev->pdev->dev; - struct device_node *np = dev->of_node; - struct fwnode_handle 
*fwnode; - int ret; - - /* check if this is DT/ACPI case */ - if (dev_of_node(dev)) { - dsaf_node = of_parse_phandle(np, "dsaf-handle", 0); - if (!dsaf_node) { - dev_err(dev, "could not find dsaf-handle\n"); - return -EINVAL; - } - fwnode = &dsaf_node->fwnode; - } else if (is_acpi_device_node(dev->fwnode)) { - struct fwnode_reference_args args; - - ret = acpi_node_get_property_reference(dev->fwnode, - "dsaf-handle", 0, &args); - if (ret) { - dev_err(dev, "could not find dsaf-handle\n"); - return ret; - } - fwnode = args.fwnode; - } else { - dev_err(dev, "cannot read data from DT or ACPI\n"); - return -ENXIO; - } - - ret = hns_dsaf_roce_reset(fwnode, false); - if (ret) - return ret; - - if (dereset) { - msleep(SLEEP_TIME_INTERVAL); - ret = hns_dsaf_roce_reset(fwnode, true); - } - - return ret; -} - -static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_caps *caps = &hr_dev->caps; - int i; - - hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG); - hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG); - hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) | - ((u64)roce_read(hr_dev, - ROCEE_SYS_IMAGE_GUID_H_REG) << 32); - hr_dev->hw_rev = HNS_ROCE_HW_VER1; - - caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM; - caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM; - caps->min_wqes = HNS_ROCE_MIN_WQE_NUM; - caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM; - caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; - caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM; - caps->max_sq_sg = HNS_ROCE_V1_SG_NUM; - caps->max_rq_sg = HNS_ROCE_V1_SG_NUM; - caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE; - caps->num_uars = HNS_ROCE_V1_UAR_NUM; - caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM; - caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM; - caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM; - caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM; - caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM; - caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS; - caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM; - caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA; - caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA; - caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ; - caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ; - caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE; - caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE; - caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE; - caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE; - caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; - caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE; - caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; - caps->reserved_lkey = 0; - caps->reserved_pds = 0; - caps->reserved_mrws = 1; - caps->reserved_uars = 0; - caps->reserved_cqs = 0; - caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */ - caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE; - - for (i = 0; i < caps->num_ports; i++) - caps->pkey_table_len[i] = 1; - - for (i = 0; i < caps->num_ports; i++) { - /* Six ports shared 16 GID in v1 engine */ - if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports)) - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / - caps->num_ports; - else - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / - caps->num_ports + 1; - } - - caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM; - caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM; - caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG); - caps->max_mtu = IB_MTU_2048; - - return 0; -} - -static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) -{ - int ret; - u32 val; - __le32 tmp; - struct device *dev = &hr_dev->pdev->dev; - - /* 
DMAE user config */ - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M, - ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M, - ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S, - 1 << PAGES_SHIFT_16); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val); - - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M, - ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M, - ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S, - 1 << PAGES_SHIFT_16); - - ret = hns_roce_db_init(hr_dev); - if (ret) { - dev_err(dev, "doorbell init failed!\n"); - return ret; - } - - ret = hns_roce_raq_init(hr_dev); - if (ret) { - dev_err(dev, "raq init failed!\n"); - goto error_failed_raq_init; - } - - ret = hns_roce_bt_init(hr_dev); - if (ret) { - dev_err(dev, "bt init failed!\n"); - goto error_failed_bt_init; - } - - ret = hns_roce_tptr_init(hr_dev); - if (ret) { - dev_err(dev, "tptr init failed!\n"); - goto error_failed_tptr_init; - } - - ret = hns_roce_free_mr_init(hr_dev); - if (ret) { - dev_err(dev, "free mr init failed!\n"); - goto error_failed_free_mr_init; - } - - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP); - - return 0; - -error_failed_free_mr_init: - hns_roce_tptr_free(hr_dev); - -error_failed_tptr_init: - hns_roce_bt_free(hr_dev); - -error_failed_bt_init: - hns_roce_raq_free(hr_dev); - -error_failed_raq_init: - hns_roce_db_free(hr_dev); - return ret; -} - -static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) -{ - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); - hns_roce_free_mr_free(hr_dev); - hns_roce_tptr_free(hr_dev); - hns_roce_bt_free(hr_dev); - hns_roce_raq_free(hr_dev); - hns_roce_db_free(hr_dev); -} - -static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev) -{ - u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG); - - return (!!(status & (1 << HCR_GO_BIT))); -} - -static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, - u16 op, u16 token, int event) -{ - u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG); - unsigned long end; - u32 val = 0; - __le32 tmp; - - end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies; - while (hns_roce_v1_cmd_pending(hr_dev)) { - if (time_after(jiffies, end)) { - dev_err(hr_dev->dev, "jiffies=%d end=%d\n", - (int)jiffies, (int)end); - return -EAGAIN; - } - cond_resched(); - } - - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S, - op); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M, - ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier); - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event); - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M, - ROCEE_MB6_ROCEE_MB_TOKEN_S, token); - - val = le32_to_cpu(tmp); - writeq(in_param, hcr + 0); - writeq(out_param, hcr + 2); - writel(in_modifier, hcr + 4); - /* Memory barrier */ - wmb(); - - writel(val, hcr + 5); - - return 0; -} - -static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, - unsigned int timeout) -{ - u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG; - unsigned long end; - u32 status = 0; - - end = msecs_to_jiffies(timeout) + jiffies; - while (hns_roce_v1_cmd_pending(hr_dev) && 
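/*
 * [Editorial aside, not part of the original diff] The DMAE user-config
 * writes above all follow one read-modify-write shape: read the 32-bit
 * register, patch a field in a __le32 copy, convert back and write the
 * merged word. A sketch reusing the driver's own helpers (the mask and
 * shift values here are made up):
 */
#define CFG_FIELD_M	GENMASK(19, 16)
#define CFG_FIELD_S	16

static void rmw_reg_field(struct hns_roce_dev *hr_dev, u32 reg, u32 fval)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, reg);		/* current register value */
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, CFG_FIELD_M, CFG_FIELD_S, fval);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, reg, val);		/* write merged value back */
}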
time_before(jiffies, end)) - cond_resched(); - - if (hns_roce_v1_cmd_pending(hr_dev)) { - dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n"); - return -ETIMEDOUT; - } - - status = le32_to_cpu((__force __le32) - __raw_readl(hcr + HCR_STATUS_OFFSET)); - if ((status & STATUS_MASK) != 0x1) { - dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status); - return -EBUSY; - } - - return 0; -} - -static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port, - int gid_index, const union ib_gid *gid, - const struct ib_gid_attr *attr) -{ - unsigned long flags; - u32 *p = NULL; - u8 gid_idx; - - gid_idx = hns_get_gid_index(hr_dev, port, gid_index); - - spin_lock_irqsave(&hr_dev->iboe.lock, flags); - - p = (u32 *)&gid->raw[0]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[4]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[8]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[0xc]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); - - return 0; -} - -static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, - const u8 *addr) -{ - u32 reg_smac_l; - u16 reg_smac_h; - __le32 tmp; - u16 *p_h; - u32 *p; - u32 val; - - /* - * When mac changed, loopback may fail - * because of smac not equal to dmac. - * We Need to release and create reserved qp again. - */ - if (hr_dev->hw->dereg_mr) { - int ret; - - ret = hns_roce_v1_recreate_lp_qp(hr_dev); - if (ret && ret != -ETIMEDOUT) - return ret; - } - - p = (u32 *)(&addr[0]); - reg_smac_l = *p; - roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG + - PHY_PORT_OFFSET * phy_port); - - val = roce_read(hr_dev, - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); - tmp = cpu_to_le32(val); - p_h = (u16 *)(&addr[4]); - reg_smac_h = *p_h; - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M, - ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, - val); - - return 0; -} - -static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, - enum ib_mtu mtu) -{ - __le32 tmp; - u32 val; - - val = roce_read(hr_dev, - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M, - ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, - val); -} - -static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, - struct hns_roce_mr *mr, - unsigned long mtpt_idx) -{ - u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 }; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_v1_mpt_entry *mpt_entry; - dma_addr_t pbl_ba; - int count; - int i; - - /* MPT filled into mailbox buf */ - mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf; - memset(mpt_entry, 0, sizeof(*mpt_entry)); - - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M, - MPT_BYTE_4_KEY_STATE_S, KEY_VALID); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M, - MPT_BYTE_4_KEY_S, mr->key); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M, - MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0); - 
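/*
 * [Editorial aside, not part of the original diff] Both mailbox paths
 * above poll the GO bit against a jiffies deadline while yielding the
 * CPU. The idiom in isolation (wait_hw_idle() is a hypothetical name):
 */
static int wait_hw_idle(struct hns_roce_dev *hr_dev, unsigned int ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(ms);

	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;	/* deadline passed */
		cond_resched();			/* let other tasks run */
	}

	return 0;
}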
roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S, - (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M, - MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S, - (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S, - (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S, - (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S, - 0); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0); - - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, - MPT_BYTE_12_PBL_ADDR_H_S, 0); - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M, - MPT_BYTE_12_MW_BIND_COUNTER_S, 0); - - mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova); - mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32)); - mpt_entry->length = cpu_to_le32((u32)mr->size); - - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M, - MPT_BYTE_28_PD_S, mr->pd); - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M, - MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx); - roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, - MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); - - /* DMA memory register */ - if (mr->type == MR_TYPE_DMA) - return 0; - - count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - ARRAY_SIZE(pages), &pbl_ba); - if (count < 1) { - ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count); - return -ENOBUFS; - } - - /* Register user mr */ - for (i = 0; i < count; i++) { - switch (i) { - case 0: - mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_36, - MPT_BYTE_36_PA0_H_M, - MPT_BYTE_36_PA0_H_S, - (u32)(pages[i] >> PAGES_SHIFT_32)); - break; - case 1: - roce_set_field(mpt_entry->mpt_byte_36, - MPT_BYTE_36_PA1_L_M, - MPT_BYTE_36_PA1_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_40, - MPT_BYTE_40_PA1_H_M, - MPT_BYTE_40_PA1_H_S, - (u32)(pages[i] >> PAGES_SHIFT_24)); - break; - case 2: - roce_set_field(mpt_entry->mpt_byte_40, - MPT_BYTE_40_PA2_L_M, - MPT_BYTE_40_PA2_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_44, - MPT_BYTE_44_PA2_H_M, - MPT_BYTE_44_PA2_H_S, - (u32)(pages[i] >> PAGES_SHIFT_16)); - break; - case 3: - roce_set_field(mpt_entry->mpt_byte_44, - MPT_BYTE_44_PA3_L_M, - MPT_BYTE_44_PA3_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_48, - MPT_BYTE_48_PA3_H_M, - MPT_BYTE_48_PA3_H_S, - (u32)(pages[i] >> PAGES_SHIFT_8)); - break; - case 4: - mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_56, - MPT_BYTE_56_PA4_H_M, - MPT_BYTE_56_PA4_H_S, - (u32)(pages[i] >> PAGES_SHIFT_32)); - break; - case 5: - roce_set_field(mpt_entry->mpt_byte_56, - MPT_BYTE_56_PA5_L_M, - MPT_BYTE_56_PA5_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_60, - MPT_BYTE_60_PA5_H_M, - MPT_BYTE_60_PA5_H_S, - (u32)(pages[i] >> PAGES_SHIFT_24)); - break; - case 6: - roce_set_field(mpt_entry->mpt_byte_60, - MPT_BYTE_60_PA6_L_M, - MPT_BYTE_60_PA6_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_64, - MPT_BYTE_64_PA6_H_M, - MPT_BYTE_64_PA6_H_S, - (u32)(pages[i] >> PAGES_SHIFT_16)); - break; - default: - 
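/*
 * [Editorial aside, not part of the original diff] The switch above
 * scatters each 64-bit PBL page address across split low/high register
 * fields; only the shift (PAGES_SHIFT_32/24/16/8) differs per slot.
 * The underlying arithmetic reduces to two illustrative helpers (not
 * in the driver):
 */
static inline u32 pa_lo(u64 pa)			{ return (u32)pa; }
static inline u32 pa_hi(u64 pa, unsigned int s)	{ return (u32)(pa >> s); }
/* e.g. PA1 keeps pa_lo() in mpt_byte_36 and pa_hi(pa, 24) in mpt_byte_40 */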
break; - } - } - - mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba); - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, - MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba)); - - return 0; -} - -static void *get_cqe(struct hns_roce_cq *hr_cq, int n) -{ - return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE); -} - -static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) -{ - struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe); - - /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */ - return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^ - !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL; -} - -static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) -{ - return get_sw_cqe(hr_cq, hr_cq->cons_index); -} - -static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) -{ - __le32 doorbell[2]; - - doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1)); - doorbell[1] = 0; - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn); - - hns_roce_write64_k(doorbell, hr_cq->db_reg); -} - -static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, - struct hns_roce_srq *srq) -{ - struct hns_roce_cqe *cqe, *dest; - u32 prod_index; - int nfreed = 0; - u8 owner_bit; - - for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index); - ++prod_index) { - if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) - break; - } - - /* - * Now backwards through the CQ, removing CQ entries - * that match our QP by overwriting them with next entries. - */ - while ((int) --prod_index - (int) hr_cq->cons_index >= 0) { - cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe); - if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) & - HNS_ROCE_CQE_QPN_MASK) == qpn) { - /* In v1 engine, not support SRQ */ - ++nfreed; - } else if (nfreed) { - dest = get_cqe(hr_cq, (prod_index + nfreed) & - hr_cq->ib_cq.cqe); - owner_bit = roce_get_bit(dest->cqe_byte_4, - CQE_BYTE_4_OWNER_S); - memcpy(dest, cqe, sizeof(*cqe)); - roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S, - owner_bit); - } - } - - if (nfreed) { - hr_cq->cons_index += nfreed; - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); - } -} - -static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, - struct hns_roce_srq *srq) -{ - spin_lock_irq(&hr_cq->lock); - __hns_roce_v1_cq_clean(hr_cq, qpn, srq); - spin_unlock_irq(&hr_cq->lock); -} - -static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev, - struct hns_roce_cq *hr_cq, void *mb_buf, - u64 *mtts, dma_addr_t dma_handle) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct hns_roce_cq_context *cq_context = mb_buf; - dma_addr_t tptr_dma_addr; - int offset; - - memset(cq_context, 0, sizeof(*cq_context)); - - /* Get the tptr for this CQ. 
*/ - offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE; - tptr_dma_addr = tptr_buf->map + offset; - hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset); - - /* Register cq_context members */ - roce_set_field(cq_context->cqc_byte_4, - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M, - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID); - roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M, - CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn); - - cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle); - - roce_set_field(cq_context->cqc_byte_12, - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M, - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S, - ((u64)dma_handle >> 32)); - roce_set_field(cq_context->cqc_byte_12, - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M, - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S, - ilog2(hr_cq->cq_depth)); - roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M, - CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector); - - cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0])); - - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M, - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32); - /* Dedicated hardware, directly set 0 */ - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M, - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0); - /** - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M, - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S, - tptr_dma_addr >> 44); - - cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12)); - - roce_set_field(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M, - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S, - 0); - /* The initial value of cq's ci is 0 */ - roce_set_field(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M, - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0); -} - -static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, - enum ib_cq_notify_flags flags) -{ - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - u32 notification_flag; - __le32 doorbell[2] = {}; - - notification_flag = (flags & IB_CQ_SOLICITED_MASK) == - IB_CQ_SOLICITED ? 
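/*
 * [Editorial aside, not part of the original diff] get_sw_cqe() above
 * uses the classic owner-bit convention: a CQE belongs to software when
 * its owner bit differs from the "lap" bit of the consumer index, which
 * flips once per trip around the power-of-two ring. Stand-alone form:
 */
static bool cqe_is_sw_owned(u32 owner_bit, u32 cons_index, u32 cq_depth)
{
	/* cq_depth must be a power of two for the lap bit to work */
	return (owner_bit ^ !!(cons_index & cq_depth)) != 0;
}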
CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; - /* - * flags = 0; Notification Flag = 1, next - * flags = 1; Notification Flag = 0, solicited - */ - doorbell[0] = - cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1)); - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, - hr_cq->cqn | notification_flag); - - hns_roce_write64_k(doorbell, hr_cq->db_reg); - - return 0; -} - -static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, - struct hns_roce_qp **cur_qp, struct ib_wc *wc) -{ - int qpn; - int is_send; - u16 wqe_ctr; - u32 status; - u32 opcode; - struct hns_roce_cqe *cqe; - struct hns_roce_qp *hr_qp; - struct hns_roce_wq *wq; - struct hns_roce_wqe_ctrl_seg *sq_wqe; - struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); - struct device *dev = &hr_dev->pdev->dev; - - /* Find cqe according to the consumer index */ - cqe = next_cqe_sw(hr_cq); - if (!cqe) - return -EAGAIN; - - ++hr_cq->cons_index; - /* Memory barrier */ - rmb(); - /* 0->SQ, 1->RQ */ - is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S)); - - /* Local_qpn in UD cqe is always 1, so the real qpn needs to be computed */ - if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) <= 1) { - qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M, - CQE_BYTE_20_PORT_NUM_S) + - roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) * - HNS_ROCE_MAX_PORTS; - } else { - qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S); - } - - if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) { - hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); - if (unlikely(!hr_qp)) { - dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n", - hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK)); - return -EINVAL; - } - - *cur_qp = hr_qp; - } - - wc->qp = &(*cur_qp)->ibqp; - wc->vendor_err = 0; - - status = roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_STATUS_OF_THE_OPERATION_M, - CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) & - HNS_ROCE_CQE_STATUS_MASK; - switch (status) { - case HNS_ROCE_CQE_SUCCESS: - wc->status = IB_WC_SUCCESS; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR: - wc->status = IB_WC_LOC_LEN_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR: - wc->status = IB_WC_LOC_QP_OP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR: - wc->status = IB_WC_LOC_PROT_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR: - wc->status = IB_WC_WR_FLUSH_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR: - wc->status = IB_WC_MW_BIND_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR: - wc->status = IB_WC_BAD_RESP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR: - wc->status = IB_WC_LOC_ACCESS_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: - wc->status = IB_WC_REM_INV_REQ_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR: - wc->status = IB_WC_REM_ACCESS_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR: - wc->status = IB_WC_REM_OP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: - wc->status = IB_WC_RETRY_EXC_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR: - wc->status = 
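/*
 * [Editorial aside, not part of the original diff] The CQ doorbells in
 * hns_roce_v1_cq_set_ci() and hns_roce_v1_req_notify_cq() above are two
 * 32-bit words pushed with hns_roce_write64_k(). A sketch of the
 * payload, reusing the removed code's field names (ring_cq_db() and the
 * cmd parameter are illustrative):
 */
static void ring_cq_db(struct hns_roce_cq *hr_cq, u32 ci, u32 cmd)
{
	__le32 db[2] = {};

	db[0] = cpu_to_le32(ci & ((hr_cq->cq_depth << 1) - 1));
	roce_set_bit(db[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(db[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, cmd);
	roce_set_field(db[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);

	hns_roce_write64_k(db, hr_cq->db_reg);
}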
IB_WC_RNR_RETRY_EXC_ERR; - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - - /* CQE status error, directly return */ - if (wc->status != IB_WC_SUCCESS) - return 0; - - if (is_send) { - /* SQ corresponds to CQE */ - sq_wqe = hns_roce_get_send_wqe(*cur_qp, - roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S) & - ((*cur_qp)->sq.wqe_cnt-1)); - switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) { - case HNS_ROCE_WQE_OPCODE_SEND: - wc->opcode = IB_WC_SEND; - break; - case HNS_ROCE_WQE_OPCODE_RDMA_READ: - wc->opcode = IB_WC_RDMA_READ; - wc->byte_len = le32_to_cpu(cqe->byte_cnt); - break; - case HNS_ROCE_WQE_OPCODE_RDMA_WRITE: - wc->opcode = IB_WC_RDMA_WRITE; - break; - case HNS_ROCE_WQE_OPCODE_LOCAL_INV: - wc->opcode = IB_WC_LOCAL_INV; - break; - case HNS_ROCE_WQE_OPCODE_UD_SEND: - wc->opcode = IB_WC_SEND; - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ? - IB_WC_WITH_IMM : 0); - - wq = &(*cur_qp)->sq; - if ((*cur_qp)->sq_signal_bits) { - /* - * If sq_signal_bits is 1, - * the tail pointer is first updated to the wqe - * that the current cqe corresponds to - */ - wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S); - wq->tail += (wqe_ctr - (u16)wq->tail) & - (wq->wqe_cnt - 1); - } - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; - } else { - /* RQ corresponds to CQE */ - wc->byte_len = le32_to_cpu(cqe->byte_cnt); - opcode = roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_OPERATION_TYPE_M, - CQE_BYTE_4_OPERATION_TYPE_S) & - HNS_ROCE_CQE_OPCODE_MASK; - switch (opcode) { - case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE: - wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; - wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = - cpu_to_be32(le32_to_cpu(cqe->immediate_data)); - break; - case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE: - if (roce_get_bit(cqe->cqe_byte_4, - CQE_BYTE_4_IMM_INDICATOR_S)) { - wc->opcode = IB_WC_RECV; - wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = cpu_to_be32( - le32_to_cpu(cqe->immediate_data)); - } else { - wc->opcode = IB_WC_RECV; - wc->wc_flags = 0; - } - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - - /* Update tail pointer, record wr_id */ - wq = &(*cur_qp)->rq; - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; - wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M, - CQE_BYTE_20_SL_S); - wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20, - CQE_BYTE_20_REMOTE_QPN_M, - CQE_BYTE_20_REMOTE_QPN_S); - wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20, - CQE_BYTE_20_GRH_PRESENT_S) ? 
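/*
 * [Editorial aside, not part of the original diff] The tail updates
 * above rely on power-of-two ring arithmetic: masking with
 * (wqe_cnt - 1) wraps indices, and the masked u16 subtraction lets the
 * tail jump forward to a counter reported by hardware even across
 * wrap-around. In isolation (wq_advance_to() is a hypothetical name):
 */
static void wq_advance_to(struct hns_roce_wq *wq, u16 wqe_ctr)
{
	/* correct even when the u16 subtraction wraps */
	wq->tail += (wqe_ctr - (u16)wq->tail) & (wq->wqe_cnt - 1);
}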
- IB_WC_GRH : 0); - wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28, - CQE_BYTE_28_P_KEY_IDX_M, - CQE_BYTE_28_P_KEY_IDX_S); - } - - return 0; -} - -int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) -{ - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - struct hns_roce_qp *cur_qp = NULL; - unsigned long flags; - int npolled; - int ret; - - spin_lock_irqsave(&hr_cq->lock, flags); - - for (npolled = 0; npolled < num_entries; ++npolled) { - ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled); - if (ret) - break; - } - - if (npolled) { - *hr_cq->tptr_addr = hr_cq->cons_index & - ((hr_cq->cq_depth << 1) - 1); - - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); - } - - spin_unlock_irqrestore(&hr_cq->lock, flags); - - if (ret == 0 || ret == -EAGAIN) - return npolled; - else - return ret; -} - -static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - long end = HW_SYNC_TIMEOUT_MSECS; - __le32 bt_cmd_val[2] = {0}; - unsigned long flags = 0; - void __iomem *bt_cmd; - u64 bt_ba = 0; - - switch (table->type) { - case HEM_TYPE_QPC: - bt_ba = priv->bt_table.qpc_buf.map >> 12; - break; - case HEM_TYPE_MTPT: - bt_ba = priv->bt_table.mtpt_buf.map >> 12; - break; - case HEM_TYPE_CQC: - bt_ba = priv->bt_table.cqc_buf.map >> 12; - break; - case HEM_TYPE_SRQC: - dev_dbg(dev, "HEM_TYPE_SRQC not support.\n"); - return -EINVAL; - default: - return 0; - } - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - - spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!end) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, - flags); - return -EBUSY; - } - } else { - break; - } - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); - end -= HW_SYNC_SLEEP_TIME_INTERVAL; - } - - bt_cmd_val[0] = cpu_to_le32(bt_ba); - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); - hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); - - return 0; -} - -static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, - enum hns_roce_qp_state cur_state, - enum hns_roce_qp_state new_state, - struct hns_roce_qp_context *context, - struct hns_roce_qp *hr_qp) -{ - static const u16 - op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = { - [HNS_ROCE_QP_STATE_RST] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, - }, - [HNS_ROCE_QP_STATE_INIT] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - /* Note: In v1 engine, HW doesn't support RST2INIT. - * We use RST2INIT cmd instead of INIT2INIT. 
- */ - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, - [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP, - }, - [HNS_ROCE_QP_STATE_RTR] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP, - }, - [HNS_ROCE_QP_STATE_RTS] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP, - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP, - }, - [HNS_ROCE_QP_STATE_SQD] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP, - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP, - }, - [HNS_ROCE_QP_STATE_ERR] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - } - }; - - struct hns_roce_cmd_mailbox *mailbox; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - if (cur_state >= HNS_ROCE_QP_NUM_STATE || - new_state >= HNS_ROCE_QP_NUM_STATE || - !op[cur_state][new_state]) { - dev_err(dev, "[modify_qp]not support state %d to %d\n", - cur_state, new_state); - return -EINVAL; - } - - if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP) - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, - HNS_ROCE_CMD_2RST_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - - if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP) - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, - HNS_ROCE_CMD_2ERR_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - memcpy(mailbox->buf, context, sizeof(*context)); - - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, - op[cur_state][new_state], - HNS_ROCE_CMD_TIMEOUT_MSECS); - - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - return ret; -} - -static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, - u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba) -{ - struct ib_device *ibdev = &hr_dev->ib_dev; - int count; - - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba); - if (count < 1) { - ibdev_err(ibdev, "Failed to find SQ ba\n"); - return -ENOBUFS; - } - - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba, - 1, NULL); - if (!count) { - ibdev_err(ibdev, "Failed to find RQ ba\n"); - return -ENOBUFS; - } - - return 0; -} - -static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_sqp_context *context; - dma_addr_t dma_handle = 0; - u32 __iomem *addr; - u64 sq_ba = 0; - u64 rq_ba = 0; - __le32 tmp; - u32 reg_val; - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - /* Search QP buf's MTTs */ - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) - goto out; - - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { - roce_set_field(context->qp1c_bytes_4, - QP1C_BYTES_4_SQ_WQE_SHIFT_M, - QP1C_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qp1c_bytes_4, - QP1C_BYTES_4_RQ_WQE_SHIFT_M, - QP1C_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M, - QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn); - - 
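/*
 * [Editorial aside, not part of the original diff] The op[][] table in
 * hns_roce_v1_qp_modify() above maps (cur_state, new_state) to a
 * mailbox command, with an all-zero entry meaning "transition not
 * supported". A caller-side sketch, assuming the table were visible at
 * file scope (it is function-local in the removed code):
 */
static int qp_transition_cmd(enum hns_roce_qp_state cur,
			     enum hns_roce_qp_state new)
{
	if (cur >= HNS_ROCE_QP_NUM_STATE || new >= HNS_ROCE_QP_NUM_STATE)
		return -EINVAL;

	return op[cur][new] ? op[cur][new] : -EINVAL;
}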
context->sq_rq_bt_l = cpu_to_le32(dma_handle); - roce_set_field(context->qp1c_bytes_12, - QP1C_BYTES_12_SQ_RQ_BT_H_M, - QP1C_BYTES_12_SQ_RQ_BT_H_S, - upper_32_bits(dma_handle)); - - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, - QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M, - QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); - roce_set_bit(context->qp1c_bytes_16, - QP1C_BYTES_16_SIGNALING_TYPE_S, - hr_qp->sq_signal_bits); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, - 1); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, - 1); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S, - 0); - - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M, - QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head); - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M, - QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index); - - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); - - roce_set_field(context->qp1c_bytes_28, - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M, - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S, - upper_32_bits(rq_ba)); - roce_set_field(context->qp1c_bytes_28, - QP1C_BYTES_28_RQ_CUR_IDX_M, - QP1C_BYTES_28_RQ_CUR_IDX_S, 0); - - roce_set_field(context->qp1c_bytes_32, - QP1C_BYTES_32_RX_CQ_NUM_M, - QP1C_BYTES_32_RX_CQ_NUM_S, - to_hr_cq(ibqp->recv_cq)->cqn); - roce_set_field(context->qp1c_bytes_32, - QP1C_BYTES_32_TX_CQ_NUM_M, - QP1C_BYTES_32_TX_CQ_NUM_S, - to_hr_cq(ibqp->send_cq)->cqn); - - context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qp1c_bytes_40, - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M, - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - roce_set_field(context->qp1c_bytes_40, - QP1C_BYTES_40_SQ_CUR_IDX_M, - QP1C_BYTES_40_SQ_CUR_IDX_S, 0); - - /* Copy context to QP1C register */ - addr = (u32 __iomem *)(hr_dev->reg_base + - ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context)); - - writel(le32_to_cpu(context->qp1c_bytes_4), addr); - writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1); - writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2); - writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3); - writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4); - writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5); - writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6); - writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7); - writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8); - writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9); - } - - /* Modify QP1C status */ - reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context)); - tmp = cpu_to_le32(reg_val); - roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M, - ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state); - reg_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context), reg_val); - - hr_qp->state = new_state; - if (new_state == IB_QPS_RESET) { - hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, - ibqp->srq ? 
to_hr_srq(ibqp->srq) : NULL); - if (ibqp->send_cq != ibqp->recv_cq) - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), - hr_qp->qpn, NULL); - - hr_qp->rq.head = 0; - hr_qp->rq.tail = 0; - hr_qp->sq.head = 0; - hr_qp->sq.tail = 0; - } - - kfree(context); - return 0; - -out: - kfree(context); - return -EINVAL; -} - -static bool check_qp_state(enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - static const bool sm[][IB_QPS_ERR + 1] = { - [IB_QPS_RESET] = { [IB_QPS_RESET] = true, - [IB_QPS_INIT] = true }, - [IB_QPS_INIT] = { [IB_QPS_RESET] = true, - [IB_QPS_INIT] = true, - [IB_QPS_RTR] = true, - [IB_QPS_ERR] = true }, - [IB_QPS_RTR] = { [IB_QPS_RESET] = true, - [IB_QPS_RTS] = true, - [IB_QPS_ERR] = true }, - [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }, - [IB_QPS_SQD] = {}, - [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } - }; - - return sm[cur_state][new_state]; -} - -static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_context *context; - const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); - dma_addr_t dma_handle_2 = 0; - dma_addr_t dma_handle = 0; - __le32 doorbell[2] = {0}; - u64 *mtts_2 = NULL; - int ret = -EINVAL; - const u8 *smac; - u64 sq_ba = 0; - u64 rq_ba = 0; - u32 port; - u32 port_num; - u8 *dmac; - - if (!check_qp_state(cur_state, new_state)) { - ibdev_err(ibqp->device, - "not support QP(%u) status from %d to %d\n", - ibqp->qp_num, cur_state, new_state); - return -EINVAL; - } - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - /* Search qp buf's mtts */ - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) - goto out; - - /* Search IRRL's mtts */ - mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, - hr_qp->qpn, &dma_handle_2); - if (mtts_2 == NULL) { - dev_err(dev, "qp irrl_table find failed\n"); - goto out; - } - - /* - * Reset to init - * Mandatory param: - * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS - * Optional param: NA - */ - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, - to_hr_qp_type(hr_qp->ibqp.qp_type)); - - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) - ); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) - ); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_PD_M, - 
QP_CONTEXT_QPC_BYTES_4_PD_S, - to_hr_pd(ibqp->pd)->pdn); - hr_qp->access_flags = attr->qp_access_flags; - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, - to_hr_cq(ibqp->send_cq)->cqn); - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, - to_hr_cq(ibqp->recv_cq)->cqn); - - if (ibqp->srq) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, - to_hr_srq(ibqp->srq)->srqn); - - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - attr->pkey_index); - hr_qp->pkey_index = attr->pkey_index; - roce_set_field(context->qpc_bytes_16, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, - to_hr_qp_type(hr_qp->ibqp.qp_type)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); - if (attr_mask & IB_QP_ACCESS_FLAGS) { - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(attr->qp_access_flags & - IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(attr->qp_access_flags & - IB_ACCESS_REMOTE_WRITE)); - } else { - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(hr_qp->access_flags & - IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(hr_qp->access_flags & - IB_ACCESS_REMOTE_WRITE)); - } - - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_PD_M, - QP_CONTEXT_QPC_BYTES_4_PD_S, - to_hr_pd(ibqp->pd)->pdn); - - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, - to_hr_cq(ibqp->send_cq)->cqn); - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, - to_hr_cq(ibqp->recv_cq)->cqn); - - if (ibqp->srq) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, - to_hr_srq(ibqp->srq)->srqn); - if (attr_mask & IB_QP_PKEY_INDEX) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - attr->pkey_index); - else - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - hr_qp->pkey_index); - - roce_set_field(context->qpc_bytes_16, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { - if ((attr_mask & IB_QP_ALT_PATH) || - (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_PKEY_INDEX) || - (attr_mask & IB_QP_QKEY)) { - dev_err(dev, "INIT2RTR attr_mask 
error\n"); - goto out; - } - - dmac = (u8 *)attr->ah_attr.roce.dmac; - - context->sq_rq_bt_l = cpu_to_le32(dma_handle); - roce_set_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M, - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S, - upper_32_bits(dma_handle)); - roce_set_bit(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S, - 1); - roce_set_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S, - attr->min_rnr_timer); - context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2)); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M, - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S, - ((u32)(dma_handle_2 >> 32)) & - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M, - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0); - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S, - 1); - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, - hr_qp->sq_signal_bits); - - port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : - hr_qp->port; - smac = (const u8 *)hr_dev->dev_addr[port]; - /* when dmac equals smac or loop_idc is 1, it should loopback */ - if (ether_addr_equal_unaligned(dmac, smac) || - hr_dev->loop_idc == 0x1) - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1); - - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S, - rdma_ah_get_ah_flags(&attr->ah_attr)); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S, - ilog2((unsigned int)attr->max_dest_rd_atomic)); - - if (attr_mask & IB_QP_DEST_QPN) - roce_set_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S, - attr->dest_qp_num); - - /* Configure GID index */ - port_num = rdma_ah_get_port_num(&attr->ah_attr); - roce_set_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, - port_num - 1, - grh->sgid_index)); - - memcpy(&(context->dmac_l), dmac, 4); - - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S, - *((u16 *)(&dmac[4]))); - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M, - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S, - rdma_ah_get_static_rate(&attr->ah_attr)); - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S, - grh->hop_limit); - - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S, - grh->flow_label); - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, - QP_CONTEXT_QPC_BYTES_48_TCLASS_S, - grh->traffic_class); - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_MTU_M, - QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu); - - memcpy(context->dgid, grh->dgid.raw, - sizeof(grh->dgid.raw)); - - dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l, - roce_get_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)); - - roce_set_field(context->qpc_bytes_68, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(context->qpc_bytes_68, - QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, - 
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); - - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); - - roce_set_field(context->qpc_bytes_76, - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S, - upper_32_bits(rq_ba)); - roce_set_field(context->qpc_bytes_76, - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M, - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0); - - context->rx_rnr_time = 0; - - roce_set_field(context->qpc_bytes_84, - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M, - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S, - attr->rq_psn - 1); - roce_set_field(context->qpc_bytes_84, - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M, - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0); - - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S, - attr->rq_psn); - roce_set_bit(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0); - roce_set_bit(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0); - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S, - 0); - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S, - 0); - - context->dma_length = 0; - context->r_key = 0; - context->va_l = 0; - context->va_h = 0; - - roce_set_field(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0); - roce_set_bit(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0); - roce_set_bit(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0); - - roce_set_field(context->qpc_bytes_112, - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M, - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0); - roce_set_field(context->qpc_bytes_112, - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M, - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0); - - /* For chip resp ack */ - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->phy_port); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { - /* If exist optional param, return error */ - if ((attr_mask & IB_QP_ALT_PATH) || - (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_QKEY) || - (attr_mask & IB_QP_PATH_MIG_STATE) || - (attr_mask & IB_QP_CUR_STATE) || - (attr_mask & IB_QP_MIN_RNR_TIMER)) { - dev_err(dev, "RTR2RTS attr_mask error\n"); - goto out; - } - - context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qpc_bytes_120, - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - - roce_set_field(context->qpc_bytes_124, - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M, - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0); - roce_set_field(context->qpc_bytes_124, - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M, - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0); - - roce_set_field(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S, - attr->sq_psn); - roce_set_bit(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0); - roce_set_field(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M, - 
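/*
 * [Editorial aside, not part of the original diff] The RTR2RTS branch
 * above rejects optional attributes flag by flag; the same check can be
 * collapsed into a single mask test (an illustrative rewrite, not the
 * driver's code):
 */
#define RTR2RTS_FORBIDDEN_MASK (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | \
				IB_QP_QKEY | IB_QP_PATH_MIG_STATE | \
				IB_QP_CUR_STATE | IB_QP_MIN_RNR_TIMER)

static bool rtr2rts_attr_mask_ok(int attr_mask)
{
	return !(attr_mask & RTR2RTS_FORBIDDEN_MASK);
}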
QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S, - 0); - roce_set_bit(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0); - - roce_set_field(context->qpc_bytes_132, - QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M, - QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0); - roce_set_field(context->qpc_bytes_132, - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M, - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0); - - roce_set_field(context->qpc_bytes_136, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S, - attr->sq_psn); - roce_set_field(context->qpc_bytes_136, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S, - attr->sq_psn); - - roce_set_field(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S, - (attr->sq_psn >> SQ_PSN_SHIFT)); - roce_set_field(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0); - roce_set_bit(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); - - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, - attr->retry_cnt); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, - attr->rnr_retry); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_LSN_M, - QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); - - context->rnr_retry = 0; - - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, - attr->retry_cnt); - if (attr->timeout < 0x12) { - dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n", - attr->timeout); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - 0x12); - } else { - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - attr->timeout); - } - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, - attr->rnr_retry); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->phy_port); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S, - ilog2((unsigned int)attr->max_rd_atomic)); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M, - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0); - context->pkt_use_len = 0; - - roce_set_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn); - roce_set_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M, - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0); - - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S, - attr->sq_psn); - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M, 
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0); - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M, - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0); - context->sge_use_len = 0; - - roce_set_field(context->qpc_bytes_176, - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0); - roce_set_field(context->qpc_bytes_176, - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S, - 0); - roce_set_field(context->qpc_bytes_180, - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0); - roce_set_field(context->qpc_bytes_180, - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0); - - context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - roce_set_bit(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0); - roce_set_field(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S, - 0); - } - - /* Every state migration must update the QP state field */ - roce_set_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state); - - /* SW passes the context to HW */ - ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state), - to_hns_roce_state(new_state), context, - hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_modify failed\n"); - goto out; - } - - /* - * Because the driver uses RST2INIT instead of INIT2INIT, - * HW needs to flush the RQ HEAD by doorbell again - */ - if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1); - - if (ibqp->uobject) { - hr_qp->rq.db_reg = hr_dev->reg_base + - hr_dev->odb_offset + - DB_REG_OFFSET * hr_dev->priv_uar.index; - } - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg); - } - - hr_qp->state = new_state; - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - hr_qp->resp_depth = attr->max_dest_rd_atomic; - if (attr_mask & IB_QP_PORT) { - hr_qp->port = attr->port_num - 1; - hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - } - - if (new_state == IB_QPS_RESET && !ibqp->uobject) { - hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, - ibqp->srq ? 
to_hr_srq(ibqp->srq) : NULL); - if (ibqp->send_cq != ibqp->recv_cq) - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), - hr_qp->qpn, NULL); - - hr_qp->rq.head = 0; - hr_qp->rq.tail = 0; - hr_qp->sq.head = 0; - hr_qp->sq.tail = 0; - } -out: - kfree(context); - return ret; -} - -static int hns_roce_v1_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, int attr_mask, - enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) - return -EOPNOTSUPP; - - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) - return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state, - new_state); - else - return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state, - new_state); -} - -static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state) -{ - switch (state) { - case HNS_ROCE_QP_STATE_RST: - return IB_QPS_RESET; - case HNS_ROCE_QP_STATE_INIT: - return IB_QPS_INIT; - case HNS_ROCE_QP_STATE_RTR: - return IB_QPS_RTR; - case HNS_ROCE_QP_STATE_RTS: - return IB_QPS_RTS; - case HNS_ROCE_QP_STATE_SQD: - return IB_QPS_SQD; - case HNS_ROCE_QP_STATE_ERR: - return IB_QPS_ERR; - default: - return IB_QPS_ERR; - } -} - -static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct hns_roce_qp_context *hr_context) -{ - struct hns_roce_cmd_mailbox *mailbox; - int ret; - - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, - HNS_ROCE_CMD_QUERY_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - if (!ret) - memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); - else - dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n"); - - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - - return ret; -} - -static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_sqp_context context; - u32 addr; - - mutex_lock(&hr_qp->mutex); - - if (hr_qp->state == IB_QPS_RESET) { - qp_attr->qp_state = IB_QPS_RESET; - goto done; - } - - addr = ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(struct hns_roce_sqp_context); - context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr)); - context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1)); - context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2)); - context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3)); - context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4)); - context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5)); - context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6)); - context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7)); - context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8)); - context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9)); - - hr_qp->state = roce_get_field(context.qp1c_bytes_4, - QP1C_BYTES_4_QP_STATE_M, - QP1C_BYTES_4_QP_STATE_S); - qp_attr->qp_state = hr_qp->state; - qp_attr->path_mtu = IB_MTU_256; - qp_attr->path_mig_state = IB_MIG_ARMED; - qp_attr->qkey = QKEY_VAL; - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - qp_attr->rq_psn = 0; - qp_attr->sq_psn = 0; - qp_attr->dest_qp_num = 1; - qp_attr->qp_access_flags = 6; - - qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20, - QP1C_BYTES_20_PKEY_IDX_M, - QP1C_BYTES_20_PKEY_IDX_S); - qp_attr->port_num = hr_qp->port + 1; - 
qp_attr->sq_draining = 0; - qp_attr->max_rd_atomic = 0; - qp_attr->max_dest_rd_atomic = 0; - qp_attr->min_rnr_timer = 0; - qp_attr->timeout = 0; - qp_attr->retry_cnt = 0; - qp_attr->rnr_retry = 0; - qp_attr->alt_timeout = 0; - -done: - qp_attr->cur_qp_state = qp_attr->qp_state; - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; - qp_attr->cap.max_inline_data = 0; - qp_init_attr->cap = qp_attr->cap; - qp_init_attr->create_flags = 0; - - mutex_unlock(&hr_qp->mutex); - - return 0; -} - -static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_context *context; - int tmp_qp_state; - int ret = 0; - int state; - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - memset(qp_attr, 0, sizeof(*qp_attr)); - memset(qp_init_attr, 0, sizeof(*qp_init_attr)); - - mutex_lock(&hr_qp->mutex); - - if (hr_qp->state == IB_QPS_RESET) { - qp_attr->qp_state = IB_QPS_RESET; - goto done; - } - - ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context); - if (ret) { - dev_err(dev, "query qpc error\n"); - ret = -EINVAL; - goto out; - } - - state = roce_get_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S); - tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state); - if (tmp_qp_state == -1) { - dev_err(dev, "to_ib_qp_state error\n"); - ret = -EINVAL; - goto out; - } - hr_qp->state = (u8)tmp_qp_state; - qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; - qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_MTU_M, - QP_CONTEXT_QPC_BYTES_48_MTU_S); - qp_attr->path_mig_state = IB_MIG_ARMED; - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - if (hr_qp->ibqp.qp_type == IB_QPT_UD) - qp_attr->qkey = QKEY_VAL; - - qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S); - qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S); - qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S); - qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) | - ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) | - ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3); - - if (hr_qp->ibqp.qp_type == IB_QPT_RC) { - struct ib_global_route *grh = - rdma_ah_retrieve_grh(&qp_attr->ah_attr); - - rdma_ah_set_sl(&qp_attr->ah_attr, - roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S)); - rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); - grh->flow_label = - roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S); - grh->sgid_index = - roce_get_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S); - grh->hop_limit = - roce_get_field(context->qpc_bytes_44, - 
QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S); - grh->traffic_class = - roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, - QP_CONTEXT_QPC_BYTES_48_TCLASS_S); - - memcpy(grh->dgid.raw, context->dgid, - sizeof(grh->dgid.raw)); - } - - qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S); - qp_attr->port_num = hr_qp->port + 1; - qp_attr->sq_draining = 0; - qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S); - qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S); - qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)); - qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)); - qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); - qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry); - -done: - qp_attr->cur_qp_state = qp_attr->qp_state; - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; - - if (!ibqp->uobject) { - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; - } else { - qp_attr->cap.max_send_wr = 0; - qp_attr->cap.max_send_sge = 0; - } - - qp_init_attr->cap = qp_attr->cap; - -out: - mutex_unlock(&hr_qp->mutex); - kfree(context); - return ret; -} - -static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - - return hr_qp->doorbell_qpn <= 1 ? - hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) : - hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); -} - -int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_cq *send_cq, *recv_cq; - int ret; - - ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); - if (ret) - return ret; - - send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; - recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; - - hns_roce_lock_cqs(send_cq, recv_cq); - if (!udata) { - if (recv_cq) - __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, - (hr_qp->ibqp.srq ? - to_hr_srq(hr_qp->ibqp.srq) : - NULL)); - - if (send_cq && send_cq != recv_cq) - __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); - } - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_unlock_cqs(send_cq, recv_cq); - - hns_roce_qp_destroy(hr_dev, hr_qp, udata); - - return 0; -} - -static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - struct device *dev = &hr_dev->pdev->dev; - u32 cqe_cnt_ori; - u32 cqe_cnt_cur; - int wait_time = 0; - - /* - * Before freeing cq buffer, we need to ensure that the outstanding CQE - * have been written by checking the CQE counter. 
- */ - cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); - while (1) { - if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) & - HNS_ROCE_CQE_WCMD_EMPTY_BIT) - break; - - cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); - if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT) - break; - - msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS); - if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) { - dev_warn(dev, "Destroy cq 0x%lx timeout!\n", - hr_cq->cqn); - break; - } - wait_time++; - } - return 0; -} - -static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not) -{ - roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) | - (req_not << eq->log_entries), eq->db_reg); -} - -static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, int qpn) -{ - struct device *dev = &hr_dev->pdev->dev; - - dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_LWQCE_QPC_ERROR: - dev_warn(dev, "QP %d, QPC error.\n", qpn); - break; - case HNS_ROCE_LWQCE_MTU_ERROR: - dev_warn(dev, "QP %d, MTU error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: - dev_warn(dev, "QP %d, WQE shift error\n", qpn); - break; - case HNS_ROCE_LWQCE_SL_ERROR: - dev_warn(dev, "QP %d, SL error.\n", qpn); - break; - case HNS_ROCE_LWQCE_PORT_ERROR: - dev_warn(dev, "QP %d, port error.\n", qpn); - break; - default: - break; - } -} - -static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int qpn) -{ - struct device *dev = &hr_dev->pdev->dev; - - dev_warn(dev, "Local Access Violation Work Queue Error.\n"); - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: - dev_warn(dev, "QP %d, R_key violation.\n", qpn); - break; - case HNS_ROCE_LAVWQE_LENGTH_ERROR: - dev_warn(dev, "QP %d, length error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_VA_ERROR: - dev_warn(dev, "QP %d, VA error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_PD_ERROR: - dev_err(dev, "QP %d, PD error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_RW_ACC_ERROR: - dev_warn(dev, "QP %d, rw acc error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: - dev_warn(dev, "QP %d, key state error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: - dev_warn(dev, "QP %d, MR operation error.\n", qpn); - break; - default: - break; - } -} - -static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type) -{ - struct device *dev = &hr_dev->pdev->dev; - int phy_port; - int qpn; - - qpn = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); - phy_port = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); - if (qpn <= 1) - qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_warn(dev, "Invalid Req Local Work Queue Error.\n" - "QP %d, phy_port %d.\n", qpn, phy_port); - break; - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - hns_roce_v1_wq_catas_err_handle(hr_dev, 
aeqe, qpn); - break; - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn); - break; - default: - break; - } - - hns_roce_qp_event(hr_dev, qpn, event_type); -} - -static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type) -{ - struct device *dev = &hr_dev->pdev->dev; - u32 cqn; - - cqn = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S); - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_warn(dev, "CQ 0x%x access err.\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%x overflow\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn); - break; - default: - break; - } - - hns_roce_cq_event(hr_dev, cqn, event_type); -} - -static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe) -{ - struct device *dev = &hr_dev->pdev->dev; - - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_DB_SUBTYPE_SDB_OVF: - dev_warn(dev, "SDB overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF: - dev_warn(dev, "SDB almost overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP: - dev_warn(dev, "SDB almost empty.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_OVF: - dev_warn(dev, "ODB overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF: - dev_warn(dev, "ODB almost overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP: - dev_warn(dev, "ODB almost empty.\n"); - break; - default: - break; - } -} - -static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry) -{ - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE; - - return (struct hns_roce_aeqe *)((u8 *) - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + - off % HNS_ROCE_BA_SIZE); -} - -static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq) -{ - struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index); - - return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^ - !!(eq->cons_index & eq->entries)) ? 
aeqe : NULL; -} - -static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_aeqe *aeqe; - int aeqes_found = 0; - int event_type; - - while ((aeqe = next_aeqe_sw_v1(eq))) { - /* Make sure we read the AEQ entry after we have checked the - * ownership bit - */ - dma_rmb(); - - dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n", - aeqe, - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - event_type = roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S); - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_PATH_MIG: - dev_warn(dev, "PATH MIG not supported\n"); - break; - case HNS_ROCE_EVENT_TYPE_COMM_EST: - dev_warn(dev, "COMMUNICATION established\n"); - break; - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - dev_warn(dev, "SQ DRAINED not supported\n"); - break; - case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - dev_warn(dev, "PATH MIG failed\n"); - break; - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type); - break; - case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: - dev_warn(dev, "SRQ not supported!\n"); - break; - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type); - break; - case HNS_ROCE_EVENT_TYPE_PORT_CHANGE: - dev_warn(dev, "port change.\n"); - break; - case HNS_ROCE_EVENT_TYPE_MB: - hns_roce_cmd_event(hr_dev, - le16_to_cpu(aeqe->event.cmd.token), - aeqe->event.cmd.status, - le64_to_cpu(aeqe->event.cmd.out_param - )); - break; - case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - hns_roce_v1_db_overflow_handle(hr_dev, aeqe); - break; - default: - dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n", - event_type, eq->eqn, eq->cons_index); - break; - } - - eq->cons_index++; - aeqes_found = 1; - - if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) - eq->cons_index = 0; - } - - set_eq_cons_index_v1(eq, 0); - - return aeqes_found; -} - -static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry) -{ - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE; - - return (struct hns_roce_ceqe *)((u8 *) - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + - off % HNS_ROCE_BA_SIZE); -} - -static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq) -{ - struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index); - - return (!!(roce_get_bit(ceqe->comp, - HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^ - (!!(eq->cons_index & eq->entries)) ? 
ceqe : NULL; -} - -static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - struct hns_roce_ceqe *ceqe; - int ceqes_found = 0; - u32 cqn; - - while ((ceqe = next_ceqe_sw_v1(eq))) { - /* Make sure we read CEQ entry after we have checked the - * ownership bit - */ - dma_rmb(); - - cqn = roce_get_field(ceqe->comp, - HNS_ROCE_CEQE_CEQE_COMP_CQN_M, - HNS_ROCE_CEQE_CEQE_COMP_CQN_S); - hns_roce_cq_completion(hr_dev, cqn); - - ++eq->cons_index; - ceqes_found = 1; - - if (eq->cons_index > - EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) - eq->cons_index = 0; - } - - set_eq_cons_index_v1(eq, 0); - - return ceqes_found; -} - -static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr) -{ - struct hns_roce_eq *eq = eq_ptr; - struct hns_roce_dev *hr_dev = eq->hr_dev; - int int_work; - - if (eq->type_flag == HNS_ROCE_CEQ) - /* CEQ irq routine, CEQ is pulse irq, not clear */ - int_work = hns_roce_v1_ceq_int(hr_dev, eq); - else - /* AEQ irq routine, AEQ is pulse irq, not clear */ - int_work = hns_roce_v1_aeq_int(hr_dev, eq); - - return IRQ_RETVAL(int_work); -} - -static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id) -{ - struct hns_roce_dev *hr_dev = dev_id; - struct device *dev = &hr_dev->pdev->dev; - int int_work = 0; - u32 caepaemask_val; - u32 cealmovf_val; - u32 caepaest_val; - u32 aeshift_val; - u32 ceshift_val; - u32 cemask_val; - __le32 tmp; - int i; - - /* - * Abnormal interrupt: - * AEQ overflow, ECC multi-bit err, CEQ overflow must clear - * interrupt, mask irq, clear irq, cancel mask operation - */ - aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG); - tmp = cpu_to_le32(aeshift_val); - - /* AEQE overflow */ - if (roce_get_bit(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) { - dev_warn(dev, "AEQ overflow!\n"); - - /* Set mask */ - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(caepaemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_ENABLE); - caepaemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); - - /* Clear int state(INT_WC : write 1 clear) */ - caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG); - tmp = cpu_to_le32(caepaest_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1); - caepaest_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val); - - /* Clear mask */ - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(caepaemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_DISABLE); - caepaemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); - } - - /* CEQ almost overflow */ - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { - ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(ceshift_val); - - if (roce_get_bit(tmp, - ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) { - dev_warn(dev, "CEQ[%d] almost overflow!\n", i); - int_work++; - - /* Set mask */ - cemask_val = roce_read(hr_dev, - ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cemask_val); - roce_set_bit(tmp, - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_ENABLE); - cemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, cemask_val); - - /* Clear int state(INT_WC : write 1 clear) */ - cealmovf_val = 
roce_read(hr_dev, - ROCEE_CAEP_CEQ_ALM_OVF_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cealmovf_val); - roce_set_bit(tmp, - ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S, - 1); - cealmovf_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG + - i * CEQ_REG_OFFSET, cealmovf_val); - - /* Clear mask */ - cemask_val = roce_read(hr_dev, - ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cemask_val); - roce_set_bit(tmp, - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_DISABLE); - cemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, cemask_val); - } - } - - /* ECC multi-bit error alarm */ - dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n", - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG), - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG), - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG)); - - dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n", - roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG), - roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG), - roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG)); - - return IRQ_RETVAL(int_work); -} - -static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev) -{ - u32 aemask_val; - int masken = 0; - __le32 tmp; - int i; - - /* AEQ INT */ - aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(aemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - masken); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken); - aemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val); - - /* CEQ INT */ - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { - /* IRQ mask */ - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, masken); - } -} - -static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) + - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; - int i; - - if (!eq->buf_list) - return; - - for (i = 0; i < npages; ++i) - dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE, - eq->buf_list[i].buf, eq->buf_list[i].map); - - kfree(eq->buf_list); -} - -static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num, - int enable_flag) -{ - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num]; - __le32 tmp; - u32 val; - - val = readl(eqc); - tmp = cpu_to_le32(val); - - if (enable_flag) - roce_set_field(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_VALID); - else - roce_set_field(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_INVALID); - - val = le32_to_cpu(tmp); - writel(val, eqc); -} - -static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn]; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t tmp_dma_addr; - u32 eqcuridx_val; - u32 eqconsindx_val; - u32 eqshift_val; - __le32 tmp2 = 0; - __le32 tmp1 = 0; - __le32 tmp = 0; - int num_bas; - int ret; - int i; - - num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) + - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; - - if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) { - dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n", - (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE, - num_bas); - return -EINVAL; - } - - eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL); - 
if (!eq->buf_list) - return -ENOMEM; - - for (i = 0; i < num_bas; ++i) { - eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE, - &tmp_dma_addr, - GFP_KERNEL); - if (!eq->buf_list[i].buf) { - ret = -ENOMEM; - goto err_out_free_pages; - } - - eq->buf_list[i].map = tmp_dma_addr; - } - eq->cons_index = 0; - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_INVALID); - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S, - eq->log_entries); - eqshift_val = le32_to_cpu(tmp); - writel(eqshift_val, eqc); - - /* Configure eq extended address 12~44bit */ - writel((u32)(eq->buf_list[0].map >> 12), eqc + 4); - - /* - * Configure eq extended address 45~49 bit. - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M, - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S, - eq->buf_list[0].map >> 44); - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M, - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0); - eqcuridx_val = le32_to_cpu(tmp1); - writel(eqcuridx_val, eqc + 8); - - /* Configure eq consumer index */ - roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M, - ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0); - eqconsindx_val = le32_to_cpu(tmp2); - writel(eqconsindx_val, eqc + 0xc); - - return 0; - -err_out_free_pages: - for (i -= 1; i >= 0; i--) - dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf, - eq->buf_list[i].map); - - kfree(eq->buf_list); - return ret; -} - -static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_eq *eq; - int irq_num; - int eq_num; - int ret; - int i, j; - - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; - irq_num = eq_num + hr_dev->caps.num_other_vectors; - - eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); - if (!eq_table->eq) - return -ENOMEM; - - eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base), - GFP_KERNEL); - if (!eq_table->eqc_base) { - ret = -ENOMEM; - goto err_eqc_base_alloc_fail; - } - - for (i = 0; i < eq_num; i++) { - eq = &eq_table->eq[i]; - eq->hr_dev = hr_dev; - eq->eqn = i; - eq->irq = hr_dev->irq[i]; - eq->log_page_size = PAGE_SHIFT; - - if (i < hr_dev->caps.num_comp_vectors) { - /* CEQ */ - eq_table->eqc_base[i] = hr_dev->reg_base + - ROCEE_CAEP_CEQC_SHIFT_0_REG + - CEQ_REG_OFFSET * i; - eq->type_flag = HNS_ROCE_CEQ; - eq->db_reg = hr_dev->reg_base + - ROCEE_CAEP_CEQC_CONS_IDX_0_REG + - CEQ_REG_OFFSET * i; - eq->entries = hr_dev->caps.ceqe_depth; - eq->log_entries = ilog2(eq->entries); - eq->eqe_size = HNS_ROCE_CEQE_SIZE; - } else { - /* AEQ */ - eq_table->eqc_base[i] = hr_dev->reg_base + - ROCEE_CAEP_AEQC_AEQE_SHIFT_REG; - eq->type_flag = HNS_ROCE_AEQ; - eq->db_reg = hr_dev->reg_base + - ROCEE_CAEP_AEQE_CONS_IDX_REG; - eq->entries = hr_dev->caps.aeqe_depth; - eq->log_entries = ilog2(eq->entries); - eq->eqe_size = HNS_ROCE_AEQE_SIZE; - } - } - - /* Disable irq */ - hns_roce_v1_int_mask_enable(hr_dev); - - /* Configure ce int interval */ - roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG, - HNS_ROCE_CEQ_DEFAULT_INTERVAL); - - /* Configure ce int burst num */ - roce_write(hr_dev, 
ROCEE_CAEP_CE_BURST_NUM_CFG_REG, - HNS_ROCE_CEQ_DEFAULT_BURST_NUM); - - for (i = 0; i < eq_num; i++) { - ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]); - if (ret) { - dev_err(dev, "eq create failed\n"); - goto err_create_eq_fail; - } - } - - for (j = 0; j < irq_num; j++) { - if (j < eq_num) - ret = request_irq(hr_dev->irq[j], - hns_roce_v1_msix_interrupt_eq, 0, - hr_dev->irq_names[j], - &eq_table->eq[j]); - else - ret = request_irq(hr_dev->irq[j], - hns_roce_v1_msix_interrupt_abn, 0, - hr_dev->irq_names[j], hr_dev); - - if (ret) { - dev_err(dev, "request irq error!\n"); - goto err_request_irq_fail; - } - } - - for (i = 0; i < eq_num; i++) - hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE); - - return 0; - -err_request_irq_fail: - for (j -= 1; j >= 0; j--) - free_irq(hr_dev->irq[j], &eq_table->eq[j]); - -err_create_eq_fail: - for (i -= 1; i >= 0; i--) - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); - - kfree(eq_table->eqc_base); - -err_eqc_base_alloc_fail: - kfree(eq_table->eq); - - return ret; -} - -static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; - int irq_num; - int eq_num; - int i; - - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; - irq_num = eq_num + hr_dev->caps.num_other_vectors; - for (i = 0; i < eq_num; i++) { - /* Disable EQ */ - hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE); - - free_irq(hr_dev->irq[i], &eq_table->eq[i]); - - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); - } - for (i = eq_num; i < irq_num; i++) - free_irq(hr_dev->irq[i], hr_dev); - - kfree(eq_table->eqc_base); - kfree(eq_table->eq); -} - -static const struct ib_device_ops hns_roce_v1_dev_ops = { - .destroy_qp = hns_roce_v1_destroy_qp, - .poll_cq = hns_roce_v1_poll_cq, - .post_recv = hns_roce_v1_post_recv, - .post_send = hns_roce_v1_post_send, - .query_qp = hns_roce_v1_query_qp, - .req_notify_cq = hns_roce_v1_req_notify_cq, -}; - -static const struct hns_roce_hw hns_roce_hw_v1 = { - .reset = hns_roce_v1_reset, - .hw_profile = hns_roce_v1_profile, - .hw_init = hns_roce_v1_init, - .hw_exit = hns_roce_v1_exit, - .post_mbox = hns_roce_v1_post_mbox, - .poll_mbox_done = hns_roce_v1_chk_mbox, - .set_gid = hns_roce_v1_set_gid, - .set_mac = hns_roce_v1_set_mac, - .set_mtu = hns_roce_v1_set_mtu, - .write_mtpt = hns_roce_v1_write_mtpt, - .write_cqc = hns_roce_v1_write_cqc, - .set_hem = hns_roce_v1_set_hem, - .clear_hem = hns_roce_v1_clear_hem, - .modify_qp = hns_roce_v1_modify_qp, - .dereg_mr = hns_roce_v1_dereg_mr, - .destroy_cq = hns_roce_v1_destroy_cq, - .init_eq = hns_roce_v1_init_eq_table, - .cleanup_eq = hns_roce_v1_cleanup_eq_table, - .hns_roce_dev_ops = &hns_roce_v1_dev_ops, -}; - -static const struct of_device_id hns_roce_of_match[] = { - { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, }, - {}, -}; -MODULE_DEVICE_TABLE(of, hns_roce_of_match); - -static const struct acpi_device_id hns_roce_acpi_match[] = { - { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 }, - {}, -}; -MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match); - -static struct -platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode) -{ - struct device *dev; - - /* get the 'device' corresponding to the matching 'fwnode' */ - dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); - /* get the platform device */ - return dev ? 
to_platform_device(dev) : NULL; -} - -static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) -{ - struct device *dev = &hr_dev->pdev->dev; - struct platform_device *pdev = NULL; - struct net_device *netdev = NULL; - struct device_node *net_node; - int port_cnt = 0; - u8 phy_port; - int ret; - int i; - - /* check if we are compatible with the underlying SoC */ - if (dev_of_node(dev)) { - const struct of_device_id *of_id; - - of_id = of_match_node(hns_roce_of_match, dev->of_node); - if (!of_id) { - dev_err(dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = (const struct hns_roce_hw *)of_id->data; - if (!hr_dev->hw) { - dev_err(dev, "couldn't get H/W specific DT data!\n"); - return -ENXIO; - } - } else if (is_acpi_device_node(dev->fwnode)) { - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(hns_roce_acpi_match, dev); - if (!acpi_id) { - dev_err(dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data; - if (!hr_dev->hw) { - dev_err(dev, "couldn't get H/W specific ACPI data!\n"); - return -ENXIO; - } - } else { - dev_err(dev, "can't read compatibility data from DT or ACPI\n"); - return -ENXIO; - } - - /* get the mapped register base address */ - hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0); - if (IS_ERR(hr_dev->reg_base)) - return PTR_ERR(hr_dev->reg_base); - - /* read the node_guid of IB device from the DT or ACPI */ - ret = device_property_read_u8_array(dev, "node-guid", - (u8 *)&hr_dev->ib_dev.node_guid, - GUID_LEN); - if (ret) { - dev_err(dev, "couldn't get node_guid from DT or ACPI!\n"); - return ret; - } - - /* get the RoCE associated ethernet ports or netdevices */ - for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) { - if (dev_of_node(dev)) { - net_node = of_parse_phandle(dev->of_node, "eth-handle", - i); - if (!net_node) - continue; - pdev = of_find_device_by_node(net_node); - } else if (is_acpi_device_node(dev->fwnode)) { - struct fwnode_reference_args args; - - ret = acpi_node_get_property_reference(dev->fwnode, - "eth-handle", - i, &args); - if (ret) - continue; - pdev = hns_roce_find_pdev(args.fwnode); - } else { - dev_err(dev, "cannot read data from DT or ACPI\n"); - return -ENXIO; - } - - if (pdev) { - netdev = platform_get_drvdata(pdev); - phy_port = (u8)i; - if (netdev) { - hr_dev->iboe.netdevs[port_cnt] = netdev; - hr_dev->iboe.phy_port[port_cnt] = phy_port; - } else { - dev_err(dev, "no netdev found with pdev %s\n", - pdev->name); - return -ENODEV; - } - port_cnt++; - } - } - - if (port_cnt == 0) { - dev_err(dev, "unable to get eth-handle for available ports!\n"); - return -EINVAL; - } - - hr_dev->caps.num_ports = port_cnt; - - /* cmd issue mode: 0 is poll, 1 is event */ - hr_dev->cmd_mod = 1; - hr_dev->loop_idc = 0; - hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; - hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG; - - /* read the interrupt names from the DT or ACPI */ - ret = device_property_read_string_array(dev, "interrupt-names", - hr_dev->irq_names, - HNS_ROCE_V1_MAX_IRQ_NUM); - if (ret < 0) { - dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n"); - return ret; - } - - /* fetch the interrupt numbers */ - for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { - hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); - if (hr_dev->irq[i] <= 0) - return -EINVAL; - } - - return 0; -} - -/** - * hns_roce_probe - RoCE driver entrance - * @pdev: pointer to platform device - * Return : int - * - */ -static int hns_roce_probe(struct platform_device *pdev) -{ - 
int ret; - struct hns_roce_dev *hr_dev; - struct device *dev = &pdev->dev; - - hr_dev = ib_alloc_device(hns_roce_dev, ib_dev); - if (!hr_dev) - return -ENOMEM; - - hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL); - if (!hr_dev->priv) { - ret = -ENOMEM; - goto error_failed_kzalloc; - } - - hr_dev->pdev = pdev; - hr_dev->dev = dev; - platform_set_drvdata(pdev, hr_dev); - - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) { - dev_err(dev, "Not usable DMA addressing mode\n"); - ret = -EIO; - goto error_failed_get_cfg; - } - - ret = hns_roce_get_cfg(hr_dev); - if (ret) { - dev_err(dev, "Get Configuration failed!\n"); - goto error_failed_get_cfg; - } - - ret = hns_roce_init(hr_dev); - if (ret) { - dev_err(dev, "RoCE engine init failed!\n"); - goto error_failed_get_cfg; - } - - return 0; - -error_failed_get_cfg: - kfree(hr_dev->priv); - -error_failed_kzalloc: - ib_dealloc_device(&hr_dev->ib_dev); - - return ret; -} - -/** - * hns_roce_remove - remove RoCE device - * @pdev: pointer to platform device - */ -static int hns_roce_remove(struct platform_device *pdev) -{ - struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev); - - hns_roce_exit(hr_dev); - kfree(hr_dev->priv); - ib_dealloc_device(&hr_dev->ib_dev); - - return 0; -} - -static struct platform_driver hns_roce_driver = { - .probe = hns_roce_probe, - .remove = hns_roce_remove, - .driver = { - .name = DRV_NAME, - .of_match_table = hns_roce_of_match, - .acpi_match_table = ACPI_PTR(hns_roce_acpi_match), - }, -}; - -module_platform_driver(hns_roce_driver); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); -MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>"); -MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); -MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver"); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h deleted file mode 100644 index 60fdcbae6729..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ /dev/null @@ -1,1147 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef _HNS_ROCE_HW_V1_H -#define _HNS_ROCE_HW_V1_H - -#define CQ_STATE_VALID 2 - -#define HNS_ROCE_V1_MAX_PD_NUM 0x8000 -#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000 -#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000 - -#define HNS_ROCE_V1_MAX_QP_NUM 0x40000 -#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000 - -#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000 - -#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000 - -#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128 -#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128 - -#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64 -#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64 -#define HNS_ROCE_V1_SG_NUM 2 -#define HNS_ROCE_V1_INLINE_SIZE 32 - -#define HNS_ROCE_V1_UAR_NUM 256 -#define HNS_ROCE_V1_PHY_UAR_NUM 8 - -#define HNS_ROCE_V1_GID_NUM 16 -#define HNS_ROCE_V1_RESV_QP 8 - -#define HNS_ROCE_V1_MAX_IRQ_NUM 34 -#define HNS_ROCE_V1_COMP_VEC_NUM 32 -#define HNS_ROCE_V1_AEQE_VEC_NUM 1 -#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1 - -#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000 -#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400 - -#define HNS_ROCE_V1_QPC_SIZE 256 -#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8 -#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64 -#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64 -#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64 - -#define HNS_ROCE_V1_CQE_SIZE 32 -#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000 - -#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17) - -#define HNS_ROCE_V1_EXT_RAQ_WF 8 -#define HNS_ROCE_V1_RAQ_ENTRY 64 -#define HNS_ROCE_V1_RAQ_DEPTH 32768 -#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH) - -#define HNS_ROCE_V1_SDB_DEPTH 0x400 -#define HNS_ROCE_V1_ODB_DEPTH 0x400 - -#define HNS_ROCE_V1_DB_RSVD 0x80 - -#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) - -#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000 -#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000 -#define HNS_ROCE_V1_EXT_SDB_ENTRY 16 -#define HNS_ROCE_V1_EXT_ODB_ENTRY 16 -#define HNS_ROCE_V1_EXT_SDB_SIZE \ - (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY) -#define HNS_ROCE_V1_EXT_ODB_SIZE \ - (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY) - -#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_EXT_SDB_ALFUL \ - (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_EXT_ODB_ALFUL \ - (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) - -#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000 -#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000 -#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5 -#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20 - -#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17) - -#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2 -#define HNS_ROCE_V1_TPTR_BUF_SIZE \ - (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM) - -#define HNS_ROCE_ODB_POLL_MODE 0 - -#define HNS_ROCE_SDB_NORMAL_MODE 0 -#define HNS_ROCE_SDB_EXTEND_MODE 1 - -#define HNS_ROCE_ODB_EXTEND_MODE 1 - -#define KEY_VALID 0x02 - -#define HNS_ROCE_CQE_QPN_MASK 0x3ffff -#define HNS_ROCE_CQE_STATUS_MASK 0x1f -#define HNS_ROCE_CQE_OPCODE_MASK 0xf - -#define HNS_ROCE_CQE_SUCCESS 0x00 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03 -#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04 -#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05 -#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06 -#define 
HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a -#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b -#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c - -#define QP1C_CFGN_OFFSET 0x28 -#define PHY_PORT_OFFSET 0x8 -#define MTPT_IDX_SHIFT 16 -#define ALL_PORT_VAL_OPEN 0x3f -#define POL_TIME_INTERVAL_VAL 0x80 -#define SLEEP_TIME_INTERVAL 20 -#define SQ_PSN_SHIFT 8 -#define QKEY_VAL 0x80010000 -#define SDB_INV_CNT_OFFSET 8 - -#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10 -#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10 - -#define HNS_ROCE_INT_MASK_DISABLE 0 -#define HNS_ROCE_INT_MASK_ENABLE 1 - -#define CEQ_REG_OFFSET 0x18 - -#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0 - -#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0) - -#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16 -#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16) - -#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16 -#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16) - -#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24 -#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24) - -#define HNS_ROCE_AEQE_U32_4_OWNER_S 31 - -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0 -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0) - -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25 -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25) - -#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0 -#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0) - -#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0 -#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0) - -/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */ -enum { - HNS_ROCE_LWQCE_QPC_ERROR = 1, - HNS_ROCE_LWQCE_MTU_ERROR, - HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR, - HNS_ROCE_LWQCE_WQE_ADDR_ERROR, - HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR, - HNS_ROCE_LWQCE_SL_ERROR, - HNS_ROCE_LWQCE_PORT_ERROR, -}; - -/* Local Access Violation Work Queue Error,SUBTYPE 0x7 */ -enum { - HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1, - HNS_ROCE_LAVWQE_LENGTH_ERROR, - HNS_ROCE_LAVWQE_VA_ERROR, - HNS_ROCE_LAVWQE_PD_ERROR, - HNS_ROCE_LAVWQE_RW_ACC_ERROR, - HNS_ROCE_LAVWQE_KEY_STATE_ERROR, - HNS_ROCE_LAVWQE_MR_OPERATION_ERROR, -}; - -/* DOORBELL overflow subtype */ -enum { - HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1, - HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF, - HNS_ROCE_DB_SUBTYPE_ODB_OVF, - HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF, - HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP, - HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP, -}; - -enum { - /* RQ&SRQ related operations */ - HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06, - HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE, -}; - -enum { - HNS_ROCE_PORT_DOWN = 0, - HNS_ROCE_PORT_UP, -}; - -struct hns_roce_cq_context { - __le32 cqc_byte_4; - __le32 cq_bt_l; - __le32 cqc_byte_12; - __le32 cur_cqe_ba0_l; - __le32 cqc_byte_20; - __le32 cqe_tptr_addr_l; - __le32 cur_cqe_ba1_l; - __le32 cqc_byte_32; -}; - -#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0 -#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \ - (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S) - -#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16 -#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \ - (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20 -#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \ - (((1UL << 4) - 1) << 
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24 -#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16 -#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8 -#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S) - -#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S) - -#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9 - -#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8 -#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14 -#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15 - -#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16 -#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S) - -struct hns_roce_cqe { - __le32 cqe_byte_4; - union { - __le32 r_key; - __le32 immediate_data; - }; - __le32 byte_cnt; - __le32 cqe_byte_16; - __le32 cqe_byte_20; - __le32 s_mac_l; - __le32 cqe_byte_28; - __le32 reserved; -}; - -#define CQE_BYTE_4_OWNER_S 7 -#define CQE_BYTE_4_SQ_RQ_FLAG_S 14 - -#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8 -#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \ - (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) - -#define CQE_BYTE_4_WQE_INDEX_S 16 -#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S) - -#define CQE_BYTE_4_OPERATION_TYPE_S 0 -#define CQE_BYTE_4_OPERATION_TYPE_M \ - (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S) - -#define CQE_BYTE_4_IMM_INDICATOR_S 15 - -#define CQE_BYTE_16_LOCAL_QPN_S 0 -#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S) - -#define CQE_BYTE_20_PORT_NUM_S 26 -#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S) - -#define CQE_BYTE_20_SL_S 24 -#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S) - -#define CQE_BYTE_20_REMOTE_QPN_S 0 -#define CQE_BYTE_20_REMOTE_QPN_M \ - (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S) - -#define CQE_BYTE_20_GRH_PRESENT_S 29 - -#define CQE_BYTE_28_P_KEY_IDX_S 16 -#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S) - -#define CQ_DB_REQ_NOT_SOL 0 -#define CQ_DB_REQ_NOT (1 << 16) - -struct hns_roce_v1_mpt_entry { - __le32 mpt_byte_4; - __le32 pbl_addr_l; - __le32 mpt_byte_12; - __le32 virt_addr_l; - __le32 virt_addr_h; - __le32 length; - __le32 mpt_byte_28; - __le32 pa0_l; - __le32 mpt_byte_36; - __le32 mpt_byte_40; - __le32 mpt_byte_44; - __le32 mpt_byte_48; - __le32 pa4_l; - __le32 mpt_byte_56; - __le32 mpt_byte_60; - __le32 mpt_byte_64; -}; - -#define MPT_BYTE_4_KEY_STATE_S 0 -#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S) - -#define MPT_BYTE_4_KEY_S 8 -#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S) - -#define MPT_BYTE_4_PAGE_SIZE_S 16 -#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S) - -#define MPT_BYTE_4_MW_TYPE_S 20 - -#define MPT_BYTE_4_MW_BIND_ENABLE_S 21 - -#define MPT_BYTE_4_OWN_S 22 - -#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24 -#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \ - (((1UL << 2) - 1) << 
MPT_BYTE_4_MEMORY_LOCATION_TYPE_S) - -#define MPT_BYTE_4_REMOTE_ATOMIC_S 26 -#define MPT_BYTE_4_LOCAL_WRITE_S 27 -#define MPT_BYTE_4_REMOTE_WRITE_S 28 -#define MPT_BYTE_4_REMOTE_READ_S 29 -#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30 -#define MPT_BYTE_4_ADDRESS_TYPE_S 31 - -#define MPT_BYTE_12_PBL_ADDR_H_S 0 -#define MPT_BYTE_12_PBL_ADDR_H_M \ - (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S) - -#define MPT_BYTE_12_MW_BIND_COUNTER_S 17 -#define MPT_BYTE_12_MW_BIND_COUNTER_M \ - (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S) - -#define MPT_BYTE_28_PD_S 0 -#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S) - -#define MPT_BYTE_28_L_KEY_IDX_L_S 16 -#define MPT_BYTE_28_L_KEY_IDX_L_M \ - (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S) - -#define MPT_BYTE_36_PA0_H_S 0 -#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S) - -#define MPT_BYTE_36_PA1_L_S 8 -#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S) - -#define MPT_BYTE_40_PA1_H_S 0 -#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S) - -#define MPT_BYTE_40_PA2_L_S 16 -#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S) - -#define MPT_BYTE_44_PA2_H_S 0 -#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S) - -#define MPT_BYTE_44_PA3_L_S 24 -#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S) - -#define MPT_BYTE_48_PA3_H_S 0 -#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S) - -#define MPT_BYTE_56_PA4_H_S 0 -#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S) - -#define MPT_BYTE_56_PA5_L_S 8 -#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S) - -#define MPT_BYTE_60_PA5_H_S 0 -#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S) - -#define MPT_BYTE_60_PA6_L_S 16 -#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S) - -#define MPT_BYTE_64_PA6_H_S 0 -#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S) - -#define MPT_BYTE_64_L_KEY_IDX_H_S 24 -#define MPT_BYTE_64_L_KEY_IDX_H_M \ - (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S) - -struct hns_roce_wqe_ctrl_seg { - __le32 sgl_pa_h; - __le32 flag; - union { - __be32 imm_data; - __le32 inv_key; - }; - __le32 msg_length; -}; - -struct hns_roce_wqe_data_seg { - __le64 addr; - __le32 lkey; - __le32 len; -}; - -struct hns_roce_wqe_raddr_seg { - __le32 rkey; - __le32 len; /* reserved */ - __le64 raddr; -}; - -struct hns_roce_rq_wqe_ctrl { - __le32 rwqe_byte_4; - __le32 rocee_sgl_ba_l; - __le32 rwqe_byte_12; - __le32 reserved[5]; -}; - -#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16 -#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \ - (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S) - -#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000 - -#define GID_LEN 16 - -struct hns_roce_ud_send_wqe { - __le32 dmac_h; - __le32 u32_8; - __le32 immediate_data; - - __le32 u32_16; - union { - unsigned char dgid[GID_LEN]; - struct { - __le32 u32_20; - __le32 u32_24; - __le32 u32_28; - __le32 u32_32; - }; - }; - - __le32 u32_36; - __le32 u32_40; - - __le32 va0_l; - __le32 va0_h; - __le32 l_key0; - - __le32 va1_l; - __le32 va1_h; - __le32 l_key1; -}; - -#define UD_SEND_WQE_U32_4_DMAC_0_S 0 -#define UD_SEND_WQE_U32_4_DMAC_0_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S) - -#define UD_SEND_WQE_U32_4_DMAC_1_S 8 -#define UD_SEND_WQE_U32_4_DMAC_1_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S) - -#define UD_SEND_WQE_U32_4_DMAC_2_S 16 -#define 
UD_SEND_WQE_U32_4_DMAC_2_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S) - -#define UD_SEND_WQE_U32_4_DMAC_3_S 24 -#define UD_SEND_WQE_U32_4_DMAC_3_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S) - -#define UD_SEND_WQE_U32_8_DMAC_4_S 0 -#define UD_SEND_WQE_U32_8_DMAC_4_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S) - -#define UD_SEND_WQE_U32_8_DMAC_5_S 8 -#define UD_SEND_WQE_U32_8_DMAC_5_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S) - -#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22 - -#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16 -#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \ - (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S) - -#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24 -#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \ - (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S) - -#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31 - -#define UD_SEND_WQE_U32_16_DEST_QP_S 0 -#define UD_SEND_WQE_U32_16_DEST_QP_M \ - (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S) - -#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24 -#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S) - -#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0 -#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \ - (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S) - -#define UD_SEND_WQE_U32_36_PRIORITY_S 20 -#define UD_SEND_WQE_U32_36_PRIORITY_M \ - (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S) - -#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24 -#define UD_SEND_WQE_U32_36_SGID_INDEX_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S) - -#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0 -#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S) - -#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8 -#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S) - -struct hns_roce_sqp_context { - __le32 qp1c_bytes_4; - __le32 sq_rq_bt_l; - __le32 qp1c_bytes_12; - __le32 qp1c_bytes_16; - __le32 qp1c_bytes_20; - __le32 cur_rq_wqe_ba_l; - __le32 qp1c_bytes_28; - __le32 qp1c_bytes_32; - __le32 cur_sq_wqe_ba_l; - __le32 qp1c_bytes_40; -}; - -#define QP1C_BYTES_4_QP_STATE_S 0 -#define QP1C_BYTES_4_QP_STATE_M \ - (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S) - -#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8 -#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S) - -#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12 -#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S) - -#define QP1C_BYTES_4_PD_S 16 -#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S) - -#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0 -#define QP1C_BYTES_12_SQ_RQ_BT_H_M \ - (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S) - -#define QP1C_BYTES_16_RQ_HEAD_S 0 -#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S) - -#define QP1C_BYTES_16_PORT_NUM_S 16 -#define QP1C_BYTES_16_PORT_NUM_M \ - (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S) - -#define QP1C_BYTES_16_SIGNALING_TYPE_S 27 -#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28 -#define QP1C_BYTES_16_RQ_BA_FLG_S 29 -#define QP1C_BYTES_16_SQ_BA_FLG_S 30 -#define QP1C_BYTES_16_QP1_ERR_S 31 - -#define QP1C_BYTES_20_SQ_HEAD_S 0 -#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S) - -#define QP1C_BYTES_20_PKEY_IDX_S 16 -#define QP1C_BYTES_20_PKEY_IDX_M \ - (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S) - -#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0 
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S) - -#define QP1C_BYTES_28_RQ_CUR_IDX_S 16 -#define QP1C_BYTES_28_RQ_CUR_IDX_M \ - (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S) - -#define QP1C_BYTES_32_TX_CQ_NUM_S 0 -#define QP1C_BYTES_32_TX_CQ_NUM_M \ - (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S) - -#define QP1C_BYTES_32_RX_CQ_NUM_S 16 -#define QP1C_BYTES_32_RX_CQ_NUM_M \ - (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S) - -#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0 -#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S) - -#define QP1C_BYTES_40_SQ_CUR_IDX_S 16 -#define QP1C_BYTES_40_SQ_CUR_IDX_M \ - (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S) - -#define HNS_ROCE_WQE_INLINE (1UL<<31) -#define HNS_ROCE_WQE_SE (1UL<<30) - -#define HNS_ROCE_WQE_SGE_NUM_BIT 24 -#define HNS_ROCE_WQE_IMM (1UL<<23) -#define HNS_ROCE_WQE_FENCE (1UL<<21) -#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20) - -#define HNS_ROCE_WQE_OPCODE_SEND (0<<16) -#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16) -#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16) -#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16) -#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16) -#define HNS_ROCE_WQE_OPCODE_MASK (15<<16) - -struct hns_roce_qp_context { - __le32 qpc_bytes_4; - __le32 qpc_bytes_8; - __le32 qpc_bytes_12; - __le32 qpc_bytes_16; - __le32 sq_rq_bt_l; - __le32 qpc_bytes_24; - __le32 irrl_ba_l; - __le32 qpc_bytes_32; - __le32 qpc_bytes_36; - __le32 dmac_l; - __le32 qpc_bytes_44; - __le32 qpc_bytes_48; - u8 dgid[16]; - __le32 qpc_bytes_68; - __le32 cur_rq_wqe_ba_l; - __le32 qpc_bytes_76; - __le32 rx_rnr_time; - __le32 qpc_bytes_84; - __le32 qpc_bytes_88; - union { - __le32 rx_sge_len; - __le32 dma_length; - }; - union { - __le32 rx_sge_num; - __le32 rx_send_pktn; - __le32 r_key; - }; - __le32 va_l; - __le32 va_h; - __le32 qpc_bytes_108; - __le32 qpc_bytes_112; - __le32 rx_cur_sq_wqe_ba_l; - __le32 qpc_bytes_120; - __le32 qpc_bytes_124; - __le32 qpc_bytes_128; - __le32 qpc_bytes_132; - __le32 qpc_bytes_136; - __le32 qpc_bytes_140; - __le32 qpc_bytes_144; - __le32 qpc_bytes_148; - union { - __le32 rnr_retry; - __le32 ack_time; - }; - __le32 qpc_bytes_156; - __le32 pkt_use_len; - __le32 qpc_bytes_164; - __le32 qpc_bytes_168; - union { - __le32 sge_use_len; - __le32 pa_use_len; - }; - __le32 qpc_bytes_176; - __le32 qpc_bytes_180; - __le32 tx_cur_sq_wqe_ba_l; - __le32 qpc_bytes_188; - __le32 rvd21; -}; - -#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0 -#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S) - -#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3 -#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4 -#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5 -#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6 -#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7 - -#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8 -#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S) - -#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12 -#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S) - -#define QP_CONTEXT_QPC_BYTES_4_PD_S 16 -#define QP_CONTEXT_QPC_BYTES_4_PD_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S) - -#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0 -#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S) - 
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16 -#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S) - -#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0 -#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S) - -#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0 -#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S) - -#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0 -#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \ - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S) - -#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18 -#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S) - -#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23 - -#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \ - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18 -#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S) - -#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20 -#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21 -#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22 -#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23 - -#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24 -#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S) - -#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0 -#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S) - -#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24 -#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0 -#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S) - -#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16 -#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S) - -#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24 -#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S) - -#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0 -#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \ - (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S) - -#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20 -#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S) - -#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28 -#define QP_CONTEXT_QPC_BYTES_48_MTU_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S) - -#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0 -#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S) - -#define 
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8 -#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24 -#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0 -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S) - -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24 -#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25 - -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26 -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \ - (((1UL << 2) - 1) << \ - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29 -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S) - -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24 -#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25 - -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24 -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S) - -#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0 -#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16 -#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S) - -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0 -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S) - -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24 - -#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25 -#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27 - -#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24 -#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S) - -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24 -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S) - -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0 -#define 
QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S) - -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16 -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31 - -#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0 -#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S) - -#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0 -#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S) - -#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2 -#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S) - -#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5 -#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S) - -#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8 -#define QP_CONTEXT_QPC_BYTES_148_LSN_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S) - -#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0 -#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S) - -#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3 -#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S) - -#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8 -#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S) - -#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11 -#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) - -#define QP_CONTEXT_QPC_BYTES_156_SL_S 14 -#define QP_CONTEXT_QPC_BYTES_156_SL_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S) - -#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16 -#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S) - -#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24 -#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S) - -#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24 -#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24 -#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S) - -#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26 -#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28 -#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29 -#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30 - -#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0 -#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16 -#define 
QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0 -#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8 - -#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S) - -#define STATUS_MASK 0xff -#define GO_BIT_TIMEOUT_MSECS 10000 -#define HCR_STATUS_OFFSET 0x18 -#define HCR_GO_BIT 15 - -struct hns_roce_rq_db { - __le32 u32_4; - __le32 u32_8; -}; - -#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0 -#define RQ_DOORBELL_U32_4_RQ_HEAD_M \ - (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S) - -#define RQ_DOORBELL_U32_8_QPN_S 0 -#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S) - -#define RQ_DOORBELL_U32_8_CMD_S 28 -#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S) - -#define RQ_DOORBELL_U32_8_HW_SYNC_S 31 - -struct hns_roce_sq_db { - __le32 u32_4; - __le32 u32_8; -}; - -#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0 -#define SQ_DOORBELL_U32_4_SQ_HEAD_M \ - (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S) - -#define SQ_DOORBELL_U32_4_SL_S 16 -#define SQ_DOORBELL_U32_4_SL_M \ - (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S) - -#define SQ_DOORBELL_U32_4_PORT_S 18 -#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S) - -#define SQ_DOORBELL_U32_8_QPN_S 0 -#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S) - -#define SQ_DOORBELL_HW_SYNC_S 31 - -struct hns_roce_ext_db { - int esdb_dep; - int eodb_dep; - struct hns_roce_buf_list *sdb_buf_list; - struct hns_roce_buf_list *odb_buf_list; -}; - -struct hns_roce_db_table { - int sdb_ext_mod; - int odb_ext_mod; - struct hns_roce_ext_db *ext_db; -}; - -#define HW_SYNC_SLEEP_TIME_INTERVAL 20 -#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL) -#define BT_CMD_SYNC_SHIFT 31 -#define HNS_ROCE_BA_SIZE (32 * 4096) - -struct hns_roce_bt_table { - struct hns_roce_buf_list qpc_buf; - struct hns_roce_buf_list mtpt_buf; - struct hns_roce_buf_list cqc_buf; -}; - -struct hns_roce_tptr_table { - struct hns_roce_buf_list tptr_buf; -}; - -struct hns_roce_qp_work { - struct work_struct work; - struct ib_device *ib_dev; - struct hns_roce_qp *qp; - u32 db_wait_stage; - u32 sdb_issue_ptr; - u32 sdb_inv_cnt; - u32 sche_cnt; -}; - -struct hns_roce_mr_free_work { - struct work_struct work; - struct ib_device *ib_dev; - struct completion *comp; - int comp_flag; - void *mr; -}; - -struct hns_roce_recreate_lp_qp_work { - struct work_struct work; - struct ib_device *ib_dev; - struct completion *comp; - int comp_flag; -}; - -struct hns_roce_free_mr { - struct workqueue_struct *free_mr_wq; - struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP]; - struct hns_roce_cq *mr_free_cq; - struct hns_roce_pd *mr_free_pd; -}; - -struct hns_roce_v1_priv { - struct hns_roce_db_table db_table; - struct hns_roce_raq_table raq_table; - struct hns_roce_bt_table bt_table; - struct hns_roce_tptr_table 
tptr_table; - struct hns_roce_free_mr free_mr; -}; - -int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); -int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); -int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); - -#endif diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index eb0defa80d0d..b33e948fd060 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -678,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val, static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, void *wqe) { +#define HNS_ROCE_SL_SHIFT 2 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; /* All kinds of DirectWQE have the same header field layout */ @@ -685,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl); roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2); + V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, + qp->sl >> HNS_ROCE_SL_SHIFT); roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head); @@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, continue; dev_err_ratelimited(hr_dev->dev, - "Cmdq IO error, opcode = %x, return = %x\n", + "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", desc->opcode, desc_ret); ret = -EIO; } } else { /* FW/HW reset or incorrect number of desc */ tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG); - dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n", + dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n", csq->head, tail); csq->head = tail; @@ -1571,7 +1573,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev) struct hns_roce_cmq_desc desc; int ret; - if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { hr_dev->func_num = 1; return 0; } @@ -2003,7 +2005,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { - caps->flags |= HNS_ROCE_CAP_FLAG_STASH; + caps->flags |= HNS_ROCE_CAP_FLAG_STASH | + HNS_ROCE_CAP_FLAG_DIRECT_WQE; caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; } else { caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; @@ -2144,7 +2147,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; - caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM; caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; @@ -2161,6 +2163,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) (u32)priv->handle->rinfo.num_vectors - 2); if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { + caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM; caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE; caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE; @@ -2181,6 +2184,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) } else { u32 func_num = max_t(u32, 1, hr_dev->func_num); + caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM; caps->ceqe_size = HNS_ROCE_CEQE_SIZE; caps->aeqe_size = HNS_ROCE_AEQE_SIZE; caps->gid_table_len[0] /= func_num; @@ -2393,7 +2397,7 
@@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev) struct hns_roce_caps *caps = &hr_dev->caps; int ret; - if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) return 0; ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE, @@ -2967,8 +2971,8 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev, return hns_roce_cmq_send(hr_dev, desc, 2); } -static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port, - int gid_index, const union ib_gid *gid, +static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index, + const union ib_gid *gid, const struct ib_gid_attr *attr) { enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1; @@ -3063,8 +3067,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, } static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, - void *mb_buf, struct hns_roce_mr *mr, - unsigned long mtpt_idx) + void *mb_buf, struct hns_roce_mr *mr) { struct hns_roce_v2_mpt_entry *mpt_entry; int ret; @@ -4488,14 +4491,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, return 0; } -static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn) -{ - if (!fl) - fl = rdma_calc_flow_label(lqpn, rqpn); - - return rdma_flow_label_to_udp_sport(fl); -} - static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, u32 *dip_idx) { @@ -4712,8 +4707,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, } hr_reg_write(context, QPC_UDPSPN, - is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num, - attr->dest_qp_num) : 0); + is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num, + attr->dest_qp_num) : + 0); hr_reg_clear(qpc_mask, QPC_UDPSPN); @@ -4739,7 +4735,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { ibdev_err(ibdev, - "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", + "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", hr_qp->sl, MAX_SERVICE_LEVEL); return -EINVAL; } @@ -4768,7 +4764,8 @@ static bool check_qp_state(enum ib_qp_state cur_state, [IB_QPS_ERR] = true }, [IB_QPS_SQD] = {}, [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } + [IB_QPS_ERR] = { [IB_QPS_RESET] = true, + [IB_QPS_ERR] = true } }; return sm[cur_state][new_state]; @@ -5868,7 +5865,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag); } -static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) +static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) { struct device *dev = hr_dev->dev; int ret; @@ -5882,7 +5879,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) 0, HNS_ROCE_CMD_DESTROY_AEQC, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) - dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); + dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); } static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) @@ -6394,7 +6391,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) if (!id) return 0; - if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09) + if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08) return 0; ret = __hns_roce_hw_v2_init_instance(handle); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 35c61da7ba15..12be85f0986e 100644 --- 
a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -35,26 +35,15 @@ #include <linux/bitops.h> -#define HNS_ROCE_VF_QPC_BT_NUM 256 -#define HNS_ROCE_VF_SCCC_BT_NUM 64 -#define HNS_ROCE_VF_SRQC_BT_NUM 64 -#define HNS_ROCE_VF_CQC_BT_NUM 64 -#define HNS_ROCE_VF_MPT_BT_NUM 64 -#define HNS_ROCE_VF_SMAC_NUM 32 -#define HNS_ROCE_VF_SL_NUM 8 -#define HNS_ROCE_VF_GMV_BT_NUM 256 - #define HNS_ROCE_V2_MAX_QP_NUM 0x1000 #define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_SRQ 0x100000 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 -#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 @@ -63,13 +52,10 @@ #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 #define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_PHY_UAR_NUM 1 -#define HNS_ROCE_V2_MAX_IRQ_NUM 65 -#define HNS_ROCE_V2_COMP_VEC_NUM 63 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 -#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 @@ -81,7 +67,6 @@ #define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16 #define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 -#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 @@ -103,7 +88,6 @@ #define HNS_ROCE_INVALID_LKEY 0x0 #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 -#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 @@ -117,12 +101,14 @@ #define HNS_ROCE_CQE_HOP_NUM 1 #define HNS_ROCE_SRQWQE_HOP_NUM 1 #define HNS_ROCE_PBL_HOP_NUM 2 -#define HNS_ROCE_EQE_HOP_NUM 2 #define HNS_ROCE_IDX_HOP_NUM 1 #define HNS_ROCE_SQWQE_HOP_NUM 2 #define HNS_ROCE_EXT_SGE_HOP_NUM 1 #define HNS_ROCE_RQWQE_HOP_NUM 2 +#define HNS_ROCE_V2_EQE_HOP_NUM 2 +#define HNS_ROCE_V3_EQE_HOP_NUM 1 + #define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2 #define HNS_ROCE_V2_GID_INDEX_NUM 16 @@ -1441,7 +1427,7 @@ struct hns_roce_v2_priv { struct hns_roce_dip { u8 dgid[GID_LEN_V2]; u32 dip_idx; - struct list_head node; /* all dips are on a list */ + struct list_head node; /* all dips are on a list */ }; /* only for RNR timeout issue of HIP08 */ diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 4194b626f3c6..f73ba619f375 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -31,7 +31,6 @@ * SOFTWARE. 
*/ #include <linux/acpi.h> -#include <linux/of_platform.h> #include <linux/module.h> #include <linux/pci.h> #include <rdma/ib_addr.h> @@ -70,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context) if (port >= hr_dev->caps.num_ports) return -EINVAL; - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr); + ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr); return ret; } @@ -84,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) if (port >= hr_dev->caps.num_ports) return -EINVAL; - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL); + ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL); return ret; } @@ -152,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev) u8 i; for (i = 0; i < hr_dev->caps.num_ports; i++) { - if (hr_dev->hw->set_mtu) - hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i], - hr_dev->caps.max_mtu); ret = hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr); if (ret) @@ -270,6 +266,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index, u16 *pkey) { + if (index > 0) + return -EINVAL; + *pkey = PKEY_ID; return 0; @@ -307,9 +306,22 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, entry->address = address; entry->mmap_type = mmap_type; - ret = rdma_user_mmap_entry_insert_exact( - ucontext, &entry->rdma_entry, length, - mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1); + switch (mmap_type) { + /* pgoff 0 must be used by DB for compatibility */ + case HNS_ROCE_MMAP_TYPE_DB: + ret = rdma_user_mmap_entry_insert_exact( + ucontext, &entry->rdma_entry, length, 0); + break; + case HNS_ROCE_MMAP_TYPE_DWQE: + ret = rdma_user_mmap_entry_insert_range( + ucontext, &entry->rdma_entry, length, 1, + U32_MAX); + break; + default: + ret = -EINVAL; + break; + } + if (ret) { kfree(entry); return NULL; @@ -323,18 +335,12 @@ static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context) if (context->db_mmap_entry) rdma_user_mmap_entry_remove( &context->db_mmap_entry->rdma_entry); - - if (context->tptr_mmap_entry) - rdma_user_mmap_entry_remove( - &context->tptr_mmap_entry->rdma_entry); } static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) { struct hns_roce_ucontext *context = to_hr_ucontext(uctx); - struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); u64 address; - int ret; address = context->uar.pfn << PAGE_SHIFT; context->db_mmap_entry = hns_roce_user_mmap_entry_insert( @@ -342,27 +348,7 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) if (!context->db_mmap_entry) return -ENOMEM; - if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size) - return 0; - - /* - * FIXME: using io_remap_pfn_range on the dma address returned - * by dma_alloc_coherent is totally wrong. 
- */ - context->tptr_mmap_entry = - hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr, - hr_dev->tptr_size, - HNS_ROCE_MMAP_TYPE_TPTR); - if (!context->tptr_mmap_entry) { - ret = -ENOMEM; - goto err; - } - return 0; - -err: - hns_roce_dealloc_uar_entry(context); - return ret; } static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, @@ -436,10 +422,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) entry = to_hns_mmap(rdma_entry); pfn = entry->address >> PAGE_SHIFT; - prot = vma->vm_page_prot; - if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR) - prot = pgprot_noncached(prot); + switch (entry->mmap_type) { + case HNS_ROCE_MMAP_TYPE_DB: + case HNS_ROCE_MMAP_TYPE_DWQE: + prot = pgprot_device(vma->vm_page_prot); + break; + default: + return -EINVAL; + } ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE, prot, rdma_entry); @@ -816,7 +807,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) int ret; spin_lock_init(&hr_dev->sm_lock); - spin_lock_init(&hr_dev->bt_cmd_lock); if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) { @@ -907,20 +897,13 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) struct device *dev = hr_dev->dev; int ret; - if (hr_dev->hw->reset) { - ret = hr_dev->hw->reset(hr_dev, true); - if (ret) { - dev_err(dev, "Reset RoCE engine failed!\n"); - return ret; - } - } hr_dev->is_reset = false; if (hr_dev->hw->cmq_init) { ret = hr_dev->hw->cmq_init(hr_dev); if (ret) { dev_err(dev, "Init RoCE Command Queue failed!\n"); - goto error_failed_cmq_init; + return ret; } } @@ -1003,12 +986,6 @@ error_failed_cmd_init: if (hr_dev->hw->cmq_exit) hr_dev->hw->cmq_exit(hr_dev); -error_failed_cmq_init: - if (hr_dev->hw->reset) { - if (hr_dev->hw->reset(hr_dev, false)) - dev_err(dev, "Dereset RoCE engine failed!\n"); - } - return ret; } @@ -1028,8 +1005,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev) hns_roce_cmd_cleanup(hr_dev); if (hr_dev->hw->cmq_exit) hr_dev->hw->cmq_exit(hr_dev); - if (hr_dev->hw->reset) - hr_dev->hw->reset(hr_dev, false); } MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 7089ac780291..2ee06b906b60 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -31,7 +31,6 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <linux/vmalloc.h> #include <rdma/ib_umem.h> #include "hns_roce_device.h" @@ -81,7 +80,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) return -ENOMEM; } - mr->key = hw_index_to_key(id); /* MR key */ + mr->key = hw_index_to_key(id); /* MR key */ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, (unsigned long)id); @@ -173,8 +172,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, } if (mr->type != MR_TYPE_FRMR) - ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr, - mtpt_idx); + ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); else ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr); if (ret) { @@ -363,12 +361,8 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) struct hns_roce_mr *mr = to_hr_mr(ibmr); int ret = 0; - if (hr_dev->hw->dereg_mr) { - ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata); - } else { - hns_roce_mr_free(hr_dev, mr); - kfree(mr); - } + hns_roce_mr_free(hr_dev, mr); + kfree(mr); return ret; } @@ -614,10 +608,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, return -ENOBUFS; for (i = 0; i < count && npage < max_count; i++) { - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - addr = to_hr_hw_page_addr(pages[npage]); - else - addr = pages[npage]; + addr = pages[npage]; mtts[i] = cpu_to_le64(addr); npage++; @@ -824,11 +815,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, } int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) { struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; int mtt_count, left; - int start_index; + u32 start_index; int total = 0; __le64 *mtts; u32 npage; @@ -847,10 +838,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, continue; addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - mtt_buf[total] = to_hr_hw_page_addr(addr); - else - mtt_buf[total] = addr; + mtt_buf[total] = addr; total++; } @@ -884,10 +872,10 @@ done: static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, struct hns_roce_buf_attr *attr, struct hns_roce_hem_cfg *cfg, - unsigned int *buf_page_shift, int unalinged_size) + unsigned int *buf_page_shift, u64 unalinged_size) { struct hns_roce_buf_region *r; - int first_region_padding; + u64 first_region_padding; int page_cnt, region_cnt; unsigned int page_shift; size_t buf_size; diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 81ffad77ae42..783e71852c50 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -30,7 +30,6 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <linux/pci.h> #include "hns_roce_device.h" @@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) { struct hns_roce_ida *uar_ida = &hr_dev->uar_ida; - struct resource *res; int id; /* Using bitmap to manager UAR index */ @@ -104,18 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) else uar->index = 0; - if (!dev_is_pci(hr_dev->dev)) { - res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); - if (!res) { - ida_free(&uar_ida->ida, id); - dev_err(&hr_dev->pdev->dev, "memory resource not found!\n"); - return -EINVAL; - } - uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; - } else { - uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) - >> PAGE_SHIFT); - } + uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT); + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 9af4509894e6..d78373e10aab 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -32,7 +32,6 @@ */ #include <linux/pci.h> -#include <linux/platform_device.h> #include <rdma/ib_addr.h> #include <rdma/ib_umem.h> #include <rdma/uverbs_ioctl.h> @@ -110,12 +109,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) return; } - if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && - (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || - event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) { + if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || + event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) { qp->state = IB_QPS_ERR; flush_cqe(hr_dev, qp); @@ -219,13 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) int ret; if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { - /* when hw version is v1, the sqpn is allocated */ - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - num = HNS_ROCE_MAX_PORTS + - hr_dev->iboe.phy_port[hr_qp->port]; - else - num = 1; - + num = 1; hr_qp->doorbell_qpn = 1; } else { mutex_lock(&qp_table->bank_mutex); @@ -324,11 +316,6 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) if (!hr_qp->qpn) return -EINVAL; - /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ - if (hr_qp->ibqp.qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) - return 0; - /* Alloc memory for QPC */ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); if (ret) { @@ -379,6 +366,11 @@ err_out: return ret; } +static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp) +{ + rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); +} + void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct xarray *xa = &hr_dev->qp_table_xa; @@ -402,11 +394,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - /* In v1 engine, GSI QP context is 
saved in the RoCE hw's register */ - if (hr_qp->ibqp.qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) - return; - if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); @@ -535,11 +522,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { - hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE; - return; - } - hr_qp->sq.max_gs = max(1U, cap->max_send_sge); wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp); @@ -780,7 +762,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, goto err_inline; } + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; + return 0; + err_inline: free_rq_inline_buf(hr_qp); @@ -822,6 +808,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, hns_roce_qp_has_rq(init_attr)); } +static int qp_mmap_entry(struct hns_roce_qp *hr_qp, + struct hns_roce_dev *hr_dev, + struct ib_udata *udata, + struct hns_roce_ib_create_qp_resp *resp) +{ + struct hns_roce_ucontext *uctx = + rdma_udata_to_drv_context(udata, + struct hns_roce_ucontext, ibucontext); + struct rdma_user_mmap_entry *rdma_entry; + u64 address; + + address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; + + hr_qp->dwqe_mmap_entry = + hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, + HNS_ROCE_DWQE_SIZE, + HNS_ROCE_MMAP_TYPE_DWQE); + + if (!hr_qp->dwqe_mmap_entry) { + ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); + return -ENOMEM; + } + + rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; + resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); + + return 0; +} + static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr, @@ -909,10 +924,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; if (udata) { + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { + ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp); + if (ret) + return ret; + } + ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, resp); if (ret) - return ret; + goto err_remove_qp; } else { ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr); if (ret) @@ -920,6 +941,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } return 0; + +err_remove_qp: + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + qp_user_mmap_entry_remove(hr_qp); + + return ret; } static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, @@ -933,6 +960,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hns_roce_db_unmap_user(uctx, &hr_qp->rdb); if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + qp_user_mmap_entry_remove(hr_qp); } else { if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) hns_roce_free_db(hr_dev, &hr_qp->rdb); @@ -1158,7 +1187,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, goto out; break; case IB_QPT_UD: - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && is_user) goto out; break; @@ -1391,7 +1420,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, } } -static inline void *get_wqe(struct hns_roce_qp 
*hr_qp, int offset) +static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset) { return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); } diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index b4c657f5f2f9..4b1b16e7a75b 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -550,7 +550,7 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf, struct irdma_sc_dev *dev = &rf->sc_dev; dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); - irq_set_affinity_hint(msix_vec->irq, NULL); + irq_update_affinity_hint(msix_vec->irq, NULL); free_irq(msix_vec->irq, dev_id); } @@ -1100,7 +1100,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, } cpumask_clear(&msix_vec->mask); cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); - irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask); + irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask); if (status) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n"); return IRDMA_ERR_CFG; diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c index d219f64b2c3d..43e962b97d6a 100644 --- a/drivers/infiniband/hw/irdma/i40iw_if.c +++ b/drivers/infiniband/hw/irdma/i40iw_if.c @@ -198,7 +198,7 @@ static void i40iw_remove(struct auxiliary_device *aux_dev) aux_dev); struct i40e_info *cdev_info = i40e_adev->ldev; - return i40e_client_device_unregister(cdev_info); + i40e_client_device_unregister(cdev_info); } static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = { diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h index aa20827dcc9d..d0d4f2b77d34 100644 --- a/drivers/infiniband/hw/irdma/pble.h +++ b/drivers/infiniband/hw/irdma/pble.h @@ -69,7 +69,7 @@ struct irdma_add_page_info { struct irdma_chunk { struct list_head list; struct irdma_dma_info dmainfo; - void *bitmapbuf; + unsigned long *bitmapbuf; u32 sizeofbitmap; u64 size; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 8cd5f9261692..460e757d3fe6 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev, return -EINVAL; memset(props, 0, sizeof(*props)); - ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + iwdev->netdev->dev_addr); props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | irdma_fw_minor_ver(&rf->sc_dev); props->device_cap_flags = iwdev->device_cap_flags; @@ -1170,6 +1171,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, udp_info->ttl = attr->ah_attr.grh.hop_limit; udp_info->flow_label = attr->ah_attr.grh.flow_label; udp_info->tos = attr->ah_attr.grh.traffic_class; + udp_info->src_port = + rdma_get_udp_sport(udp_info->flow_label, + ibqp->qp_num, + roce_info->dest_qp); irdma_qp_rem_qos(&iwqp->sc_qp); dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri); ctx_info->user_pri = rt_tos2priority(udp_info->tos); @@ -4321,24 +4326,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, return IB_LINK_LAYER_ETHERNET; } -static __be64 irdma_mac_to_guid(struct net_device *ndev) -{ - const unsigned char *mac = ndev->dev_addr; - __be64 guid; - unsigned char *dst = (unsigned char *)&guid; - - dst[0] = mac[0] ^ 2; - dst[1] = mac[1]; - dst[2] = mac[2]; - dst[3] = 0xff; - dst[4] = 0xfe; - dst[5] = mac[3]; - dst[6] = mac[4]; - dst[7] = mac[5]; - - return guid; -} - 
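The helper removed just above open-codes the standard EUI-48 to modified-EUI-64 mapping (the same open-coded transform is deleted from ocrdma further down): flip the universal/local bit of the first MAC octet and splice 0xFF, 0xFE between the two halves. addrconf_addr_eui48() from <net/addrconf.h> performs exactly this transform, which is why each caller collapses to a single line. A self-contained sketch of the equivalence (mac_to_node_guid and the sample MAC are illustrative, not from the patch):

    #include <net/addrconf.h>

    /* addrconf_addr_eui48() flips the U/L bit of octet 0 and splices
     * 0xFF, 0xFE into the middle, so e.g. MAC 00:1b:21:aa:bb:cc
     * yields GUID 02:1b:21:ff:fe:aa:bb:cc. */
    static __be64 mac_to_node_guid(const u8 *mac)
    {
            __be64 guid;

            addrconf_addr_eui48((u8 *)&guid, mac);
            return guid;
    }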
static const struct ib_device_ops irdma_roce_dev_ops = { .attach_mcast = irdma_attach_mcast, .create_ah = irdma_create_ah, @@ -4408,7 +4395,8 @@ static const struct ib_device_ops irdma_dev_ops = { static void irdma_init_roce_device(struct irdma_device *iwdev) { iwdev->ibdev.node_type = RDMA_NODE_IB_CA; - iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev); + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, + iwdev->netdev->dev_addr); ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); } @@ -4421,7 +4409,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev) struct net_device *netdev = iwdev->netdev; iwdev->ibdev.node_type = RDMA_NODE_RNIC; - ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr); + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, + netdev->dev_addr); iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref; iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref; iwdev->ibdev.ops.iw_get_qp = irdma_get_qp; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0d2fa3338784..1c3d97229988 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -85,14 +85,6 @@ static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, static struct workqueue_struct *wq; -static void init_query_mad(struct ib_smp *mad) -{ - mad->base_version = 1; - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; - mad->class_version = 1; - mad->method = IB_MGMT_METHOD_GET; -} - static int check_flow_steering_support(struct mlx4_dev *dev) { int eth_num_ports = 0; @@ -471,7 +463,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, @@ -669,7 +661,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -721,7 +713,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port, /* If reported active speed is QDR, check if is FDR-10 */ if (props->active_speed == IB_SPEED_QDR) { - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -848,7 +840,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -870,7 +862,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, } } - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); @@ -917,7 +909,7 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE; in_mad->attr_mod = 0; @@ -971,7 +963,7 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = cpu_to_be32(index / 32); @@ -1990,7 +1982,7 @@ static int init_node_data(struct mlx4_ib_dev *dev) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + 
ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; if (mlx4_is_master(dev->dev)) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; @@ -2784,10 +2776,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (err) goto err_counter; - ibdev->ib_uc_qpns_bitmap = - kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count), - sizeof(long), - GFP_KERNEL); + ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, + GFP_KERNEL); if (!ibdev->ib_uc_qpns_bitmap) goto err_steer_qp_release; @@ -2875,7 +2865,7 @@ err_diag_counters: mlx4_ib_diag_cleanup(ibdev); err_steer_free_bitmap: - kfree(ibdev->ib_uc_qpns_bitmap); + bitmap_free(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: mlx4_qp_release_range(dev, ibdev->steer_qpn_base, @@ -2988,7 +2978,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); - kfree(ibdev->ib_uc_qpns_bitmap); + bitmap_free(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index a190fb581591..08371a80fdc2 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -328,8 +328,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, } wc->vendor_err = cqe->vendor_err_synd; - if (dump) + if (dump) { + mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status, + ib_wc_status_msg(wc->status)); dump_cqe(dev, cqe); + } } static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index ec242a5a17a3..293ed709e5ed 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -291,7 +291,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -318,7 +318,7 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, if (!in_mad) return -ENOMEM; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, @@ -405,7 +405,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); @@ -430,7 +430,7 @@ int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); @@ -456,7 +456,7 @@ int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = cpu_to_be32(index / 32); @@ -485,7 +485,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -496,7 +496,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index, memcpy(gid->raw, 
out_mad->data + 8, 8); - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); @@ -530,7 +530,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, /* props being zeroed by the caller, avoid zeroing it here */ - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -584,6 +584,11 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP) props->active_speed = IB_SPEED_HDR; break; + case 8: + if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP && + props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP) + props->active_speed = IB_SPEED_NDR; + break; } } @@ -591,7 +596,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, if (props->active_speed == 4) { if (dev->port_caps[port - 1].ext_port_cap & MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index e3c33be9c5a0..cbc20e400be0 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -665,9 +665,9 @@ struct mlx5_ib_mr { /* User MR data */ struct mlx5_cache_ent *cache_ent; + /* Everything after cache_ent is zero'd when MR allocated */ struct ib_umem *umem; - /* This is zero'd when the MR is allocated */ union { /* Used only while the MR is in the cache */ struct { @@ -719,7 +719,7 @@ struct mlx5_ib_mr { /* Zero the fields in the mr that are variant depending on usage */ static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr) { - memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out)); + memset_after(mr, 0, cache_ent); } static inline bool is_odp_mr(struct mlx5_ib_mr *mr) @@ -1466,14 +1466,6 @@ extern const struct uapi_definition mlx5_ib_flow_defs[]; extern const struct uapi_definition mlx5_ib_qos_defs[]; extern const struct uapi_definition mlx5_ib_std_types_defs[]; -static inline void init_query_mad(struct ib_smp *mad) -{ - mad->base_version = 1; - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; - mad->class_version = 1; - mad->method = IB_MGMT_METHOD_GET; -} - static inline int is_qp1(enum ib_qp_type qp_type) { return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI; diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index aef1d274a14e..9f0f79d02d3c 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc) } if (obj < alloc->max) { - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj |= alloc->top; } else obj = -1; @@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); alloc->last = min(alloc->last, obj); alloc->top = (alloc->top + alloc->max) & alloc->mask; @@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, u32 reserved) { - int i; - /* num must be a power of 2 */ if (num != 1 << (ffs(num) - 1)) return -EINVAL; @@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, alloc->max = num; alloc->mask = mask; 
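Several hunks in this region (mlx4_ib_add, mthca_alloc_init, mthca_buddy_init, ocrdma_mbx_alloc_pd_range) make the same conversion: an open-coded bitmap built from kmalloc_array() over BITS_TO_LONGS() words, zeroed and seeded bit by bit, becomes one bitmap_zalloc()/bitmap_set() pair released with bitmap_free(), so the BITS_TO_LONGS() sizing arithmetic disappears from every call site. The related set_bit()/clear_bit() to __set_bit()/__clear_bit() changes drop atomic operations where the allocator's spinlock already serializes access. A before/after sketch of the allocation pattern under illustrative names (not any driver's exact code):

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    /* before: size the word array by hand, zero it, seed reserved bits */
    static unsigned long *table_alloc_old(u32 num, u32 reserved)
    {
            unsigned long *t;
            u32 i;

            t = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
            if (!t)
                    return NULL;
            bitmap_zero(t, num);
            for (i = 0; i < reserved; i++)
                    set_bit(i, t);
            return t;               /* freed with kfree() */
    }

    /* after: one zeroed allocation, one range operation */
    static unsigned long *table_alloc_new(u32 num, u32 reserved)
    {
            unsigned long *t = bitmap_zalloc(num, GFP_KERNEL);

            if (t)
                    bitmap_set(t, 0, reserved);
            return t;               /* freed with bitmap_free() */
    }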
spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void mthca_alloc_cleanup(struct mthca_alloc *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } /* diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index ce0e0867e488..192f83fd7c8a 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) return -1; found: - clear_bit(seg, buddy->bits[o]); + __clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; - set_bit(seg ^ 1, buddy->bits[o]); + __set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } @@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { - clear_bit(seg ^ 1, buddy->bits[order]); + __clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } - set_bit(seg, buddy->bits[order]); + __set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); @@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) { - int i, s; + int i; buddy->max_order = max_order; spin_lock_init(&buddy->lock); @@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL); + buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i), + GFP_KERNEL); if (!buddy->bits[i]) goto err_out_free; - bitmap_zero(buddy->bits[i], - 1 << (buddy->max_order - i)); } - set_bit(0, buddy->bits[buddy->max_order]); + __set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + bitmap_free(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + bitmap_free(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); @@ -469,8 +467,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, mpt_entry->start = cpu_to_be64(iova); mpt_entry->length = cpu_to_be64(total_size); - memset(&mpt_entry->lkey, 0, - sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); + memset_startat(mpt_entry, 0, lkey); if (mr->mtt) mpt_entry->mtt_seg = diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ceee23ebc0f2..c46df53f26cf 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -50,14 +50,6 @@ #include <rdma/mthca-abi.h> #include "mthca_memfree.h" -static void init_query_mad(struct ib_smp *mad) -{ - mad->base_version = 1; - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; - mad->class_version = 1; - mad->method = IB_MGMT_METHOD_GET; -} - static int mthca_query_device(struct ib_device *ibdev, 
struct ib_device_attr *props, struct ib_udata *uhw) { @@ -78,7 +70,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr props->fw_ver = mdev->fw_ver; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(mdev, 1, 1, @@ -140,7 +132,7 @@ static int mthca_query_port(struct ib_device *ibdev, /* props being zeroed by the caller, avoid zeroing it here */ - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -234,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = cpu_to_be32(index / 32); @@ -263,7 +255,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -274,7 +266,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port, memcpy(gid->raw, out_mad->data + 8, 8); - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); @@ -1006,7 +998,7 @@ static int mthca_init_node_data(struct mthca_dev *dev) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; err = mthca_MAD_IFC(dev, 1, 1, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index c51c3f40700e..265a581133dc 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) { int status = -ENOMEM; - size_t pd_bitmap_size; struct ocrdma_alloc_pd_range *cmd; struct ocrdma_alloc_pd_range_rsp *rsp; @@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_dpp_pd = rsp->pd_count; - pd_bitmap_size = - BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); } @@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_normal_pd = rsp->pd_count; - pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); @@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) { ocrdma_mbx_dealloc_pd_range(dev); - kfree(dev->pd_mgr->pd_norm_bitmap); - kfree(dev->pd_mgr->pd_dpp_bitmap); + bitmap_free(dev->pd_mgr->pd_norm_bitmap); + bitmap_free(dev->pd_mgr->pd_dpp_bitmap); kfree(dev->pd_mgr); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7abf6cf1e937..5d4b3bc16493 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ 
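
Several drivers in this section carried byte-identical private init_query_mad() helpers; the series deletes them in favor of a shared ib_init_query_mad(), presumably added to the common rdma headers elsewhere in the patch set. The removed mlx5/mthca bodies show exactly what it does, and the call sites all follow the same shape; a sketch grounded in those removed copies (query_port_info() is an illustrative name):

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

/* Body of the removed per-driver helpers, per the hunks above. */
static inline void init_query_mad_sketch(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method        = IB_MGMT_METHOD_GET;
}

/* The caller pattern repeated across these files: */
static void query_port_info(struct ib_smp *in_mad, u32 port)
{
	init_query_mad_sketch(in_mad);	/* drivers now call ib_init_query_mad() */
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);
	/* ...then issue in_mad/out_mad through the device's MAD_IFC command */
}
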
b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("Dual BSD/GPL"); -void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) -{ - u8 mac_addr[6]; - - memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); - guid[0] = mac_addr[0] ^ 2; - guid[1] = mac_addr[1]; - guid[2] = mac_addr[2]; - guid[3] = 0xff; - guid[4] = 0xfe; - guid[5] = mac_addr[3]; - guid[6] = mac_addr[4]; - guid[7] = mac_addr[5]; -} static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, u32 port_num) { @@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) { int ret; - ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->nic_info.mac_addr); BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX); memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, sizeof(OCRDMA_NODE_DESC)); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 735123d0e9ec..acf9970ec245 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -41,6 +41,7 @@ */ #include <linux/dma-mapping.h> +#include <net/addrconf.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> @@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, memset(attr, 0, sizeof *attr); memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); - ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, + dev->nic_info.mac_addr); attr->max_mr_size = dev->attr.max_mr_size; attr->page_size_cap = 0xffff000; attr->vendor_id = dev->nic_info.pdev->vendor; @@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) { u16 pd_bitmap_idx = 0; - const unsigned long *pd_bitmap; + unsigned long *pd_bitmap; if (dpp_pool) { pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_dpp_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_dpp_count++; if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; @@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) pd_bitmap = dev->pd_mgr->pd_norm_bitmap; pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_normal_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_norm_count++; if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; @@ -1844,12 +1846,10 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq, int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) { - int status; struct ocrdma_srq *srq; srq = get_ocrdma_srq(ibsrq); - status = ocrdma_mbx_query_srq(srq, srq_attr); - return status; + return ocrdma_mbx_query_srq(srq, srq_attr); } int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) @@ -1960,7 +1960,6 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, const struct ib_send_wr *wr) { - int status; struct 
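
The deleted ocrdma_get_guid() open-coded the standard EUI-48 to EUI-64 expansion: flip the locally-administered bit of the first MAC byte and splice 0xFF, 0xFE into the middle. addrconf_addr_eui48() from <net/addrconf.h> implements precisely that mapping, so both call sites can use it directly. A sketch of the equivalence (set_node_guid() is an illustrative wrapper):

#include <linux/types.h>
#include <net/addrconf.h>

/* The removed helper produced, by hand:
 *   guid = { mac[0] ^ 2, mac[1], mac[2], 0xff, 0xfe,
 *            mac[3], mac[4], mac[5] }
 * addrconf_addr_eui48() performs the same expansion:
 */
static void set_node_guid(__be64 *node_guid, const u8 *mac)
{
	addrconf_addr_eui48((u8 *)node_guid, mac);
}
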
ocrdma_sge *sge; u32 wqe_size = sizeof(*hdr); @@ -1972,8 +1971,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, sge = (struct ocrdma_sge *)(hdr + 1); } - status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); - return status; + return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); } static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b73d742a520c..f860b7fcef33 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port, enum rdma_protocol_type ocrdma_query_protocol(struct ib_device *device, u32 port_num); -void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9100009f0a23..a53476653b0d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1931,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* db offset was calculated in copy_qp_uresp, now set in the user q */ if (qedr_qp_has_sq(qp)) { qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; + qp->sq.max_wr = attrs->cap.max_send_wr; rc = qedr_db_recovery_add(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data, DB_REC_WIDTH_32B, @@ -1941,6 +1942,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, if (qedr_qp_has_rq(qp)) { qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; + qp->rq.max_wr = attrs->cap.max_recv_wr; rc = qedr_db_recovery_add(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data, DB_REC_WIDTH_32B, diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 9363bccfc6e7..a8e1c30c370f 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -196,7 +196,7 @@ struct qib_ctxtdata { pid_t pid; pid_t subpid[QLOGIC_IB_MAX_SUBCTXT]; /* same size as task_struct .comm[], command that opened context */ - char comm[16]; + char comm[TASK_COMM_LEN]; /* pkeys set by this use of this ctxt */ u16 pkeys[4]; /* so file ops can get at unit */ diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 63854f4b6524..aa290928cf96 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1321,7 +1321,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, rcd->tid_pg_list = ptmp; rcd->pid = current->pid; init_waitqueue_head(&dd->rcd[ctxt]->wait); - strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); + get_task_comm(rcd->comm, current); ctxt_fp(fp) = rcd; qib_stats.sps_ctxts++; dd->freectxts--; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index a9b83bc13f4a..aea571943768 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -3030,7 +3030,7 @@ static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) /* Does read/modify/write to appropriate registers to * set output and direction bits selected by mask. - * these are in their canonical postions (e.g. lsb of + * these are in their canonical positions (e.g. lsb of * dir will end up in D48 of extctrl on existing chips). 
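
The qib hunks above resize the per-context comm buffer to TASK_COMM_LEN and replace the raw strlcpy() of current->comm with get_task_comm(). The helper reads the name under the task lock, so it cannot race a concurrent rename, and it fails the build unless the destination is exactly TASK_COMM_LEN bytes, which is what forced the comm[16] declaration onto the named constant. A minimal sketch:

#include <linux/sched.h>

struct demo_ctxt {
	/* must be exactly TASK_COMM_LEN: get_task_comm() rejects
	 * any other size at build time */
	char comm[TASK_COMM_LEN];
};

static void record_opener(struct demo_ctxt *rcd)
{
	/* reads current->comm under the task lock, NUL-terminated */
	get_task_comm(rcd->comm, current);
}
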
* returns contents of GP Inputs. */ diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index d1c0bc31869f..80a8dd6c7814 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3742,7 +3742,7 @@ static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) /* * Does read/modify/write to appropriate registers to * set output and direction bits selected by mask. - * these are in their canonical postions (e.g. lsb of + * these are in their canonical positions (e.g. lsb of * dir will end up in D48 of extctrl on existing chips). * returns contents of GP Inputs. */ diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index ab98b6a3ae1e..ceed302cf6a0 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5665,7 +5665,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) /* * Does read/modify/write to appropriate registers to * set output and direction bits selected by mask. - * these are in their canonical postions (e.g. lsb of + * these are in their canonical positions (e.g. lsb of * dir will end up in D48 of extctrl on existing chips). * returns contents of GP Inputs. */ diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c index 586b0e52ba7f..7d868f033bbf 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c @@ -243,10 +243,11 @@ static struct attribute *usnic_ib_qpn_default_attrs[] = { &qpn_attr_summary.attr, NULL }; +ATTRIBUTE_GROUPS(usnic_ib_qpn_default); static struct kobj_type usnic_ib_qpn_type = { .sysfs_ops = &usnic_ib_qpn_sysfs_ops, - .default_attrs = usnic_ib_qpn_default_attrs + .default_groups = usnic_ib_qpn_default_groups, }; int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev) diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 756a83bcff58..5a0e26cd648e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index, int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct usnic_ib_pd *pd = to_upd(ibpd); - void *umem_pd; - umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); - if (IS_ERR_OR_NULL(umem_pd)) { - return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM; - } + pd->umem_pd = usnic_uiom_alloc_pd(); + if (IS_ERR(pd->umem_pd)) + return PTR_ERR(pd->umem_pd); return 0; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c index bf51357ea3aa..9a4de962e947 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c @@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev) tbl->max = num; tbl->mask = mask; spin_lock_init(&tbl->lock); - tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL); + tbl->table = bitmap_zalloc(num, GFP_KERNEL); if (!tbl->table) return -ENOMEM; /* 0th UAR is taken by the device. 
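
The usnic change is one instance of the tree-wide migration from kobj_type::default_attrs to default_groups: ATTRIBUTE_GROUPS(name) takes a NULL-terminated name##_attrs array and emits the name##_groups attribute_group list the kobject core now expects. A self-contained sketch of the pattern (all names illustrative):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "demo\n");
}
static struct kobj_attribute demo_attr = __ATTR_RO(demo);

static struct attribute *demo_default_attrs[] = {
	&demo_attr.attr,
	NULL,				/* array must be NULL-terminated */
};
ATTRIBUTE_GROUPS(demo_default);		/* emits demo_default_groups[] */

static struct kobj_type demo_ktype = {
	.sysfs_ops      = &kobj_sysfs_ops,
	.default_groups = demo_default_groups,
};
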
*/ - set_bit(0, tbl->table); + __set_bit(0, tbl->table); return 0; } @@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev) { struct pvrdma_id_table *tbl = &dev->uar_table.tbl; - kfree(tbl->table); + bitmap_free(tbl->table); } int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) @@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) return -ENOMEM; } - set_bit(obj, tbl->table); + __set_bit(obj, tbl->table); obj |= tbl->top; spin_unlock_irqrestore(&tbl->lock, flags); @@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) obj = uar->index & (tbl->max - 1); spin_lock_irqsave(&tbl->lock, flags); - clear_bit(obj, tbl->table); + __clear_bit(obj, tbl->table); tbl->last = min(tbl->last, obj); tbl->top = (tbl->top + tbl->max) & tbl->mask; spin_unlock_irqrestore(&tbl->lock, flags); diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile index 1e24673e9318..5395a581f4bb 100644 --- a/drivers/infiniband/sw/rxe/Makefile +++ b/drivers/infiniband/sw/rxe/Makefile @@ -22,5 +22,4 @@ rdma_rxe-y := \ rxe_mcast.o \ rxe_task.o \ rxe_net.o \ - rxe_sysfs.o \ rxe_hw_counters.o diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 8e0f9c489cab..fab291245366 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib"); MODULE_DESCRIPTION("Soft RDMA transport"); MODULE_LICENSE("Dual BSD/GPL"); -bool rxe_initialized; - /* free resources for a rxe device all objects created for this device must * have been destroyed */ @@ -290,7 +288,6 @@ static int __init rxe_module_init(void) return err; rdma_link_register(&rxe_link_ops); - rxe_initialized = true; pr_info("loaded\n"); return 0; } @@ -301,7 +298,6 @@ static void __exit rxe_module_exit(void) ib_unregister_driver(RDMA_DRIVER_RXE); rxe_net_exit(); - rxe_initialized = false; pr_info("unloaded\n"); } diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index 1bb3fb618bf5..fb9066e6f5f0 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -39,8 +39,6 @@ #define RXE_ROCE_V2_SPORT (0xc000) -extern bool rxe_initialized; - void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name); diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index d771ba8449a1..f363fe3fa414 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -458,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - unsigned long flags; - if (wqe->has_rd_atomic) { wqe->has_rd_atomic = 0; atomic_inc(&qp->req.rd_atomic); @@ -472,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, if (unlikely(qp->req.state == QP_STATE_DRAIN)) { /* state_lock used by requester & completer */ - spin_lock_irqsave(&qp->state_lock, flags); + spin_lock_bh(&qp->state_lock); if ((qp->req.state == QP_STATE_DRAIN) && (qp->comp.psn == qp->req.psn)) { qp->req.state = QP_STATE_DRAINED; - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -488,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, qp->ibqp.qp_context); } } else { - 
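
The pvrdma (and earlier mthca) hunks drop the atomic set_bit()/clear_bit() in favor of the double-underscore variants. The non-atomic forms skip the LOCK-prefixed read-modify-write, which is safe here because every access to these tables already happens under the table's spinlock; the individual bit ops gain nothing from being atomic themselves. The shape of the pattern, reduced:

#include <linux/bitops.h>
#include <linux/spinlock.h>

struct demo_id_table {
	spinlock_t	lock;
	unsigned long	*table;
	u32		max;
};

static int demo_id_alloc(struct demo_id_table *tbl)
{
	unsigned long flags;
	u32 obj;

	spin_lock_irqsave(&tbl->lock, flags);
	obj = find_first_zero_bit(tbl->table, tbl->max);
	if (obj < tbl->max)
		__set_bit(obj, tbl->table);	/* lock already serializes */
	spin_unlock_irqrestore(&tbl->lock, flags);

	return obj < tbl->max ? (int)obj : -ENOMEM;
}
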
spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); } } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index 6848426c074f..6baaaa34458e 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -42,14 +42,13 @@ err1: static void rxe_send_complete(struct tasklet_struct *t) { struct rxe_cq *cq = from_tasklet(cq, t, comp_task); - unsigned long flags; - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); if (cq->is_dying) { - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); return; } - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } @@ -106,15 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) { struct ib_event ev; - unsigned long flags; int full; void *addr; - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT); if (unlikely(full)) { - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); if (cq->ibcq.event_handler) { ev.device = cq->ibcq.device; ev.element.cq = &cq->ibcq; @@ -130,7 +128,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT); - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); if ((cq->notify == IB_CQ_NEXT_COMP) || (cq->notify == IB_CQ_SOLICITED && solicited)) { @@ -143,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) void rxe_cq_disable(struct rxe_cq *cq) { - unsigned long flags; - - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); cq->is_dying = true; - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); } -void rxe_cq_cleanup(struct rxe_pool_entry *arg) +void rxe_cq_cleanup(struct rxe_pool_elem *elem) { - struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem); + struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); if (cq->queue) rxe_queue_cleanup(cq->queue); diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 1ca43b859d80..b1e174afb1d4 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited); void rxe_cq_disable(struct rxe_cq *cq); -void rxe_cq_cleanup(struct rxe_pool_entry *arg); +void rxe_cq_cleanup(struct rxe_pool_elem *arg); /* rxe_mcast.c */ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, @@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, void rxe_drop_all_mcast_groups(struct rxe_qp *qp); -void rxe_mc_cleanup(struct rxe_pool_entry *arg); +void rxe_mc_cleanup(struct rxe_pool_elem *arg); /* rxe_mmap.c */ struct rxe_mmap_info { @@ -89,7 +89,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey); int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe); int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr); int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); -void rxe_mr_cleanup(struct rxe_pool_entry *arg); +void rxe_mr_cleanup(struct rxe_pool_elem *arg); /* rxe_mw.c */ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); @@ -97,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw); int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe); int 
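
The rxe conversions from spin_lock_irqsave() to spin_lock_bh() rely on rxe being a software device: its completer, requester, and completion-queue work runs in process or tasklet (softirq) context, never hard-IRQ context, so disabling bottom halves is sufficient and the flags save/restore is pure overhead. Reduced to its essentials:

#include <linux/spinlock.h>

struct demo_cq {
	spinlock_t	cq_lock;
	bool		is_dying;
};

/* Before: irqsave pessimistically allows for hard-IRQ callers. */
static void cq_disable_old(struct demo_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

/* After: only process and softirq context take the lock, so
 * blocking bottom halves suffices and 'flags' disappears. */
static void cq_disable_new(struct demo_cq *cq)
{
	spin_lock_bh(&cq->cq_lock);
	cq->is_dying = true;
	spin_unlock_bh(&cq->cq_lock);
}
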
rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey); struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey); -void rxe_mw_cleanup(struct rxe_pool_entry *arg); +void rxe_mw_cleanup(struct rxe_pool_elem *arg); /* rxe_net.c */ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, @@ -131,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp); void rxe_qp_destroy(struct rxe_qp *qp); -void rxe_qp_cleanup(struct rxe_pool_entry *arg); +void rxe_qp_cleanup(struct rxe_pool_elem *elem); static inline int qp_num(struct rxe_qp *qp) { diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c index 1c1d1b53312d..bd1ac88b8700 100644 --- a/drivers/infiniband/sw/rxe/rxe_mcast.c +++ b/drivers/infiniband/sw/rxe/rxe_mcast.c @@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, int err; struct rxe_mc_grp *grp; struct rxe_pool *pool = &rxe->mc_grp_pool; - unsigned long flags; if (rxe->attr.max_mcast_qp_attach == 0) return -EINVAL; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); grp = rxe_pool_get_key_locked(pool, mgid); if (grp) @@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, grp = create_grp(rxe, pool, mgid); if (IS_ERR(grp)) { - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); err = PTR_ERR(grp); return err; } done: - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); *grp_p = grp; return 0; } @@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp) } } -void rxe_mc_cleanup(struct rxe_pool_entry *arg) +void rxe_mc_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem); + struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem); struct rxe_dev *rxe = grp->rxe; rxe_drop_key(grp); diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index bcf717bcf0b3..453ef3c9d535 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) static void rxe_mr_init(int access, struct rxe_mr *mr) { - u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1); + u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1); u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0; /* set ibmr->l/rkey and also copy into private l/rkey @@ -697,9 +697,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) return 0; } -void rxe_mr_cleanup(struct rxe_pool_entry *arg) +void rxe_mr_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem); + struct rxe_mr *mr = container_of(elem, typeof(*mr), elem); ib_umem_release(mr->umem); diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c index 9534a7fe1a98..32dd8c0b8b9e 100644 --- a/drivers/infiniband/sw/rxe/rxe_mw.c +++ b/drivers/infiniband/sw/rxe/rxe_mw.c @@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) } rxe_add_index(mw); - mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1); + mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? 
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID; spin_lock_init(&mw->lock); @@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw) { struct rxe_mw *mw = to_rmw(ibmw); struct rxe_pd *pd = to_rpd(ibmw->pd); - unsigned long flags; - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); rxe_do_dealloc_mw(mw); - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); rxe_drop_ref(mw); rxe_drop_ref(pd); @@ -197,7 +196,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) struct rxe_dev *rxe = to_rdev(qp->ibqp.device); u32 mw_rkey = wqe->wr.wr.mw.mw_rkey; u32 mr_lkey = wqe->wr.wr.mw.mr_lkey; - unsigned long flags; mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8); if (unlikely(!mw)) { @@ -225,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) mr = NULL; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_bind_mw(qp, wqe, mw, mr); if (ret) @@ -233,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) rxe_do_bind_mw(qp, wqe, mw, mr); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); err_drop_mr: if (mr) rxe_drop_ref(mr); @@ -280,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw) int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); - unsigned long flags; struct rxe_mw *mw; int ret; @@ -295,7 +292,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) goto err_drop_ref; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_invalidate_mw(qp, mw); if (ret) @@ -303,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) rxe_do_invalidate_mw(mw); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); err_drop_ref: rxe_drop_ref(mw); err: @@ -333,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) return mw; } -void rxe_mw_cleanup(struct rxe_pool_entry *elem) +void rxe_mw_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem); + struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); rxe_drop_index(mw); } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 2cb810cb890a..be72bdbfb4ba 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -22,24 +22,20 @@ static struct rxe_recv_sockets recv_sockets; int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) { - int err; unsigned char ll_addr[ETH_ALEN]; ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); - err = dev_mc_add(rxe->ndev, ll_addr); - return err; + return dev_mc_add(rxe->ndev, ll_addr); } int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid) { - int err; unsigned char ll_addr[ETH_ALEN]; ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); - err = dev_mc_del(rxe->ndev, ll_addr); - return err; + return dev_mc_del(rxe->ndev, ll_addr); } static struct dst_entry *rxe_find_route4(struct net_device *ndev, @@ -444,7 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, else err = rxe_send(skb, pkt); if (err) { - rxe->xmit_errors++; rxe_counter_inc(rxe, RXE_CNT_SEND_ERR); return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 3ef5a10a6efd..df596ba7527d 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -108,8 +108,8 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = { struct rxe_opcode_info 
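
The rxe_pool_entry/pelem to rxe_pool_elem/elem rename threads through every rxe object type; each cleanup callback and key construction recovers the containing object from the embedded element with container_of(). A reduced sketch of that embedding pattern, with illustrative types rather than the exact rxe definitions:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_pool_elem {
	int index;
};

struct demo_mw {
	struct demo_pool_elem	elem;	/* was named 'pelem' before */
	u32			rkey;
};

static void demo_mw_cleanup(struct demo_pool_elem *elem)
{
	/* map the embedded element back to its containing object */
	struct demo_mw *mw = container_of(elem, struct demo_mw, elem);

	mw->rkey = 0;
}
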
rxe_opcode[RXE_NUM_OPCODE] = { [IB_OPCODE_RC_SEND_FIRST] = { .name = "IB_OPCODE_RC_SEND_FIRST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -117,9 +117,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { } }, [IB_OPCODE_RC_SEND_MIDDLE] = { - .name = "IB_OPCODE_RC_SEND_MIDDLE]", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .name = "IB_OPCODE_RC_SEND_MIDDLE", + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -128,8 +128,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_LAST] = { .name = "IB_OPCODE_RC_SEND_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -138,21 +138,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_SEND_ONLY] = { .name = "IB_OPCODE_RC_SEND_ONLY", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -161,33 +161,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_RC_RDMA_WRITE_FIRST", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -196,8 +196,8 @@ struct rxe_opcode_info 
rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_RC_RDMA_WRITE_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -206,69 +206,69 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_REQUEST] = { .name = "IB_OPCODE_RC_RDMA_READ_REQUEST", - .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = { @@ -282,109 +282,110 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_END_MASK, 
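
The long run of rxe_opcode.c hunks around this point changes no values at all; it only moves the `|` and `+` operators from the start of each continuation line to the end of the line being continued, the placement the kernel coding style (and checkpatch) prefers. In miniature:

#define A	0x1
#define B	0x2
#define C	0x4

/* Discouraged: the operator leads the continuation line. */
static const int mask_old = A | B
			    | C;

/* Preferred: the operator ends the line being continued. */
static const int mask_new = A | B |
			    C;
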
.length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RC_ACKNOWLEDGE", - .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE", - .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_ATMACK] = RXE_BTH_BYTES - + RXE_AETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMACK_BYTES + RXE_AETH_BYTES, + [RXE_ATMACK] = RXE_BTH_BYTES + + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMACK_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_COMPARE_SWAP] = { .name = "IB_OPCODE_RC_COMPARE_SWAP", - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_ATMETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMETH_BYTES, } }, [IB_OPCODE_RC_FETCH_ADD] = { .name = "IB_OPCODE_RC_FETCH_ADD", - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_ATMETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMETH_BYTES, } }, [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = { .name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE", - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IETH_BYTES, } }, [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = { .name = "IB_OPCODE_RC_SEND_ONLY_INV", - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_END_MASK | RXE_START_MASK, + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_END_MASK | 
RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IETH_BYTES, } }, /* UC */ [IB_OPCODE_UC_SEND_FIRST] = { .name = "IB_OPCODE_UC_SEND_FIRST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -393,8 +394,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_MIDDLE] = { .name = "IB_OPCODE_UC_SEND_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -403,8 +404,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_LAST] = { .name = "IB_OPCODE_UC_SEND_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -413,21 +414,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_SEND_ONLY] = { .name = "IB_OPCODE_UC_SEND_ONLY", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -436,33 +437,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_UC_RDMA_WRITE_FIRST", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = 
RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -471,8 +472,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_UC_RDMA_WRITE_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -481,460 +482,460 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, /* RD */ [IB_OPCODE_RD_SEND_FIRST] = { .name = "IB_OPCODE_RD_SEND_FIRST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_MIDDLE] = { .name = "IB_OPCODE_RD_SEND_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = 
RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_LAST] = { .name = "IB_OPCODE_RD_SEND_LAST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK - | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_SEND_ONLY] = { .name = "IB_OPCODE_RD_SEND_ONLY", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + 
RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_RD_RDMA_WRITE_FIRST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_RD_RDMA_WRITE_LAST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + 
[RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES - + RXE_DETH_BYTES + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES + + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_REQUEST] = { .name = "IB_OPCODE_RD_RDMA_READ_REQUEST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_REQ_MASK | RXE_READ_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_REQ_MASK | RXE_READ_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = { .name = 
"IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK - | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | + RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK - | RXE_ACK_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | + RXE_ACK_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_ACKNOWLEDGE] = { .name = "IB_OPCODE_RD_ACKNOWLEDGE", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK | + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMACK] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + 
[RXE_ATMACK] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_COMPARE_SWAP] = { .name = "RD_COMPARE_SWAP", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK - | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | + RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, [RXE_PAYLOAD] = RXE_BTH_BYTES + - + RXE_ATMETH_BYTES - + RXE_DETH_BYTES + - + RXE_RDETH_BYTES, + RXE_ATMETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_FETCH_ADD] = { .name = "IB_OPCODE_RD_FETCH_ADD", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK - | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | + RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, [RXE_PAYLOAD] = RXE_BTH_BYTES + - + RXE_ATMETH_BYTES - + RXE_DETH_BYTES + - + RXE_RDETH_BYTES, + RXE_ATMETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, /* UD */ [IB_OPCODE_UD_SEND_ONLY] = { .name = "IB_OPCODE_UD_SEND_ONLY", - .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_DETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_DETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index 2e80bb6aa957..4cb003885e00 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -5,13 +5,14 @@ */ #include "rxe.h" 
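A note on the rxe_opcode.c reflow that ends above: it is almost entirely stylistic, moving each '|' and '+' to the end of the broken line per kernel coding style, but it also removes a doubled '+' in the RD_COMPARE_SWAP and RD_FETCH_ADD payload offsets (a harmless unary plus in the old code). A minimal userspace sketch of the resulting style, using stand-in values for the driver's RXE_*_BYTES header sizes:

#include <stdio.h>

/* Stand-in header sizes; the real constants are RXE_BTH_BYTES etc. */
#define BTH_BYTES	12
#define RDETH_BYTES	4
#define DETH_BYTES	8
#define ATMETH_BYTES	28

int main(void)
{
	/* After the reflow, each continuation line ends with the
	 * operator, matching the mask/length/offset initializers above.
	 */
	int payload = BTH_BYTES +
		      RDETH_BYTES +
		      DETH_BYTES +
		      ATMETH_BYTES;

	printf("RD atomic payload offset: %d bytes\n", payload);
	return 0;
}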
-#include "rxe_loc.h" + +#define RXE_POOL_ALIGN (16) static const struct rxe_type_info { const char *name; size_t size; size_t elem_offset; - void (*cleanup)(struct rxe_pool_entry *obj); + void (*cleanup)(struct rxe_pool_elem *obj); enum rxe_pool_flags flags; u32 min_index; u32 max_index; @@ -21,19 +22,19 @@ static const struct rxe_type_info { [RXE_TYPE_UC] = { .name = "rxe-uc", .size = sizeof(struct rxe_ucontext), - .elem_offset = offsetof(struct rxe_ucontext, pelem), + .elem_offset = offsetof(struct rxe_ucontext, elem), .flags = RXE_POOL_NO_ALLOC, }, [RXE_TYPE_PD] = { .name = "rxe-pd", .size = sizeof(struct rxe_pd), - .elem_offset = offsetof(struct rxe_pd, pelem), + .elem_offset = offsetof(struct rxe_pd, elem), .flags = RXE_POOL_NO_ALLOC, }, [RXE_TYPE_AH] = { .name = "rxe-ah", .size = sizeof(struct rxe_ah), - .elem_offset = offsetof(struct rxe_ah, pelem), + .elem_offset = offsetof(struct rxe_ah, elem), .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_AH_INDEX, .max_index = RXE_MAX_AH_INDEX, @@ -41,7 +42,7 @@ static const struct rxe_type_info { [RXE_TYPE_SRQ] = { .name = "rxe-srq", .size = sizeof(struct rxe_srq), - .elem_offset = offsetof(struct rxe_srq, pelem), + .elem_offset = offsetof(struct rxe_srq, elem), .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_SRQ_INDEX, .max_index = RXE_MAX_SRQ_INDEX, @@ -49,7 +50,7 @@ static const struct rxe_type_info { [RXE_TYPE_QP] = { .name = "rxe-qp", .size = sizeof(struct rxe_qp), - .elem_offset = offsetof(struct rxe_qp, pelem), + .elem_offset = offsetof(struct rxe_qp, elem), .cleanup = rxe_qp_cleanup, .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_QP_INDEX, @@ -58,14 +59,14 @@ static const struct rxe_type_info { [RXE_TYPE_CQ] = { .name = "rxe-cq", .size = sizeof(struct rxe_cq), - .elem_offset = offsetof(struct rxe_cq, pelem), + .elem_offset = offsetof(struct rxe_cq, elem), .flags = RXE_POOL_NO_ALLOC, .cleanup = rxe_cq_cleanup, }, [RXE_TYPE_MR] = { .name = "rxe-mr", .size = sizeof(struct rxe_mr), - .elem_offset = offsetof(struct rxe_mr, pelem), + .elem_offset = offsetof(struct rxe_mr, elem), .cleanup = rxe_mr_cleanup, .flags = RXE_POOL_INDEX, .min_index = RXE_MIN_MR_INDEX, @@ -74,7 +75,7 @@ static const struct rxe_type_info { [RXE_TYPE_MW] = { .name = "rxe-mw", .size = sizeof(struct rxe_mw), - .elem_offset = offsetof(struct rxe_mw, pelem), + .elem_offset = offsetof(struct rxe_mw, elem), .cleanup = rxe_mw_cleanup, .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_MW_INDEX, @@ -83,7 +84,7 @@ static const struct rxe_type_info { [RXE_TYPE_MC_GRP] = { .name = "rxe-mc_grp", .size = sizeof(struct rxe_mc_grp), - .elem_offset = offsetof(struct rxe_mc_grp, pelem), + .elem_offset = offsetof(struct rxe_mc_grp, elem), .cleanup = rxe_mc_cleanup, .flags = RXE_POOL_KEY, .key_offset = offsetof(struct rxe_mc_grp, mgid), @@ -92,15 +93,10 @@ static const struct rxe_type_info { [RXE_TYPE_MC_ELEM] = { .name = "rxe-mc_elem", .size = sizeof(struct rxe_mc_elem), - .elem_offset = offsetof(struct rxe_mc_elem, pelem), + .elem_offset = offsetof(struct rxe_mc_elem, elem), }, }; -static inline const char *pool_name(struct rxe_pool *pool) -{ - return rxe_type_info[pool->type].name; -} - static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) { int err = 0; @@ -130,35 +126,36 @@ int rxe_pool_init( enum rxe_elem_type type, unsigned int max_elem) { + const struct rxe_type_info *info = &rxe_type_info[type]; int err = 0; - size_t size = rxe_type_info[type].size; memset(pool, 0, sizeof(*pool)); pool->rxe 
= rxe; + pool->name = info->name; pool->type = type; pool->max_elem = max_elem; - pool->elem_size = ALIGN(size, RXE_POOL_ALIGN); - pool->flags = rxe_type_info[type].flags; - pool->index.tree = RB_ROOT; - pool->key.tree = RB_ROOT; - pool->cleanup = rxe_type_info[type].cleanup; + pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN); + pool->elem_offset = info->elem_offset; + pool->flags = info->flags; + pool->cleanup = info->cleanup; atomic_set(&pool->num_elem, 0); rwlock_init(&pool->pool_lock); - if (rxe_type_info[type].flags & RXE_POOL_INDEX) { - err = rxe_pool_init_index(pool, - rxe_type_info[type].max_index, - rxe_type_info[type].min_index); + if (pool->flags & RXE_POOL_INDEX) { + pool->index.tree = RB_ROOT; + err = rxe_pool_init_index(pool, info->max_index, + info->min_index); if (err) goto out; } - if (rxe_type_info[type].flags & RXE_POOL_KEY) { - pool->key.key_offset = rxe_type_info[type].key_offset; - pool->key.key_size = rxe_type_info[type].key_size; + if (pool->flags & RXE_POOL_KEY) { + pool->key.tree = RB_ROOT; + pool->key.key_offset = info->key_offset; + pool->key.key_size = info->key_size; } out: @@ -169,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool) { if (atomic_read(&pool->num_elem) > 0) pr_warn("%s pool destroyed with unfree'd elem\n", - pool_name(pool)); + pool->name); - bitmap_free(pool->index.table); + if (pool->flags & RXE_POOL_INDEX) + bitmap_free(pool->index.table); } static u32 alloc_index(struct rxe_pool *pool) @@ -189,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool) return index + pool->index.min_index; } -static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new) +static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new) { struct rb_node **link = &pool->index.tree.rb_node; struct rb_node *parent = NULL; - struct rxe_pool_entry *elem; + struct rxe_pool_elem *elem; while (*link) { parent = *link; - elem = rb_entry(parent, struct rxe_pool_entry, index_node); + elem = rb_entry(parent, struct rxe_pool_elem, index_node); if (elem->index == new->index) { pr_warn("element already exists!\n"); @@ -216,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new) return 0; } -static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new) +static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new) { struct rb_node **link = &pool->key.tree.rb_node; struct rb_node *parent = NULL; - struct rxe_pool_entry *elem; + struct rxe_pool_elem *elem; int cmp; while (*link) { parent = *link; - elem = rb_entry(parent, struct rxe_pool_entry, key_node); + elem = rb_entry(parent, struct rxe_pool_elem, key_node); cmp = memcmp((u8 *)elem + pool->key.key_offset, - (u8 *)new + pool->key.key_offset, pool->key.key_size); + (u8 *)new + pool->key.key_offset, + pool->key.key_size); if (cmp == 0) { pr_warn("key already exists!\n"); @@ -247,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new) return 0; } -int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key) +int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key) { struct rxe_pool *pool = elem->pool; int err; @@ -258,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key) return err; } -int __rxe_add_key(struct rxe_pool_entry *elem, void *key) +int __rxe_add_key(struct rxe_pool_elem *elem, void *key) { struct rxe_pool *pool = elem->pool; - unsigned long flags; int err; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); err = 
__rxe_add_key_locked(elem, key); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); return err; } -void __rxe_drop_key_locked(struct rxe_pool_entry *elem) +void __rxe_drop_key_locked(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; rb_erase(&elem->key_node, &pool->key.tree); } -void __rxe_drop_key(struct rxe_pool_entry *elem) +void __rxe_drop_key(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; - unsigned long flags; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); __rxe_drop_key_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); } -int __rxe_add_index_locked(struct rxe_pool_entry *elem) +int __rxe_add_index_locked(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; int err; @@ -299,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem) return err; } -int __rxe_add_index(struct rxe_pool_entry *elem) +int __rxe_add_index(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; - unsigned long flags; int err; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); err = __rxe_add_index_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); return err; } -void __rxe_drop_index_locked(struct rxe_pool_entry *elem) +void __rxe_drop_index_locked(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; @@ -320,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem) rb_erase(&elem->index_node, &pool->index.tree); } -void __rxe_drop_index(struct rxe_pool_entry *elem) +void __rxe_drop_index(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; - unsigned long flags; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); __rxe_drop_index_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); } void *rxe_alloc_locked(struct rxe_pool *pool) { - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; if (atomic_inc_return(&pool->num_elem) > pool->max_elem) goto out_cnt; - obj = kzalloc(info->size, GFP_ATOMIC); + obj = kzalloc(pool->elem_size, GFP_ATOMIC); if (!obj) goto out_cnt; - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); + elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset); elem->pool = pool; + elem->obj = obj; kref_init(&elem->ref_cnt); return obj; @@ -357,20 +352,20 @@ out_cnt: void *rxe_alloc(struct rxe_pool *pool) { - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; if (atomic_inc_return(&pool->num_elem) > pool->max_elem) goto out_cnt; - obj = kzalloc(info->size, GFP_KERNEL); + obj = kzalloc(pool->elem_size, GFP_KERNEL); if (!obj) goto out_cnt; - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); + elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset); elem->pool = pool; + elem->obj = obj; kref_init(&elem->ref_cnt); return obj; @@ -380,12 +375,13 @@ out_cnt: return NULL; } -int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem) +int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem) { if (atomic_inc_return(&pool->num_elem) > pool->max_elem) goto out_cnt; elem->pool = pool; + elem->obj = (u8 *)elem - pool->elem_offset; kref_init(&elem->ref_cnt); return 0; @@ 
-397,17 +393,16 @@ out_cnt: void rxe_elem_release(struct kref *kref) { - struct rxe_pool_entry *elem = - container_of(kref, struct rxe_pool_entry, ref_cnt); + struct rxe_pool_elem *elem = + container_of(kref, struct rxe_pool_elem, ref_cnt); struct rxe_pool *pool = elem->pool; - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - u8 *obj; + void *obj; if (pool->cleanup) pool->cleanup(elem); if (!(pool->flags & RXE_POOL_NO_ALLOC)) { - obj = (u8 *)elem - info->elem_offset; + obj = elem->obj; kfree(obj); } @@ -416,15 +411,14 @@ void rxe_elem_release(struct kref *kref) void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) { - const struct rxe_type_info *info = &rxe_type_info[pool->type]; struct rb_node *node; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; node = pool->index.tree.rb_node; while (node) { - elem = rb_entry(node, struct rxe_pool_entry, index_node); + elem = rb_entry(node, struct rxe_pool_elem, index_node); if (elem->index > index) node = node->rb_left; @@ -436,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) if (node) { kref_get(&elem->ref_cnt); - obj = (u8 *)elem - info->elem_offset; + obj = elem->obj; } else { obj = NULL; } @@ -446,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) void *rxe_pool_get_index(struct rxe_pool *pool, u32 index) { - u8 *obj; - unsigned long flags; + void *obj; - read_lock_irqsave(&pool->pool_lock, flags); + read_lock_bh(&pool->pool_lock); obj = rxe_pool_get_index_locked(pool, index); - read_unlock_irqrestore(&pool->pool_lock, flags); + read_unlock_bh(&pool->pool_lock); return obj; } void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) { - const struct rxe_type_info *info = &rxe_type_info[pool->type]; struct rb_node *node; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; int cmp; node = pool->key.tree.rb_node; while (node) { - elem = rb_entry(node, struct rxe_pool_entry, key_node); + elem = rb_entry(node, struct rxe_pool_elem, key_node); cmp = memcmp((u8 *)elem + pool->key.key_offset, key, pool->key.key_size); @@ -482,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) if (node) { kref_get(&elem->ref_cnt); - obj = (u8 *)elem - info->elem_offset; + obj = elem->obj; } else { obj = NULL; } @@ -492,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) void *rxe_pool_get_key(struct rxe_pool *pool, void *key) { - u8 *obj; - unsigned long flags; + void *obj; - read_lock_irqsave(&pool->pool_lock, flags); + read_lock_bh(&pool->pool_lock); obj = rxe_pool_get_key_locked(pool, key); - read_unlock_irqrestore(&pool->pool_lock, flags); + read_unlock_bh(&pool->pool_lock); return obj; } diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h index 8ecd9f870aea..214279310f4d 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.h +++ b/drivers/infiniband/sw/rxe/rxe_pool.h @@ -7,9 +7,6 @@ #ifndef RXE_POOL_H #define RXE_POOL_H -#define RXE_POOL_ALIGN (16) -#define RXE_POOL_CACHE_FLAGS (0) - enum rxe_pool_flags { RXE_POOL_INDEX = BIT(1), RXE_POOL_KEY = BIT(2), @@ -30,10 +27,9 @@ enum rxe_elem_type { RXE_NUM_TYPES, /* keep me last */ }; -struct rxe_pool_entry; - -struct rxe_pool_entry { +struct rxe_pool_elem { struct rxe_pool *pool; + void *obj; struct kref ref_cnt; struct list_head list; @@ -47,14 +43,16 @@ struct rxe_pool_entry { struct rxe_pool { struct rxe_dev *rxe; + const char *name; rwlock_t pool_lock; /* protects pool 
add/del/search */ - size_t elem_size; - void (*cleanup)(struct rxe_pool_entry *obj); + void (*cleanup)(struct rxe_pool_elem *obj); enum rxe_pool_flags flags; enum rxe_elem_type type; unsigned int max_elem; atomic_t num_elem; + size_t elem_size; + size_t elem_offset; /* only used if indexed */ struct { @@ -89,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool); void *rxe_alloc(struct rxe_pool *pool); /* connect already allocated object to pool */ -int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem); +int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem); -#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem) +#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem) /* assign an index to an indexed object and insert object into * pool's rb tree holding and not holding the pool_lock */ -int __rxe_add_index_locked(struct rxe_pool_entry *elem); +int __rxe_add_index_locked(struct rxe_pool_elem *elem); -#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem) +#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem) -int __rxe_add_index(struct rxe_pool_entry *elem); +int __rxe_add_index(struct rxe_pool_elem *elem); -#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem) +#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem) /* drop an index and remove object from rb tree * holding and not holding the pool_lock */ -void __rxe_drop_index_locked(struct rxe_pool_entry *elem); +void __rxe_drop_index_locked(struct rxe_pool_elem *elem); -#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem) +#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem) -void __rxe_drop_index(struct rxe_pool_entry *elem); +void __rxe_drop_index(struct rxe_pool_elem *elem); -#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem) +#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem) /* assign a key to a keyed object and insert object into * pool's rb tree holding and not holding pool_lock */ -int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key); +int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key); -#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key) +#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key) -int __rxe_add_key(struct rxe_pool_entry *elem, void *key); +int __rxe_add_key(struct rxe_pool_elem *elem, void *key); -#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key) +#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key) /* remove elem from rb tree holding and not holding the pool_lock */ -void __rxe_drop_key_locked(struct rxe_pool_entry *elem); +void __rxe_drop_key_locked(struct rxe_pool_elem *elem); -#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem) +#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem) -void __rxe_drop_key(struct rxe_pool_entry *elem); +void __rxe_drop_key(struct rxe_pool_elem *elem); -#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem) +#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem) /* lookup an indexed object from index holding and not holding the pool_lock. 
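The rxe_pool rework above (rxe_pool_entry becoming rxe_pool_elem) adds an obj back-pointer that is set once at allocation, so the release and lookup paths can recover the containing object directly instead of re-deriving it from the per-type elem_offset table each time. A userspace model of the scheme, with hypothetical struct names (pool_elem, thing) standing in for the rxe types:

#include <stddef.h>
#include <stdlib.h>

struct pool_elem {
	void *obj;		/* back-pointer, filled at allocation */
};

struct thing {
	int payload;
	struct pool_elem elem;	/* embedded member, like rxe_qp.elem */
};

int main(void)
{
	size_t elem_offset = offsetof(struct thing, elem);
	struct thing *obj = calloc(1, sizeof(*obj));
	struct pool_elem *elem;

	if (!obj)
		return 1;

	elem = (struct pool_elem *)((char *)obj + elem_offset);
	elem->obj = obj;	/* set once, as rxe_alloc() now does */

	/* release path: recover the object directly, no type table */
	free(elem->obj);
	return 0;
}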
* takes a reference on object @@ -153,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key); void rxe_elem_release(struct kref *kref); /* take a reference on an object */ -#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt) +#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt) /* drop a reference on an object */ -#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release) +#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release) #endif /* RXE_POOL_H */ diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 54b8711321c1..5018b9387694 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, qp->attr.path_mtu = 1; qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu); - qpn = qp->pelem.index; + qpn = qp->elem.index; port = &rxe->port; switch (init->qp_type) { @@ -217,8 +217,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, * the port number must be in the Dynamic Ports range * (0xc000 - 0xffff). */ - qp->src_port = RXE_ROCE_V2_SPORT + - (hash_32_generic(qp_num(qp), 14) & 0x3fff); + qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff); qp->sq.max_wr = init->cap.max_send_wr; /* These caps are limited by rxe_qp_chk_cap() done by the caller */ @@ -832,9 +831,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work) } /* called when the last reference to the qp is dropped */ -void rxe_qp_cleanup(struct rxe_pool_entry *arg) +void rxe_qp_cleanup(struct rxe_pool_elem *elem) { - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + struct rxe_qp *qp = container_of(elem, typeof(*qp), elem); execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); } diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 6e6e023c1b45..a1b283dd2d4c 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -151,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, struct rxe_queue *new_q; unsigned int num_elem = *num_elem_p; int err; - unsigned long flags = 0, flags1; new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type); if (!new_q) @@ -165,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, goto err1; } - spin_lock_irqsave(consumer_lock, flags1); + spin_lock_bh(consumer_lock); if (producer_lock) { - spin_lock_irqsave(producer_lock, flags); + spin_lock_bh(producer_lock); err = resize_finish(q, new_q, num_elem); - spin_unlock_irqrestore(producer_lock, flags); + spin_unlock_bh(producer_lock); } else { err = resize_finish(q, new_q, num_elem); } - spin_unlock_irqrestore(consumer_lock, flags1); + spin_unlock_bh(consumer_lock); rxe_queue_cleanup(new_q); /* new/old dep on err */ if (err) diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 0c9d2af15f3d..5eb89052dd66 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -110,7 +110,6 @@ void rnr_nak_timer(struct timer_list *t) static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) { struct rxe_send_wqe *wqe; - unsigned long flags; struct rxe_queue *q = qp->sq.queue; unsigned int index = qp->req.wqe_index; unsigned int cons; @@ -124,25 +123,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) /* check to see if we are drained; * state_lock used by requester and completer */ - 
spin_lock_irqsave(&qp->state_lock, flags); + spin_lock_bh(&qp->state_lock); do { if (qp->req.state != QP_STATE_DRAIN) { /* comp just finished */ - spin_unlock_irqrestore(&qp->state_lock, - flags); + spin_unlock_bh(&qp->state_lock); break; } if (wqe && ((index != cons) || (wqe->state != wqe_state_posted))) { /* comp not done yet */ - spin_unlock_irqrestore(&qp->state_lock, - flags); + spin_unlock_bh(&qp->state_lock); break; } qp->req.state = QP_STATE_DRAINED; - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -372,7 +369,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, int pad = (-payload) & 0x3; int paylen; int solicited; - u16 pkey; u32 qp_num; int ack_req; @@ -404,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) == (RXE_WRITE_MASK | RXE_IMMDT_MASK)); - pkey = IB_DEFAULT_PKEY_FULL; - qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn : qp->attr.dest_qp_num; @@ -414,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, if (ack_req) qp->req.noack_pkts = 0; - bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num, + bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num, ack_req, pkt->psn); /* init optional headers */ diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index eb1c4c3b3a78..0c0721f04357 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -83,7 +83,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, srq->ibsrq.event_handler = init->event_handler; srq->ibsrq.srq_context = init->srq_context; srq->limit = init->attr.srq_limit; - srq->srq_num = srq->pelem.index; + srq->srq_num = srq->elem.index; srq->rq.max_wr = init->attr.max_wr; srq->rq.max_sge = init->attr.max_sge; diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c deleted file mode 100644 index 666202ddff48..000000000000 --- a/drivers/infiniband/sw/rxe/rxe_sysfs.c +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* - * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. - */ - -#include "rxe.h" -#include "rxe_net.h" - -/* Copy argument and remove trailing CR. Return the new length. */ -static int sanitize_arg(const char *val, char *intf, int intf_len) -{ - int len; - - if (!val) - return 0; - - /* Remove newline. 
*/ - for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++) - intf[len] = val[len]; - intf[len] = 0; - - if (len == 0 || (val[len] != 0 && val[len] != '\n')) - return 0; - - return len; -} - -static int rxe_param_set_add(const char *val, const struct kernel_param *kp) -{ - int len; - int err = 0; - char intf[32]; - struct net_device *ndev; - struct rxe_dev *exists; - - if (!rxe_initialized) { - pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); - return -EAGAIN; - } - - len = sanitize_arg(val, intf, sizeof(intf)); - if (!len) { - pr_err("add: invalid interface name\n"); - return -EINVAL; - } - - ndev = dev_get_by_name(&init_net, intf); - if (!ndev) { - pr_err("interface %s not found\n", intf); - return -EINVAL; - } - - if (is_vlan_dev(ndev)) { - pr_err("rxe creation allowed on top of a real device only\n"); - err = -EPERM; - goto err; - } - - exists = rxe_get_dev_from_net(ndev); - if (exists) { - ib_device_put(&exists->ib_dev); - pr_err("already configured on %s\n", intf); - err = -EINVAL; - goto err; - } - - err = rxe_net_add("rxe%d", ndev); - if (err) { - pr_err("failed to add %s\n", intf); - goto err; - } - -err: - dev_put(ndev); - return err; -} - -static int rxe_param_set_remove(const char *val, const struct kernel_param *kp) -{ - int len; - char intf[32]; - struct ib_device *ib_dev; - - len = sanitize_arg(val, intf, sizeof(intf)); - if (!len) { - pr_err("add: invalid interface name\n"); - return -EINVAL; - } - - if (strncmp("all", intf, len) == 0) { - pr_info("rxe_sys: remove all"); - ib_unregister_driver(RDMA_DRIVER_RXE); - return 0; - } - - ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE); - if (!ib_dev) { - pr_err("not configured on %s\n", intf); - return -EINVAL; - } - - ib_unregister_device_and_put(ib_dev); - - return 0; -} - -static const struct kernel_param_ops rxe_add_ops = { - .set = rxe_param_set_add, -}; - -static const struct kernel_param_ops rxe_remove_ops = { - .set = rxe_param_set_remove, -}; - -module_param_cb(add, &rxe_add_ops, NULL, 0200); -MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface"); -module_param_cb(remove, &rxe_remove_ops, NULL, 0200); -MODULE_PARM_DESC(remove, "DEPRECATED. 
Remove RXE device over network interface"); diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 6951fdcb31bf..0c4db5bb17d7 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t) { int cont; int ret; - unsigned long flags; struct rxe_task *task = from_tasklet(task, t, tasklet); - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); switch (task->state) { case TASK_STATE_START: task->state = TASK_STATE_BUSY; - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); break; case TASK_STATE_BUSY: task->state = TASK_STATE_ARMED; fallthrough; case TASK_STATE_ARMED: - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); return; default: - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); pr_warn("%s failed with bad state %d\n", __func__, task->state); return; } @@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t) cont = 0; ret = task->func(task->arg); - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); switch (task->state) { case TASK_STATE_BUSY: if (ret) @@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t) pr_warn("%s failed with bad state %d\n", __func__, task->state); } - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); } while (cont); task->ret = ret; @@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task, void rxe_cleanup_task(struct rxe_task *task) { - unsigned long flags; bool idle; /* @@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task) task->destroyed = true; do { - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); idle = (task->state == TASK_STATE_START); - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); } while (!idle); tasklet_kill(&task->tasklet); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 0aa0d7e52773..915ad6664321 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -182,7 +182,7 @@ static int rxe_create_ah(struct ib_ah *ibah, /* create index > 0 */ rxe_add_index(ah); - ah->ah_num = ah->pelem.index; + ah->ah_num = ah->elem.index; if (uresp) { /* only if new user provider */ @@ -383,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { int err = 0; - unsigned long flags; struct rxe_srq *srq = to_rsrq(ibsrq); - spin_lock_irqsave(&srq->rq.producer_lock, flags); + spin_lock_bh(&srq->rq.producer_lock); while (wr) { err = post_one_recv(&srq->rq, wr); @@ -395,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, wr = wr->next; } - spin_unlock_irqrestore(&srq->rq.producer_lock, flags); + spin_unlock_bh(&srq->rq.producer_lock); if (err) *bad_wr = wr; @@ -469,6 +468,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (err) goto err1; + if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) + qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, + qp->ibqp.qp_num, + qp->attr.dest_qp_num); + return 0; err1: @@ -634,19 +638,18 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr, int err; struct rxe_sq *sq = &qp->sq; struct rxe_send_wqe *send_wqe; - unsigned long flags; int 
full; err = validate_send_wr(qp, ibwr, mask, length); if (err) return err; - spin_lock_irqsave(&qp->sq.sq_lock, flags); + spin_lock_bh(&qp->sq.sq_lock); full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER); if (unlikely(full)) { - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); + spin_unlock_bh(&qp->sq.sq_lock); return -ENOMEM; } @@ -655,7 +658,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr, queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER); - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); + spin_unlock_bh(&qp->sq.sq_lock); return 0; } @@ -735,7 +738,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, int err = 0; struct rxe_qp *qp = to_rqp(ibqp); struct rxe_rq *rq = &qp->rq; - unsigned long flags; if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) { *bad_wr = wr; @@ -749,7 +751,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, goto err1; } - spin_lock_irqsave(&rq->producer_lock, flags); + spin_lock_bh(&rq->producer_lock); while (wr) { err = post_one_recv(rq, wr); @@ -760,7 +762,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, wr = wr->next; } - spin_unlock_irqrestore(&rq->producer_lock, flags); + spin_unlock_bh(&rq->producer_lock); if (qp->resp.state == QP_STATE_ERROR) rxe_run_task(&qp->resp.task, 1); @@ -841,9 +843,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) int i; struct rxe_cq *cq = to_rcq(ibcq); struct rxe_cqe *cqe; - unsigned long flags; - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); for (i = 0; i < num_entries; i++) { cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER); if (!cqe) @@ -852,7 +853,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) memcpy(wc++, &cqe->ibwc, sizeof(*wc)); queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER); } - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); return i; } @@ -870,11 +871,10 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt) static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct rxe_cq *cq = to_rcq(ibcq); - unsigned long irq_flags; int ret = 0; int empty; - spin_lock_irqsave(&cq->cq_lock, irq_flags); + spin_lock_bh(&cq->cq_lock); if (cq->notify != IB_CQ_NEXT_COMP) cq->notify = flags & IB_CQ_SOLICITED_MASK; @@ -883,7 +883,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty) ret = 1; - spin_unlock_irqrestore(&cq->cq_lock, irq_flags); + spin_unlock_bh(&cq->cq_lock); return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 35e041450090..e48969e8d4c8 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -35,17 +35,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b) struct rxe_ucontext { struct ib_ucontext ibuc; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; }; struct rxe_pd { struct ib_pd ibpd; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; }; struct rxe_ah { struct ib_ah ibah; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_av av; bool is_user; int ah_num; @@ -60,7 +60,7 @@ struct rxe_cqe { struct rxe_cq { struct ib_cq ibcq; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_queue *queue; spinlock_t cq_lock; u8 notify; @@ -95,7 +95,7 @@ struct rxe_rq { struct rxe_srq { struct ib_srq ibsrq; - 
struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_pd *pd; struct rxe_rq rq; u32 srq_num; @@ -209,7 +209,7 @@ struct rxe_resp_info { struct rxe_qp { struct ib_qp ibqp; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct ib_qp_attr attr; unsigned int valid; unsigned int mtu; @@ -309,7 +309,7 @@ static inline int rkey_is_mw(u32 rkey) } struct rxe_mr { - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct ib_mr ibmr; struct ib_umem *umem; @@ -342,7 +342,7 @@ enum rxe_mw_state { struct rxe_mw { struct ib_mw ibmw; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; spinlock_t lock; enum rxe_mw_state state; struct rxe_qp *qp; /* Type 2 only */ @@ -354,7 +354,7 @@ struct rxe_mw { }; struct rxe_mc_grp { - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; spinlock_t mcg_lock; /* guard group */ struct rxe_dev *rxe; struct list_head qp_list; @@ -365,7 +365,7 @@ struct rxe_mc_grp { }; struct rxe_mc_elem { - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct list_head qp_list; struct list_head grp_list; struct rxe_qp *qp; @@ -392,8 +392,6 @@ struct rxe_dev { struct net_device *ndev; - int xmit_errors; - struct rxe_pool uc_pool; struct rxe_pool pd_pool; struct rxe_pool ah_pool; @@ -484,6 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw) int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name); -void rxe_mc_cleanup(struct rxe_pool_entry *arg); +void rxe_mc_cleanup(struct rxe_pool_elem *elem); #endif /* RXE_VERBS_H */ diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 9093e6a80b26..e5c586913d0b 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -98,15 +98,14 @@ static int siw_create_tx_threads(void) continue; siw_tx_thread[cpu] = - kthread_create(siw_run_sq, (unsigned long *)(long)cpu, - "siw_tx/%d", cpu); + kthread_run_on_cpu(siw_run_sq, + (unsigned long *)(long)cpu, + cpu, "siw_tx/%u"); if (IS_ERR(siw_tx_thread[cpu])) { siw_tx_thread[cpu] = NULL; continue; } - kthread_bind(siw_tx_thread[cpu], cpu); - wake_up_process(siw_tx_thread[cpu]); assigned++; } return assigned; diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 1b36350601fa..a3dd2cb6d5c9 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -8,6 +8,7 @@ #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/xarray.h> +#include <net/addrconf.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> @@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr, attr->vendor_id = SIW_VENDOR_ID; attr->vendor_part_id = sdev->vendor_part_id; - memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6); + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, + sdev->netdev->dev_addr); return 0; } @@ -660,7 +662,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, kbuf += core_sge->length; core_sge++; } - sqe->sge[0].length = bytes > 0 ? bytes : 0; + sqe->sge[0].length = max(bytes, 0); sqe->num_sge = bytes > 0 ? 
1 : 0; return bytes; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 776e46ee95da..07e47021a71f 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -113,10 +113,6 @@ bool iser_pi_enable = false; module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); -int iser_pi_guard; -module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO); -MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]"); - static int iscsi_iser_set(const char *val, const struct kernel_param *kp) { int ret; @@ -139,9 +135,8 @@ static int iscsi_iser_set(const char *val, const struct kernel_param *kp) * Notes: In case of data length errors or iscsi PDU completion failures * this routine will signal iscsi layer of connection failure. */ -void -iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, - char *rx_data, int rx_data_len) +void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *rx_data, int rx_data_len) { int rc = 0; int datalen; @@ -176,8 +171,7 @@ error: * Netes: This routine can't fail, just assign iscsi task * hdr and max hdr size. */ -static int -iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) +static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) { struct iscsi_iser_task *iser_task = task->dd_data; @@ -198,9 +192,8 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) * state mutex to avoid dereferencing the IB device which * may have already been terminated. */ -int -iser_initialize_task_headers(struct iscsi_task *task, - struct iser_tx_desc *tx_desc) +int iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc) { struct iser_conn *iser_conn = task->conn->dd_data; struct iser_device *device = iser_conn->ib_conn.device; @@ -237,8 +230,7 @@ iser_initialize_task_headers(struct iscsi_task *task, * Return: Returns zero on success or -ENOMEM when failing * to init task headers (dma mapping error). */ -static int -iscsi_iser_task_init(struct iscsi_task *task) +static int iscsi_iser_task_init(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; int ret; @@ -272,8 +264,8 @@ iscsi_iser_task_init(struct iscsi_task *task) * xmit. * **/ -static int -iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn, + struct iscsi_task *task) { int error = 0; @@ -290,9 +282,8 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) return error; } -static int -iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, - struct iscsi_task *task) +static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, + struct iscsi_task *task) { struct iscsi_r2t_info *r2t = &task->unsol_r2t; struct iscsi_data hdr; @@ -326,8 +317,7 @@ iscsi_iser_task_xmit_unsol_data_exit: * * Return: zero on success or escalates $error on failure. */ -static int -iscsi_iser_task_xmit(struct iscsi_task *task) +static int iscsi_iser_task_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_iser_task *iser_task = task->dd_data; @@ -410,8 +400,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) * * In addition the error sector is marked. 
*/ -static u8 -iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) +static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) { struct iscsi_iser_task *iser_task = task->dd_data; enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ? @@ -460,11 +449,9 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, * -EINVAL in case end-point doesn't exsits anymore or iser connection * state is not UP (teardown already started). */ -static int -iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, - struct iscsi_cls_conn *cls_conn, - uint64_t transport_eph, - int is_leading) +static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_eph, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct iser_conn *iser_conn; @@ -519,8 +506,7 @@ out: * from this point iscsi must call conn_stop in session/connection * teardown so iser transport must wait for it. */ -static int -iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) +static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *iscsi_conn; struct iser_conn *iser_conn; @@ -542,8 +528,7 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) * handle, so we call it under iser the state lock to protect against * this kind of race. */ -static void -iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { struct iscsi_conn *conn = cls_conn->dd_data; struct iser_conn *iser_conn = conn->dd_data; @@ -578,8 +563,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) * * Removes and free iscsi host. */ -static void -iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) +static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); @@ -588,8 +572,7 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) iscsi_host_free(shost); } -static inline unsigned int -iser_dif_prot_caps(int prot_caps) +static inline unsigned int iser_dif_prot_caps(int prot_caps) { int ret = 0; @@ -708,9 +691,8 @@ free_host: return NULL; } -static int -iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf, int buflen) +static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) { int value; @@ -760,8 +742,8 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, * * Output connection statistics. */ -static void -iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) +static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; @@ -812,9 +794,9 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, * Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error) * if fails. */ -static struct iscsi_endpoint * -iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, - int non_blocking) +static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking) { int err; struct iser_conn *iser_conn; @@ -857,8 +839,7 @@ failure: * or more likely iser connection state transitioned to TEMINATING or * DOWN during the wait period. 
*/ -static int -iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct iser_conn *iser_conn = ep->dd_data; int rc; @@ -893,8 +874,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) * and cleanup or actually call it immediately in case we didn't pass * iscsi conn bind/start stage, thus it is safe. */ -static void -iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) +static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) { struct iser_conn *iser_conn = ep->dd_data; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9f6ac0a09a78..20af46c4e954 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -119,8 +119,6 @@ #define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX) -#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2) - /* the max TX (send) WR supported by the iSER QP is defined by * * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * * to have at max for SCSI command. The tx posting & completion handling code * @@ -148,8 +146,6 @@ - ISER_MAX_RX_MISC_PDUS) / \ (1 + ISER_INFLIGHT_DATAOUTS)) -#define ISER_SIGNAL_CMD_COUNT 32 - /* Constant PDU lengths calculations */ #define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr)) @@ -366,9 +362,6 @@ struct iser_fr_pool { * @qp: Connection Queue-pair * @cq: Connection completion queue * @cq_size: The number of max outstanding completions - * @post_recv_buf_count: post receive counter - * @sig_count: send work request signal count - * @rx_wr: receive work request for batch posts * @device: reference to iser device * @fr_pool: connection fast registration poool * @pi_support: Indicate device T10-PI support @@ -379,9 +372,6 @@ struct ib_conn { struct ib_qp *qp; struct ib_cq *cq; u32 cq_size; - int post_recv_buf_count; - u8 sig_count; - struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; struct iser_device *device; struct iser_fr_pool fr_pool; bool pi_support; @@ -397,8 +387,6 @@ struct ib_conn { * @state: connection logical state * @qp_max_recv_dtos: maximum number of data outs, corresponds * to max number of post recvs - * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1) - * @min_posted_rx: (qp_max_recv_dtos >> 2) * @max_cmds: maximum cmds allowed for this connection * @name: connection peer portal * @release_work: deffered work for release job @@ -409,7 +397,6 @@ struct ib_conn { * (state is ISER_CONN_UP) * @conn_list: entry in ig conn list * @login_desc: login descriptor - * @rx_desc_head: head of rx_descs cyclic buffer * @rx_descs: rx buffers array (cyclic buffer) * @num_rx_descs: number of rx descriptors * @scsi_sg_tablesize: scsi host sg_tablesize @@ -422,8 +409,6 @@ struct iser_conn { struct iscsi_endpoint *ep; enum iser_conn_state state; unsigned qp_max_recv_dtos; - unsigned qp_max_recv_dtos_mask; - unsigned min_posted_rx; u16 max_cmds; char name[ISER_OBJECT_NAME_SIZE]; struct work_struct release_work; @@ -433,7 +418,6 @@ struct iser_conn { struct completion up_completion; struct list_head conn_list; struct iser_login_desc login_desc; - unsigned int rx_desc_head; struct iser_rx_desc *rx_descs; u32 num_rx_descs; unsigned short scsi_sg_tablesize; @@ -486,7 +470,6 @@ struct iser_global { extern struct iser_global ig; extern int iser_debug_level; extern bool iser_pi_enable; -extern int iser_pi_guard; extern unsigned int iser_max_sectors; extern bool iser_always_reg; @@ -543,9 +526,9 @@ int 
iser_connect(struct iser_conn *iser_conn, int non_blocking); int iser_post_recvl(struct iser_conn *iser_conn); -int iser_post_recvm(struct iser_conn *iser_conn, int count); -int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, - bool signal); +int iser_post_recvm(struct iser_conn *iser_conn, + struct iser_rx_desc *rx_desc); +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc); int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 27a6f75a9912..2490150d3085 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -95,11 +95,8 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) * task->data[ISER_DIR_OUT].data_len, Protection size * is stored at task->prot[ISER_DIR_OUT].data_len */ -static int -iser_prepare_write_cmd(struct iscsi_task *task, - unsigned int imm_sz, - unsigned int unsol_sz, - unsigned int edtl) +static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz, + unsigned int unsol_sz, unsigned int edtl) { struct iscsi_iser_task *iser_task = task->dd_data; struct iser_mem_reg *mem_reg; @@ -160,8 +157,8 @@ iser_prepare_write_cmd(struct iscsi_task *task, } /* creates a new tx descriptor and adds header regd buffer */ -static void iser_create_send_desc(struct iser_conn *iser_conn, - struct iser_tx_desc *tx_desc) +static void iser_create_send_desc(struct iser_conn *iser_conn, + struct iser_tx_desc *tx_desc) { struct iser_device *device = iser_conn->ib_conn.device; @@ -247,8 +244,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, struct iser_device *device = ib_conn->device; iser_conn->qp_max_recv_dtos = session->cmds_max; - iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ - iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2; if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max, iser_conn->pages_per_mr)) @@ -280,7 +275,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, rx_sg->lkey = device->pd->local_dma_lkey; } - iser_conn->rx_desc_head = 0; return 0; rx_desc_dma_map_failed: @@ -322,37 +316,35 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn) static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) { struct iser_conn *iser_conn = conn->dd_data; - struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iscsi_session *session = conn->session; + int err = 0; + int i; iser_dbg("req op %x flags %x\n", req->opcode, req->flags); /* check if this is the last login - going to full feature phase */ if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) - return 0; - - /* - * Check that there is one posted recv buffer - * (for the last login response). - */ - WARN_ON(ib_conn->post_recv_buf_count != 1); + goto out; if (session->discovery_sess) { iser_info("Discovery session, re-using login RX buffer\n"); - return 0; - } else - iser_info("Normal session, posting batch of RX %d buffers\n", - iser_conn->min_posted_rx); - - /* Initial post receive buffers */ - if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx)) - return -ENOMEM; + goto out; + } - return 0; -} + iser_info("Normal session, posting batch of RX %d buffers\n", + iser_conn->qp_max_recv_dtos - 1); -static inline bool iser_signal_comp(u8 sig_count) -{ - return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0); + /* + * Initial post receive buffers. 
+ * There is one already posted recv buffer (for the last login + * response). Therefore, the first recv buffer is skipped here. + */ + for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) { + err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]); + if (err) + goto out; + } +out: + return err; } /** @@ -360,8 +352,7 @@ static inline bool iser_signal_comp(u8 sig_count) * @conn: link to matching iscsi connection * @task: SCSI command task */ -int iser_send_command(struct iscsi_conn *conn, - struct iscsi_task *task) +int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task) { struct iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; @@ -371,7 +362,6 @@ int iser_send_command(struct iscsi_conn *conn, struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; struct scsi_cmnd *sc = task->sc; struct iser_tx_desc *tx_desc = &iser_task->desc; - u8 sig_count = ++iser_conn->ib_conn.sig_count; edtl = ntohl(hdr->data_length); @@ -418,8 +408,7 @@ int iser_send_command(struct iscsi_conn *conn, iser_task->status = ISER_TASK_STATUS_STARTED; - err = iser_post_send(&iser_conn->ib_conn, tx_desc, - iser_signal_comp(sig_count)); + err = iser_post_send(&iser_conn->ib_conn, tx_desc); if (!err) return 0; @@ -434,8 +423,7 @@ send_command_error: * @task: SCSI command task * @hdr: pointer to the LLD's iSCSI message header */ -int iser_send_data_out(struct iscsi_conn *conn, - struct iscsi_task *task, +int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task, struct iscsi_data *hdr) { struct iser_conn *iser_conn = conn->dd_data; @@ -487,7 +475,7 @@ int iser_send_data_out(struct iscsi_conn *conn, itt, buf_offset, data_seg_len); - err = iser_post_send(&iser_conn->ib_conn, tx_desc, true); + err = iser_post_send(&iser_conn->ib_conn, tx_desc); if (!err) return 0; @@ -497,8 +485,7 @@ send_data_out_error: return err; } -int iser_send_control(struct iscsi_conn *conn, - struct iscsi_task *task) +int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task) { struct iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; @@ -550,7 +537,7 @@ int iser_send_control(struct iscsi_conn *conn, goto send_control_error; } - err = iser_post_send(&iser_conn->ib_conn, mdesc, true); + err = iser_post_send(&iser_conn->ib_conn, mdesc); if (!err) return 0; @@ -590,11 +577,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) desc->rsp_dma, ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); - ib_conn->post_recv_buf_count--; + if (iser_conn->iscsi_conn->session->discovery_sess) + return; + + /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */ + iser_post_recvm(iser_conn, iser_conn->rx_descs); } -static inline int -iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) +static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) { if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) || (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) { @@ -607,10 +597,8 @@ iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) return 0; } -static int -iser_check_remote_inv(struct iser_conn *iser_conn, - struct ib_wc *wc, - struct iscsi_hdr *hdr) +static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc, + struct iscsi_hdr *hdr) { if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { struct iscsi_task *task; @@ -657,8 +645,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc) struct iser_conn *iser_conn = to_iser_conn(ib_conn); struct iser_rx_desc *desc = iser_rx(wc->wr_cqe); struct iscsi_hdr 
*hdr; - int length; - int outstanding, count, err; + int length, err; if (unlikely(wc->status != IB_WC_SUCCESS)) { iser_err_comp(wc, "task_rsp"); @@ -687,20 +674,9 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc) desc->dma_addr, ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); - /* decrementing conn->post_recv_buf_count only --after-- freeing the * - * task eliminates the need to worry on tasks which are completed in * - * parallel to the execution of iser_conn_term. So the code that waits * - * for the posted rx bufs refcount to become zero handles everything */ - ib_conn->post_recv_buf_count--; - - outstanding = ib_conn->post_recv_buf_count; - if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) { - count = min(iser_conn->qp_max_recv_dtos - outstanding, - iser_conn->min_posted_rx); - err = iser_post_recvm(iser_conn, count); - if (err) - iser_err("posting %d rx bufs err %d\n", count, err); - } + err = iser_post_recvm(iser_conn, desc); + if (err) + iser_err("posting rx buffer err %d\n", err); } void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 9776b755d848..660982625488 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -44,8 +44,7 @@ void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc) iser_err_comp(wc, "memreg"); } -static struct iser_fr_desc * -iser_reg_desc_get_fr(struct ib_conn *ib_conn) +static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn) { struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; struct iser_fr_desc *desc; @@ -60,9 +59,8 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn) return desc; } -static void -iser_reg_desc_put_fr(struct ib_conn *ib_conn, - struct iser_fr_desc *desc) +static void iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) { struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; unsigned long flags; @@ -73,9 +71,9 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn, } int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, - struct iser_data_buf *data, - enum iser_data_dir iser_dir, - enum dma_data_direction dma_dir) + struct iser_data_buf *data, + enum iser_data_dir iser_dir, + enum dma_data_direction dma_dir) { struct ib_device *dev; @@ -100,9 +98,8 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, ib_dma_unmap_sg(dev, data->sg, data->size, dir); } -static int -iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, - struct iser_mem_reg *reg) +static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, + struct iser_mem_reg *reg) { struct scatterlist *sg = mem->sg; @@ -154,8 +151,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, reg->mem_h = NULL; } -static void -iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain) +static void iser_set_dif_domain(struct scsi_cmnd *sc, + struct ib_sig_domain *domain) { domain->sig_type = IB_SIG_TYPE_T10_DIF; domain->sig.dif.pi_interval = scsi_prot_interval(sc); @@ -171,8 +168,8 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain) domain->sig.dif.ref_remap = true; } -static int -iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) +static int iser_set_sig_attrs(struct scsi_cmnd *sc, + struct ib_sig_attrs *sig_attrs) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_WRITE_INSERT: @@ -205,8 +202,7 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) 
return 0; } -static inline void -iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) +static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) { *mask = 0; if (sc->prot_flags & SCSI_PROT_REF_CHECK) @@ -215,11 +211,8 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) *mask |= IB_SIG_CHECK_GUARD; } -static inline void -iser_inv_rkey(struct ib_send_wr *inv_wr, - struct ib_mr *mr, - struct ib_cqe *cqe, - struct ib_send_wr *next_wr) +static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr, + struct ib_cqe *cqe, struct ib_send_wr *next_wr) { inv_wr->opcode = IB_WR_LOCAL_INV; inv_wr->wr_cqe = cqe; @@ -229,12 +222,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, inv_wr->next = next_wr; } -static int -iser_reg_sig_mr(struct iscsi_iser_task *iser_task, - struct iser_data_buf *mem, - struct iser_data_buf *sig_mem, - struct iser_reg_resources *rsc, - struct iser_mem_reg *sig_reg) +static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_data_buf *sig_mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *sig_reg) { struct iser_tx_desc *tx_desc = &iser_task->desc; struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; @@ -335,12 +327,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, return 0; } -static int -iser_reg_data_sg(struct iscsi_iser_task *task, - struct iser_data_buf *mem, - struct iser_fr_desc *desc, - bool use_dma_key, - struct iser_mem_reg *reg) +static int iser_reg_data_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, bool use_dma_key, + struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index b566f7cb7797..8bf87b073d9b 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -265,14 +265,14 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn) memset(&init_attr, 0, sizeof(init_attr)); init_attr.event_handler = iser_qp_event_callback; - init_attr.qp_context = (void *)ib_conn; - init_attr.send_cq = ib_conn->cq; - init_attr.recv_cq = ib_conn->cq; - init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; + init_attr.qp_context = (void *)ib_conn; + init_attr.send_cq = ib_conn->cq; + init_attr.recv_cq = ib_conn->cq; + init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; init_attr.cap.max_send_sge = 2; init_attr.cap.max_recv_sge = 1; - init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; - init_attr.qp_type = IB_QPT_RC; + init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + init_attr.qp_type = IB_QPT_RC; init_attr.cap.max_send_wr = max_send_wr; if (ib_conn->pi_support) init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; @@ -283,9 +283,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn) goto out_err; ib_conn->qp = ib_conn->cma_id->qp; - iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", - ib_conn, ib_conn->cma_id, - ib_conn->cma_id->qp, max_send_wr); + iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn, + ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr); return ret; out_err: @@ -313,7 +312,7 @@ struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) goto inc_refcnt; device = kzalloc(sizeof *device, GFP_KERNEL); - if (device == NULL) + if (!device) goto out; /* assign this device to the device */ @@ -392,8 +391,7 @@ void iser_release_work(struct work_struct *work) * so the cm_id removal is out of here. 
It is Safe to * be invoked multiple times. */ -static void iser_free_ib_conn_res(struct iser_conn *iser_conn, - bool destroy) +static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy) { struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iser_device *device = ib_conn->device; @@ -401,7 +399,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn, iser_info("freeing conn %p cma_id %p qp %p\n", iser_conn, ib_conn->cma_id, ib_conn->qp); - if (ib_conn->qp != NULL) { + if (ib_conn->qp) { rdma_destroy_qp(ib_conn->cma_id); ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size); ib_conn->qp = NULL; @@ -411,7 +409,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn, if (iser_conn->rx_descs) iser_free_rx_descriptors(iser_conn); - if (device != NULL) { + if (device) { iser_device_try_release(device); ib_conn->device = NULL; } @@ -445,7 +443,7 @@ void iser_conn_release(struct iser_conn *iser_conn) iser_free_ib_conn_res(iser_conn, true); mutex_unlock(&iser_conn->state_mutex); - if (ib_conn->cma_id != NULL) { + if (ib_conn->cma_id) { rdma_destroy_id(ib_conn->cma_id); ib_conn->cma_id = NULL; } @@ -501,13 +499,12 @@ static void iser_connect_error(struct rdma_cm_id *cma_id) { struct iser_conn *iser_conn; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; iser_conn->state = ISER_CONN_TERMINATING; } -static void -iser_calc_scsi_params(struct iser_conn *iser_conn, - unsigned int max_sectors) +static void iser_calc_scsi_params(struct iser_conn *iser_conn, + unsigned int max_sectors) { struct iser_device *device = iser_conn->ib_conn.device; struct ib_device_attr *attr = &device->ib_device->attrs; @@ -545,11 +542,11 @@ iser_calc_scsi_params(struct iser_conn *iser_conn, static void iser_addr_handler(struct rdma_cm_id *cma_id) { struct iser_device *device; - struct iser_conn *iser_conn; - struct ib_conn *ib_conn; + struct iser_conn *iser_conn; + struct ib_conn *ib_conn; int ret; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; if (iser_conn->state != ISER_CONN_PENDING) /* bailout */ return; @@ -593,9 +590,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) static void iser_route_handler(struct rdma_cm_id *cma_id) { struct rdma_conn_param conn_param; - int ret; + int ret; struct iser_cm_hdr req_hdr; - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct iser_conn *iser_conn = cma_id->context; struct ib_conn *ib_conn = &iser_conn->ib_conn; struct ib_device *ib_dev = ib_conn->device->ib_device; @@ -609,9 +606,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id) memset(&conn_param, 0, sizeof conn_param); conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom; - conn_param.initiator_depth = 1; - conn_param.retry_count = 7; - conn_param.rnr_retry_count = 6; + conn_param.initiator_depth = 1; + conn_param.retry_count = 7; + conn_param.rnr_retry_count = 6; memset(&req_hdr, 0, sizeof(req_hdr)); req_hdr.flags = ISER_ZBVA_NOT_SUP; @@ -638,7 +635,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id, struct ib_qp_attr attr; struct ib_qp_init_attr init_attr; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; if (iser_conn->state != ISER_CONN_PENDING) /* bailout */ return; @@ -661,7 +658,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id, static void iser_disconnected_handler(struct rdma_cm_id *cma_id) { - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct iser_conn *iser_conn = cma_id->context; 
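/*
 * The conversions in the surrounding hunks lean on two C idioms: a
 * void pointer such as cma_id->context converts implicitly to any
 * object pointer type, so the (struct iser_conn *) casts are pure
 * noise, and kernel style prefers testing a pointer's truth value
 * over comparing it against NULL.  A minimal, self-contained sketch
 * of the preferred form — the demo_* names are illustrative stand-ins,
 * not identifiers from the iser driver:
 */
#include <stddef.h>

struct demo_conn {
	int state;
};

static struct demo_conn *demo_conn_from_context(void *context)
{
	struct demo_conn *conn = context;	/* implicit, no cast */

	if (!conn)				/* rather than conn == NULL */
		return NULL;

	conn->state = 1;
	return conn;
}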
if (iser_conn_terminate(iser_conn)) { if (iser_conn->iscsi_conn) @@ -675,7 +672,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) static void iser_cleanup_handler(struct rdma_cm_id *cma_id, bool destroy) { - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct iser_conn *iser_conn = cma_id->context; /* * We are not guaranteed that we visited disconnected_handler @@ -687,12 +684,13 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id, complete(&iser_conn->ib_completion); } -static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) +static int iser_cma_handler(struct rdma_cm_id *cma_id, + struct rdma_cm_event *event) { struct iser_conn *iser_conn; int ret = 0; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; iser_info("%s (%d): status %d conn %p id %p\n", rdma_event_msg(event->event), event->event, event->status, cma_id->context, cma_id); @@ -757,7 +755,6 @@ void iser_conn_init(struct iser_conn *iser_conn) INIT_LIST_HEAD(&iser_conn->conn_list); mutex_init(&iser_conn->state_mutex); - ib_conn->post_recv_buf_count = 0; ib_conn->reg_cqe.done = iser_reg_comp; } @@ -765,10 +762,8 @@ void iser_conn_init(struct iser_conn *iser_conn) * starts the process of connecting to the target * sleeps until the connection is established or rejected */ -int iser_connect(struct iser_conn *iser_conn, - struct sockaddr *src_addr, - struct sockaddr *dst_addr, - int non_blocking) +int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr, + struct sockaddr *dst_addr, int non_blocking) { struct ib_conn *ib_conn = &iser_conn->ib_conn; int err = 0; @@ -785,8 +780,7 @@ int iser_connect(struct iser_conn *iser_conn, iser_conn->state = ISER_CONN_PENDING; ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler, - (void *)iser_conn, - RDMA_PS_TCP, IB_QPT_RC); + iser_conn, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ib_conn->cma_id)) { err = PTR_ERR(ib_conn->cma_id); iser_err("rdma_create_id failed: %d\n", err); @@ -829,7 +823,7 @@ int iser_post_recvl(struct iser_conn *iser_conn) struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iser_login_desc *desc = &iser_conn->login_desc; struct ib_recv_wr wr; - int ib_ret; + int ret; desc->sge.addr = desc->rsp_dma; desc->sge.length = ISER_RX_LOGIN_SIZE; @@ -841,46 +835,30 @@ int iser_post_recvl(struct iser_conn *iser_conn) wr.num_sge = 1; wr.next = NULL; - ib_conn->post_recv_buf_count++; - ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL); - if (ib_ret) { - iser_err("ib_post_recv failed ret=%d\n", ib_ret); - ib_conn->post_recv_buf_count--; - } + ret = ib_post_recv(ib_conn->qp, &wr, NULL); + if (unlikely(ret)) + iser_err("ib_post_recv login failed ret=%d\n", ret); - return ib_ret; + return ret; } -int iser_post_recvm(struct iser_conn *iser_conn, int count) +int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc) { struct ib_conn *ib_conn = &iser_conn->ib_conn; - unsigned int my_rx_head = iser_conn->rx_desc_head; - struct iser_rx_desc *rx_desc; - struct ib_recv_wr *wr; - int i, ib_ret; - - for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) { - rx_desc = &iser_conn->rx_descs[my_rx_head]; - rx_desc->cqe.done = iser_task_rsp; - wr->wr_cqe = &rx_desc->cqe; - wr->sg_list = &rx_desc->rx_sg; - wr->num_sge = 1; - wr->next = wr + 1; - my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask; - } + struct ib_recv_wr wr; + int ret; - wr--; - wr->next = NULL; /* mark end of work requests list */ + rx_desc->cqe.done = iser_task_rsp; + wr.wr_cqe = 
&rx_desc->cqe; + wr.sg_list = &rx_desc->rx_sg; + wr.num_sge = 1; + wr.next = NULL; - ib_conn->post_recv_buf_count += count; - ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL); - if (unlikely(ib_ret)) { - iser_err("ib_post_recv failed ret=%d\n", ib_ret); - ib_conn->post_recv_buf_count -= count; - } else - iser_conn->rx_desc_head = my_rx_head; + ret = ib_post_recv(ib_conn->qp, &wr, NULL); + if (unlikely(ret)) + iser_err("ib_post_recv failed ret=%d\n", ret); - return ib_ret; + return ret; } @@ -888,16 +866,14 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count) * iser_post_send - Initiate a Send DTO operation * @ib_conn: connection RDMA resources * @tx_desc: iSER TX descriptor - * @signal: true to send work request as SIGNALED * * Return: 0 on success, -1 on failure */ -int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, - bool signal) +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc) { struct ib_send_wr *wr = &tx_desc->send_wr; struct ib_send_wr *first_wr; - int ib_ret; + int ret; ib_dma_sync_single_for_device(ib_conn->device->ib_device, tx_desc->dma_addr, ISER_HEADERS_LEN, @@ -908,7 +884,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, wr->sg_list = tx_desc->tx_sg; wr->num_sge = tx_desc->num_sge; wr->opcode = IB_WR_SEND; - wr->send_flags = signal ? IB_SEND_SIGNALED : 0; + wr->send_flags = IB_SEND_SIGNALED; if (tx_desc->inv_wr.next) first_wr = &tx_desc->inv_wr; @@ -917,12 +893,12 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, else first_wr = wr; - ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL); - if (unlikely(ib_ret)) + ret = ib_post_send(ib_conn->qp, first_wr, NULL); + if (unlikely(ret)) iser_err("ib_post_send failed, ret:%d opcode:%d\n", - ib_ret, wr->opcode); + ret, wr->opcode); - return ib_ret; + return ret; } u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c index 76e4352fe3f6..385a19846c24 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c @@ -13,8 +13,8 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt_stats *stats = sess->stats; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_stats *stats = clt_path->stats; struct rtrs_clt_stats_pcpu *s; int cpu; @@ -180,8 +180,8 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats, void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir) { struct rtrs_clt_con *con = req->con; - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt_stats *stats = sess->stats; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_stats *stats = clt_path->stats; unsigned int len; len = req->usr_len + req->data_len; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c index 0e69180c3771..b4fa473b7888 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c @@ -16,21 +16,21 @@ #define MIN_MAX_RECONN_ATT -1 #define MAX_MAX_RECONN_ATT 9999 -static void rtrs_clt_sess_release(struct kobject *kobj) +static void rtrs_clt_path_release(struct kobject *kobj) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + 
clt_path = container_of(kobj, struct rtrs_clt_path, kobj); - free_sess(sess); + free_path(clt_path); } static struct kobj_type ktype_sess = { .sysfs_ops = &kobj_sysfs_ops, - .release = rtrs_clt_sess_release + .release = rtrs_clt_path_release }; -static void rtrs_clt_sess_stats_release(struct kobject *kobj) +static void rtrs_clt_path_stats_release(struct kobject *kobj) { struct rtrs_clt_stats *stats; @@ -43,14 +43,15 @@ static void rtrs_clt_sess_stats_release(struct kobject *kobj) static struct kobj_type ktype_stats = { .sysfs_ops = &kobj_sysfs_ops, - .release = rtrs_clt_sess_stats_release, + .release = rtrs_clt_path_stats_release, }; static ssize_t max_reconnect_attempts_show(struct device *dev, struct device_attribute *attr, char *page) { - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, + dev); return sysfs_emit(page, "%d\n", rtrs_clt_get_max_reconnect_attempts(clt)); @@ -63,7 +64,8 @@ static ssize_t max_reconnect_attempts_store(struct device *dev, { int value; int ret; - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, + dev); ret = kstrtoint(buf, 10, &value); if (ret) { @@ -90,9 +92,9 @@ static ssize_t mpath_policy_show(struct device *dev, struct device_attribute *attr, char *page) { - struct rtrs_clt *clt; + struct rtrs_clt_sess *clt; - clt = container_of(dev, struct rtrs_clt, dev); + clt = container_of(dev, struct rtrs_clt_sess, dev); switch (clt->mp_policy) { case MP_POLICY_RR: @@ -114,12 +116,12 @@ static ssize_t mpath_policy_store(struct device *dev, const char *buf, size_t count) { - struct rtrs_clt *clt; + struct rtrs_clt_sess *clt; int value; int ret; size_t len = 0; - clt = container_of(dev, struct rtrs_clt, dev); + clt = container_of(dev, struct rtrs_clt_sess, dev); ret = kstrtoint(buf, 10, &value); if (!ret && (value == MP_POLICY_RR || @@ -169,12 +171,12 @@ static ssize_t add_path_store(struct device *dev, .src = &srcaddr, .dst = &dstaddr }; - struct rtrs_clt *clt; + struct rtrs_clt_sess *clt; const char *nl; size_t len; int err; - clt = container_of(dev, struct rtrs_clt, dev); + clt = container_of(dev, struct rtrs_clt_sess, dev); nl = strchr(buf, '\n'); if (nl) @@ -197,10 +199,10 @@ static DEVICE_ATTR_RW(add_path); static ssize_t rtrs_clt_state_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); - if (sess->state == RTRS_CLT_CONNECTED) + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); + if (clt_path->state == RTRS_CLT_CONNECTED) return sysfs_emit(page, "connected\n"); return sysfs_emit(page, "disconnected\n"); @@ -219,16 +221,16 @@ static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int ret; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); if (!sysfs_streq(buf, "1")) { - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - ret = rtrs_clt_reconnect_from_sysfs(sess); + ret = rtrs_clt_reconnect_from_sysfs(clt_path); if (ret) return ret; @@ -249,15 +251,15 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, 
size_t count) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); if (!sysfs_streq(buf, "1")) { - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - rtrs_clt_close_conns(sess, true); + rtrs_clt_close_conns(clt_path, true); return count; } @@ -276,16 +278,16 @@ static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int ret; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); if (!sysfs_streq(buf, "1")) { - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr); + ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr); if (ret) return ret; @@ -333,11 +335,11 @@ static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, typeof(*sess), kobj); + clt_path = container_of(kobj, typeof(*clt_path), kobj); - return sysfs_emit(page, "%u\n", sess->hca_port); + return sysfs_emit(page, "%u\n", clt_path->hca_port); } static struct kobj_attribute rtrs_clt_hca_port_attr = @@ -347,11 +349,11 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); - return sysfs_emit(page, "%s\n", sess->hca_name); + return sysfs_emit(page, "%s\n", clt_path->hca_name); } static struct kobj_attribute rtrs_clt_hca_name_attr = @@ -361,12 +363,12 @@ static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); return sysfs_emit(page, "%lld ns\n", - ktime_to_ns(sess->s.hb_cur_latency)); + ktime_to_ns(clt_path->s.hb_cur_latency)); } static struct kobj_attribute rtrs_clt_cur_latency_attr = @@ -376,11 +378,11 @@ static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int len; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); - len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page, + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); + len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page, PAGE_SIZE); len += sysfs_emit_at(page, len, "\n"); return len; @@ -393,11 +395,11 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int len; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); - len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page, + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); + len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page, PAGE_SIZE); len += 
sysfs_emit_at(page, len, "\n"); return len; @@ -406,7 +408,7 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj, static struct kobj_attribute rtrs_clt_dst_addr_attr = __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL); -static struct attribute *rtrs_clt_sess_attrs[] = { +static struct attribute *rtrs_clt_path_attrs[] = { &rtrs_clt_hca_name_attr.attr, &rtrs_clt_hca_port_attr.attr, &rtrs_clt_src_addr_attr.attr, @@ -419,42 +421,43 @@ static struct attribute *rtrs_clt_sess_attrs[] = { NULL, }; -static const struct attribute_group rtrs_clt_sess_attr_group = { - .attrs = rtrs_clt_sess_attrs, +static const struct attribute_group rtrs_clt_path_attr_group = { + .attrs = rtrs_clt_path_attrs, }; -int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess) +int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; char str[NAME_MAX]; int err; struct rtrs_addr path = { - .src = &sess->s.src_addr, - .dst = &sess->s.dst_addr, + .src = &clt_path->s.src_addr, + .dst = &clt_path->s.dst_addr, }; rtrs_addr_to_str(&path, str, sizeof(str)); - err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths, + err = kobject_init_and_add(&clt_path->kobj, &ktype_sess, + clt->kobj_paths, "%s", str); if (err) { pr_err("kobject_init_and_add: %d\n", err); - kobject_put(&sess->kobj); + kobject_put(&clt_path->kobj); return err; } - err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group); + err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group); if (err) { pr_err("sysfs_create_group(): %d\n", err); goto put_kobj; } - err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats, - &sess->kobj, "stats"); + err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats, + &clt_path->kobj, "stats"); if (err) { pr_err("kobject_init_and_add: %d\n", err); - kobject_put(&sess->stats->kobj_stats); + kobject_put(&clt_path->stats->kobj_stats); goto remove_group; } - err = sysfs_create_group(&sess->stats->kobj_stats, + err = sysfs_create_group(&clt_path->stats->kobj_stats, &rtrs_clt_stats_attr_group); if (err) { pr_err("failed to create stats sysfs group, err: %d\n", err); @@ -464,25 +467,25 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess) return 0; put_kobj_stats: - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); + kobject_del(&clt_path->stats->kobj_stats); + kobject_put(&clt_path->stats->kobj_stats); remove_group: - sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group); + sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group); put_kobj: - kobject_del(&sess->kobj); - kobject_put(&sess->kobj); + kobject_del(&clt_path->kobj); + kobject_put(&clt_path->kobj); return err; } -void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess, +void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path, const struct attribute *sysfs_self) { - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); + kobject_del(&clt_path->stats->kobj_stats); + kobject_put(&clt_path->stats->kobj_stats); if (sysfs_self) - sysfs_remove_file_self(&sess->kobj, sysfs_self); - kobject_del(&sess->kobj); + sysfs_remove_file_self(&clt_path->kobj, sysfs_self); + kobject_del(&clt_path->kobj); } static struct attribute *rtrs_clt_attrs[] = { @@ -496,12 +499,12 @@ static const struct attribute_group rtrs_clt_attr_group = { .attrs = rtrs_clt_attrs, }; -int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt) +int rtrs_clt_create_sysfs_root_files(struct 
rtrs_clt_sess *clt) { return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group); } -void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt) +void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt) { sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 15c0077dd27e..7c3f98e57889 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -46,21 +46,21 @@ static struct rtrs_rdma_dev_pd dev_pd = { static struct workqueue_struct *rtrs_wq; static struct class *rtrs_clt_dev_class; -static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt) +static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; bool connected = false; rcu_read_lock(); - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) - connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED; + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) + connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED; rcu_read_unlock(); return connected; } static struct rtrs_permit * -__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type) +__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type) { size_t max_depth = clt->queue_depth; struct rtrs_permit *permit; @@ -87,7 +87,7 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type) return permit; } -static inline void __rtrs_put_permit(struct rtrs_clt *clt, +static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt, struct rtrs_permit *permit) { clear_bit_unlock(permit->mem_id, clt->permits_map); @@ -107,7 +107,7 @@ static inline void __rtrs_put_permit(struct rtrs_clt *clt, * Context: * Can sleep if @wait == RTRS_PERMIT_WAIT */ -struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt, +struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type, enum wait_type can_wait) { @@ -142,7 +142,8 @@ EXPORT_SYMBOL(rtrs_clt_get_permit); * Context: * Does not matter */ -void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit) +void rtrs_clt_put_permit(struct rtrs_clt_sess *clt, + struct rtrs_permit *permit) { if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) return; @@ -163,29 +164,29 @@ EXPORT_SYMBOL(rtrs_clt_put_permit); /** * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit - * @sess: client session pointer + * @clt_path: client path pointer * @permit: permit for the allocation of the RDMA buffer * Note: * IO connection starts from 1. * 0 connection is for user messages. */ static -struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, +struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path, struct rtrs_permit *permit) { int id = 0; if (permit->con_type == RTRS_IO_CON) - id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1; + id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1; - return to_clt_con(sess->s.con[id]); + return to_clt_con(clt_path->s.con[id]); } /** * rtrs_clt_change_state() - change the session state through session state * machine. * - * @sess: client session to change the state of. + * @clt_path: client path to change the state of. * @new_state: state to change to. * * returns true if sess's state is changed to new state, otherwise return false. 
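The rtrs client serializes path state transitions with the wait queue's own spinlock, so a transition and the wake-up of anyone sleeping on the state are atomic; the hunks below keep that pattern unchanged and only rename the session type to a path type. A condensed sketch of the locked-transition idiom, with demo_* stand-ins rather than the driver's real definitions:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/spinlock.h>

enum demo_state {
	DEMO_CONNECTING,
	DEMO_CONNECTED,
	DEMO_RECONNECTING,
};

struct demo_path {
	enum demo_state state;
	wait_queue_head_t state_wq;	/* ->lock also protects ->state */
};

/* Returns true iff the state was old_state and is now new_state. */
static bool demo_change_state_from_to(struct demo_path *p,
				      enum demo_state old_state,
				      enum demo_state new_state)
{
	bool changed = false;

	spin_lock_irq(&p->state_wq.lock);
	if (p->state == old_state) {
		p->state = new_state;
		/* the lock is already held, so the _locked variant */
		wake_up_locked(&p->state_wq);
		changed = true;
	}
	spin_unlock_irq(&p->state_wq.lock);

	return changed;
}

Waiters sleep on the same lock via wait_event_lock_irq(), which is why the real rtrs_clt_change_state() can lockdep_assert_held() on state_wq.lock.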
@@ -193,15 +194,15 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, * Locks: * state_wq lock must be hold. */ -static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess, +static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path, enum rtrs_clt_state new_state) { enum rtrs_clt_state old_state; bool changed = false; - lockdep_assert_held(&sess->state_wq.lock); + lockdep_assert_held(&clt_path->state_wq.lock); - old_state = sess->state; + old_state = clt_path->state; switch (new_state) { case RTRS_CLT_CONNECTING: switch (old_state) { @@ -275,42 +276,42 @@ static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess, break; } if (changed) { - sess->state = new_state; - wake_up_locked(&sess->state_wq); + clt_path->state = new_state; + wake_up_locked(&clt_path->state_wq); } return changed; } -static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess, +static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path, enum rtrs_clt_state old_state, enum rtrs_clt_state new_state) { bool changed = false; - spin_lock_irq(&sess->state_wq.lock); - if (sess->state == old_state) - changed = rtrs_clt_change_state(sess, new_state); - spin_unlock_irq(&sess->state_wq.lock); + spin_lock_irq(&clt_path->state_wq.lock); + if (clt_path->state == old_state) + changed = rtrs_clt_change_state(clt_path, new_state); + spin_unlock_irq(&clt_path->state_wq.lock); return changed; } static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - if (rtrs_clt_change_state_from_to(sess, + if (rtrs_clt_change_state_from_to(clt_path, RTRS_CLT_CONNECTED, RTRS_CLT_RECONNECTING)) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; unsigned int delay_ms; /* * Normal scenario, reconnect if we were successfully connected */ delay_ms = clt->reconnect_delay_sec * 1000; - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, msecs_to_jiffies(delay_ms + prandom_u32() % RTRS_RECONNECT_SEED)); } else { @@ -319,7 +320,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) * so notify waiter with error state, waiter is responsible * for cleaning the rest and reconnect if needed. 
*/ - rtrs_clt_change_state_from_to(sess, + rtrs_clt_change_state_from_to(clt_path, RTRS_CLT_CONNECTING, RTRS_CLT_CONNECTING_ERR); } @@ -330,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n", + rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } @@ -350,7 +351,7 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n", + rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } @@ -380,14 +381,14 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, bool notify, bool can_wait) { struct rtrs_clt_con *con = req->con; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int err; if (WARN_ON(!req->in_use)) return; if (WARN_ON(!req->con)) return; - sess = to_clt_sess(con->c.sess); + clt_path = to_clt_path(con->c.path); if (req->sg_cnt) { if (req->dir == DMA_FROM_DEVICE && req->need_inv) { @@ -417,7 +418,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, refcount_inc(&req->ref); err = rtrs_inv_rkey(req); if (err) { - rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n", + rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n", req->mr->rkey, err); } else if (can_wait) { wait_for_completion(&req->inv_comp); @@ -433,21 +434,21 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, if (!refcount_dec_and_test(&req->ref)) return; } - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); } if (!refcount_dec_and_test(&req->ref)) return; if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) - atomic_dec(&sess->stats->inflight); + atomic_dec(&clt_path->stats->inflight); req->in_use = false; req->con = NULL; if (errno) { - rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", - errno, kobject_name(&sess->kobj), sess->hca_name, - sess->hca_port, notify); + rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", + errno, kobject_name(&clt_path->kobj), clt_path->hca_name, + clt_path->hca_port, notify); } if (notify) @@ -459,12 +460,12 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, struct rtrs_rbuf *rbuf, u32 off, u32 imm, struct ib_send_wr *wr) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); enum ib_send_flags flags; struct ib_sge sge; if (!req->sg_size) { - rtrs_wrn(con->c.sess, + rtrs_wrn(con->c.path, "Doing RDMA Write failed, no data supplied\n"); return -EINVAL; } @@ -472,16 +473,17 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, /* user data and user message in the first list element */ sge.addr = req->iu->dma_addr; sge.length = req->sg_size; - sge.lkey = sess->s.dev->ib_pd->local_dma_lkey; + sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey; /* * From time to time we have to post signalled sends, * or send queue will fill up and only QP reset can help. */ - flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? + flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? 
0 : IB_SEND_SIGNALED; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, + req->iu->dma_addr, req->sg_size, DMA_TO_DEVICE); return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1, @@ -489,15 +491,15 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, imm, flags, wr, NULL); } -static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id, +static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id, s16 errno, bool w_inval) { struct rtrs_clt_io_req *req; - if (WARN_ON(msg_id >= sess->queue_depth)) + if (WARN_ON(msg_id >= clt_path->queue_depth)) return; - req = &sess->reqs[msg_id]; + req = &clt_path->reqs[msg_id]; /* Drop need_inv if server responded with send with invalidation */ req->need_inv &= !w_inval; complete_rdma_req(req, errno, true, false); @@ -507,21 +509,21 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc) { struct rtrs_iu *iu; int err; - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); + WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); err = rtrs_iu_post_recv(&con->c, iu); if (err) { - rtrs_err(con->c.sess, "post iu failed %d\n", err); + rtrs_err(con->c.path, "post iu failed %d\n", err); rtrs_rdma_error_recovery(con); } } static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct rtrs_msg_rkey_rsp *msg; u32 imm_type, imm_payload; bool w_inval = false; @@ -529,25 +531,26 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) u32 buf_id; int err; - WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); + WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); if (wc->byte_len < sizeof(*msg)) { - rtrs_err(con->c.sess, "rkey response is malformed: size %d\n", + rtrs_err(con->c.path, "rkey response is malformed: size %d\n", wc->byte_len); goto out; } - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) { - rtrs_err(sess->clt, "rkey response is malformed: type %d\n", + rtrs_err(clt_path->clt, + "rkey response is malformed: type %d\n", le16_to_cpu(msg->type)); goto out; } buf_id = le16_to_cpu(msg->buf_id); - if (WARN_ON(buf_id >= sess->queue_depth)) + if (WARN_ON(buf_id >= clt_path->queue_depth)) goto out; rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); @@ -560,10 +563,10 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) if (WARN_ON(buf_id != msg_id)) goto out; - sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); - process_io_rsp(sess, msg_id, err, w_inval); + clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); + process_io_rsp(clt_path, msg_id, err, w_inval); } - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); return rtrs_clt_recv_done(con, wc); out: @@ -600,14 +603,14 @@ static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe) static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) { struct 
rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); u32 imm_type, imm_payload; bool w_inval = false; int err; if (wc->status != IB_WC_SUCCESS) { if (wc->status != IB_WC_WR_FLUSH_ERR) { - rtrs_err(sess->clt, "RDMA failed: %s\n", + rtrs_err(clt_path->clt, "RDMA failed: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } @@ -632,21 +635,21 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err); - process_io_rsp(sess, msg_id, err, w_inval); + process_io_rsp(clt_path, msg_id, err, w_inval); } else if (imm_type == RTRS_HB_MSG_IMM) { WARN_ON(con->c.cid); - rtrs_send_hb_ack(&sess->s); - if (sess->flags & RTRS_MSG_NEW_RKEY_F) + rtrs_send_hb_ack(&clt_path->s); + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) return rtrs_clt_recv_done(con, wc); } else if (imm_type == RTRS_HB_ACK_IMM) { WARN_ON(con->c.cid); - sess->s.hb_missed_cnt = 0; - sess->s.hb_cur_latency = - ktime_sub(ktime_get(), sess->s.hb_last_sent); - if (sess->flags & RTRS_MSG_NEW_RKEY_F) + clt_path->s.hb_missed_cnt = 0; + clt_path->s.hb_cur_latency = + ktime_sub(ktime_get(), clt_path->s.hb_last_sent); + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) return rtrs_clt_recv_done(con, wc); } else { - rtrs_wrn(con->c.sess, "Unknown IMM type %u\n", + rtrs_wrn(con->c.path, "Unknown IMM type %u\n", imm_type); } if (w_inval) @@ -658,7 +661,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) else err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); if (err) { - rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n", + rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n", err); rtrs_rdma_error_recovery(con); } @@ -670,7 +673,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || wc->wc_flags & IB_WC_WITH_IMM)); WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); - if (sess->flags & RTRS_MSG_NEW_RKEY_F) { + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { if (wc->wc_flags & IB_WC_WITH_INVALIDATE) return rtrs_clt_recv_done(con, wc); @@ -685,7 +688,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) break; default: - rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode); + rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); return; } } @@ -693,10 +696,10 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) { int err, i; - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); for (i = 0; i < q_size; i++) { - if (sess->flags & RTRS_MSG_NEW_RKEY_F) { + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { struct rtrs_iu *iu = &con->rsp_ius[i]; err = rtrs_iu_post_recv(&con->c, iu); @@ -710,16 +713,16 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) return 0; } -static int post_recv_sess(struct rtrs_clt_sess *sess) +static int post_recv_path(struct rtrs_clt_path *clt_path) { size_t q_size = 0; int err, cid; - for (cid = 0; cid < sess->s.con_num; cid++) { + for (cid = 0; cid < clt_path->s.con_num; cid++) { if (cid == 0) q_size = SERVICE_CON_QUEUE_DEPTH; else - q_size = sess->queue_depth; + q_size = clt_path->queue_depth; /* * x2 for RDMA read responses + FR key invalidations, @@ -727,9 +730,10 @@ static int post_recv_sess(struct rtrs_clt_sess *sess) */ q_size 
*= 2; - err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size); + err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); if (err) { - rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err); + rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", + err); return err; } } @@ -740,8 +744,8 @@ static int post_recv_sess(struct rtrs_clt_sess *sess) struct path_it { int i; struct list_head skip_list; - struct rtrs_clt *clt; - struct rtrs_clt_sess *(*next_path)(struct path_it *it); + struct rtrs_clt_sess *clt; + struct rtrs_clt_path *(*next_path)(struct path_it *it); }; /** @@ -773,11 +777,11 @@ struct path_it { * Locks: * rcu_read_lock() must be hold. */ -static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it) +static struct rtrs_clt_path *get_next_path_rr(struct path_it *it) { - struct rtrs_clt_sess __rcu **ppcpu_path; - struct rtrs_clt_sess *path; - struct rtrs_clt *clt; + struct rtrs_clt_path __rcu **ppcpu_path; + struct rtrs_clt_path *path; + struct rtrs_clt_sess *clt; clt = it->clt; @@ -811,26 +815,26 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it) * Locks: * rcu_read_lock() must be hold. */ -static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it) +static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it) { - struct rtrs_clt_sess *min_path = NULL; - struct rtrs_clt *clt = it->clt; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *min_path = NULL; + struct rtrs_clt_sess *clt = it->clt; + struct rtrs_clt_path *clt_path; int min_inflight = INT_MAX; int inflight; - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) + if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) continue; - inflight = atomic_read(&sess->stats->inflight); + inflight = atomic_read(&clt_path->stats->inflight); if (inflight < min_inflight) { min_inflight = inflight; - min_path = sess; + min_path = clt_path; } } @@ -862,26 +866,26 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it) * Therefore the caller MUST check the returned * path is NULL and trigger the IO error. 
*/ -static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it) +static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it) { - struct rtrs_clt_sess *min_path = NULL; - struct rtrs_clt *clt = it->clt; - struct rtrs_clt_sess *sess; - ktime_t min_latency = INT_MAX; + struct rtrs_clt_path *min_path = NULL; + struct rtrs_clt_sess *clt = it->clt; + struct rtrs_clt_path *clt_path; + ktime_t min_latency = KTIME_MAX; ktime_t latency; - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) + if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) continue; - latency = sess->s.hb_cur_latency; + latency = clt_path->s.hb_cur_latency; if (latency < min_latency) { min_latency = latency; - min_path = sess; + min_path = clt_path; } } @@ -895,7 +899,7 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it) return min_path; } -static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt) +static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt) { INIT_LIST_HEAD(&it->skip_list); it->clt = clt; @@ -928,7 +932,7 @@ static inline void path_it_deinit(struct path_it *it) * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will * also hold the control message of rtrs. * @req: an io request holding information about IO. - * @sess: client session + * @clt_path: client path * @conf: conformation callback function to notify upper layer. * @permit: permit for allocation of RDMA remote buffer * @priv: private pointer @@ -940,7 +944,7 @@ static inline void path_it_deinit(struct path_it *it) * @dir: direction of the IO. 
*/ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, - struct rtrs_clt_sess *sess, + struct rtrs_clt_path *clt_path, void (*conf)(void *priv, int errno), struct rtrs_permit *permit, void *priv, const struct kvec *vec, size_t usr_len, @@ -958,13 +962,13 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, req->sg_cnt = sg_cnt; req->priv = priv; req->dir = dir; - req->con = rtrs_permit_to_clt_con(sess, permit); + req->con = rtrs_permit_to_clt_con(clt_path, permit); req->conf = conf; req->need_inv = false; req->need_inv_comp = false; req->inv_errno = 0; refcount_set(&req->ref, 1); - req->mp_policy = sess->clt->mp_policy; + req->mp_policy = clt_path->clt->mp_policy; iov_iter_kvec(&iter, READ, vec, 1, usr_len); len = _copy_from_iter(req->iu->buf, usr_len, &iter); @@ -974,7 +978,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, } static struct rtrs_clt_io_req * -rtrs_clt_get_req(struct rtrs_clt_sess *sess, +rtrs_clt_get_req(struct rtrs_clt_path *clt_path, void (*conf)(void *priv, int errno), struct rtrs_permit *permit, void *priv, const struct kvec *vec, size_t usr_len, @@ -983,14 +987,14 @@ rtrs_clt_get_req(struct rtrs_clt_sess *sess, { struct rtrs_clt_io_req *req; - req = &sess->reqs[permit->mem_id]; - rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len, + req = &clt_path->reqs[permit->mem_id]; + rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len, sg, sg_cnt, data_len, dir); return req; } static struct rtrs_clt_io_req * -rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess, +rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path, struct rtrs_clt_io_req *fail_req) { struct rtrs_clt_io_req *req; @@ -999,8 +1003,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess, .iov_len = fail_req->usr_len }; - req = &alive_sess->reqs[fail_req->permit->mem_id]; - rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit, + req = &alive_path->reqs[fail_req->permit->mem_id]; + rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit, fail_req->priv, &vec, fail_req->usr_len, fail_req->sglist, fail_req->sg_cnt, fail_req->data_len, fail_req->dir); @@ -1013,7 +1017,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, u32 size, u32 imm, struct ib_send_wr *wr, struct ib_send_wr *tail) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct ib_sge *sge = req->sge; enum ib_send_flags flags; struct scatterlist *sg; @@ -1033,22 +1037,23 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, for_each_sg(req->sglist, sg, req->sg_cnt, i) { sge[i].addr = sg_dma_address(sg); sge[i].length = sg_dma_len(sg); - sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; + sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; } num_sge = 1 + req->sg_cnt; } sge[i].addr = req->iu->dma_addr; sge[i].length = size; - sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; + sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; /* * From time to time we have to post signalled sends, * or send queue will fill up and only QP reset can help. */ - flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? + flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? 
0 : IB_SEND_SIGNALED; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, + req->iu->dma_addr, size, DMA_TO_DEVICE); return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge, @@ -1074,8 +1079,8 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count) static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) { struct rtrs_clt_con *con = req->con; - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); struct rtrs_msg_rdma_write *msg; struct rtrs_rbuf *rbuf; @@ -1088,13 +1093,13 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; - if (tsize > sess->chunk_size) { + if (tsize > clt_path->chunk_size) { rtrs_wrn(s, "Write request failed, size too big %zu > %d\n", - tsize, sess->chunk_size); + tsize, clt_path->chunk_size); return -EMSGSIZE; } if (req->sg_cnt) { - count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist, + count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); if (!count) { rtrs_wrn(s, "Write request failed, map failed\n"); @@ -1111,7 +1116,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) imm = rtrs_to_io_req_imm(imm); buf_id = req->permit->mem_id; req->sg_size = tsize; - rbuf = &sess->rbufs[buf_id]; + rbuf = &clt_path->rbufs[buf_id]; if (count) { ret = rtrs_map_sg_fr(req, count); @@ -1119,7 +1124,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) rtrs_err_rl(s, "Write request failed, failed to map fast reg. data, err: %d\n", ret); - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); return ret; } @@ -1153,12 +1158,12 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) if (ret) { rtrs_err_rl(s, "Write request failed: error=%d path=%s [%s:%u]\n", - ret, kobject_name(&sess->kobj), sess->hca_name, - sess->hca_port); + ret, kobject_name(&clt_path->kobj), clt_path->hca_name, + clt_path->hca_port); if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) - atomic_dec(&sess->stats->inflight); + atomic_dec(&clt_path->stats->inflight); if (req->sg_cnt) - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); } @@ -1168,10 +1173,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) { struct rtrs_clt_con *con = req->con; - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); struct rtrs_msg_rdma_read *msg; - struct rtrs_ib_dev *dev = sess->s.dev; + struct rtrs_ib_dev *dev = clt_path->s.dev; struct ib_reg_wr rwr; struct ib_send_wr *wr = NULL; @@ -1181,10 +1186,10 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; - if (tsize > sess->chunk_size) { + if (tsize > clt_path->chunk_size) { rtrs_wrn(s, "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n", - tsize, sess->chunk_size); + tsize, clt_path->chunk_size); return -EMSGSIZE; } @@ -1254,15 +1259,15 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) */ rtrs_clt_update_all_stats(req, READ); - ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id], + ret = 
rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id], req->data_len, imm, wr); if (ret) { rtrs_err_rl(s, "Read request failed: error=%d path=%s [%s:%u]\n", - ret, kobject_name(&sess->kobj), sess->hca_name, - sess->hca_port); + ret, kobject_name(&clt_path->kobj), clt_path->hca_name, + clt_path->hca_port); if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) - atomic_dec(&sess->stats->inflight); + atomic_dec(&clt_path->stats->inflight); req->need_inv = false; if (req->sg_cnt) ib_dma_unmap_sg(dev->ib_dev, req->sglist, @@ -1277,21 +1282,21 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) * @clt: clt context * @fail_req: a failed io request. */ -static int rtrs_clt_failover_req(struct rtrs_clt *clt, +static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt, struct rtrs_clt_io_req *fail_req) { - struct rtrs_clt_sess *alive_sess; + struct rtrs_clt_path *alive_path; struct rtrs_clt_io_req *req; int err = -ECONNABORTED; struct path_it it; rcu_read_lock(); for (path_it_init(&it, clt); - (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num; + (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED) + if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED) continue; - req = rtrs_clt_get_copy_req(alive_sess, fail_req); + req = rtrs_clt_get_copy_req(alive_path, fail_req); if (req->dir == DMA_TO_DEVICE) err = rtrs_clt_write_req(req); else @@ -1301,7 +1306,7 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt, continue; } /* Success path */ - rtrs_clt_inc_failover_cnt(alive_sess->stats); + rtrs_clt_inc_failover_cnt(alive_path->stats); break; } path_it_deinit(&it); @@ -1310,16 +1315,16 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt, return err; } -static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) +static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; struct rtrs_clt_io_req *req; int i, err; - if (!sess->reqs) + if (!clt_path->reqs) return; - for (i = 0; i < sess->queue_depth; ++i) { - req = &sess->reqs[i]; + for (i = 0; i < clt_path->queue_depth; ++i) { + req = &clt_path->reqs[i]; if (!req->in_use) continue; @@ -1337,38 +1342,39 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) } } -static void free_sess_reqs(struct rtrs_clt_sess *sess) +static void free_path_reqs(struct rtrs_clt_path *clt_path) { struct rtrs_clt_io_req *req; int i; - if (!sess->reqs) + if (!clt_path->reqs) return; - for (i = 0; i < sess->queue_depth; ++i) { - req = &sess->reqs[i]; + for (i = 0; i < clt_path->queue_depth; ++i) { + req = &clt_path->reqs[i]; if (req->mr) ib_dereg_mr(req->mr); kfree(req->sge); - rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1); } - kfree(sess->reqs); - sess->reqs = NULL; + kfree(clt_path->reqs); + clt_path->reqs = NULL; } -static int alloc_sess_reqs(struct rtrs_clt_sess *sess) +static int alloc_path_reqs(struct rtrs_clt_path *clt_path) { struct rtrs_clt_io_req *req; int i, err = -ENOMEM; - sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs), - GFP_KERNEL); - if (!sess->reqs) + clt_path->reqs = kcalloc(clt_path->queue_depth, + sizeof(*clt_path->reqs), + GFP_KERNEL); + if (!clt_path->reqs) return -ENOMEM; - for (i = 0; i < sess->queue_depth; ++i) { - req = &sess->reqs[i]; - req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL, - sess->s.dev->ib_dev, + for (i = 0; i < clt_path->queue_depth; ++i) { + req = 
&clt_path->reqs[i]; + req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL, + clt_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_clt_rdma_done); if (!req->iu) @@ -1378,13 +1384,14 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess) if (!req->sge) goto out; - req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, - sess->max_pages_per_mr); + req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, + IB_MR_TYPE_MEM_REG, + clt_path->max_pages_per_mr); if (IS_ERR(req->mr)) { err = PTR_ERR(req->mr); req->mr = NULL; - pr_err("Failed to alloc sess->max_pages_per_mr %d\n", - sess->max_pages_per_mr); + pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n", + clt_path->max_pages_per_mr); goto out; } @@ -1394,12 +1401,12 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess) return 0; out: - free_sess_reqs(sess); + free_path_reqs(clt_path); return err; } -static int alloc_permits(struct rtrs_clt *clt) +static int alloc_permits(struct rtrs_clt_sess *clt) { unsigned int chunk_bits; int err, i; @@ -1433,7 +1440,7 @@ out_err: return err; } -static void free_permits(struct rtrs_clt *clt) +static void free_permits(struct rtrs_clt_sess *clt) { if (clt->permits_map) { size_t sz = clt->queue_depth; @@ -1447,13 +1454,13 @@ static void free_permits(struct rtrs_clt *clt) clt->permits = NULL; } -static void query_fast_reg_mode(struct rtrs_clt_sess *sess) +static void query_fast_reg_mode(struct rtrs_clt_path *clt_path) { struct ib_device *ib_dev; u64 max_pages_per_mr; int mr_page_shift; - ib_dev = sess->s.dev->ib_dev; + ib_dev = clt_path->s.dev->ib_dev; /* * Use the smallest page size supported by the HCA, down to a @@ -1463,24 +1470,24 @@ static void query_fast_reg_mode(struct rtrs_clt_sess *sess) mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1); max_pages_per_mr = ib_dev->attrs.max_mr_size; do_div(max_pages_per_mr, (1ull << mr_page_shift)); - sess->max_pages_per_mr = - min3(sess->max_pages_per_mr, (u32)max_pages_per_mr, + clt_path->max_pages_per_mr = + min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr, ib_dev->attrs.max_fast_reg_page_list_len); - sess->clt->max_segments = - min(sess->max_pages_per_mr, sess->clt->max_segments); + clt_path->clt->max_segments = + min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); } -static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess, +static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path, enum rtrs_clt_state new_state, enum rtrs_clt_state *old_state) { bool changed; - spin_lock_irq(&sess->state_wq.lock); + spin_lock_irq(&clt_path->state_wq.lock); if (old_state) - *old_state = sess->state; - changed = rtrs_clt_change_state(sess, new_state); - spin_unlock_irq(&sess->state_wq.lock); + *old_state = clt_path->state; + changed = rtrs_clt_change_state(clt_path, new_state); + spin_unlock_irq(&clt_path->state_wq.lock); return changed; } @@ -1492,9 +1499,9 @@ static void rtrs_clt_hb_err_handler(struct rtrs_con *c) rtrs_rdma_error_recovery(con); } -static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess) +static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path) { - rtrs_init_hb(&sess->s, &io_comp_cqe, + rtrs_init_hb(&clt_path->s, &io_comp_cqe, RTRS_HB_INTERVAL_MS, RTRS_HB_MISSED_MAX, rtrs_clt_hb_err_handler, @@ -1504,17 +1511,17 @@ static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess) static void rtrs_clt_reconnect_work(struct work_struct *work); static void rtrs_clt_close_work(struct work_struct *work); -static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, +static struct rtrs_clt_path 
*alloc_path(struct rtrs_clt_sess *clt, const struct rtrs_addr *path, size_t con_num, u32 nr_poll_queues) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int err = -ENOMEM; int cpu; size_t total_con; - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) + clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL); + if (!clt_path) goto err; /* @@ -1522,20 +1529,21 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, * +1: Extra connection for user messages */ total_con = con_num + nr_poll_queues + 1; - sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL); - if (!sess->s.con) - goto err_free_sess; + clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con), + GFP_KERNEL); + if (!clt_path->s.con) + goto err_free_path; - sess->s.con_num = total_con; - sess->s.irq_con_num = con_num + 1; + clt_path->s.con_num = total_con; + clt_path->s.irq_con_num = con_num + 1; - sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); - if (!sess->stats) + clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL); + if (!clt_path->stats) goto err_free_con; - mutex_init(&sess->init_mutex); - uuid_gen(&sess->s.uuid); - memcpy(&sess->s.dst_addr, path->dst, + mutex_init(&clt_path->init_mutex); + uuid_gen(&clt_path->s.uuid); + memcpy(&clt_path->s.dst_addr, path->dst, rdma_addr_size((struct sockaddr *)path->dst)); /* @@ -1544,53 +1552,54 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, * the sess->src_addr will contain only zeros, which is then fine. */ if (path->src) - memcpy(&sess->s.src_addr, path->src, + memcpy(&clt_path->s.src_addr, path->src, rdma_addr_size((struct sockaddr *)path->src)); - strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname)); - sess->clt = clt; - sess->max_pages_per_mr = RTRS_MAX_SEGMENTS; - init_waitqueue_head(&sess->state_wq); - sess->state = RTRS_CLT_CONNECTING; - atomic_set(&sess->connected_cnt, 0); - INIT_WORK(&sess->close_work, rtrs_clt_close_work); - INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work); - rtrs_clt_init_hb(sess); - - sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry)); - if (!sess->mp_skip_entry) + strscpy(clt_path->s.sessname, clt->sessname, + sizeof(clt_path->s.sessname)); + clt_path->clt = clt; + clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS; + init_waitqueue_head(&clt_path->state_wq); + clt_path->state = RTRS_CLT_CONNECTING; + atomic_set(&clt_path->connected_cnt, 0); + INIT_WORK(&clt_path->close_work, rtrs_clt_close_work); + INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work); + rtrs_clt_init_hb(clt_path); + + clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry)); + if (!clt_path->mp_skip_entry) goto err_free_stats; for_each_possible_cpu(cpu) - INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu)); + INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu)); - err = rtrs_clt_init_stats(sess->stats); + err = rtrs_clt_init_stats(clt_path->stats); if (err) goto err_free_percpu; - return sess; + return clt_path; err_free_percpu: - free_percpu(sess->mp_skip_entry); + free_percpu(clt_path->mp_skip_entry); err_free_stats: - kfree(sess->stats); + kfree(clt_path->stats); err_free_con: - kfree(sess->s.con); -err_free_sess: - kfree(sess); + kfree(clt_path->s.con); +err_free_path: + kfree(clt_path); err: return ERR_PTR(err); } -void free_sess(struct rtrs_clt_sess *sess) +void free_path(struct rtrs_clt_path *clt_path) { - free_percpu(sess->mp_skip_entry); - mutex_destroy(&sess->init_mutex); - kfree(sess->s.con); - kfree(sess->rbufs); - 
kfree(sess); + free_percpu(clt_path->mp_skip_entry); + mutex_destroy(&clt_path->init_mutex); + kfree(clt_path->s.con); + kfree(clt_path->rbufs); + kfree(clt_path); } -static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) +static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid) { struct rtrs_clt_con *con; @@ -1601,28 +1610,28 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) /* Map first two connections to the first CPU */ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids; con->c.cid = cid; - con->c.sess = &sess->s; + con->c.path = &clt_path->s; /* Align with srv, init as 1 */ atomic_set(&con->c.wr_cnt, 1); mutex_init(&con->con_mutex); - sess->s.con[cid] = &con->c; + clt_path->s.con[cid] = &con->c; return 0; } static void destroy_con(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - sess->s.con[con->c.cid] = NULL; + clt_path->s.con[con->c.cid] = NULL; mutex_destroy(&con->con_mutex); kfree(con); } static int create_con_cq_qp(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit; int err, cq_vector; struct rtrs_msg_rkey_rsp *rsp; @@ -1631,7 +1640,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) if (con->c.cid == 0) { max_send_sge = 1; /* We must be the first here */ - if (WARN_ON(sess->s.dev)) + if (WARN_ON(clt_path->s.dev)) return -EINVAL; /* @@ -1639,16 +1648,16 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) * Be careful not to close user connection before ib dev * is gracefully put. */ - sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, + clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, &dev_pd); - if (!sess->s.dev) { - rtrs_wrn(sess->clt, + if (!clt_path->s.dev) { + rtrs_wrn(clt_path->clt, "rtrs_ib_dev_find_get_or_add(): no memory\n"); return -ENOMEM; } - sess->s.dev_ref = 1; - query_fast_reg_mode(sess); - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; + clt_path->s.dev_ref = 1; + query_fast_reg_mode(clt_path); + wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; /* * Two (request + registration) completion for send * Two for recv if always_invalidate is set on server @@ -1665,27 +1674,28 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) * This is always true if user connection (cid == 0) is * established first. 
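 *
 * [Editor's aside — illustrative reading of the sizing below, not part
 *  of the patch: each IO connection derives its work-queue depths from
 *  the negotiated queue depth, clamped by the device limit:
 *
 *	wr_limit    = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
 *	max_send_wr = min_t(int, wr_limit, clt_path->queue_depth * 3 + 1);
 *
 *  i.e. room for one request, one response and one fast-reg or
 *  invalidate WR per queue slot, plus a single drain WR.]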
*/ - if (WARN_ON(!sess->s.dev)) + if (WARN_ON(!clt_path->s.dev)) return -EINVAL; - if (WARN_ON(!sess->queue_depth)) + if (WARN_ON(!clt_path->queue_depth)) return -EINVAL; - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; + wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; /* Shared between connections */ - sess->s.dev_ref++; + clt_path->s.dev_ref++; max_send_wr = min_t(int, wr_limit, /* QD * (REQ + RSP + FR REGS or INVS) + drain */ - sess->queue_depth * 3 + 1); + clt_path->queue_depth * 3 + 1); max_recv_wr = min_t(int, wr_limit, - sess->queue_depth * 3 + 1); + clt_path->queue_depth * 3 + 1); max_send_sge = 2; } atomic_set(&con->c.sq_wr_avail, max_send_wr); cq_num = max_send_wr + max_recv_wr; /* alloc iu to recv new rkey reply when server reports flags set */ - if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp), - GFP_KERNEL, sess->s.dev->ib_dev, + GFP_KERNEL, + clt_path->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_clt_rdma_done); if (!con->rsp_ius) @@ -1693,13 +1703,13 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) con->queue_num = cq_num; } cq_num = max_send_wr + max_recv_wr; - cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors; - if (con->c.cid >= sess->s.irq_con_num) - err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge, + cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors; + if (con->c.cid >= clt_path->s.irq_con_num) + err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, cq_vector, cq_num, max_send_wr, max_recv_wr, IB_POLL_DIRECT); else - err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge, + err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, cq_vector, cq_num, max_send_wr, max_recv_wr, IB_POLL_SOFTIRQ); /* @@ -1711,7 +1721,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) static void destroy_con_cq_qp(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); /* * Be careful here: destroy_con_cq_qp() can be called even @@ -1720,13 +1730,14 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con) lockdep_assert_held(&con->con_mutex); rtrs_cq_qp_destroy(&con->c); if (con->rsp_ius) { - rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num); + rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev, + con->queue_num); con->rsp_ius = NULL; con->queue_num = 0; } - if (sess->s.dev_ref && !--sess->s.dev_ref) { - rtrs_ib_dev_put(sess->s.dev); - sess->s.dev = NULL; + if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) { + rtrs_ib_dev_put(clt_path->s.dev); + clt_path->s.dev = NULL; } } @@ -1745,7 +1756,7 @@ static void destroy_cm(struct rtrs_clt_con *con) static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) { - struct rtrs_sess *s = con->c.sess; + struct rtrs_path *s = con->c.path; int err; mutex_lock(&con->con_mutex); @@ -1764,8 +1775,8 @@ static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_sess *clt = clt_path->clt; struct rtrs_msg_conn_req msg; struct rdma_conn_param param; @@ -1782,11 +1793,11 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) .magic = cpu_to_le16(RTRS_MAGIC), .version = cpu_to_le16(RTRS_PROTO_VER), .cid = cpu_to_le16(con->c.cid), - 
.cid_num = cpu_to_le16(sess->s.con_num), - .recon_cnt = cpu_to_le16(sess->s.recon_cnt), + .cid_num = cpu_to_le16(clt_path->s.con_num), + .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt), }; - msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0; - uuid_copy(&msg.sess_uuid, &sess->s.uuid); + msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0; + uuid_copy(&msg.sess_uuid, &clt_path->s.uuid); uuid_copy(&msg.paths_uuid, &clt->paths_uuid); err = rdma_connect_locked(con->c.cm_id, &param); @@ -1799,8 +1810,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, struct rdma_cm_event *ev) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_sess *clt = clt_path->clt; const struct rtrs_msg_conn_rsp *msg; u16 version, queue_depth; int errno; @@ -1831,31 +1842,32 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, if (con->c.cid == 0) { queue_depth = le16_to_cpu(msg->queue_depth); - if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) { + if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) { rtrs_err(clt, "Error: queue depth changed\n"); /* * Stop any more reconnection attempts */ - sess->reconnect_attempts = -1; + clt_path->reconnect_attempts = -1; rtrs_err(clt, "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n"); return -ECONNRESET; } - if (!sess->rbufs) { - sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs), - GFP_KERNEL); - if (!sess->rbufs) + if (!clt_path->rbufs) { + clt_path->rbufs = kcalloc(queue_depth, + sizeof(*clt_path->rbufs), + GFP_KERNEL); + if (!clt_path->rbufs) return -ENOMEM; } - sess->queue_depth = queue_depth; - sess->s.signal_interval = min_not_zero(queue_depth, + clt_path->queue_depth = queue_depth; + clt_path->s.signal_interval = min_not_zero(queue_depth, (unsigned short) SERVICE_CON_QUEUE_DEPTH); - sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size); - sess->max_io_size = le32_to_cpu(msg->max_io_size); - sess->flags = le32_to_cpu(msg->flags); - sess->chunk_size = sess->max_io_size + sess->max_hdr_size; + clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size); + clt_path->max_io_size = le32_to_cpu(msg->max_io_size); + clt_path->flags = le32_to_cpu(msg->flags); + clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size; /* * Global IO size is always a minimum. @@ -1866,20 +1878,20 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, * connections in parallel, use lock.
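 *
 * [Editor's aside with made-up numbers: if the first path negotiates
 *  max_io_size = 128K and a second path only 64K, the client-wide
 *  value drops to 64K via
 *
 *	clt->max_io_size = min_not_zero(clt_path->max_io_size,
 *					clt->max_io_size);
 *
 *  paths_mutex below serializes this against other paths finishing
 *  their handshake at the same time.]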
*/ mutex_lock(&clt->paths_mutex); - clt->queue_depth = sess->queue_depth; - clt->max_io_size = min_not_zero(sess->max_io_size, + clt->queue_depth = clt_path->queue_depth; + clt->max_io_size = min_not_zero(clt_path->max_io_size, clt->max_io_size); mutex_unlock(&clt->paths_mutex); /* * Cache the hca_port and hca_name for sysfs */ - sess->hca_port = con->c.cm_id->port_num; - scnprintf(sess->hca_name, sizeof(sess->hca_name), - sess->s.dev->ib_dev->name); - sess->s.src_addr = con->c.cm_id->route.addr.src_addr; + clt_path->hca_port = con->c.cm_id->port_num; + scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name), + clt_path->s.dev->ib_dev->name); + clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr; /* set for_new_clt, to allow future reconnect on any path */ - sess->for_new_clt = 1; + clt_path->for_new_clt = 1; } return 0; @@ -1887,16 +1899,16 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, static inline void flag_success_on_conn(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - atomic_inc(&sess->connected_cnt); + atomic_inc(&clt_path->connected_cnt); con->cm_err = 1; } static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, struct rdma_cm_event *ev) { - struct rtrs_sess *s = con->c.sess; + struct rtrs_path *s = con->c.path; const struct rtrs_msg_conn_rsp *msg; const char *rej_msg; int status, errno; @@ -1924,23 +1936,23 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, return -ECONNRESET; } -void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait) +void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait) { - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL)) - queue_work(rtrs_wq, &sess->close_work); + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL)) + queue_work(rtrs_wq, &clt_path->close_work); if (wait) - flush_work(&sess->close_work); + flush_work(&clt_path->close_work); } static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err) { if (con->cm_err == 1) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = to_clt_sess(con->c.sess); - if (atomic_dec_and_test(&sess->connected_cnt)) + clt_path = to_clt_path(con->c.path); + if (atomic_dec_and_test(&clt_path->connected_cnt)) - wake_up(&sess->state_wq); + wake_up(&clt_path->state_wq); } con->cm_err = cm_err; } @@ -1949,8 +1961,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *ev) { struct rtrs_clt_con *con = cm_id->context; - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); int cm_err = 0; switch (ev->event) { @@ -1968,7 +1980,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, * i.e. wake up without state change, but we set cm_err. */ flag_success_on_conn(con); - wake_up(&sess->state_wq); + wake_up(&clt_path->state_wq); return 0; } break; @@ -1997,7 +2009,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, /* * Device removal is a special case. Queue close and return 0. 
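 *
 * [Editor's aside, background not stated in the patch: this runs in
 *  the rdma_cm event handler, which must not tear down its own cm_id
 *  synchronously, hence the non-waiting form
 *
 *	rtrs_clt_close_conns(clt_path, false);
 *
 *  where "false" skips the flush_work() on the queued close work.]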
*/ - rtrs_clt_close_conns(sess, false); + rtrs_clt_close_conns(clt_path, false); return 0; default: rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n", @@ -2020,13 +2032,13 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, static int create_cm(struct rtrs_clt_con *con) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); struct rdma_cm_id *cm_id; int err; cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con, - sess->s.dst_addr.ss_family == AF_IB ? + clt_path->s.dst_addr.ss_family == AF_IB ? RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) { err = PTR_ERR(cm_id); @@ -2042,8 +2054,8 @@ static int create_cm(struct rtrs_clt_con *con) rtrs_err(s, "Set address reuse failed, err: %d\n", err); goto destroy_cm; } - err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr, - (struct sockaddr *)&sess->s.dst_addr, + err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, + (struct sockaddr *)&clt_path->s.dst_addr, RTRS_CONNECT_TIMEOUT_MS); if (err) { rtrs_err(s, "Failed to resolve address, err: %d\n", err); @@ -2055,8 +2067,8 @@ static int create_cm(struct rtrs_clt_con *con) * or session state was really changed to error by device removal. */ err = wait_event_interruptible_timeout( - sess->state_wq, - con->cm_err || sess->state != RTRS_CLT_CONNECTING, + clt_path->state_wq, + con->cm_err || clt_path->state != RTRS_CLT_CONNECTING, msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); if (err == 0 || err == -ERESTARTSYS) { if (err == 0) @@ -2068,7 +2080,7 @@ static int create_cm(struct rtrs_clt_con *con) err = con->cm_err; goto errr; } - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { /* Device removal */ err = -ECONNABORTED; goto errr; @@ -2087,9 +2099,9 @@ destroy_cm: return err; } -static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess) +static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; int up; /* @@ -2113,19 +2125,19 @@ static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess) mutex_unlock(&clt->paths_ev_mutex); /* Mark session as established */ - sess->established = true; - sess->reconnect_attempts = 0; - sess->stats->reconnects.successful_cnt++; + clt_path->established = true; + clt_path->reconnect_attempts = 0; + clt_path->stats->reconnects.successful_cnt++; } -static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess) +static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; - if (!sess->established) + if (!clt_path->established) return; - sess->established = false; + clt_path->established = false; mutex_lock(&clt->paths_ev_mutex); WARN_ON(!clt->paths_up); if (--clt->paths_up == 0) @@ -2133,19 +2145,19 @@ static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess) mutex_unlock(&clt->paths_ev_mutex); } -static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) +static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path) { struct rtrs_clt_con *con; unsigned int cid; - WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED); + WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED); /* * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes * exactly in between. Start destroying after it finishes. 
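 *
 * [Editor's aside: the seemingly empty critical section that follows,
 *
 *	mutex_lock(&clt_path->init_mutex);
 *	mutex_unlock(&clt_path->init_mutex);
 *
 *  is a pure barrier: init_path() holds init_mutex for the whole
 *  handshake, so this waits until any concurrent init has finished.]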
*/ - mutex_lock(&sess->init_mutex); - mutex_unlock(&sess->init_mutex); + mutex_lock(&clt_path->init_mutex); + mutex_unlock(&clt_path->init_mutex); /* * All IO paths must observe !CONNECTED state before we @@ -2153,7 +2165,7 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) */ synchronize_rcu(); - rtrs_stop_hb(&sess->s); + rtrs_stop_hb(&clt_path->s); /* * The order it utterly crucial: firstly disconnect and complete all @@ -2162,15 +2174,15 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) * eventually notify upper layer about session disconnection. */ - for (cid = 0; cid < sess->s.con_num; cid++) { - if (!sess->s.con[cid]) + for (cid = 0; cid < clt_path->s.con_num; cid++) { + if (!clt_path->s.con[cid]) break; - con = to_clt_con(sess->s.con[cid]); + con = to_clt_con(clt_path->s.con[cid]); stop_cm(con); } - fail_all_outstanding_reqs(sess); - free_sess_reqs(sess); - rtrs_clt_sess_down(sess); + fail_all_outstanding_reqs(clt_path); + free_path_reqs(clt_path); + rtrs_clt_path_down(clt_path); /* * Wait for graceful shutdown, namely when peer side invokes @@ -2180,13 +2192,14 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) * since CM does not fire anything. That is fine, we are not in * hurry. */ - wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt), + wait_event_timeout(clt_path->state_wq, + !atomic_read(&clt_path->connected_cnt), msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); - for (cid = 0; cid < sess->s.con_num; cid++) { - if (!sess->s.con[cid]) + for (cid = 0; cid < clt_path->s.con_num; cid++) { + if (!clt_path->s.con[cid]) break; - con = to_clt_con(sess->s.con[cid]); + con = to_clt_con(clt_path->s.con[cid]); mutex_lock(&con->con_mutex); destroy_con_cq_qp(con); mutex_unlock(&con->con_mutex); @@ -2195,26 +2208,26 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) } } -static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path, - struct rtrs_clt_sess *sess, - struct rtrs_clt_sess *next) +static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path, + struct rtrs_clt_path *clt_path, + struct rtrs_clt_path *next) { - struct rtrs_clt_sess **ppcpu_path; + struct rtrs_clt_path **ppcpu_path; /* Call cmpxchg() without sparse warnings */ ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path; - return sess == cmpxchg(ppcpu_path, sess, next); + return clt_path == cmpxchg(ppcpu_path, clt_path, next); } -static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) +static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; - struct rtrs_clt_sess *next; + struct rtrs_clt_sess *clt = clt_path->clt; + struct rtrs_clt_path *next; bool wait_for_grace = false; int cpu; mutex_lock(&clt->paths_mutex); - list_del_rcu(&sess->s.entry); + list_del_rcu(&clt_path->s.entry); /* Make sure everybody observes path removal. */ synchronize_rcu(); @@ -2255,7 +2268,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) * removed. If @sess is the last element, then @next is NULL. */ rcu_read_lock(); - next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry, + next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry, typeof(*next), s.entry); rcu_read_unlock(); @@ -2264,11 +2277,11 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) * removed, so change the pointer manually. 
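 *
 * [Editor's aside: the swap goes through cmpxchg because the
 *  round-robin IO path updates the same per-CPU pointer concurrently;
 *  the dying path is replaced only if nobody moved the pointer first:
 *
 *	done = (clt_path == cmpxchg(ppcpu_path, clt_path, next));
 *
 *  which is exactly what xchg_paths() above wraps.]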
*/ for_each_possible_cpu(cpu) { - struct rtrs_clt_sess __rcu **ppcpu_path; + struct rtrs_clt_path __rcu **ppcpu_path; ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); if (rcu_dereference_protected(*ppcpu_path, - lockdep_is_held(&clt->paths_mutex)) != sess) + lockdep_is_held(&clt->paths_mutex)) != clt_path) /* * synchronize_rcu() was called just after deleting * entry from the list, thus IO code path cannot @@ -2281,7 +2294,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) * We race with IO code path, which also changes pointer, * thus we have to be careful not to overwrite it. */ - if (xchg_sessions(ppcpu_path, sess, next)) + if (xchg_paths(ppcpu_path, clt_path, next)) /* * @ppcpu_path was successfully replaced with @next, * that means that someone could also pick up the @@ -2296,29 +2309,29 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) mutex_unlock(&clt->paths_mutex); } -static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess) +static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; mutex_lock(&clt->paths_mutex); clt->paths_num++; - list_add_tail_rcu(&sess->s.entry, &clt->paths_list); + list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); mutex_unlock(&clt->paths_mutex); } static void rtrs_clt_close_work(struct work_struct *work) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(work, struct rtrs_clt_sess, close_work); + clt_path = container_of(work, struct rtrs_clt_path, close_work); - cancel_delayed_work_sync(&sess->reconnect_dwork); - rtrs_clt_stop_and_destroy_conns(sess); - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL); + cancel_delayed_work_sync(&clt_path->reconnect_dwork); + rtrs_clt_stop_and_destroy_conns(clt_path); + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL); } -static int init_conns(struct rtrs_clt_sess *sess) +static int init_conns(struct rtrs_clt_path *clt_path) { unsigned int cid; int err; @@ -2328,31 +2341,31 @@ static int init_conns(struct rtrs_clt_sess *sess) * to avoid clashes with previous sessions not yet closed * sessions on a server side. */ - sess->s.recon_cnt++; + clt_path->s.recon_cnt++; /* Establish all RDMA connections */ - for (cid = 0; cid < sess->s.con_num; cid++) { - err = create_con(sess, cid); + for (cid = 0; cid < clt_path->s.con_num; cid++) { + err = create_con(clt_path, cid); if (err) goto destroy; - err = create_cm(to_clt_con(sess->s.con[cid])); + err = create_cm(to_clt_con(clt_path->s.con[cid])); if (err) { - destroy_con(to_clt_con(sess->s.con[cid])); + destroy_con(to_clt_con(clt_path->s.con[cid])); goto destroy; } } - err = alloc_sess_reqs(sess); + err = alloc_path_reqs(clt_path); if (err) goto destroy; - rtrs_start_hb(&sess->s); + rtrs_start_hb(&clt_path->s); return 0; destroy: while (cid--) { - struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]); + struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); stop_cm(con); @@ -2367,7 +2380,7 @@ destroy: * doing rdma_resolve_addr(), switch to CONNECTION_ERR state * manually to keep reconnecting. 
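 *
 * [Editor's aside: no CM event will ever arrive for a connection whose
 *  rdma_resolve_addr() was never issued, so the state machine has to
 *  be nudged by hand:
 *
 *	rtrs_clt_change_state_get_old(clt_path,
 *				      RTRS_CLT_CONNECTING_ERR, NULL);
 *
 *  which lets the reconnect work pick the path up again.]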
*/ - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); return err; } @@ -2375,31 +2388,32 @@ destroy: static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct rtrs_iu *iu; iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(sess->clt, "Sess info request send failed: %s\n", + rtrs_err(clt_path->clt, "Path info request send failed: %s\n", ib_wc_status_msg(wc->status)); - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); return; } rtrs_clt_update_wc_stats(con); } -static int process_info_rsp(struct rtrs_clt_sess *sess, +static int process_info_rsp(struct rtrs_clt_path *clt_path, const struct rtrs_msg_info_rsp *msg) { unsigned int sg_cnt, total_len; int i, sgi; sg_cnt = le16_to_cpu(msg->sg_cnt); - if (!sg_cnt || (sess->queue_depth % sg_cnt)) { - rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n", + if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) { + rtrs_err(clt_path->clt, + "Incorrect sg_cnt %d, is not multiple\n", sg_cnt); return -EINVAL; } @@ -2408,15 +2422,15 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, * Check if IB immediate data size is enough to hold the mem_id and * the offset inside the memory chunk. */ - if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) > + if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) { - rtrs_err(sess->clt, + rtrs_err(clt_path->clt, "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n", - MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size); + MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size); return -EINVAL; } total_len = 0; - for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) { + for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) { const struct rtrs_sg_desc *desc = &msg->desc[sgi]; u32 len, rkey; u64 addr; @@ -2427,26 +2441,28 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, total_len += len; - if (!len || (len % sess->chunk_size)) { - rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi, + if (!len || (len % clt_path->chunk_size)) { + rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", + sgi, len); return -EINVAL; } - for ( ; len && i < sess->queue_depth; i++) { - sess->rbufs[i].addr = addr; - sess->rbufs[i].rkey = rkey; + for ( ; len && i < clt_path->queue_depth; i++) { + clt_path->rbufs[i].addr = addr; + clt_path->rbufs[i].rkey = rkey; - len -= sess->chunk_size; - addr += sess->chunk_size; + len -= clt_path->chunk_size; + addr += clt_path->chunk_size; } } /* Sanity check */ - if (sgi != sg_cnt || i != sess->queue_depth) { - rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n"); + if (sgi != sg_cnt || i != clt_path->queue_depth) { + rtrs_err(clt_path->clt, + "Incorrect sg vector, not fully mapped\n"); return -EINVAL; } - if (total_len != sess->chunk_size * sess->queue_depth) { - rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len); + if (total_len != clt_path->chunk_size * clt_path->queue_depth) { + rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); return -EINVAL; } @@ -2456,7 +2472,7 
@@ static int process_info_rsp(struct rtrs_clt_sess *sess, static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct rtrs_msg_info_rsp *msg; enum rtrs_clt_state state; struct rtrs_iu *iu; @@ -2468,37 +2484,37 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(con->c.cid); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(sess->clt, "Sess info response recv failed: %s\n", + rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", ib_wc_status_msg(wc->status)); goto out; } WARN_ON(wc->opcode != IB_WC_RECV); if (wc->byte_len < sizeof(*msg)) { - rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", + rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", wc->byte_len); goto out; } - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) { - rtrs_err(sess->clt, "Sess info response is malformed: type %d\n", + rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", le16_to_cpu(msg->type)); goto out; } rx_sz = sizeof(*msg); rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt); if (wc->byte_len < rx_sz) { - rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", + rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", wc->byte_len); goto out; } - err = process_info_rsp(sess, msg); + err = process_info_rsp(clt_path, msg); if (err) goto out; - err = post_recv_sess(sess); + err = post_recv_path(clt_path); if (err) goto out; @@ -2506,25 +2522,25 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) out: rtrs_clt_update_wc_stats(con); - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); - rtrs_clt_change_state_get_old(sess, state, NULL); + rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); + rtrs_clt_change_state_get_old(clt_path, state, NULL); } -static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) +static int rtrs_send_path_info(struct rtrs_clt_path *clt_path) { - struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]); + struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]); struct rtrs_msg_info_req *msg; struct rtrs_iu *tx_iu, *rx_iu; size_t rx_sz; int err; rx_sz = sizeof(struct rtrs_msg_info_rsp); - rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth; + rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth; tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL, - sess->s.dev->ib_dev, DMA_TO_DEVICE, + clt_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_clt_info_req_done); - rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev, + rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_clt_info_rsp_done); if (!tx_iu || !rx_iu) { err = -ENOMEM; @@ -2533,33 +2549,34 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) /* Prepare for getting info response */ err = rtrs_iu_post_recv(&usr_con->c, rx_iu); if (err) { - rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err); + rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); goto out; } rx_iu = NULL; msg = tx_iu->buf; msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ); - memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname)); + 
memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname)); - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, + tx_iu->dma_addr, tx_iu->size, DMA_TO_DEVICE); /* Send info request */ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL); if (err) { - rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err); + rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); goto out; } tx_iu = NULL; /* Wait for state change */ - wait_event_interruptible_timeout(sess->state_wq, - sess->state != RTRS_CLT_CONNECTING, + wait_event_interruptible_timeout(clt_path->state_wq, + clt_path->state != RTRS_CLT_CONNECTING, msecs_to_jiffies( RTRS_CONNECT_TIMEOUT_MS)); - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) { - if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR) + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) { + if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR) err = -ECONNRESET; else err = -ETIMEDOUT; @@ -2567,82 +2584,82 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) out: if (tx_iu) - rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1); if (rx_iu) - rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1); if (err) /* If we've never taken async path because of malloc problems */ - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); + rtrs_clt_change_state_get_old(clt_path, + RTRS_CLT_CONNECTING_ERR, NULL); return err; } /** - * init_sess() - establishes all session connections and does handshake - * @sess: client session. + * init_path() - establishes all path connections and does handshake + * @clt_path: client path. * In case of error full close or reconnect procedure should be taken, * because reconnect or close async works can be started. 
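 *
 * [Editor's aside: under init_mutex the function performs, in order,
 *
 *	err = init_conns(clt_path);		(CM setup, QPs, requests)
 *	err = rtrs_send_path_info(clt_path);	(info handshake)
 *	rtrs_clt_path_up(clt_path);		(notify upper layer)
 *
 *  and on any failure simply returns, leaving recovery to the async
 *  close/reconnect works mentioned above.]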
*/ -static int init_sess(struct rtrs_clt_sess *sess) +static int init_path(struct rtrs_clt_path *clt_path) { int err; char str[NAME_MAX]; struct rtrs_addr path = { - .src = &sess->s.src_addr, - .dst = &sess->s.dst_addr, + .src = &clt_path->s.src_addr, + .dst = &clt_path->s.dst_addr, }; rtrs_addr_to_str(&path, str, sizeof(str)); - mutex_lock(&sess->init_mutex); - err = init_conns(sess); + mutex_lock(&clt_path->init_mutex); + err = init_conns(clt_path); if (err) { - rtrs_err(sess->clt, + rtrs_err(clt_path->clt, "init_conns() failed: err=%d path=%s [%s:%u]\n", err, - str, sess->hca_name, sess->hca_port); + str, clt_path->hca_name, clt_path->hca_port); goto out; } - err = rtrs_send_sess_info(sess); + err = rtrs_send_path_info(clt_path); if (err) { - rtrs_err( - sess->clt, - "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n", - err, str, sess->hca_name, sess->hca_port); + rtrs_err(clt_path->clt, + "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n", + err, str, clt_path->hca_name, clt_path->hca_port); goto out; } - rtrs_clt_sess_up(sess); + rtrs_clt_path_up(clt_path); out: - mutex_unlock(&sess->init_mutex); + mutex_unlock(&clt_path->init_mutex); return err; } static void rtrs_clt_reconnect_work(struct work_struct *work) { - struct rtrs_clt_sess *sess; - struct rtrs_clt *clt; + struct rtrs_clt_path *clt_path; + struct rtrs_clt_sess *clt; unsigned int delay_ms; int err; - sess = container_of(to_delayed_work(work), struct rtrs_clt_sess, - reconnect_dwork); - clt = sess->clt; + clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path, + reconnect_dwork); + clt = clt_path->clt; - if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING) + if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING) return; - if (sess->reconnect_attempts >= clt->max_reconnect_attempts) { - /* Close a session completely if max attempts is reached */ - rtrs_clt_close_conns(sess, false); + if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { + /* Close a path completely if max attempts is reached */ + rtrs_clt_close_conns(clt_path, false); return; } - sess->reconnect_attempts++; + clt_path->reconnect_attempts++; /* Stop everything */ - rtrs_clt_stop_and_destroy_conns(sess); + rtrs_clt_stop_and_destroy_conns(clt_path); msleep(RTRS_RECONNECT_BACKOFF); - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) { - err = init_sess(sess); + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) { + err = init_path(clt_path); if (err) goto reconnect_again; } @@ -2650,10 +2667,10 @@ static void rtrs_clt_reconnect_work(struct work_struct *work) return; reconnect_again: - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) { - sess->stats->reconnects.fail_cnt++; + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) { + clt_path->stats->reconnects.fail_cnt++; delay_ms = clt->reconnect_delay_sec * 1000; - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, msecs_to_jiffies(delay_ms + prandom_u32() % RTRS_RECONNECT_SEED)); @@ -2662,19 +2679,20 @@ reconnect_again: static void rtrs_clt_dev_release(struct device *dev) { - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, + dev); kfree(clt); } -static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, +static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, u16 port, size_t pdu_sz, void *priv, void (*link_ev)(void 
*priv, enum rtrs_clt_link_ev ev), unsigned int reconnect_delay_sec, unsigned int max_reconnect_attempts) { - struct rtrs_clt *clt; + struct rtrs_clt_sess *clt; int err; if (!paths_num || paths_num > MAX_PATHS_NUM) @@ -2749,7 +2767,7 @@ err: return ERR_PTR(err); } -static void free_clt(struct rtrs_clt *clt) +static void free_clt(struct rtrs_clt_sess *clt) { free_permits(clt); free_percpu(clt->pcpu_path); @@ -2760,7 +2778,7 @@ static void free_clt(struct rtrs_clt *clt) } /** - * rtrs_clt_open() - Open a session to an RTRS server + * rtrs_clt_open() - Open a path to an RTRS server * @ops: holds the link event callback and the private pointer. * @sessname: name of the session * @paths: Paths to be established defined by their src and dst addresses @@ -2777,24 +2795,24 @@ static void free_clt(struct rtrs_clt *clt) * * Return a valid pointer on success otherwise PTR_ERR. */ -struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, - const char *sessname, +struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops, + const char *pathname, const struct rtrs_addr *paths, size_t paths_num, u16 port, size_t pdu_sz, u8 reconnect_delay_sec, s16 max_reconnect_attempts, u32 nr_poll_queues) { - struct rtrs_clt_sess *sess, *tmp; - struct rtrs_clt *clt; + struct rtrs_clt_path *clt_path, *tmp; + struct rtrs_clt_sess *clt; int err, i; - if (strchr(sessname, '/') || strchr(sessname, '.')) { - pr_err("sessname cannot contain / and .\n"); + if (strchr(pathname, '/') || strchr(pathname, '.')) { + pr_err("pathname cannot contain / and .\n"); err = -EINVAL; goto out; } - clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv, + clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv, ops->link_ev, reconnect_delay_sec, max_reconnect_attempts); @@ -2803,49 +2821,49 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, goto out; } for (i = 0; i < paths_num; i++) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = alloc_sess(clt, &paths[i], nr_cpu_ids, + clt_path = alloc_path(clt, &paths[i], nr_cpu_ids, nr_poll_queues); - if (IS_ERR(sess)) { - err = PTR_ERR(sess); - goto close_all_sess; + if (IS_ERR(clt_path)) { + err = PTR_ERR(clt_path); + goto close_all_path; } if (!i) - sess->for_new_clt = 1; - list_add_tail_rcu(&sess->s.entry, &clt->paths_list); + clt_path->for_new_clt = 1; + list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); - err = init_sess(sess); + err = init_path(clt_path); if (err) { - list_del_rcu(&sess->s.entry); - rtrs_clt_close_conns(sess, true); - free_percpu(sess->stats->pcpu_stats); - kfree(sess->stats); - free_sess(sess); - goto close_all_sess; + list_del_rcu(&clt_path->s.entry); + rtrs_clt_close_conns(clt_path, true); + free_percpu(clt_path->stats->pcpu_stats); + kfree(clt_path->stats); + free_path(clt_path); + goto close_all_path; } - err = rtrs_clt_create_sess_files(sess); + err = rtrs_clt_create_path_files(clt_path); if (err) { - list_del_rcu(&sess->s.entry); - rtrs_clt_close_conns(sess, true); - free_percpu(sess->stats->pcpu_stats); - kfree(sess->stats); - free_sess(sess); - goto close_all_sess; + list_del_rcu(&clt_path->s.entry); + rtrs_clt_close_conns(clt_path, true); + free_percpu(clt_path->stats->pcpu_stats); + kfree(clt_path->stats); + free_path(clt_path); + goto close_all_path; } } err = alloc_permits(clt); if (err) - goto close_all_sess; + goto close_all_path; return clt; -close_all_sess: - list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { - rtrs_clt_destroy_sess_files(sess, NULL); - rtrs_clt_close_conns(sess, true); - 
kobject_put(&sess->kobj); +close_all_path: + list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { + rtrs_clt_destroy_path_files(clt_path, NULL); + rtrs_clt_close_conns(clt_path, true); + kobject_put(&clt_path->kobj); } rtrs_clt_destroy_sysfs_root(clt); free_clt(clt); @@ -2856,37 +2874,38 @@ out: EXPORT_SYMBOL(rtrs_clt_open); /** - * rtrs_clt_close() - Close a session + * rtrs_clt_close() - Close a path * @clt: Session handle. Session is freed upon return. */ -void rtrs_clt_close(struct rtrs_clt *clt) +void rtrs_clt_close(struct rtrs_clt_sess *clt) { - struct rtrs_clt_sess *sess, *tmp; + struct rtrs_clt_path *clt_path, *tmp; /* Firstly forbid sysfs access */ rtrs_clt_destroy_sysfs_root(clt); /* Now it is safe to iterate over all paths without locks */ - list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { - rtrs_clt_close_conns(sess, true); - rtrs_clt_destroy_sess_files(sess, NULL); - kobject_put(&sess->kobj); + list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { + rtrs_clt_close_conns(clt_path, true); + rtrs_clt_destroy_path_files(clt_path, NULL); + kobject_put(&clt_path->kobj); } free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); -int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess) +int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path) { enum rtrs_clt_state old_state; int err = -EBUSY; bool changed; - changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, + changed = rtrs_clt_change_state_get_old(clt_path, + RTRS_CLT_RECONNECTING, &old_state); if (changed) { - sess->reconnect_attempts = 0; - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0); + clt_path->reconnect_attempts = 0; + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0); } if (changed || old_state == RTRS_CLT_RECONNECTING) { /* @@ -2894,15 +2913,15 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess) * execution, so do the flush if we have queued something * right now or work is pending. */ - flush_delayed_work(&sess->reconnect_dwork); - err = (READ_ONCE(sess->state) == + flush_delayed_work(&clt_path->reconnect_dwork); + err = (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED ? 0 : -ENOTCONN); } return err; } -int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, +int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path, const struct attribute *sysfs_self) { enum rtrs_clt_state old_state; @@ -2918,27 +2937,27 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, * removing the path. 
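 *
 * [Editor's aside: close and state change are retried as a pair,
 *
 *	do {
 *		rtrs_clt_close_conns(clt_path, true);
 *		changed = rtrs_clt_change_state_get_old(clt_path,
 *							RTRS_CLT_DEAD,
 *							&old_state);
 *	} while (!changed && old_state != RTRS_CLT_DEAD);
 *
 *  so a racing reconnect cannot revive the path between the close and
 *  the transition to DEAD.]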
*/ do { - rtrs_clt_close_conns(sess, true); - changed = rtrs_clt_change_state_get_old(sess, + rtrs_clt_close_conns(clt_path, true); + changed = rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_DEAD, &old_state); } while (!changed && old_state != RTRS_CLT_DEAD); if (changed) { - rtrs_clt_remove_path_from_arr(sess); - rtrs_clt_destroy_sess_files(sess, sysfs_self); - kobject_put(&sess->kobj); + rtrs_clt_remove_path_from_arr(clt_path); + rtrs_clt_destroy_path_files(clt_path, sysfs_self); + kobject_put(&clt_path->kobj); } return 0; } -void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value) +void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value) { clt->max_reconnect_attempts = (unsigned int)value; } -int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt) +int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt) { return (int)clt->max_reconnect_attempts; } @@ -2968,12 +2987,12 @@ int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt) * On dir=WRITE rtrs client will rdma write data in sg to server side. */ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, - struct rtrs_clt *clt, struct rtrs_permit *permit, - const struct kvec *vec, size_t nr, size_t data_len, - struct scatterlist *sg, unsigned int sg_cnt) + struct rtrs_clt_sess *clt, struct rtrs_permit *permit, + const struct kvec *vec, size_t nr, size_t data_len, + struct scatterlist *sg, unsigned int sg_cnt) { struct rtrs_clt_io_req *req; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; enum dma_data_direction dma_dir; int err = -ECONNABORTED, i; @@ -2995,19 +3014,19 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, rcu_read_lock(); for (path_it_init(&it, clt); - (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - if (usr_len + hdr_len > sess->max_hdr_size) { - rtrs_wrn_rl(sess->clt, + if (usr_len + hdr_len > clt_path->max_hdr_size) { + rtrs_wrn_rl(clt_path->clt, "%s request failed, user message size is %zu and header length %zu, but max size is %u\n", dir == READ ? 
"Read" : "Write", - usr_len, hdr_len, sess->max_hdr_size); + usr_len, hdr_len, clt_path->max_hdr_size); err = -EMSGSIZE; break; } - req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv, + req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv, vec, usr_len, sg, sg_cnt, data_len, dma_dir); if (dir == READ) @@ -3028,21 +3047,21 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, } EXPORT_SYMBOL(rtrs_clt_request); -int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index) +int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index) { /* If no path, return -1 for block layer not to try again */ int cnt = -1; struct rtrs_con *con; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; struct path_it it; rcu_read_lock(); for (path_it_init(&it, clt); - (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - con = sess->s.con[index + 1]; + con = clt_path->s.con[index + 1]; cnt = ib_process_cq_direct(con->cq, -1); if (cnt) break; @@ -3062,7 +3081,7 @@ EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct); * 0 on success * -ECOMM no connection to the server */ -int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr) +int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr) { if (!rtrs_clt_is_connected(clt)) return -ECOMM; @@ -3077,15 +3096,15 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr) } EXPORT_SYMBOL(rtrs_clt_query); -int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, +int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, struct rtrs_addr *addr) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int err; - sess = alloc_sess(clt, addr, nr_cpu_ids, 0); - if (IS_ERR(sess)) - return PTR_ERR(sess); + clt_path = alloc_path(clt, addr, nr_cpu_ids, 0); + if (IS_ERR(clt_path)) + return PTR_ERR(clt_path); mutex_lock(&clt->paths_mutex); if (clt->paths_num == 0) { @@ -3094,7 +3113,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, * the addition of the first path is like a new session for * the storage server */ - sess->for_new_clt = 1; + clt_path->for_new_clt = 1; } mutex_unlock(&clt->paths_mutex); @@ -3104,24 +3123,24 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, * IO will never grab it. Also it is very important to add * path before init, since init fires LINK_CONNECTED event. 
*/ - rtrs_clt_add_path_to_arr(sess); + rtrs_clt_add_path_to_arr(clt_path); - err = init_sess(sess); + err = init_path(clt_path); if (err) - goto close_sess; + goto close_path; - err = rtrs_clt_create_sess_files(sess); + err = rtrs_clt_create_path_files(clt_path); if (err) - goto close_sess; + goto close_path; return 0; -close_sess: - rtrs_clt_remove_path_from_arr(sess); - rtrs_clt_close_conns(sess, true); - free_percpu(sess->stats->pcpu_stats); - kfree(sess->stats); - free_sess(sess); +close_path: + rtrs_clt_remove_path_from_arr(clt_path); + rtrs_clt_close_conns(clt_path, true); + free_percpu(clt_path->stats->pcpu_stats); + kfree(clt_path->stats); + free_path(clt_path); return err; } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h index 9afffccff973..d1b18a154ae0 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h @@ -124,9 +124,9 @@ struct rtrs_rbuf { u32 rkey; }; -struct rtrs_clt_sess { - struct rtrs_sess s; - struct rtrs_clt *clt; +struct rtrs_clt_path { + struct rtrs_path s; + struct rtrs_clt_sess *clt; wait_queue_head_t state_wq; enum rtrs_clt_state state; atomic_t connected_cnt; @@ -153,10 +153,10 @@ struct rtrs_clt_sess { *mp_skip_entry; }; -struct rtrs_clt { +struct rtrs_clt_sess { struct list_head paths_list; /* rcu protected list */ size_t paths_num; - struct rtrs_clt_sess + struct rtrs_clt_path __rcu * __percpu *pcpu_path; uuid_t paths_uuid; int paths_up; @@ -186,31 +186,32 @@ static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c) return container_of(c, struct rtrs_clt_con, c); } -static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s) +static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s) { - return container_of(s, struct rtrs_clt_sess, s); + return container_of(s, struct rtrs_clt_path, s); } -static inline int permit_size(struct rtrs_clt *clt) +static inline int permit_size(struct rtrs_clt_sess *clt) { return sizeof(struct rtrs_permit) + clt->pdu_sz; } -static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx) +static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt, + int idx) { return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx); } -int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess); -void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait); -int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, +int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path); +void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait); +int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, struct rtrs_addr *addr); -int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, +int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path, const struct attribute *sysfs_self); -void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value); -int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt); -void free_sess(struct rtrs_clt_sess *sess); +void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value); +int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt); +void free_path(struct rtrs_clt_path *clt_path); /* rtrs-clt-stats.c */ @@ -239,11 +240,11 @@ ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats, /* rtrs-clt-sysfs.c */ -int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt); -void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt); +int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt); 
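[Editor's sketch: to_clt_path() above is the standard container_of()
accessor. A minimal, self-contained userspace model of the idiom, with
simplified struct bodies — the names mirror the header, everything else
is illustrative:

	#include <assert.h>
	#include <stddef.h>

	struct rtrs_path     { int dummy; };
	struct rtrs_clt_path { struct rtrs_path s; int state; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s)
	{
		return container_of(s, struct rtrs_clt_path, s);
	}

	int main(void)
	{
		struct rtrs_clt_path p = { .state = 1 };

		assert(to_clt_path(&p.s) == &p);	/* recover the wrapper */
		return 0;
	}
]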
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt); -int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess); -void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess, +int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path); +void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path, const struct attribute *sysfs_self); #endif /* RTRS_CLT_H */ diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h index 78eac9a4f703..9a1e5c2ae55c 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h @@ -90,7 +90,7 @@ struct rtrs_ib_dev { }; struct rtrs_con { - struct rtrs_sess *sess; + struct rtrs_path *path; struct ib_qp *qp; struct ib_cq *cq; struct rdma_cm_id *cm_id; @@ -100,7 +100,7 @@ struct rtrs_con { atomic_t sq_wr_avail; }; -struct rtrs_sess { +struct rtrs_path { struct list_head entry; struct sockaddr_storage dst_addr; struct sockaddr_storage src_addr; @@ -229,11 +229,11 @@ struct rtrs_msg_conn_rsp { /** * struct rtrs_msg_info_req * @type: @RTRS_MSG_INFO_REQ - * @sessname: Session name chosen by client + * @pathname: Path name chosen by client */ struct rtrs_msg_info_req { __le16 type; - u8 sessname[NAME_MAX]; + u8 pathname[NAME_MAX]; u8 reserved[15]; }; @@ -313,19 +313,19 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu, int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe); -int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, +int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con, u32 max_send_sge, int cq_vector, int nr_cqe, u32 max_send_wr, u32 max_recv_wr, enum ib_poll_context poll_ctx); void rtrs_cq_qp_destroy(struct rtrs_con *con); -void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe, +void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe, unsigned int interval_ms, unsigned int missed_max, void (*err_handler)(struct rtrs_con *con), struct workqueue_struct *wq); -void rtrs_start_hb(struct rtrs_sess *sess); -void rtrs_stop_hb(struct rtrs_sess *sess); -void rtrs_send_hb_ack(struct rtrs_sess *sess); +void rtrs_start_hb(struct rtrs_path *path); +void rtrs_stop_hb(struct rtrs_path *path); +void rtrs_send_hb_ack(struct rtrs_path *path); void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags, struct rtrs_rdma_dev_pd *pool); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c index 9c43ce5ba1c1..b94ae12c2795 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c @@ -15,10 +15,10 @@ static void rtrs_srv_release(struct kobject *kobj) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - kfree(sess); + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + kfree(srv_path); } static struct kobj_type ktype = { @@ -36,24 +36,25 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - struct rtrs_srv_sess *sess; - struct rtrs_sess *s; + struct rtrs_srv_path *srv_path; + struct rtrs_path *s; char str[MAXHOSTNAMELEN]; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - s = &sess->s; + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + s = &srv_path->s; if (!sysfs_streq(buf, "1")) { rtrs_err(s, "%s: invalid value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str)); + 
sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str, + sizeof(str)); rtrs_info(s, "disconnect for path %s requested\n", str); /* first remove sysfs itself to avoid deadlock */ - sysfs_remove_file_self(&sess->kobj, &attr->attr); - close_sess(sess); + sysfs_remove_file_self(&srv_path->kobj, &attr->attr); + close_path(srv_path); return count; } @@ -66,11 +67,11 @@ static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; struct rtrs_con *usr_con; - sess = container_of(kobj, typeof(*sess), kobj); - usr_con = sess->s.con[0]; + srv_path = container_of(kobj, typeof(*srv_path), kobj); + usr_con = srv_path->s.con[0]; return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num); } @@ -82,11 +83,11 @@ static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); - return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name); + return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name); } static struct kobj_attribute rtrs_srv_hca_name_attr = @@ -96,11 +97,11 @@ static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int cnt; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, page, PAGE_SIZE); return cnt + sysfs_emit_at(page, cnt, "\n"); } @@ -112,11 +113,11 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int len; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page, + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page, PAGE_SIZE); len += sysfs_emit_at(page, len, "\n"); return len; @@ -125,7 +126,7 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj, static struct kobj_attribute rtrs_srv_dst_addr_attr = __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL); -static struct attribute *rtrs_srv_sess_attrs[] = { +static struct attribute *rtrs_srv_path_attrs[] = { &rtrs_srv_hca_name_attr.attr, &rtrs_srv_hca_port_attr.attr, &rtrs_srv_src_addr_attr.attr, @@ -134,8 +135,8 @@ static struct attribute *rtrs_srv_sess_attrs[] = { NULL, }; -static const struct attribute_group rtrs_srv_sess_attr_group = { - .attrs = rtrs_srv_sess_attrs, +static const struct attribute_group rtrs_srv_path_attr_group = { + .attrs = rtrs_srv_path_attrs, }; STAT_ATTR(struct rtrs_srv_stats, rdma, @@ -151,9 +152,9 @@ static const struct attribute_group rtrs_srv_stats_attr_group = { .attrs = rtrs_srv_stats_attrs, }; -static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) +static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; int err = 0; mutex_lock(&srv->paths_mutex); @@ -164,7 +165,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) goto unlock; } srv->dev.class = 
rtrs_dev_class; - err = dev_set_name(&srv->dev, "%s", sess->s.sessname); + err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname); if (err) goto unlock; @@ -196,9 +197,9 @@ unlock: } static void -rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess) +rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; mutex_lock(&srv->paths_mutex); if (!--srv->dev_ref) { @@ -213,7 +214,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess) } } -static void rtrs_srv_sess_stats_release(struct kobject *kobj) +static void rtrs_srv_path_stats_release(struct kobject *kobj) { struct rtrs_srv_stats *stats; @@ -224,22 +225,22 @@ static void rtrs_srv_sess_stats_release(struct kobject *kobj) static struct kobj_type ktype_stats = { .sysfs_ops = &kobj_sysfs_ops, - .release = rtrs_srv_sess_stats_release, + .release = rtrs_srv_path_stats_release, }; -static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess) +static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path) { int err; - struct rtrs_sess *s = &sess->s; + struct rtrs_path *s = &srv_path->s; - err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats, - &sess->kobj, "stats"); + err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats, + &srv_path->kobj, "stats"); if (err) { rtrs_err(s, "kobject_init_and_add(): %d\n", err); - kobject_put(&sess->stats->kobj_stats); + kobject_put(&srv_path->stats->kobj_stats); return err; } - err = sysfs_create_group(&sess->stats->kobj_stats, + err = sysfs_create_group(&srv_path->stats->kobj_stats, &rtrs_srv_stats_attr_group); if (err) { rtrs_err(s, "sysfs_create_group(): %d\n", err); @@ -249,64 +250,64 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess) return 0; err: - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); + kobject_del(&srv_path->stats->kobj_stats); + kobject_put(&srv_path->stats->kobj_stats); return err; } -int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess) +int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *s = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *s = &srv_path->s; char str[NAME_MAX]; int err; struct rtrs_addr path = { - .src = &sess->s.dst_addr, - .dst = &sess->s.src_addr, + .src = &srv_path->s.dst_addr, + .dst = &srv_path->s.src_addr, }; rtrs_addr_to_str(&path, str, sizeof(str)); - err = rtrs_srv_create_once_sysfs_root_folders(sess); + err = rtrs_srv_create_once_sysfs_root_folders(srv_path); if (err) return err; - err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths, + err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths, "%s", str); if (err) { rtrs_err(s, "kobject_init_and_add(): %d\n", err); goto destroy_root; } - err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group); + err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group); if (err) { rtrs_err(s, "sysfs_create_group(): %d\n", err); goto put_kobj; } - err = rtrs_srv_create_stats_files(sess); + err = rtrs_srv_create_stats_files(srv_path); if (err) goto remove_group; return 0; remove_group: - sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group); + sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group); put_kobj: - kobject_del(&sess->kobj); + kobject_del(&srv_path->kobj); destroy_root: - kobject_put(&sess->kobj); - rtrs_srv_destroy_once_sysfs_root_folders(sess); 
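/*
 * Editor's aside -- a hedged sketch, not part of the patch. The unwind
 * labels above follow the kobject lifetime rule: once
 * kobject_init_and_add() has run, the object may only be released via
 * kobject_put(), which ends up in the ktype's ->release() callback
 * (rtrs_srv_release() in this file); a direct kfree() would bypass the
 * refcount. Condensed version of the pattern with hypothetical names:
 */
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/err.h>

struct demo_obj {
	struct kobject kobj;
};

static void demo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct demo_obj, kobj));
}

static struct kobj_type demo_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release   = demo_release,
};

static struct demo_obj *demo_create(struct kobject *parent)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int err;

	if (!obj)
		return ERR_PTR(-ENOMEM);

	err = kobject_init_and_add(&obj->kobj, &demo_ktype, parent, "demo");
	if (err) {
		/* Never kfree() here: the put triggers demo_release(). */
		kobject_put(&obj->kobj);
		return ERR_PTR(err);
	}
	return obj;
}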
+ kobject_put(&srv_path->kobj); + rtrs_srv_destroy_once_sysfs_root_folders(srv_path); return err; } -void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess) +void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path) { - if (sess->kobj.state_in_sysfs) { - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); - sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group); - kobject_put(&sess->kobj); + if (srv_path->kobj.state_in_sysfs) { + kobject_del(&srv_path->stats->kobj_stats); + kobject_put(&srv_path->stats->kobj_stats); + sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group); + kobject_put(&srv_path->kobj); - rtrs_srv_destroy_once_sysfs_root_folders(sess); + rtrs_srv_destroy_once_sysfs_root_folders(srv_path); } } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index 7df71f8cf149..24024bce2566 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -62,19 +62,19 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c) return container_of(c, struct rtrs_srv_con, c); } -static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s) +static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s) { - return container_of(s, struct rtrs_srv_sess, s); + return container_of(s, struct rtrs_srv_path, s); } -static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess, +static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path, enum rtrs_srv_state new_state) { enum rtrs_srv_state old_state; bool changed = false; - spin_lock_irq(&sess->state_lock); - old_state = sess->state; + spin_lock_irq(&srv_path->state_lock); + old_state = srv_path->state; switch (new_state) { case RTRS_SRV_CONNECTED: if (old_state == RTRS_SRV_CONNECTING) @@ -93,8 +93,8 @@ static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess, break; } if (changed) - sess->state = new_state; - spin_unlock_irq(&sess->state_lock); + srv_path->state = new_state; + spin_unlock_irq(&srv_path->state_lock); return changed; } @@ -106,16 +106,16 @@ static void free_id(struct rtrs_srv_op *id) kfree(id); } -static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess) +static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; int i; - if (sess->ops_ids) { + if (srv_path->ops_ids) { for (i = 0; i < srv->queue_depth; i++) - free_id(sess->ops_ids[i]); - kfree(sess->ops_ids); - sess->ops_ids = NULL; + free_id(srv_path->ops_ids[i]); + kfree(srv_path->ops_ids); + srv_path->ops_ids = NULL; } } @@ -127,21 +127,24 @@ static struct ib_cqe io_comp_cqe = { static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref) { - struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess, ids_inflight_ref); + struct rtrs_srv_path *srv_path = container_of(ref, + struct rtrs_srv_path, + ids_inflight_ref); - percpu_ref_exit(&sess->ids_inflight_ref); - complete(&sess->complete_done); + percpu_ref_exit(&srv_path->ids_inflight_ref); + complete(&srv_path->complete_done); } -static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess) +static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_op *id; int i, ret; - sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids), - GFP_KERNEL); - if (!sess->ops_ids) + srv_path->ops_ids = kcalloc(srv->queue_depth, + sizeof(*srv_path->ops_ids), + 
GFP_KERNEL); + if (!srv_path->ops_ids) goto err; for (i = 0; i < srv->queue_depth; ++i) { @@ -149,44 +152,44 @@ static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess) if (!id) goto err; - sess->ops_ids[i] = id; + srv_path->ops_ids[i] = id; } - ret = percpu_ref_init(&sess->ids_inflight_ref, + ret = percpu_ref_init(&srv_path->ids_inflight_ref, rtrs_srv_inflight_ref_release, 0, GFP_KERNEL); if (ret) { pr_err("Percpu reference init failed\n"); goto err; } - init_completion(&sess->complete_done); + init_completion(&srv_path->complete_done); return 0; err: - rtrs_srv_free_ops_ids(sess); + rtrs_srv_free_ops_ids(srv_path); return -ENOMEM; } -static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess) +static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path) { - percpu_ref_get(&sess->ids_inflight_ref); + percpu_ref_get(&srv_path->ids_inflight_ref); } -static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess) +static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path) { - percpu_ref_put(&sess->ids_inflight_ref); + percpu_ref_put(&srv_path->ids_inflight_ref); } static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "REG MR failed: %s\n", ib_wc_status_msg(wc->status)); - close_sess(sess); + close_path(srv_path); return; } } @@ -197,9 +200,9 @@ static struct ib_cqe local_reg_cqe = { static int rdma_write_sg(struct rtrs_srv_op *id) { - struct rtrs_sess *s = id->con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - dma_addr_t dma_addr = sess->dma_addr[id->msg_id]; + struct rtrs_path *s = id->con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id]; struct rtrs_srv_mr *srv_mr; struct ib_send_wr inv_wr; struct ib_rdma_wr imm_wr; @@ -233,7 +236,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) return -EINVAL; } - plist->lkey = sess->s.dev->ib_pd->local_dma_lkey; + plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey; offset += plist->length; wr->wr.sg_list = plist; @@ -284,7 +287,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) if (always_invalidate) { struct rtrs_msg_rkey_rsp *msg; - srv_mr = &sess->mrs[id->msg_id]; + srv_mr = &srv_path->mrs[id->msg_id]; rwr.wr.opcode = IB_WR_REG_MR; rwr.wr.wr_cqe = &local_reg_cqe; rwr.wr.num_sge = 0; @@ -300,11 +303,11 @@ static int rdma_write_sg(struct rtrs_srv_op *id) list.addr = srv_mr->iu->dma_addr; list.length = sizeof(*msg); - list.lkey = sess->s.dev->ib_pd->local_dma_lkey; + list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; imm_wr.wr.sg_list = &list; imm_wr.wr.num_sge = 1; imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, srv_mr->iu->dma_addr, srv_mr->iu->size, DMA_TO_DEVICE); } else { @@ -317,7 +320,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) 0, need_inval)); imm_wr.wr.wr_cqe = &io_comp_cqe; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr, offset, DMA_BIDIRECTIONAL); err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL); @@ -341,8 +344,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id) static int send_io_resp_imm(struct rtrs_srv_con *con, struct 
rtrs_srv_op *id, int errno) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct ib_send_wr inv_wr, *wr = NULL; struct ib_rdma_wr imm_wr; struct ib_reg_wr rwr; @@ -402,7 +405,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, struct ib_sge list; struct rtrs_msg_rkey_rsp *msg; - srv_mr = &sess->mrs[id->msg_id]; + srv_mr = &srv_path->mrs[id->msg_id]; rwr.wr.next = &imm_wr.wr; rwr.wr.opcode = IB_WR_REG_MR; rwr.wr.wr_cqe = &local_reg_cqe; @@ -419,11 +422,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, list.addr = srv_mr->iu->dma_addr; list.length = sizeof(*msg); - list.lkey = sess->s.dev->ib_pd->local_dma_lkey; + list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; imm_wr.wr.sg_list = &list; imm_wr.wr.num_sge = 1; imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, srv_mr->iu->dma_addr, srv_mr->iu->size, DMA_TO_DEVICE); } else { @@ -444,11 +447,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, return err; } -void close_sess(struct rtrs_srv_sess *sess) +void close_path(struct rtrs_srv_path *srv_path) { - if (rtrs_srv_change_state(sess, RTRS_SRV_CLOSING)) - queue_work(rtrs_wq, &sess->close_work); - WARN_ON(sess->state != RTRS_SRV_CLOSING); + if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING)) + queue_work(rtrs_wq, &srv_path->close_work); + WARN_ON(srv_path->state != RTRS_SRV_CLOSING); } static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state) @@ -480,35 +483,35 @@ static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state) */ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; struct rtrs_srv_con *con; - struct rtrs_sess *s; + struct rtrs_path *s; int err; if (WARN_ON(!id)) return true; con = id->con; - s = con->c.sess; - sess = to_srv_sess(s); + s = con->c.path; + srv_path = to_srv_path(s); id->status = status; - if (sess->state != RTRS_SRV_CONNECTED) { + if (srv_path->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, - "Sending I/O response failed, session %s is disconnected, sess state %s\n", - kobject_name(&sess->kobj), - rtrs_srv_state_str(sess->state)); + "Sending I/O response failed, server path %s is disconnected, path state %s\n", + kobject_name(&srv_path->kobj), + rtrs_srv_state_str(srv_path->state)); goto out; } if (always_invalidate) { - struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id]; + struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id]; ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey)); } if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) { - rtrs_err(s, "IB send queue full: sess=%s cid=%d\n", - kobject_name(&sess->kobj), + rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n", + kobject_name(&srv_path->kobj), con->c.cid); atomic_add(1, &con->c.sq_wr_avail); spin_lock(&con->rsp_wr_wait_lock); @@ -523,12 +526,12 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) err = rdma_write_sg(id); if (err) { - rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err, - kobject_name(&sess->kobj)); - close_sess(sess); + rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err, + kobject_name(&srv_path->kobj)); + close_path(srv_path); } out: - rtrs_srv_put_ops_ids(sess); + rtrs_srv_put_ops_ids(srv_path); return true; } EXPORT_SYMBOL(rtrs_srv_resp_rdma); 
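/*
 * Editor's aside -- a hedged sketch, not part of the patch. The
 * rtrs_srv_get_ops_ids()/rtrs_srv_put_ops_ids() pair above is the
 * standard percpu_ref drain pattern: every in-flight I/O holds a cheap
 * per-CPU reference, and teardown (rtrs_srv_close_work() further down)
 * kills the ref and waits for the release callback to fire a
 * completion. Condensed version with illustrative names:
 */
#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/gfp.h>

struct io_ctx {
	struct percpu_ref inflight;
	struct completion drained;
};

static void io_ctx_release(struct percpu_ref *ref)
{
	struct io_ctx *ctx = container_of(ref, struct io_ctx, inflight);

	percpu_ref_exit(&ctx->inflight);
	complete(&ctx->drained);		/* wakes io_ctx_drain() */
}

static int io_ctx_init(struct io_ctx *ctx)
{
	init_completion(&ctx->drained);
	return percpu_ref_init(&ctx->inflight, io_ctx_release, 0, GFP_KERNEL);
}

/* Taken/dropped around each request, like the get/put wrappers above. */
static void io_start(struct io_ctx *ctx) { percpu_ref_get(&ctx->inflight); }
static void io_end(struct io_ctx *ctx)   { percpu_ref_put(&ctx->inflight); }

/* Teardown: forbid new references, then wait for the last io_end(). */
static void io_ctx_drain(struct io_ctx *ctx)
{
	percpu_ref_kill(&ctx->inflight);
	wait_for_completion(&ctx->drained);
}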
@@ -538,33 +541,33 @@ EXPORT_SYMBOL(rtrs_srv_resp_rdma); * @srv: Session pointer * @priv: The private pointer that is associated with the session. */ -void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv) +void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv) { srv->priv = priv; } EXPORT_SYMBOL(rtrs_srv_set_sess_priv); -static void unmap_cont_bufs(struct rtrs_srv_sess *sess) +static void unmap_cont_bufs(struct rtrs_srv_path *srv_path) { int i; - for (i = 0; i < sess->mrs_num; i++) { + for (i = 0; i < srv_path->mrs_num; i++) { struct rtrs_srv_mr *srv_mr; - srv_mr = &sess->mrs[i]; - rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1); + srv_mr = &srv_path->mrs[i]; + rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); ib_dereg_mr(srv_mr->mr); - ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl, + ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl, srv_mr->sgt.nents, DMA_BIDIRECTIONAL); sg_free_table(&srv_mr->sgt); } - kfree(sess->mrs); + kfree(srv_path->mrs); } -static int map_cont_bufs(struct rtrs_srv_sess *sess) +static int map_cont_bufs(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *ss = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *ss = &srv_path->s; int i, mri, err, mrs_num; unsigned int chunk_bits; int chunks_per_mr = 1; @@ -581,19 +584,19 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) mrs_num = srv->queue_depth; } else { chunks_per_mr = - sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; + srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr); chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num); } - sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL); - if (!sess->mrs) + srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL); + if (!srv_path->mrs) return -ENOMEM; - sess->mrs_num = mrs_num; + srv_path->mrs_num = mrs_num; for (mri = 0; mri < mrs_num; mri++) { - struct rtrs_srv_mr *srv_mr = &sess->mrs[mri]; + struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri]; struct sg_table *sgt = &srv_mr->sgt; struct scatterlist *s; struct ib_mr *mr; @@ -612,13 +615,13 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) sg_set_page(s, srv->chunks[chunks + i], max_chunk_size, 0); - nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl, + nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); if (nr < sgt->nents) { err = nr < 0 ? 
nr : -EINVAL; goto free_sg; } - mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, + mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, sgt->nents); if (IS_ERR(mr)) { err = PTR_ERR(mr); @@ -634,7 +637,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) if (always_invalidate) { srv_mr->iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_rkey_rsp), - GFP_KERNEL, sess->s.dev->ib_dev, + GFP_KERNEL, srv_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_srv_rdma_done); if (!srv_mr->iu) { err = -ENOMEM; @@ -644,7 +647,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) } /* Eventually dma addr for each chunk can be cached */ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) - sess->dma_addr[chunks + i] = sg_dma_address(s); + srv_path->dma_addr[chunks + i] = sg_dma_address(s); ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); srv_mr->mr = mr; @@ -652,75 +655,75 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) continue; err: while (mri--) { - srv_mr = &sess->mrs[mri]; + srv_mr = &srv_path->mrs[mri]; sgt = &srv_mr->sgt; mr = srv_mr->mr; - rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); dereg_mr: ib_dereg_mr(mr); unmap_sg: - ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl, + ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); free_sg: sg_free_table(sgt); } - kfree(sess->mrs); + kfree(srv_path->mrs); return err; } chunk_bits = ilog2(srv->queue_depth - 1) + 1; - sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); + srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); return 0; } static void rtrs_srv_hb_err_handler(struct rtrs_con *c) { - close_sess(to_srv_sess(c->sess)); + close_path(to_srv_path(c->path)); } -static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess) +static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path) { - rtrs_init_hb(&sess->s, &io_comp_cqe, + rtrs_init_hb(&srv_path->s, &io_comp_cqe, RTRS_HB_INTERVAL_MS, RTRS_HB_MISSED_MAX, rtrs_srv_hb_err_handler, rtrs_wq); } -static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess) +static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path) { - rtrs_start_hb(&sess->s); + rtrs_start_hb(&srv_path->s); } -static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess) +static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path) { - rtrs_stop_hb(&sess->s); + rtrs_stop_hb(&srv_path->s); } static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_iu *iu; iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "Sess info response send failed: %s\n", ib_wc_status_msg(wc->status)); - close_sess(sess); + close_path(srv_path); return; } WARN_ON(wc->opcode != IB_WC_SEND); } -static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess) +static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; int up; @@ -731,18 +734,18 @@ static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess) mutex_unlock(&srv->paths_ev_mutex); /* Mark session as established */ - sess->established = true; + srv_path->established = true; } -static void rtrs_srv_sess_down(struct 
rtrs_srv_sess *sess) +static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; - if (!sess->established) + if (!srv_path->established) return; - sess->established = false; + srv_path->established = false; mutex_lock(&srv->paths_ev_mutex); WARN_ON(!srv->paths_up); if (--srv->paths_up == 0) @@ -750,11 +753,11 @@ static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess) mutex_unlock(&srv->paths_ev_mutex); } -static bool exist_sessname(struct rtrs_srv_ctx *ctx, - const char *sessname, const uuid_t *path_uuid) +static bool exist_pathname(struct rtrs_srv_ctx *ctx, + const char *pathname, const uuid_t *path_uuid) { - struct rtrs_srv *srv; - struct rtrs_srv_sess *sess; + struct rtrs_srv_sess *srv; + struct rtrs_srv_path *srv_path; bool found = false; mutex_lock(&ctx->srv_mutex); @@ -767,9 +770,9 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx, continue; } - list_for_each_entry(sess, &srv->paths_list, s.entry) { - if (strlen(sess->s.sessname) == strlen(sessname) && - !strcmp(sess->s.sessname, sessname)) { + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { + if (strlen(srv_path->s.sessname) == strlen(pathname) && + !strcmp(srv_path->s.sessname, pathname)) { found = true; break; } @@ -782,14 +785,14 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx, return found; } -static int post_recv_sess(struct rtrs_srv_sess *sess); +static int post_recv_path(struct rtrs_srv_path *srv_path); static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno); static int process_info_req(struct rtrs_srv_con *con, struct rtrs_msg_info_req *msg) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct ib_send_wr *reg_wr = NULL; struct rtrs_msg_info_rsp *rsp; struct rtrs_iu *tx_iu; @@ -797,31 +800,32 @@ static int process_info_req(struct rtrs_srv_con *con, int mri, err; size_t tx_sz; - err = post_recv_sess(sess); + err = post_recv_path(srv_path); if (err) { - rtrs_err(s, "post_recv_sess(), err: %d\n", err); + rtrs_err(s, "post_recv_path(), err: %d\n", err); return err; } - if (strchr(msg->sessname, '/') || strchr(msg->sessname, '.')) { - rtrs_err(s, "sessname cannot contain / and .\n"); + if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) { + rtrs_err(s, "pathname cannot contain / and .\n"); return -EINVAL; } - if (exist_sessname(sess->srv->ctx, - msg->sessname, &sess->srv->paths_uuid)) { - rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname); + if (exist_pathname(srv_path->srv->ctx, + msg->pathname, &srv_path->srv->paths_uuid)) { + rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname); return -EPERM; } - strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname)); + strscpy(srv_path->s.sessname, msg->pathname, + sizeof(srv_path->s.sessname)); - rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL); + rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL); if (!rwr) return -ENOMEM; tx_sz = sizeof(*rsp); - tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num; - tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev, + tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num; + tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_srv_info_rsp_done); if (!tx_iu) { err = -ENOMEM; @@ -830,10 +834,10 @@ static int process_info_req(struct rtrs_srv_con *con, rsp = tx_iu->buf; rsp->type = 
cpu_to_le16(RTRS_MSG_INFO_RSP); - rsp->sg_cnt = cpu_to_le16(sess->mrs_num); + rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num); - for (mri = 0; mri < sess->mrs_num; mri++) { - struct ib_mr *mr = sess->mrs[mri].mr; + for (mri = 0; mri < srv_path->mrs_num; mri++) { + struct ib_mr *mr = srv_path->mrs[mri].mr; rsp->desc[mri].addr = cpu_to_le64(mr->iova); rsp->desc[mri].key = cpu_to_le32(mr->rkey); @@ -854,13 +858,13 @@ static int process_info_req(struct rtrs_srv_con *con, reg_wr = &rwr[mri].wr; } - err = rtrs_srv_create_sess_files(sess); + err = rtrs_srv_create_path_files(srv_path); if (err) goto iu_free; - kobject_get(&sess->kobj); - get_device(&sess->srv->dev); - rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED); - rtrs_srv_start_hb(sess); + kobject_get(&srv_path->kobj); + get_device(&srv_path->srv->dev); + rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED); + rtrs_srv_start_hb(srv_path); /* * We do not account number of established connections at the current @@ -868,9 +872,10 @@ static int process_info_req(struct rtrs_srv_con *con, * all connections are successfully established. Thus, simply notify * listener with a proper event if we are the first path. */ - rtrs_srv_sess_up(sess); + rtrs_srv_path_up(srv_path); - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, + tx_iu->dma_addr, tx_iu->size, DMA_TO_DEVICE); /* Send info response */ @@ -878,7 +883,7 @@ static int process_info_req(struct rtrs_srv_con *con, if (err) { rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err); iu_free: - rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1); } rwr_free: kfree(rwr); @@ -889,8 +894,8 @@ rwr_free: static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_msg_info_req *msg; struct rtrs_iu *iu; int err; @@ -910,7 +915,7 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) wc->byte_len); goto close; } - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) { @@ -923,22 +928,22 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) goto close; out: - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); return; close: - close_sess(sess); + close_path(srv_path); goto out; } static int post_recv_info_req(struct rtrs_srv_con *con) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_iu *rx_iu; int err; rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), - GFP_KERNEL, sess->s.dev->ib_dev, + GFP_KERNEL, srv_path->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_srv_info_req_done); if (!rx_iu) return -ENOMEM; @@ -946,7 +951,7 @@ static int post_recv_info_req(struct rtrs_srv_con *con) err = rtrs_iu_post_recv(&con->c, rx_iu); if (err) { rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err); - rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1); return err; } @@ -966,20 +971,20 @@ static int post_recv_io(struct rtrs_srv_con *con, size_t q_size) 
return 0; } -static int post_recv_sess(struct rtrs_srv_sess *sess) +static int post_recv_path(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *s = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *s = &srv_path->s; size_t q_size; int err, cid; - for (cid = 0; cid < sess->s.con_num; cid++) { + for (cid = 0; cid < srv_path->s.con_num; cid++) { if (cid == 0) q_size = SERVICE_CON_QUEUE_DEPTH; else q_size = srv->queue_depth; - err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size); + err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); if (err) { rtrs_err(s, "post_recv_io(), err: %d\n", err); return err; @@ -993,9 +998,9 @@ static void process_read(struct rtrs_srv_con *con, struct rtrs_msg_rdma_read *msg, u32 buf_id, u32 off) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; struct rtrs_srv_op *id; @@ -1003,10 +1008,10 @@ static void process_read(struct rtrs_srv_con *con, void *data; int ret; - if (sess->state != RTRS_SRV_CONNECTED) { + if (srv_path->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, "Processing read request failed, session is disconnected, sess state %s\n", - rtrs_srv_state_str(sess->state)); + rtrs_srv_state_str(srv_path->state)); return; } if (msg->sg_cnt != 1 && msg->sg_cnt != 0) { @@ -1014,9 +1019,9 @@ static void process_read(struct rtrs_srv_con *con, "Processing read request failed, invalid message\n"); return; } - rtrs_srv_get_ops_ids(sess); - rtrs_srv_update_rdma_stats(sess->stats, off, READ); - id = sess->ops_ids[buf_id]; + rtrs_srv_get_ops_ids(srv_path); + rtrs_srv_update_rdma_stats(srv_path->stats, off, READ); + id = srv_path->ops_ids[buf_id]; id->con = con; id->dir = READ; id->msg_id = buf_id; @@ -1042,18 +1047,18 @@ send_err_msg: rtrs_err_rl(s, "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n", buf_id, ret); - close_sess(sess); + close_path(srv_path); } - rtrs_srv_put_ops_ids(sess); + rtrs_srv_put_ops_ids(srv_path); } static void process_write(struct rtrs_srv_con *con, struct rtrs_msg_rdma_write *req, u32 buf_id, u32 off) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; struct rtrs_srv_op *id; @@ -1061,15 +1066,15 @@ static void process_write(struct rtrs_srv_con *con, void *data; int ret; - if (sess->state != RTRS_SRV_CONNECTED) { + if (srv_path->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, "Processing write request failed, session is disconnected, sess state %s\n", - rtrs_srv_state_str(sess->state)); + rtrs_srv_state_str(srv_path->state)); return; } - rtrs_srv_get_ops_ids(sess); - rtrs_srv_update_rdma_stats(sess->stats, off, WRITE); - id = sess->ops_ids[buf_id]; + rtrs_srv_get_ops_ids(srv_path); + rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE); + id = srv_path->ops_ids[buf_id]; id->con = con; id->dir = WRITE; id->msg_id = buf_id; @@ -1094,20 +1099,21 @@ send_err_msg: rtrs_err_rl(s, "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n", buf_id, ret); - close_sess(sess); + close_path(srv_path); } - rtrs_srv_put_ops_ids(sess); + 
rtrs_srv_put_ops_ids(srv_path); } static void process_io_req(struct rtrs_srv_con *con, void *msg, u32 id, u32 off) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_msg_rdma_hdr *hdr; unsigned int type; - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id], + ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, + srv_path->dma_addr[id], max_chunk_size, DMA_BIDIRECTIONAL); hdr = msg; type = le16_to_cpu(hdr->type); @@ -1129,7 +1135,7 @@ static void process_io_req(struct rtrs_srv_con *con, void *msg, return; err: - close_sess(sess); + close_path(srv_path); } static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) @@ -1137,16 +1143,16 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_srv_mr *mr = container_of(wc->wr_cqe, typeof(*mr), inv_cqe); struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; u32 msg_id, off; void *data; if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n", ib_wc_status_msg(wc->status)); - close_sess(sess); + close_path(srv_path); } msg_id = mr->msg_id; off = mr->msg_off; @@ -1194,9 +1200,9 @@ static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con) static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; u32 imm_type, imm_payload; int err; @@ -1206,7 +1212,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n", ib_wc_status_msg(wc->status), wc->wr_cqe, wc->opcode, wc->vendor_err, wc->byte_len); - close_sess(sess); + close_path(srv_path); } return; } @@ -1222,7 +1228,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); - close_sess(sess); + close_path(srv_path); break; } rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), @@ -1231,16 +1237,16 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) u32 msg_id, off; void *data; - msg_id = imm_payload >> sess->mem_bits; - off = imm_payload & ((1 << sess->mem_bits) - 1); + msg_id = imm_payload >> srv_path->mem_bits; + off = imm_payload & ((1 << srv_path->mem_bits) - 1); if (msg_id >= srv->queue_depth || off >= max_chunk_size) { rtrs_err(s, "Wrong msg_id %u, off %u\n", msg_id, off); - close_sess(sess); + close_path(srv_path); return; } if (always_invalidate) { - struct rtrs_srv_mr *mr = &sess->mrs[msg_id]; + struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id]; mr->msg_off = off; mr->msg_id = msg_id; @@ -1248,7 +1254,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); - close_sess(sess); + close_path(srv_path); break; } } else { @@ -1257,10 +1263,10 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) } } else if (imm_type == 
RTRS_HB_MSG_IMM) { WARN_ON(con->c.cid); - rtrs_send_hb_ack(&sess->s); + rtrs_send_hb_ack(&srv_path->s); } else if (imm_type == RTRS_HB_ACK_IMM) { WARN_ON(con->c.cid); - sess->s.hb_missed_cnt = 0; + srv_path->s.hb_missed_cnt = 0; } else { rtrs_wrn(s, "Unknown IMM type %u\n", imm_type); } @@ -1284,22 +1290,23 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) } /** - * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname. + * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname. * @srv: Session - * @sessname: Sessname buffer + * @pathname: Pathname buffer * @len: Length of sessname buffer */ -int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len) +int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname, + size_t len) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int err = -ENOTCONN; mutex_lock(&srv->paths_mutex); - list_for_each_entry(sess, &srv->paths_list, s.entry) { - if (sess->state != RTRS_SRV_CONNECTED) + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { + if (srv_path->state != RTRS_SRV_CONNECTED) continue; - strscpy(sessname, sess->s.sessname, - min_t(size_t, sizeof(sess->s.sessname), len)); + strscpy(pathname, srv_path->s.sessname, + min_t(size_t, sizeof(srv_path->s.sessname), len)); err = 0; break; } @@ -1307,44 +1314,45 @@ int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len) return err; } -EXPORT_SYMBOL(rtrs_srv_get_sess_name); +EXPORT_SYMBOL(rtrs_srv_get_path_name); /** * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth. * @srv: Session */ -int rtrs_srv_get_queue_depth(struct rtrs_srv *srv) +int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv) { return srv->queue_depth; } EXPORT_SYMBOL(rtrs_srv_get_queue_depth); -static int find_next_bit_ring(struct rtrs_srv_sess *sess) +static int find_next_bit_ring(struct rtrs_srv_path *srv_path) { - struct ib_device *ib_dev = sess->s.dev->ib_dev; + struct ib_device *ib_dev = srv_path->s.dev->ib_dev; int v; - v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask); + v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask); if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors) v = cpumask_first(&cq_affinity_mask); return v; } -static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess) +static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path) { - sess->cur_cq_vector = find_next_bit_ring(sess); + srv_path->cur_cq_vector = find_next_bit_ring(srv_path); - return sess->cur_cq_vector; + return srv_path->cur_cq_vector; } static void rtrs_srv_dev_release(struct device *dev) { - struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev); + struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess, + dev); kfree(srv); } -static void free_srv(struct rtrs_srv *srv) +static void free_srv(struct rtrs_srv_sess *srv) { int i; @@ -1358,11 +1366,11 @@ static void free_srv(struct rtrs_srv *srv) put_device(&srv->dev); } -static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx, +static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx, const uuid_t *paths_uuid, bool first_conn) { - struct rtrs_srv *srv; + struct rtrs_srv_sess *srv; int i; mutex_lock(&ctx->srv_mutex); @@ -1424,7 +1432,7 @@ err_free_srv: return ERR_PTR(-ENOMEM); } -static void put_srv(struct rtrs_srv *srv) +static void put_srv(struct rtrs_srv_sess *srv) { if (refcount_dec_and_test(&srv->refcount)) { struct rtrs_srv_ctx *ctx = srv->ctx; @@ -1438,23 +1446,23 @@ static void put_srv(struct rtrs_srv *srv) } } -static void 
__add_path_to_srv(struct rtrs_srv *srv, - struct rtrs_srv_sess *sess) +static void __add_path_to_srv(struct rtrs_srv_sess *srv, + struct rtrs_srv_path *srv_path) { - list_add_tail(&sess->s.entry, &srv->paths_list); + list_add_tail(&srv_path->s.entry, &srv->paths_list); srv->paths_num++; WARN_ON(srv->paths_num >= MAX_PATHS_NUM); } -static void del_path_from_srv(struct rtrs_srv_sess *sess) +static void del_path_from_srv(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; if (WARN_ON(!srv)) return; mutex_lock(&srv->paths_mutex); - list_del(&sess->s.entry); + list_del(&srv_path->s.entry); WARN_ON(!srv->paths_num); srv->paths_num--; mutex_unlock(&srv->paths_mutex); @@ -1484,47 +1492,47 @@ static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b) } } -static bool __is_path_w_addr_exists(struct rtrs_srv *srv, +static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv, struct rdma_addr *addr) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - list_for_each_entry(sess, &srv->paths_list, s.entry) - if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr, + list_for_each_entry(srv_path, &srv->paths_list, s.entry) + if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr, (struct sockaddr *)&addr->dst_addr) && - !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr, + !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr, (struct sockaddr *)&addr->src_addr)) return true; return false; } -static void free_sess(struct rtrs_srv_sess *sess) +static void free_path(struct rtrs_srv_path *srv_path) { - if (sess->kobj.state_in_sysfs) { - kobject_del(&sess->kobj); - kobject_put(&sess->kobj); + if (srv_path->kobj.state_in_sysfs) { + kobject_del(&srv_path->kobj); + kobject_put(&srv_path->kobj); } else { - kfree(sess->stats); - kfree(sess); + kfree(srv_path->stats); + kfree(srv_path); } } static void rtrs_srv_close_work(struct work_struct *work) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; struct rtrs_srv_con *con; int i; - sess = container_of(work, typeof(*sess), close_work); + srv_path = container_of(work, typeof(*srv_path), close_work); - rtrs_srv_destroy_sess_files(sess); - rtrs_srv_stop_hb(sess); + rtrs_srv_destroy_path_files(srv_path); + rtrs_srv_stop_hb(srv_path); - for (i = 0; i < sess->s.con_num; i++) { - if (!sess->s.con[i]) + for (i = 0; i < srv_path->s.con_num; i++) { + if (!srv_path->s.con[i]) continue; - con = to_srv_con(sess->s.con[i]); + con = to_srv_con(srv_path->s.con[i]); rdma_disconnect(con->c.cm_id); ib_drain_qp(con->c.qp); } @@ -1533,41 +1541,41 @@ static void rtrs_srv_close_work(struct work_struct *work) * Degrade ref count to the usual model with a single shared * atomic_t counter */ - percpu_ref_kill(&sess->ids_inflight_ref); + percpu_ref_kill(&srv_path->ids_inflight_ref); /* Wait for all completion */ - wait_for_completion(&sess->complete_done); + wait_for_completion(&srv_path->complete_done); /* Notify upper layer if we are the last path */ - rtrs_srv_sess_down(sess); + rtrs_srv_path_down(srv_path); - unmap_cont_bufs(sess); - rtrs_srv_free_ops_ids(sess); + unmap_cont_bufs(srv_path); + rtrs_srv_free_ops_ids(srv_path); - for (i = 0; i < sess->s.con_num; i++) { - if (!sess->s.con[i]) + for (i = 0; i < srv_path->s.con_num; i++) { + if (!srv_path->s.con[i]) continue; - con = to_srv_con(sess->s.con[i]); + con = to_srv_con(srv_path->s.con[i]); rtrs_cq_qp_destroy(&con->c); rdma_destroy_id(con->c.cm_id); kfree(con); } - rtrs_ib_dev_put(sess->s.dev); + 
rtrs_ib_dev_put(srv_path->s.dev); - del_path_from_srv(sess); - put_srv(sess->srv); - sess->srv = NULL; - rtrs_srv_change_state(sess, RTRS_SRV_CLOSED); + del_path_from_srv(srv_path); + put_srv(srv_path->srv); + srv_path->srv = NULL; + rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED); - kfree(sess->dma_addr); - kfree(sess->s.con); - free_sess(sess); + kfree(srv_path->dma_addr); + kfree(srv_path->s.con); + free_path(srv_path); } -static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess, +static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path, struct rdma_cm_id *cm_id) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_msg_conn_rsp msg; struct rdma_conn_param param; int err; @@ -1615,25 +1623,25 @@ static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno) return errno; } -static struct rtrs_srv_sess * -__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid) +static struct rtrs_srv_path * +__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - list_for_each_entry(sess, &srv->paths_list, s.entry) { - if (uuid_equal(&sess->s.uuid, sess_uuid)) - return sess; + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { + if (uuid_equal(&srv_path->s.uuid, sess_uuid)) + return srv_path; } return NULL; } -static int create_con(struct rtrs_srv_sess *sess, +static int create_con(struct rtrs_srv_path *srv_path, struct rdma_cm_id *cm_id, unsigned int cid) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *s = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *s = &srv_path->s; struct rtrs_srv_con *con; u32 cq_num, max_send_wr, max_recv_wr, wr_limit; @@ -1648,10 +1656,10 @@ static int create_con(struct rtrs_srv_sess *sess, spin_lock_init(&con->rsp_wr_wait_lock); INIT_LIST_HEAD(&con->rsp_wr_wait_list); con->c.cm_id = cm_id; - con->c.sess = &sess->s; + con->c.path = &srv_path->s; con->c.cid = cid; atomic_set(&con->c.wr_cnt, 1); - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; + wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr; if (con->c.cid == 0) { /* @@ -1684,10 +1692,10 @@ static int create_con(struct rtrs_srv_sess *sess, } cq_num = max_send_wr + max_recv_wr; atomic_set(&con->c.sq_wr_avail, max_send_wr); - cq_vector = rtrs_srv_get_next_cq_vector(sess); + cq_vector = rtrs_srv_get_next_cq_vector(srv_path); /* TODO: SOFTIRQ can be faster, but be careful with softirq context */ - err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num, + err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num, max_send_wr, max_recv_wr, IB_POLL_WORKQUEUE); if (err) { @@ -1699,8 +1707,8 @@ static int create_con(struct rtrs_srv_sess *sess, if (err) goto free_cqqp; } - WARN_ON(sess->s.con[cid]); - sess->s.con[cid] = &con->c; + WARN_ON(srv_path->s.con[cid]); + srv_path->s.con[cid] = &con->c; /* * Change context from server to current connection. 
The other @@ -1719,13 +1727,13 @@ err: return err; } -static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv, +static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv, struct rdma_cm_id *cm_id, unsigned int con_num, unsigned int recon_cnt, const uuid_t *uuid) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int err = -ENOMEM; char str[NAME_MAX]; struct rtrs_addr path; @@ -1739,74 +1747,76 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv, pr_err("Path with same addr exists\n"); goto err; } - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) + srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL); + if (!srv_path) goto err; - sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); - if (!sess->stats) + srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL); + if (!srv_path->stats) goto err_free_sess; - sess->stats->sess = sess; + srv_path->stats->srv_path = srv_path; - sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr), - GFP_KERNEL); - if (!sess->dma_addr) + srv_path->dma_addr = kcalloc(srv->queue_depth, + sizeof(*srv_path->dma_addr), + GFP_KERNEL); + if (!srv_path->dma_addr) goto err_free_stats; - sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL); - if (!sess->s.con) + srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con), + GFP_KERNEL); + if (!srv_path->s.con) goto err_free_dma_addr; - sess->state = RTRS_SRV_CONNECTING; - sess->srv = srv; - sess->cur_cq_vector = -1; - sess->s.dst_addr = cm_id->route.addr.dst_addr; - sess->s.src_addr = cm_id->route.addr.src_addr; + srv_path->state = RTRS_SRV_CONNECTING; + srv_path->srv = srv; + srv_path->cur_cq_vector = -1; + srv_path->s.dst_addr = cm_id->route.addr.dst_addr; + srv_path->s.src_addr = cm_id->route.addr.src_addr; /* temporary until receiving session-name from client */ - path.src = &sess->s.src_addr; - path.dst = &sess->s.dst_addr; + path.src = &srv_path->s.src_addr; + path.dst = &srv_path->s.dst_addr; rtrs_addr_to_str(&path, str, sizeof(str)); - strscpy(sess->s.sessname, str, sizeof(sess->s.sessname)); - - sess->s.con_num = con_num; - sess->s.irq_con_num = con_num; - sess->s.recon_cnt = recon_cnt; - uuid_copy(&sess->s.uuid, uuid); - spin_lock_init(&sess->state_lock); - INIT_WORK(&sess->close_work, rtrs_srv_close_work); - rtrs_srv_init_hb(sess); - - sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); - if (!sess->s.dev) { + strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname)); + + srv_path->s.con_num = con_num; + srv_path->s.irq_con_num = con_num; + srv_path->s.recon_cnt = recon_cnt; + uuid_copy(&srv_path->s.uuid, uuid); + spin_lock_init(&srv_path->state_lock); + INIT_WORK(&srv_path->close_work, rtrs_srv_close_work); + rtrs_srv_init_hb(srv_path); + + srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); + if (!srv_path->s.dev) { err = -ENOMEM; goto err_free_con; } - err = map_cont_bufs(sess); + err = map_cont_bufs(srv_path); if (err) goto err_put_dev; - err = rtrs_srv_alloc_ops_ids(sess); + err = rtrs_srv_alloc_ops_ids(srv_path); if (err) goto err_unmap_bufs; - __add_path_to_srv(srv, sess); + __add_path_to_srv(srv, srv_path); - return sess; + return srv_path; err_unmap_bufs: - unmap_cont_bufs(sess); + unmap_cont_bufs(srv_path); err_put_dev: - rtrs_ib_dev_put(sess->s.dev); + rtrs_ib_dev_put(srv_path->s.dev); err_free_con: - kfree(sess->s.con); + kfree(srv_path->s.con); err_free_dma_addr: - kfree(sess->dma_addr); + kfree(srv_path->dma_addr); err_free_stats: - kfree(sess->stats); + kfree(srv_path->stats); 
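/*
 * Editor's aside -- a hedged sketch, not part of the patch. This label
 * ladder in __alloc_path() is the conventional kernel unwind idiom:
 * each acquisition gets a label that releases it, and a failure at step
 * N jumps to the label that undoes steps N-1..1 in reverse order.
 * Skeleton with hypothetical names:
 */
#include <linux/slab.h>
#include <linux/err.h>

struct demo_path {
	void *stats;
	void *bufs;
};

static struct demo_path *demo_path_alloc(size_t nbufs)
{
	struct demo_path *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto err;
	p->stats = kzalloc(64, GFP_KERNEL);
	if (!p->stats)
		goto err_free_path;
	p->bufs = kcalloc(nbufs, 64, GFP_KERNEL);
	if (!p->bufs)
		goto err_free_stats;
	return p;

err_free_stats:
	kfree(p->stats);
err_free_path:
	kfree(p);
err:
	return ERR_PTR(-ENOMEM);
}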
err_free_sess: - kfree(sess); + kfree(srv_path); err: return ERR_PTR(err); } @@ -1816,8 +1826,8 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, size_t len) { struct rtrs_srv_ctx *ctx = cm_id->context; - struct rtrs_srv_sess *sess; - struct rtrs_srv *srv; + struct rtrs_srv_path *srv_path; + struct rtrs_srv_sess *srv; u16 version, con_num, cid; u16 recon_cnt; @@ -1857,16 +1867,16 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, goto reject_w_err; } mutex_lock(&srv->paths_mutex); - sess = __find_sess(srv, &msg->sess_uuid); - if (sess) { - struct rtrs_sess *s = &sess->s; + srv_path = __find_path(srv, &msg->sess_uuid); + if (srv_path) { + struct rtrs_path *s = &srv_path->s; /* Session already holds a reference */ put_srv(srv); - if (sess->state != RTRS_SRV_CONNECTING) { + if (srv_path->state != RTRS_SRV_CONNECTING) { rtrs_err(s, "Session in wrong state: %s\n", - rtrs_srv_state_str(sess->state)); + rtrs_srv_state_str(srv_path->state)); mutex_unlock(&srv->paths_mutex); goto reject_w_err; } @@ -1886,19 +1896,19 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, goto reject_w_err; } } else { - sess = __alloc_sess(srv, cm_id, con_num, recon_cnt, + srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, &msg->sess_uuid); - if (IS_ERR(sess)) { + if (IS_ERR(srv_path)) { mutex_unlock(&srv->paths_mutex); put_srv(srv); - err = PTR_ERR(sess); + err = PTR_ERR(srv_path); pr_err("RTRS server session allocation failed: %d\n", err); goto reject_w_err; } } - err = create_con(sess, cm_id, cid); + err = create_con(srv_path, cm_id, cid); if (err) { - rtrs_err((&sess->s), "create_con(), error %d\n", err); + rtrs_err((&srv_path->s), "create_con(), error %d\n", err); rtrs_rdma_do_reject(cm_id, err); /* * Since session has other connections we follow normal way @@ -1907,9 +1917,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, */ goto close_and_return_err; } - err = rtrs_rdma_do_accept(sess, cm_id); + err = rtrs_rdma_do_accept(srv_path, cm_id); if (err) { - rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err); + rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err); rtrs_rdma_do_reject(cm_id, err); /* * Since current connection was successfully added to the @@ -1929,7 +1939,7 @@ reject_w_err: close_and_return_err: mutex_unlock(&srv->paths_mutex); - close_sess(sess); + close_path(srv_path); return err; } @@ -1937,14 +1947,14 @@ close_and_return_err: static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *ev) { - struct rtrs_srv_sess *sess = NULL; - struct rtrs_sess *s = NULL; + struct rtrs_srv_path *srv_path = NULL; + struct rtrs_path *s = NULL; if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) { struct rtrs_con *c = cm_id->context; - s = c->sess; - sess = to_srv_sess(s); + s = c->path; + srv_path = to_srv_path(s); } switch (ev->event) { @@ -1968,7 +1978,7 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_TIMEWAIT_EXIT: case RDMA_CM_EVENT_DEVICE_REMOVAL: - close_sess(sess); + close_path(srv_path); break; default: pr_err("Ignoring unexpected CM event %s, err %d\n", @@ -2176,23 +2186,23 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port) } EXPORT_SYMBOL(rtrs_srv_open); -static void close_sessions(struct rtrs_srv *srv) +static void close_paths(struct rtrs_srv_sess *srv) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; mutex_lock(&srv->paths_mutex); - list_for_each_entry(sess, &srv->paths_list, s.entry) - close_sess(sess); + 
list_for_each_entry(srv_path, &srv->paths_list, s.entry) + close_path(srv_path); mutex_unlock(&srv->paths_mutex); } static void close_ctx(struct rtrs_srv_ctx *ctx) { - struct rtrs_srv *srv; + struct rtrs_srv_sess *srv; mutex_lock(&ctx->srv_mutex); list_for_each_entry(srv, &ctx->srv_list, ctx_list) - close_sessions(srv); + close_paths(srv); mutex_unlock(&ctx->srv_mutex); flush_workqueue(rtrs_wq); } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h index 7d403c12faf3..6292e87f6afd 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h @@ -37,7 +37,7 @@ struct rtrs_srv_stats_rdma_stats { struct rtrs_srv_stats { struct kobject kobj_stats; struct rtrs_srv_stats_rdma_stats rdma_stats; - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; }; struct rtrs_srv_con { @@ -71,9 +71,9 @@ struct rtrs_srv_mr { struct rtrs_iu *iu; /* send buffer for new rkey msg */ }; -struct rtrs_srv_sess { - struct rtrs_sess s; - struct rtrs_srv *srv; +struct rtrs_srv_path { + struct rtrs_path s; + struct rtrs_srv_sess *srv; struct work_struct close_work; enum rtrs_srv_state state; spinlock_t state_lock; @@ -90,7 +90,7 @@ struct rtrs_srv_sess { struct rtrs_srv_stats *stats; }; -struct rtrs_srv { +struct rtrs_srv_sess { struct list_head paths_list; int paths_up; struct mutex paths_ev_mutex; @@ -125,7 +125,7 @@ struct rtrs_srv_ib_ctx { extern struct class *rtrs_dev_class; -void close_sess(struct rtrs_srv_sess *sess); +void close_path(struct rtrs_srv_path *srv_path); static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s, size_t size, int d) @@ -142,7 +142,7 @@ ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats, char *page, size_t len); /* functions which are implemented in rtrs-srv-sysfs.c */ -int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess); -void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess); +int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path); +void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path); #endif /* RTRS_SRV_H */ diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c index 37952c8e768c..4da889103a5f 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs.c @@ -69,16 +69,16 @@ EXPORT_SYMBOL_GPL(rtrs_iu_free); int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu) { - struct rtrs_sess *sess = con->sess; + struct rtrs_path *path = con->path; struct ib_recv_wr wr; struct ib_sge list; list.addr = iu->dma_addr; list.length = iu->size; - list.lkey = sess->dev->ib_pd->local_dma_lkey; + list.lkey = path->dev->ib_pd->local_dma_lkey; if (list.length == 0) { - rtrs_wrn(con->sess, + rtrs_wrn(con->path, "Posting receive work request failed, sg list is empty\n"); return -EINVAL; } @@ -126,7 +126,7 @@ static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head, int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size, struct ib_send_wr *head) { - struct rtrs_sess *sess = con->sess; + struct rtrs_path *path = con->path; struct ib_send_wr wr; struct ib_sge list; @@ -135,7 +135,7 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size, list.addr = iu->dma_addr; list.length = size; - list.lkey = sess->dev->ib_pd->local_dma_lkey; + list.lkey = path->dev->ib_pd->local_dma_lkey; wr = (struct ib_send_wr) { .wr_cqe = &iu->cqe, @@ -188,11 +188,11 @@ static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_send_wr *head) { struct ib_rdma_wr 
wr; - struct rtrs_sess *sess = con->sess; + struct rtrs_path *path = con->path; enum ib_send_flags sflags; atomic_dec_if_positive(&con->sq_wr_avail); - sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ? + sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ? 0 : IB_SEND_SIGNALED; wr = (struct ib_rdma_wr) { @@ -211,12 +211,12 @@ static void qp_event_handler(struct ib_event *ev, void *ctx) switch (ev->event) { case IB_EVENT_COMM_EST: - rtrs_info(con->sess, "QP event %s (%d) received\n", + rtrs_info(con->path, "QP event %s (%d) received\n", ib_event_msg(ev->event), ev->event); rdma_notify(con->cm_id, IB_EVENT_COMM_EST); break; default: - rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n", + rtrs_info(con->path, "Unhandled QP event %s (%d) received\n", ib_event_msg(ev->event), ev->event); break; } @@ -224,7 +224,7 @@ static void qp_event_handler(struct ib_event *ev, void *ctx) static bool is_pollqueue(struct rtrs_con *con) { - return con->cid >= con->sess->irq_con_num; + return con->cid >= con->path->irq_con_num; } static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe, @@ -240,7 +240,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe, cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx); if (IS_ERR(cq)) { - rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n", + rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n", PTR_ERR(cq)); return PTR_ERR(cq); } @@ -271,7 +271,7 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd, ret = rdma_create_qp(cm_id, pd, &init_attr); if (ret) { - rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret); + rtrs_err(con->path, "Creating QP failed, err: %d\n", ret); return ret; } con->qp = cm_id->qp; @@ -290,7 +290,7 @@ static void destroy_cq(struct rtrs_con *con) con->cq = NULL; } -int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, +int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con, u32 max_send_sge, int cq_vector, int nr_cqe, u32 max_send_wr, u32 max_recv_wr, enum ib_poll_context poll_ctx) @@ -301,13 +301,13 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, if (err) return err; - err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr, + err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr, max_send_sge); if (err) { destroy_cq(con); return err; } - con->sess = sess; + con->path = path; return 0; } @@ -323,24 +323,24 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con) } EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy); -static void schedule_hb(struct rtrs_sess *sess) +static void schedule_hb(struct rtrs_path *path) { - queue_delayed_work(sess->hb_wq, &sess->hb_dwork, - msecs_to_jiffies(sess->hb_interval_ms)); + queue_delayed_work(path->hb_wq, &path->hb_dwork, + msecs_to_jiffies(path->hb_interval_ms)); } -void rtrs_send_hb_ack(struct rtrs_sess *sess) +void rtrs_send_hb_ack(struct rtrs_path *path) { - struct rtrs_con *usr_con = sess->con[0]; + struct rtrs_con *usr_con = path->con[0]; u32 imm; int err; imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0); - err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, + err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm, NULL); if (err) { - rtrs_err(sess, "send HB ACK failed, errno: %d\n", err); - sess->hb_err_handler(usr_con); + rtrs_err(path, "send HB ACK failed, errno: %d\n", err); + path->hb_err_handler(usr_con); return; } } @@ -349,63 +349,63 @@ EXPORT_SYMBOL_GPL(rtrs_send_hb_ack); static void hb_work(struct work_struct 
*work) { struct rtrs_con *usr_con; - struct rtrs_sess *sess; + struct rtrs_path *path; u32 imm; int err; - sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork); - usr_con = sess->con[0]; + path = container_of(to_delayed_work(work), typeof(*path), hb_dwork); + usr_con = path->con[0]; - if (sess->hb_missed_cnt > sess->hb_missed_max) { - rtrs_err(sess, "HB missed max reached.\n"); - sess->hb_err_handler(usr_con); + if (path->hb_missed_cnt > path->hb_missed_max) { + rtrs_err(path, "HB missed max reached.\n"); + path->hb_err_handler(usr_con); return; } - if (sess->hb_missed_cnt++) { + if (path->hb_missed_cnt++) { /* Reschedule work without sending hb */ - schedule_hb(sess); + schedule_hb(path); return; } - sess->hb_last_sent = ktime_get(); + path->hb_last_sent = ktime_get(); imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0); - err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, + err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm, NULL); if (err) { - rtrs_err(sess, "HB send failed, errno: %d\n", err); - sess->hb_err_handler(usr_con); + rtrs_err(path, "HB send failed, errno: %d\n", err); + path->hb_err_handler(usr_con); return; } - schedule_hb(sess); + schedule_hb(path); } -void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe, +void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe, unsigned int interval_ms, unsigned int missed_max, void (*err_handler)(struct rtrs_con *con), struct workqueue_struct *wq) { - sess->hb_cqe = cqe; - sess->hb_interval_ms = interval_ms; - sess->hb_err_handler = err_handler; - sess->hb_wq = wq; - sess->hb_missed_max = missed_max; - sess->hb_missed_cnt = 0; - INIT_DELAYED_WORK(&sess->hb_dwork, hb_work); + path->hb_cqe = cqe; + path->hb_interval_ms = interval_ms; + path->hb_err_handler = err_handler; + path->hb_wq = wq; + path->hb_missed_max = missed_max; + path->hb_missed_cnt = 0; + INIT_DELAYED_WORK(&path->hb_dwork, hb_work); } EXPORT_SYMBOL_GPL(rtrs_init_hb); -void rtrs_start_hb(struct rtrs_sess *sess) +void rtrs_start_hb(struct rtrs_path *path) { - schedule_hb(sess); + schedule_hb(path); } EXPORT_SYMBOL_GPL(rtrs_start_hb); -void rtrs_stop_hb(struct rtrs_sess *sess) +void rtrs_stop_hb(struct rtrs_path *path) { - cancel_delayed_work_sync(&sess->hb_dwork); - sess->hb_missed_cnt = 0; + cancel_delayed_work_sync(&path->hb_dwork); + path->hb_missed_cnt = 0; } EXPORT_SYMBOL_GPL(rtrs_stop_hb); diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h index 859c79685daf..5e57a7ccc7fb 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.h +++ b/drivers/infiniband/ulp/rtrs/rtrs.h @@ -13,9 +13,9 @@ #include <linux/scatterlist.h> struct rtrs_permit; -struct rtrs_clt; +struct rtrs_clt_sess; struct rtrs_srv_ctx; -struct rtrs_srv; +struct rtrs_srv_sess; struct rtrs_srv_op; /* @@ -52,14 +52,14 @@ struct rtrs_clt_ops { void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev); }; -struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, - const char *sessname, +struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops, + const char *pathname, const struct rtrs_addr *paths, size_t path_cnt, u16 port, size_t pdu_sz, u8 reconnect_delay_sec, s16 max_reconnect_attempts, u32 nr_poll_queues); -void rtrs_clt_close(struct rtrs_clt *sess); +void rtrs_clt_close(struct rtrs_clt_sess *clt); enum wait_type { RTRS_PERMIT_NOWAIT = 0, @@ -77,11 +77,12 @@ enum rtrs_clt_con_type { RTRS_IO_CON }; -struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess, - enum rtrs_clt_con_type con_type, - enum wait_type wait); +struct rtrs_permit 
*rtrs_clt_get_permit(struct rtrs_clt_sess *sess, + enum rtrs_clt_con_type con_type, + enum wait_type wait); -void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit); +void rtrs_clt_put_permit(struct rtrs_clt_sess *sess, + struct rtrs_permit *permit); /** * rtrs_clt_req_ops - it holds the request confirmation callback @@ -98,10 +99,10 @@ struct rtrs_clt_req_ops { }; int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, - struct rtrs_clt *sess, struct rtrs_permit *permit, + struct rtrs_clt_sess *sess, struct rtrs_permit *permit, const struct kvec *vec, size_t nr, size_t len, struct scatterlist *sg, unsigned int sg_cnt); -int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index); +int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index); /** * rtrs_attrs - RTRS session attributes @@ -112,7 +113,7 @@ struct rtrs_attrs { u32 max_segments; }; -int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr); +int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr); /* * Here goes RTRS server API @@ -163,7 +164,7 @@ struct rtrs_srv_ops { * @priv: Private data from user if previously set with * rtrs_srv_set_sess_priv() */ - int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev, + int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev, void *priv); }; @@ -173,11 +174,12 @@ void rtrs_srv_close(struct rtrs_srv_ctx *ctx); bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno); -void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv); +void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv); -int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len); +int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname, + size_t len); -int rtrs_srv_get_queue_depth(struct rtrs_srv *sess); +int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess); int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port, struct rtrs_addr *addr); diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c index 1cf5deda06e1..fa8d1a466014 100644 --- a/drivers/input/ff-core.c +++ b/drivers/input/ff-core.c @@ -67,7 +67,7 @@ static int compat_effect(struct ff_device *ff, struct ff_effect *effect) effect->type = FF_PERIODIC; effect->u.periodic.waveform = FF_SINE; effect->u.periodic.period = 50; - effect->u.periodic.magnitude = max(magnitude, 0x7fff); + effect->u.periodic.magnitude = magnitude; effect->u.periodic.offset = 0; effect->u.periodic.phase = 0; effect->u.periodic.envelope.attack_length = 0; diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 8dbf1e69c90a..d75a8b179a8a 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -247,7 +247,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, ssize_t error; int i; - bits = bitmap_zalloc(n_events, GFP_KERNEL); + bits = bitmap_alloc(n_events, GFP_KERNEL); if (!bits) return -ENOMEM; diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index e09b1fae42e1..04da7916eb70 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -206,11 +206,8 @@ ATTRIBUTE_GROUPS(axp20x); static irqreturn_t axp20x_pek_irq(int irq, void *pwr) { - struct axp20x_pek *axp20x_pek = pwr; - struct input_dev *idev = axp20x_pek->input; - - if (!idev) - return IRQ_HANDLED; + struct input_dev *idev = pwr; + struct axp20x_pek *axp20x_pek = input_get_drvdata(idev); /* * The power-button is connected to ground 
so a falling edge (dbf) @@ -229,9 +226,22 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr) static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, struct platform_device *pdev) { + struct axp20x_dev *axp20x = axp20x_pek->axp20x; struct input_dev *idev; int error; + axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); + if (axp20x_pek->irq_dbr < 0) + return axp20x_pek->irq_dbr; + axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc, + axp20x_pek->irq_dbr); + + axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); + if (axp20x_pek->irq_dbf < 0) + return axp20x_pek->irq_dbf; + axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc, + axp20x_pek->irq_dbf); + axp20x_pek->input = devm_input_allocate_device(&pdev->dev); if (!axp20x_pek->input) return -ENOMEM; @@ -246,6 +256,24 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, input_set_drvdata(idev, axp20x_pek); + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, + axp20x_pek_irq, 0, + "axp20x-pek-dbr", idev); + if (error < 0) { + dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", + axp20x_pek->irq_dbr, error); + return error; + } + + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, + axp20x_pek_irq, 0, + "axp20x-pek-dbf", idev); + if (error < 0) { + dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", + axp20x_pek->irq_dbf, error); + return error; + } + error = input_register_device(idev); if (error) { dev_err(&pdev->dev, "Can't register input device: %d\n", @@ -253,6 +281,8 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, return error; } + device_init_wakeup(&pdev->dev, true); + return 0; } @@ -293,18 +323,6 @@ static int axp20x_pek_probe(struct platform_device *pdev) axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); - axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); - if (axp20x_pek->irq_dbr < 0) - return axp20x_pek->irq_dbr; - axp20x_pek->irq_dbr = regmap_irq_get_virq( - axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr); - - axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); - if (axp20x_pek->irq_dbf < 0) - return axp20x_pek->irq_dbf; - axp20x_pek->irq_dbf = regmap_irq_get_virq( - axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf); - if (axp20x_pek_should_register_input(axp20x_pek)) { error = axp20x_pek_probe_input_device(axp20x_pek, pdev); if (error) @@ -313,26 +331,6 @@ static int axp20x_pek_probe(struct platform_device *pdev) axp20x_pek->info = (struct axp20x_info *)match->driver_data; - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, - axp20x_pek_irq, 0, - "axp20x-pek-dbr", axp20x_pek); - if (error < 0) { - dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", - axp20x_pek->irq_dbr, error); - return error; - } - - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, - axp20x_pek_irq, 0, - "axp20x-pek-dbf", axp20x_pek); - if (error < 0) { - dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", - axp20x_pek->irq_dbf, error); - return error; - } - - device_init_wakeup(&pdev->dev, true); - platform_set_drvdata(pdev, axp20x_pek); return 0; diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c index f9b05cf09ff5..2213e06b611d 100644 --- a/drivers/input/misc/palmas-pwrbutton.c +++ b/drivers/input/misc/palmas-pwrbutton.c @@ -15,6 +15,7 @@ * GNU General Public License for more details. 
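The axp20x-pek rework in the hunks above is, at heart, an ordering fix: the IRQs are now requested from the input-device probe path, after the input device has been allocated and configured, and the input device itself is passed as the dev_id cookie, which lets the handler drop its old NULL check. Below is a minimal sketch of that cookie pattern; struct example_priv and the example_* function names are illustrative stand-ins, not code from the patch, and only the kernel APIs the patch itself uses appear in it.

#include <linux/device.h>
#include <linux/input.h>
#include <linux/interrupt.h>

struct example_priv {
	struct input_dev *input;
	int irq;
};

static irqreturn_t example_pek_irq(int irq, void *dev_id)
{
	/* dev_id is the input device, so it is valid by construction */
	struct input_dev *idev = dev_id;
	struct example_priv *priv = input_get_drvdata(idev);

	if (irq != priv->irq)
		return IRQ_NONE;

	input_report_key(idev, KEY_POWER, 1);
	input_sync(idev);
	input_report_key(idev, KEY_POWER, 0);
	input_sync(idev);
	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, struct example_priv *priv)
{
	input_set_drvdata(priv->input, priv);
	/*
	 * Request the IRQ only once the input device is fully set up;
	 * returns a negative errno on failure, non-negative on success.
	 */
	return devm_request_any_context_irq(dev, priv->irq, example_pek_irq,
					    0, "example-pek", priv->input);
}

The design point is that the handler can never observe a half-initialized device: by the time the IRQ is live, its cookie is the registered input device.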
*/ +#include <linux/bitfield.h> #include <linux/init.h> #include <linux/input.h> #include <linux/interrupt.h> @@ -115,8 +116,8 @@ static void palmas_pwron_params_ofinit(struct device *dev, struct device_node *np; u32 val; int i, error; - u8 lpk_times[] = { 6, 8, 10, 12 }; - int pwr_on_deb_ms[] = { 15, 100, 500, 1000 }; + static const u8 lpk_times[] = { 6, 8, 10, 12 }; + static const int pwr_on_deb_ms[] = { 15, 100, 500, 1000 }; memset(config, 0, sizeof(*config)); @@ -192,8 +193,8 @@ static int palmas_pwron_probe(struct platform_device *pdev) * Setup default hardware shutdown option (long key press) * and debounce. */ - val = config.long_press_time_val << __ffs(PALMAS_LPK_TIME_MASK); - val |= config.pwron_debounce_val << __ffs(PALMAS_PWRON_DEBOUNCE_MASK); + val = FIELD_PREP(PALMAS_LPK_TIME_MASK, config.long_press_time_val) | + FIELD_PREP(PALMAS_PWRON_DEBOUNCE_MASK, config.pwron_debounce_val); error = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE, PALMAS_LONG_PRESS_KEY, PALMAS_LPK_TIME_MASK | diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c index 6e0c5f5a2713..221a553f45cd 100644 --- a/drivers/input/mouse/byd.c +++ b/drivers/input/mouse/byd.c @@ -191,7 +191,7 @@ /* * The touchpad generates a mixture of absolute and relative packets, indicated - * by the the last byte of each packet being set to one of the following: + * by the last byte of each packet being set to one of the following: */ #define BYD_PACKET_ABSOLUTE 0xf8 #define BYD_PACKET_RELATIVE 0x00 diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index aaa3c455e01e..a3bfc7a41679 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -297,6 +297,108 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) return -ENOMSG; } +static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts) +{ + struct device *dev = &ts->client->dev; + struct input_dev *input; + + input = devm_input_allocate_device(dev); + if (!input) + return NULL; + + input_alloc_absinfo(input); + if (!input->absinfo) { + input_free_device(input); + return NULL; + } + + input->absinfo[ABS_X] = ts->input_dev->absinfo[ABS_MT_POSITION_X]; + input->absinfo[ABS_Y] = ts->input_dev->absinfo[ABS_MT_POSITION_Y]; + __set_bit(ABS_X, input->absbit); + __set_bit(ABS_Y, input->absbit); + input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0); + + input_set_capability(input, EV_KEY, BTN_TOUCH); + input_set_capability(input, EV_KEY, BTN_TOOL_PEN); + input_set_capability(input, EV_KEY, BTN_STYLUS); + input_set_capability(input, EV_KEY, BTN_STYLUS2); + __set_bit(INPUT_PROP_DIRECT, input->propbit); + /* + * The resolution of these touchscreens is about 10 units/mm, the actual + * resolution does not matter much since we set INPUT_PROP_DIRECT. + * Userspace wants something here though, so just set it to 10 units/mm. 
+ */ + input_abs_set_res(input, ABS_X, 10); + input_abs_set_res(input, ABS_Y, 10); + + input->name = "Goodix Active Pen"; + input->phys = "input/pen"; + input->id.bustype = BUS_I2C; + input->id.vendor = 0x0416; + if (kstrtou16(ts->id, 10, &input->id.product)) + input->id.product = 0x1001; + input->id.version = ts->version; + + if (input_register_device(input) != 0) { + input_free_device(input); + return NULL; + } + + return input; +} + +static void goodix_ts_report_pen_down(struct goodix_ts_data *ts, u8 *data) +{ + int input_x, input_y, input_w; + u8 key_value; + + if (!ts->input_pen) { + ts->input_pen = goodix_create_pen_input(ts); + if (!ts->input_pen) + return; + } + + if (ts->contact_size == 9) { + input_x = get_unaligned_le16(&data[4]); + input_y = get_unaligned_le16(&data[6]); + input_w = get_unaligned_le16(&data[8]); + } else { + input_x = get_unaligned_le16(&data[2]); + input_y = get_unaligned_le16(&data[4]); + input_w = get_unaligned_le16(&data[6]); + } + + touchscreen_report_pos(ts->input_pen, &ts->prop, input_x, input_y, false); + input_report_abs(ts->input_pen, ABS_PRESSURE, input_w); + + input_report_key(ts->input_pen, BTN_TOUCH, 1); + input_report_key(ts->input_pen, BTN_TOOL_PEN, 1); + + if (data[0] & GOODIX_HAVE_KEY) { + key_value = data[1 + ts->contact_size]; + input_report_key(ts->input_pen, BTN_STYLUS, key_value & 0x10); + input_report_key(ts->input_pen, BTN_STYLUS2, key_value & 0x20); + } else { + input_report_key(ts->input_pen, BTN_STYLUS, 0); + input_report_key(ts->input_pen, BTN_STYLUS2, 0); + } + + input_sync(ts->input_pen); +} + +static void goodix_ts_report_pen_up(struct goodix_ts_data *ts) +{ + if (!ts->input_pen) + return; + + input_report_key(ts->input_pen, BTN_TOUCH, 0); + input_report_key(ts->input_pen, BTN_TOOL_PEN, 0); + input_report_key(ts->input_pen, BTN_STYLUS, 0); + input_report_key(ts->input_pen, BTN_STYLUS2, 0); + + input_sync(ts->input_pen); +} + static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data) { int id = coor_data[0] & 0x0F; @@ -327,6 +429,14 @@ static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data) input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w); } +static void goodix_ts_release_keys(struct goodix_ts_data *ts) +{ + int i; + + for (i = 0; i < GOODIX_MAX_KEYS; i++) + input_report_key(ts->input_dev, ts->keymap[i], 0); +} + static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data) { int touch_num; @@ -341,8 +451,7 @@ static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data) input_report_key(ts->input_dev, ts->keymap[i], 1); } else { - for (i = 0; i < GOODIX_MAX_KEYS; i++) - input_report_key(ts->input_dev, ts->keymap[i], 0); + goodix_ts_release_keys(ts); } } @@ -364,6 +473,15 @@ static void goodix_process_events(struct goodix_ts_data *ts) if (touch_num < 0) return; + /* The pen being down is always reported as a single touch */ + if (touch_num == 1 && (point_data[1] & 0x80)) { + goodix_ts_report_pen_down(ts, point_data); + goodix_ts_release_keys(ts); + goto sync; /* Release any previously registered touches */ + } else { + goodix_ts_report_pen_up(ts); + } + goodix_ts_report_key(ts, point_data); for (i = 0; i < touch_num; i++) @@ -374,6 +492,7 @@ static void goodix_process_events(struct goodix_ts_data *ts) goodix_ts_report_touch_8b(ts, &point_data[1 + ts->contact_size * i]); +sync: input_mt_sync_frame(ts->input_dev); input_sync(ts->input_dev); } @@ -857,7 +976,7 @@ retry_get_irq_gpio: if (IS_ERR(gpiod)) { error = PTR_ERR(gpiod); if (error != -EPROBE_DEFER) - 
dev_dbg(dev, "Failed to get %s GPIO: %d\n", + dev_err(dev, "Failed to get %s GPIO: %d\n", GOODIX_GPIO_INT_NAME, error); return error; } @@ -874,7 +993,7 @@ retry_get_irq_gpio: if (IS_ERR(gpiod)) { error = PTR_ERR(gpiod); if (error != -EPROBE_DEFER) - dev_dbg(dev, "Failed to get %s GPIO: %d\n", + dev_err(dev, "Failed to get %s GPIO: %d\n", GOODIX_GPIO_RST_NAME, error); return error; } diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h index 02065d1c3263..fa8602e78a64 100644 --- a/drivers/input/touchscreen/goodix.h +++ b/drivers/input/touchscreen/goodix.h @@ -76,6 +76,7 @@ struct goodix_chip_data { struct goodix_ts_data { struct i2c_client *client; struct input_dev *input_dev; + struct input_dev *input_pen; const struct goodix_chip_data *chip; const char *firmware_name; struct touchscreen_properties prop; diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index 1ee760bac0cf..3eef8c01090f 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -67,6 +67,7 @@ struct silead_ts_data { struct i2c_client *client; struct gpio_desc *gpio_power; struct input_dev *input; + struct input_dev *pen_input; struct regulator_bulk_data regulators[2]; char fw_name[64]; struct touchscreen_properties prop; @@ -75,6 +76,13 @@ struct silead_ts_data { struct input_mt_pos pos[SILEAD_MAX_FINGERS]; int slots[SILEAD_MAX_FINGERS]; int id[SILEAD_MAX_FINGERS]; + u32 efi_fw_min_max[4]; + bool efi_fw_min_max_set; + bool pen_supported; + bool pen_down; + u32 pen_x_res; + u32 pen_y_res; + int pen_up_count; }; struct silead_fw_data { @@ -82,6 +90,35 @@ struct silead_fw_data { u32 val; }; +static void silead_apply_efi_fw_min_max(struct silead_ts_data *data) +{ + struct input_absinfo *absinfo_x = &data->input->absinfo[ABS_MT_POSITION_X]; + struct input_absinfo *absinfo_y = &data->input->absinfo[ABS_MT_POSITION_Y]; + + if (!data->efi_fw_min_max_set) + return; + + absinfo_x->minimum = data->efi_fw_min_max[0]; + absinfo_x->maximum = data->efi_fw_min_max[1]; + absinfo_y->minimum = data->efi_fw_min_max[2]; + absinfo_y->maximum = data->efi_fw_min_max[3]; + + if (data->prop.invert_x) { + absinfo_x->maximum -= absinfo_x->minimum; + absinfo_x->minimum = 0; + } + + if (data->prop.invert_y) { + absinfo_y->maximum -= absinfo_y->minimum; + absinfo_y->minimum = 0; + } + + if (data->prop.swap_x_y) { + swap(absinfo_x->minimum, absinfo_y->minimum); + swap(absinfo_x->maximum, absinfo_y->maximum); + } +} + static int silead_ts_request_input_dev(struct silead_ts_data *data) { struct device *dev = &data->client->dev; @@ -97,6 +134,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data) input_set_abs_params(data->input, ABS_MT_POSITION_X, 0, 4095, 0, 0); input_set_abs_params(data->input, ABS_MT_POSITION_Y, 0, 4095, 0, 0); touchscreen_parse_properties(data->input, true, &data->prop); + silead_apply_efi_fw_min_max(data); input_mt_init_slots(data->input, data->max_fingers, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED | @@ -112,6 +150,40 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data) error = input_register_device(data->input); if (error) { dev_err(dev, "Failed to register input device: %d\n", error); + return error; + } + + return 0; +} + +static int silead_ts_request_pen_input_dev(struct silead_ts_data *data) +{ + struct device *dev = &data->client->dev; + int error; + + if (!data->pen_supported) + return 0; + + data->pen_input = devm_input_allocate_device(dev); + if (!data->pen_input) + return -ENOMEM; + + 
input_set_abs_params(data->pen_input, ABS_X, 0, 4095, 0, 0); + input_set_abs_params(data->pen_input, ABS_Y, 0, 4095, 0, 0); + input_set_capability(data->pen_input, EV_KEY, BTN_TOUCH); + input_set_capability(data->pen_input, EV_KEY, BTN_TOOL_PEN); + set_bit(INPUT_PROP_DIRECT, data->pen_input->propbit); + touchscreen_parse_properties(data->pen_input, false, &data->prop); + input_abs_set_res(data->pen_input, ABS_X, data->pen_x_res); + input_abs_set_res(data->pen_input, ABS_Y, data->pen_y_res); + + data->pen_input->name = SILEAD_TS_NAME " pen"; + data->pen_input->phys = "input/pen"; + data->input->id.bustype = BUS_I2C; + + error = input_register_device(data->pen_input); + if (error) { + dev_err(dev, "Failed to register pen input device: %d\n", error); return error; } @@ -129,6 +201,45 @@ static void silead_ts_set_power(struct i2c_client *client, } } +static bool silead_ts_handle_pen_data(struct silead_ts_data *data, u8 *buf) +{ + u8 *coord = buf + SILEAD_POINT_DATA_LEN; + struct input_mt_pos pos; + + if (!data->pen_supported || buf[2] != 0x00 || buf[3] != 0x00) + return false; + + if (buf[0] == 0x00 && buf[1] == 0x00 && data->pen_down) { + data->pen_up_count++; + if (data->pen_up_count == 6) { + data->pen_down = false; + goto sync; + } + return true; + } + + if (buf[0] == 0x01 && buf[1] == 0x08) { + touchscreen_set_mt_pos(&pos, &data->prop, + get_unaligned_le16(&coord[SILEAD_POINT_X_OFF]) & 0xfff, + get_unaligned_le16(&coord[SILEAD_POINT_Y_OFF]) & 0xfff); + + input_report_abs(data->pen_input, ABS_X, pos.x); + input_report_abs(data->pen_input, ABS_Y, pos.y); + + data->pen_up_count = 0; + data->pen_down = true; + goto sync; + } + + return false; + +sync: + input_report_key(data->pen_input, BTN_TOOL_PEN, data->pen_down); + input_report_key(data->pen_input, BTN_TOUCH, data->pen_down); + input_sync(data->pen_input); + return true; +} + static void silead_ts_read_data(struct i2c_client *client) { struct silead_ts_data *data = i2c_get_clientdata(client); @@ -151,6 +262,9 @@ static void silead_ts_read_data(struct i2c_client *client) buf[0] = data->max_fingers; } + if (silead_ts_handle_pen_data(data, buf)) + goto sync; /* Pen is down, release all previous touches */ + touch_nr = 0; bufp = buf + SILEAD_POINT_DATA_LEN; for (i = 0; i < buf[0]; i++, bufp += SILEAD_POINT_DATA_LEN) { @@ -193,6 +307,7 @@ static void silead_ts_read_data(struct i2c_client *client) data->pos[i].y, data->id[i], data->slots[i]); } +sync: input_mt_sync_frame(input); input_report_key(input, KEY_LEFTMETA, softbutton_pressed); input_sync(input); @@ -282,17 +397,56 @@ static int silead_ts_load_fw(struct i2c_client *client) { struct device *dev = &client->dev; struct silead_ts_data *data = i2c_get_clientdata(client); - unsigned int fw_size, i; - const struct firmware *fw; + const struct firmware *fw = NULL; struct silead_fw_data *fw_data; + unsigned int fw_size, i; int error; dev_dbg(dev, "Firmware file name: %s", data->fw_name); - error = firmware_request_platform(&fw, data->fw_name, dev); + /* + * Unfortunately, at the time of writing this comment, we have been unable to + * get permission from Silead, or from device OEMs, to distribute the necessary + * Silead firmware files in linux-firmware. + * + * On a whole bunch of devices the UEFI BIOS code contains a touchscreen driver, + * which contains an embedded copy of the firmware. 
The fw-loader code has a + * "platform" fallback mechanism, which together with info on the firmware + * from drivers/platform/x86/touchscreen_dmi.c will use the firmware from the + * UEFI driver when the firmware is missing from /lib/firmware. This makes the + * touchscreen work OOTB without users needing to manually download the firmware. + * + * The firmware bundled with the original Windows/Android is usually newer than + * the firmware in the UEFI driver and it is better calibrated. This better + * calibration can lead to significant differences in the reported min/max + * coordinates. + * + * To deal with this we first try to load the firmware without "platform" + * fallback. If that fails we retry with "platform" fallback and if that + * succeeds we apply an (optional) set of alternative min/max values from the + * "silead,efi-fw-min-max" property. + */ + error = firmware_request_nowarn(&fw, data->fw_name, dev); if (error) { - dev_err(dev, "Firmware request error %d\n", error); - return error; + error = firmware_request_platform(&fw, data->fw_name, dev); + if (error) { + dev_err(dev, "Firmware request error %d\n", error); + return error; + } + + error = device_property_read_u32_array(dev, "silead,efi-fw-min-max", + data->efi_fw_min_max, + ARRAY_SIZE(data->efi_fw_min_max)); + if (!error) + data->efi_fw_min_max_set = true; + + /* The EFI (platform) embedded fw does not have pen support */ + if (data->pen_supported) { + dev_warn(dev, "Warning loading '%s' from filesystem failed, using EFI embedded copy.\n", + data->fw_name); + dev_warn(dev, "Warning pen support is known to be broken in the EFI embedded fw version\n"); + data->pen_supported = false; + } } fw_size = fw->size / sizeof(*fw_data); @@ -450,6 +604,10 @@ static void silead_ts_read_props(struct i2c_client *client) "silead/%s", str); else dev_dbg(dev, "Firmware file name read error. 
Using default."); + + data->pen_supported = device_property_read_bool(dev, "silead,pen-supported"); + device_property_read_u32(dev, "silead,pen-resolution-x", &data->pen_x_res); + device_property_read_u32(dev, "silead,pen-resolution-y", &data->pen_y_res); } #ifdef CONFIG_ACPI @@ -562,6 +720,10 @@ static int silead_ts_probe(struct i2c_client *client, if (error) return error; + error = silead_ts_request_pen_input_dev(data); + if (error) + return error; + error = devm_request_threaded_irq(dev, client->irq, NULL, silead_ts_threaded_irq_handler, IRQF_ONESHOT, client->name, data); diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 83e685557a19..f2fb6a9a1a57 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -126,12 +126,13 @@ static int titsc_config_wires(struct titsc *ts_dev) static void titsc_step_config(struct titsc *ts_dev) { unsigned int config; - int i; + int i, n; int end_step, first_step, tsc_steps; u32 stepenable; config = STEPCONFIG_MODE_HWSYNC | - STEPCONFIG_AVG_16 | ts_dev->bit_xp; + STEPCONFIG_AVG_16 | ts_dev->bit_xp | + STEPCONFIG_INM_ADCREFM; switch (ts_dev->wires) { case 4: config |= STEPCONFIG_INP(ts_dev->inp_yp) | ts_dev->bit_xn; @@ -150,9 +151,11 @@ static void titsc_step_config(struct titsc *ts_dev) first_step = TOTAL_STEPS - tsc_steps; /* Steps 16 to 16-coordinate_readouts is for X */ end_step = first_step + tsc_steps; + n = 0; for (i = end_step - ts_dev->coordinate_readouts; i < end_step; i++) { titsc_writel(ts_dev, REG_STEPCONFIG(i), config); - titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY); + titsc_writel(ts_dev, REG_STEPDELAY(i), + n++ == 0 ? STEPCONFIG_OPENDLY : 0); } config = 0; @@ -174,9 +177,11 @@ static void titsc_step_config(struct titsc *ts_dev) /* 1 ... coordinate_readouts is for Y */ end_step = first_step + ts_dev->coordinate_readouts; + n = 0; for (i = first_step; i < end_step; i++) { titsc_writel(ts_dev, REG_STEPCONFIG(i), config); - titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY); + titsc_writel(ts_dev, REG_STEPDELAY(i), + n++ == 0 ? 
STEPCONFIG_OPENDLY : 0); } /* Make CHARGECONFIG same as IDLECONFIG */ @@ -195,7 +200,10 @@ static void titsc_step_config(struct titsc *ts_dev) STEPCONFIG_OPENDLY); end_step++; - config |= STEPCONFIG_INP(ts_dev->inp_yn); + config = STEPCONFIG_MODE_HWSYNC | + STEPCONFIG_AVG_16 | ts_dev->bit_yp | + ts_dev->bit_xn | STEPCONFIG_INM_ADCREFM | + STEPCONFIG_INP(ts_dev->inp_yn); titsc_writel(ts_dev, REG_STEPCONFIG(end_step), config); titsc_writel(ts_dev, REG_STEPDELAY(end_step), STEPCONFIG_OPENDLY); @@ -310,7 +318,7 @@ static irqreturn_t titsc_irq(int irq, void *dev) /* * Calculate pressure using formula * Resistance(touch) = x plate resistance * - * x postion/4096 * ((z2 / z1) - 1) + * x position/4096 * ((z2 / z1) - 1) */ z = z1 - z2; z *= x; diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index e3f2c940ef3d..dfd3b35590c3 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c @@ -186,7 +186,6 @@ static irqreturn_t ucb1400_irq(int irqnr, void *devid) { struct ucb1400_ts *ucb = devid; unsigned int x, y, p; - bool penup; if (unlikely(irqnr != ucb->irq)) return IRQ_NONE; @@ -196,8 +195,7 @@ static irqreturn_t ucb1400_irq(int irqnr, void *devid) /* Start with a small delay before checking pendown state */ msleep(UCB1400_TS_POLL_PERIOD); - while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) { - + while (!ucb->stopped && !ucb1400_ts_pen_up(ucb)) { ucb1400_adc_enable(ucb->ac97); x = ucb1400_ts_read_xpos(ucb); y = ucb1400_ts_read_ypos(ucb); diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c index fe4ea6204a4e..141754b2764c 100644 --- a/drivers/input/touchscreen/wacom_i2c.c +++ b/drivers/input/touchscreen/wacom_i2c.c @@ -24,12 +24,19 @@ #define WACOM_IN_PROXIMITY BIT(5) /* Registers */ -#define WACOM_CMD_QUERY0 0x04 -#define WACOM_CMD_QUERY1 0x00 -#define WACOM_CMD_QUERY2 0x33 -#define WACOM_CMD_QUERY3 0x02 -#define WACOM_CMD_THROW0 0x05 -#define WACOM_CMD_THROW1 0x00 +#define WACOM_COMMAND_LSB 0x04 +#define WACOM_COMMAND_MSB 0x00 + +#define WACOM_DATA_LSB 0x05 +#define WACOM_DATA_MSB 0x00 + +/* Report types */ +#define REPORT_FEATURE 0x30 + +/* Requests / operations */ +#define OPCODE_GET_REPORT 0x02 + +#define WACOM_QUERY_REPORT 3 #define WACOM_QUERY_SIZE 19 struct wacom_features { @@ -50,23 +57,24 @@ struct wacom_i2c { static int wacom_query_device(struct i2c_client *client, struct wacom_features *features) { - int ret; - u8 cmd1[] = { WACOM_CMD_QUERY0, WACOM_CMD_QUERY1, - WACOM_CMD_QUERY2, WACOM_CMD_QUERY3 }; - u8 cmd2[] = { WACOM_CMD_THROW0, WACOM_CMD_THROW1 }; + u8 get_query_data_cmd[] = { + WACOM_COMMAND_LSB, + WACOM_COMMAND_MSB, + REPORT_FEATURE | WACOM_QUERY_REPORT, + OPCODE_GET_REPORT, + WACOM_DATA_LSB, + WACOM_DATA_MSB, + }; u8 data[WACOM_QUERY_SIZE]; + int ret; + struct i2c_msg msgs[] = { + /* Request reading of feature ReportID: 3 (Pen Query Data) */ { .addr = client->addr, .flags = 0, - .len = sizeof(cmd1), - .buf = cmd1, - }, - { - .addr = client->addr, - .flags = 0, - .len = sizeof(cmd2), - .buf = cmd2, + .len = sizeof(get_query_data_cmd), + .buf = get_query_data_cmd, }, { .addr = client->addr, diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 1e70b8d2a8d7..7c82c4f5fa6b 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -252,16 +252,27 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541) static int zinitix_init_regulators(struct bt541_ts_data *bt541) { - struct 
i2c_client *client = bt541->client; + struct device *dev = &bt541->client->dev; int error; - bt541->supplies[0].supply = "vdd"; - bt541->supplies[1].supply = "vddo"; - error = devm_regulator_bulk_get(&client->dev, + /* + * Some older device trees have erroneous names for the regulators, + * so check if "vddo" is present and in that case use these names. + * Else use the proper supply names on the component. + */ + if (of_find_property(dev->of_node, "vddo-supply", NULL)) { + bt541->supplies[0].supply = "vdd"; + bt541->supplies[1].supply = "vddo"; + } else { + /* Else use the proper supply names */ + bt541->supplies[0].supply = "vcca"; + bt541->supplies[1].supply = "vdd"; + } + error = devm_regulator_bulk_get(dev, ARRAY_SIZE(bt541->supplies), bt541->supplies); if (error < 0) { - dev_err(&client->dev, "Failed to get regulators: %d\n", error); + dev_err(dev, "Failed to get regulators: %d\n", error); return error; } @@ -560,6 +571,7 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume); #ifdef CONFIG_OF static const struct of_device_id zinitix_of_match[] = { + { .compatible = "zinitix,bt532" }, { .compatible = "zinitix,bt541" }, { } }; diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index daf1e25f6042..91353e651a52 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -35,6 +35,15 @@ config INTERCONNECT_QCOM_MSM8974 This is a driver for the Qualcomm Network-on-Chip on msm8974-based platforms. +config INTERCONNECT_QCOM_MSM8996 + tristate "Qualcomm MSM8996 interconnect driver" + depends on INTERCONNECT_QCOM + depends on QCOM_SMD_RPM + select INTERCONNECT_QCOM_SMD_RPM + help + This is a driver for the Qualcomm Network-on-Chip on msm8996-based + platforms. + config INTERCONNECT_QCOM_OSM_L3 tristate "Qualcomm OSM L3 interconnect driver" depends on INTERCONNECT_QCOM || COMPILE_TEST @@ -42,6 +51,15 @@ config INTERCONNECT_QCOM_OSM_L3 Say y here to support the Operating State Manager (OSM) interconnect driver which controls the scaling of L3 caches on Qualcomm SoCs. +config INTERCONNECT_QCOM_QCM2290 + tristate "Qualcomm QCM2290 interconnect driver" + depends on INTERCONNECT_QCOM + depends on QCOM_SMD_RPM + select INTERCONNECT_QCOM_SMD_RPM + help + This is a driver for the Qualcomm Network-on-Chip on qcm2290-based + platforms. + config INTERCONNECT_QCOM_QCS404 tristate "Qualcomm QCS404 interconnect driver" depends on INTERCONNECT_QCOM @@ -146,5 +164,14 @@ config INTERCONNECT_QCOM_SM8350 This is a driver for the Qualcomm Network-on-Chip on SM8350-based platforms. +config INTERCONNECT_QCOM_SM8450 + tristate "Qualcomm SM8450 interconnect driver" + depends on INTERCONNECT_QCOM_RPMH_POSSIBLE + select INTERCONNECT_QCOM_RPMH + select INTERCONNECT_QCOM_BCM_VOTER + help + This is a driver for the Qualcomm Network-on-Chip on SM8450-based + platforms. 
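The zinitix regulator hunk above is a tidy example of keeping old device trees working by choosing names in data rather than forking code paths: the presence of the erroneous legacy "vddo-supply" property selects the legacy supply names, otherwise the correct ones are used. A minimal sketch of the pattern follows; the wrapper name example_get_supplies is illustrative, while the two API calls are exactly the ones the patch itself uses.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

static int example_get_supplies(struct device *dev,
				struct regulator_bulk_data supplies[2])
{
	/* Old DTs shipped "vdd"/"vddo"; detect them via the legacy property */
	if (of_find_property(dev->of_node, "vddo-supply", NULL)) {
		supplies[0].supply = "vdd";
		supplies[1].supply = "vddo";
	} else {
		supplies[0].supply = "vcca";
		supplies[1].supply = "vdd";
	}

	/* The devm_ variant ties the regulator lifetime to the device */
	return devm_regulator_bulk_get(dev, 2, supplies);
}

Because only the name strings differ, the bulk-get call and all later enable/disable paths stay identical for both generations of device tree.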
+ config INTERCONNECT_QCOM_SMD_RPM tristate diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile index 69300b1d48ef..ceae9bb566c6 100644 --- a/drivers/interconnect/qcom/Makefile +++ b/drivers/interconnect/qcom/Makefile @@ -4,7 +4,9 @@ icc-bcm-voter-objs := bcm-voter.o qnoc-msm8916-objs := msm8916.o qnoc-msm8939-objs := msm8939.o qnoc-msm8974-objs := msm8974.o +qnoc-msm8996-objs := msm8996.o icc-osm-l3-objs := osm-l3.o +qnoc-qcm2290-objs := qcm2290.o qnoc-qcs404-objs := qcs404.o icc-rpmh-obj := icc-rpmh.o qnoc-sc7180-objs := sc7180.o @@ -16,13 +18,16 @@ qnoc-sdx55-objs := sdx55.o qnoc-sm8150-objs := sm8150.o qnoc-sm8250-objs := sm8250.o qnoc-sm8350-objs := sm8350.o +qnoc-sm8450-objs := sm8450.o icc-smd-rpm-objs := smd-rpm.o icc-rpm.o obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o +obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += qnoc-msm8996.o obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o +obj-$(CONFIG_INTERCONNECT_QCOM_QCM2290) += qnoc-qcm2290.o obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o @@ -34,4 +39,5 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o +obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c index ef7999a08c8b..34125e8f8b60 100644 --- a/drivers/interconnect/qcom/icc-rpm.c +++ b/drivers/interconnect/qcom/icc-rpm.c @@ -11,12 +11,20 @@ #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> #include <linux/regmap.h> #include <linux/slab.h> #include "smd-rpm.h" #include "icc-rpm.h" +/* QNOC QoS */ +#define QNOC_QOS_MCTL_LOWn_ADDR(n) (0x8 + (n * 0x1000)) +#define QNOC_QOS_MCTL_DFLT_PRIO_MASK 0x70 +#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT 4 +#define QNOC_QOS_MCTL_URGFWD_EN_MASK 0x8 +#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT 3 + /* BIMC QoS */ #define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n)) #define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n)) @@ -39,6 +47,27 @@ #define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000)) #define NOC_QOS_MODEn_MASK 0x3 +static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw) +{ + struct icc_provider *provider = src->provider; + struct qcom_icc_provider *qp = to_qcom_provider(provider); + struct qcom_icc_node *qn = src->data; + struct qcom_icc_qos *qos = &qn->qos; + int rc; + + rc = regmap_update_bits(qp->regmap, + qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port), + QNOC_QOS_MCTL_DFLT_PRIO_MASK, + qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT); + if (rc) + return rc; + + return regmap_update_bits(qp->regmap, + qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port), + QNOC_QOS_MCTL_URGFWD_EN_MASK, + !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT); +} + static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp, struct qcom_icc_qos *qos, int regnum) @@ -76,7 +105,7 @@ static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw) provider = src->provider; qp = to_qcom_provider(provider); - if (qn->qos.qos_mode != -1) + 
if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID) mode = qn->qos.qos_mode; /* QoS Priority: The QoS Health parameters are getting considered @@ -137,7 +166,7 @@ static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw) return 0; } - if (qn->qos.qos_mode != -1) + if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID) mode = qn->qos.qos_mode; if (mode == NOC_QOS_MODE_FIXED) { @@ -163,10 +192,14 @@ static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw) dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name); - if (qp->is_bimc_node) + switch (qp->type) { + case QCOM_ICC_BIMC: return qcom_icc_set_bimc_qos(node, sum_bw); - - return qcom_icc_set_noc_qos(node, sum_bw); + case QCOM_ICC_QNOC: + return qcom_icc_set_qnoc_qos(node, sum_bw); + default: + return qcom_icc_set_noc_qos(node, sum_bw); + } } static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw) @@ -239,6 +272,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst) rate = max(sum_bw, max_peak_bw); do_div(rate, qn->buswidth); + rate = min_t(u64, rate, LONG_MAX); if (qn->rate == rate) return 0; @@ -307,7 +341,7 @@ int qnoc_probe(struct platform_device *pdev) qp->bus_clks[i].id = cds[i]; qp->num_clks = cd_num; - qp->is_bimc_node = desc->is_bimc_node; + qp->type = desc->type; qp->qos_offset = desc->qos_offset; if (desc->regmap_cfg) { @@ -315,8 +349,13 @@ int qnoc_probe(struct platform_device *pdev) void __iomem *mmio; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) + if (!res) { + /* Try parent's regmap */ + qp->regmap = dev_get_regmap(dev->parent, NULL); + if (qp->regmap) + goto regmap_done; return -ENODEV; + } mmio = devm_ioremap_resource(dev, res); @@ -332,6 +371,7 @@ int qnoc_probe(struct platform_device *pdev) } } +regmap_done: ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks); if (ret) return ret; @@ -340,6 +380,12 @@ int qnoc_probe(struct platform_device *pdev) if (ret) return ret; + if (desc->has_bus_pd) { + ret = dev_pm_domain_attach(dev, true); + if (ret) + return ret; + } + provider = &qp->provider; INIT_LIST_HEAD(&provider->nodes); provider->dev = dev; @@ -377,6 +423,10 @@ int qnoc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, qp); + /* Populate child NoC devices if any */ + if (of_get_child_count(dev->of_node) > 0) + return of_platform_populate(dev->of_node, NULL, NULL, dev); + return 0; err: icc_nodes_remove(provider); diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h index f5744de4da19..26dad006034f 100644 --- a/drivers/interconnect/qcom/icc-rpm.h +++ b/drivers/interconnect/qcom/icc-rpm.h @@ -12,19 +12,25 @@ #define to_qcom_provider(_provider) \ container_of(_provider, struct qcom_icc_provider, provider) +enum qcom_icc_type { + QCOM_ICC_NOC, + QCOM_ICC_BIMC, + QCOM_ICC_QNOC, +}; + /** * struct qcom_icc_provider - Qualcomm specific interconnect provider * @provider: generic interconnect provider * @bus_clks: the clk_bulk_data table of bus clocks * @num_clks: the total number of clk_bulk_data entries - * @is_bimc_node: indicates whether to use bimc specific setting + * @type: the ICC provider type * @qos_offset: offset to QoS registers * @regmap: regmap for QoS registers read/write access */ struct qcom_icc_provider { struct icc_provider provider; int num_clks; - bool is_bimc_node; + enum qcom_icc_type type; struct regmap *regmap; unsigned int qos_offset; struct clk_bulk_data bus_clks[]; @@ -38,6 +44,7 @@ struct qcom_icc_provider { * @ap_owned: indicates if the node is owned by the AP or by the RPM * @qos_mode: 
default qos mode for this node * @qos_port: qos port number for finding qos registers of this node + * @urg_fwd_en: enable urgent forwarding */ struct qcom_icc_qos { u32 areq_prio; @@ -46,6 +53,7 @@ struct qcom_icc_qos { bool ap_owned; int qos_mode; int qos_port; + bool urg_fwd_en; }; /** @@ -77,7 +85,8 @@ struct qcom_icc_desc { size_t num_nodes; const char * const *clocks; size_t num_clocks; - bool is_bimc_node; + bool has_bus_pd; + enum qcom_icc_type type; const struct regmap_config *regmap_cfg; unsigned int qos_offset; }; diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index 3eb7936d2cf6..2c8e12549804 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -21,13 +21,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node) { size_t i; struct qcom_icc_node *qn; + struct qcom_icc_provider *qp; qn = node->data; + qp = to_qcom_provider(node->provider); for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { qn->sum_avg[i] = 0; qn->max_peak[i] = 0; } + + for (i = 0; i < qn->num_bcms; i++) + qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); } EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate); @@ -45,10 +50,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, { size_t i; struct qcom_icc_node *qn; - struct qcom_icc_provider *qp; qn = node->data; - qp = to_qcom_provider(node->provider); if (!tag) tag = QCOM_ICC_TAG_ALWAYS; @@ -68,9 +71,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, *agg_avg += avg_bw; *agg_peak = max_t(u32, *agg_peak, peak_bw); - for (i = 0; i < qn->num_bcms; i++) - qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); - return 0; } EXPORT_SYMBOL_GPL(qcom_icc_aggregate); diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c index e3c995b11357..2f397a7c3322 100644 --- a/drivers/interconnect/qcom/msm8916.c +++ b/drivers/interconnect/qcom/msm8916.c @@ -1229,6 +1229,7 @@ static const struct regmap_config msm8916_snoc_regmap_config = { }; static struct qcom_icc_desc msm8916_snoc = { + .type = QCOM_ICC_NOC, .nodes = msm8916_snoc_nodes, .num_nodes = ARRAY_SIZE(msm8916_snoc_nodes), .regmap_cfg = &msm8916_snoc_regmap_config, @@ -1256,9 +1257,9 @@ static const struct regmap_config msm8916_bimc_regmap_config = { }; static struct qcom_icc_desc msm8916_bimc = { + .type = QCOM_ICC_BIMC, .nodes = msm8916_bimc_nodes, .num_nodes = ARRAY_SIZE(msm8916_bimc_nodes), - .is_bimc_node = true, .regmap_cfg = &msm8916_bimc_regmap_config, .qos_offset = 0x8000, }; @@ -1325,6 +1326,7 @@ static const struct regmap_config msm8916_pcnoc_regmap_config = { }; static struct qcom_icc_desc msm8916_pcnoc = { + .type = QCOM_ICC_NOC, .nodes = msm8916_pcnoc_nodes, .num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes), .regmap_cfg = &msm8916_pcnoc_regmap_config, diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c index 16272a477bd8..d188f3636e4c 100644 --- a/drivers/interconnect/qcom/msm8939.c +++ b/drivers/interconnect/qcom/msm8939.c @@ -1282,6 +1282,7 @@ static const struct regmap_config msm8939_snoc_regmap_config = { }; static struct qcom_icc_desc msm8939_snoc = { + .type = QCOM_ICC_NOC, .nodes = msm8939_snoc_nodes, .num_nodes = ARRAY_SIZE(msm8939_snoc_nodes), .regmap_cfg = &msm8939_snoc_regmap_config, @@ -1309,6 +1310,7 @@ static const struct regmap_config msm8939_snoc_mm_regmap_config = { }; static struct qcom_icc_desc msm8939_snoc_mm = { + .type = QCOM_ICC_NOC, .nodes = msm8939_snoc_mm_nodes, .num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes), .regmap_cfg = 
&msm8939_snoc_mm_regmap_config, @@ -1336,9 +1338,9 @@ static const struct regmap_config msm8939_bimc_regmap_config = { }; static struct qcom_icc_desc msm8939_bimc = { + .type = QCOM_ICC_BIMC, .nodes = msm8939_bimc_nodes, .num_nodes = ARRAY_SIZE(msm8939_bimc_nodes), - .is_bimc_node = true, .regmap_cfg = &msm8939_bimc_regmap_config, .qos_offset = 0x8000, }; @@ -1407,6 +1409,7 @@ static const struct regmap_config msm8939_pcnoc_regmap_config = { }; static struct qcom_icc_desc msm8939_pcnoc = { + .type = QCOM_ICC_NOC, .nodes = msm8939_pcnoc_nodes, .num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes), .regmap_cfg = &msm8939_pcnoc_regmap_config, diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c new file mode 100644 index 000000000000..499e11fbbd2e --- /dev/null +++ b/drivers/interconnect/qcom/msm8996.c @@ -0,0 +1,2110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm MSM8996 Network-on-Chip (NoC) QoS driver + * + * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/interconnect-provider.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/interconnect/qcom,msm8996.h> + +#include "icc-rpm.h" +#include "smd-rpm.h" +#include "msm8996.h" + +static const char * const bus_mm_clocks[] = { + "bus", + "bus_a", + "iface" +}; + +static const char * const bus_a0noc_clocks[] = { + "aggre0_snoc_axi", + "aggre0_cnoc_ahb", + "aggre0_noc_mpu_cfg" +}; + +static const u16 mas_a0noc_common_links[] = { + MSM8996_SLAVE_A0NOC_SNOC +}; + +static struct qcom_icc_node mas_pcie_0 = { + .name = "mas_pcie_0", + .id = MSM8996_MASTER_PCIE_0, + .buswidth = 8, + .mas_rpm_id = 65, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 0, + .num_links = ARRAY_SIZE(mas_a0noc_common_links), + .links = mas_a0noc_common_links +}; + +static struct qcom_icc_node mas_pcie_1 = { + .name = "mas_pcie_1", + .id = MSM8996_MASTER_PCIE_1, + .buswidth = 8, + .mas_rpm_id = 66, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 1, + .num_links = ARRAY_SIZE(mas_a0noc_common_links), + .links = mas_a0noc_common_links +}; + +static struct qcom_icc_node mas_pcie_2 = { + .name = "mas_pcie_2", + .id = MSM8996_MASTER_PCIE_2, + .buswidth = 8, + .mas_rpm_id = 119, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 2, + .num_links = ARRAY_SIZE(mas_a0noc_common_links), + .links = mas_a0noc_common_links +}; + +static const u16 mas_a1noc_common_links[] = { + MSM8996_SLAVE_A1NOC_SNOC +}; + +static struct qcom_icc_node mas_cnoc_a1noc = { + .name = "mas_cnoc_a1noc", + .id = MSM8996_MASTER_CNOC_A1NOC, + .buswidth = 8, + .mas_rpm_id = 116, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(mas_a1noc_common_links), + .links = mas_a1noc_common_links +}; + +static struct qcom_icc_node mas_crypto_c0 = { + .name = "mas_crypto_c0", + .id = MSM8996_MASTER_CRYPTO_CORE0, + .buswidth = 8, + .mas_rpm_id = 23, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 0, + .num_links = 
ARRAY_SIZE(mas_a1noc_common_links), + .links = mas_a1noc_common_links +}; + +static struct qcom_icc_node mas_pnoc_a1noc = { + .name = "mas_pnoc_a1noc", + .id = MSM8996_MASTER_PNOC_A1NOC, + .buswidth = 8, + .mas_rpm_id = 117, + .slv_rpm_id = -1, + .qos.ap_owned = false, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 1, + .num_links = ARRAY_SIZE(mas_a1noc_common_links), + .links = mas_a1noc_common_links +}; + +static const u16 mas_a2noc_common_links[] = { + MSM8996_SLAVE_A2NOC_SNOC +}; + +static struct qcom_icc_node mas_usb3 = { + .name = "mas_usb3", + .id = MSM8996_MASTER_USB3, + .buswidth = 8, + .mas_rpm_id = 32, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 3, + .num_links = ARRAY_SIZE(mas_a2noc_common_links), + .links = mas_a2noc_common_links +}; + +static struct qcom_icc_node mas_ipa = { + .name = "mas_ipa", + .id = MSM8996_MASTER_IPA, + .buswidth = 8, + .mas_rpm_id = 59, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = -1, + .num_links = ARRAY_SIZE(mas_a2noc_common_links), + .links = mas_a2noc_common_links +}; + +static struct qcom_icc_node mas_ufs = { + .name = "mas_ufs", + .id = MSM8996_MASTER_UFS, + .buswidth = 8, + .mas_rpm_id = 68, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 2, + .num_links = ARRAY_SIZE(mas_a2noc_common_links), + .links = mas_a2noc_common_links +}; + +static const u16 mas_apps_proc_links[] = { + MSM8996_SLAVE_BIMC_SNOC_1, + MSM8996_SLAVE_EBI_CH0, + MSM8996_SLAVE_BIMC_SNOC_0 +}; + +static struct qcom_icc_node mas_apps_proc = { + .name = "mas_apps_proc", + .id = MSM8996_MASTER_AMPSS_M0, + .buswidth = 8, + .mas_rpm_id = 0, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 0, + .num_links = ARRAY_SIZE(mas_apps_proc_links), + .links = mas_apps_proc_links +}; + +static const u16 mas_oxili_common_links[] = { + MSM8996_SLAVE_BIMC_SNOC_1, + MSM8996_SLAVE_HMSS_L3, + MSM8996_SLAVE_EBI_CH0, + MSM8996_SLAVE_BIMC_SNOC_0 +}; + +static struct qcom_icc_node mas_oxili = { + .name = "mas_oxili", + .id = MSM8996_MASTER_GRAPHICS_3D, + .buswidth = 8, + .mas_rpm_id = 6, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 1, + .num_links = ARRAY_SIZE(mas_oxili_common_links), + .links = mas_oxili_common_links +}; + +static struct qcom_icc_node mas_mnoc_bimc = { + .name = "mas_mnoc_bimc", + .id = MSM8996_MASTER_MNOC_BIMC, + .buswidth = 8, + .mas_rpm_id = 2, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 2, + .num_links = ARRAY_SIZE(mas_oxili_common_links), + .links = mas_oxili_common_links +}; + +static const u16 mas_snoc_bimc_links[] = { + MSM8996_SLAVE_HMSS_L3, + MSM8996_SLAVE_EBI_CH0 +}; + +static struct qcom_icc_node mas_snoc_bimc = { + .name = "mas_snoc_bimc", + .id = MSM8996_MASTER_SNOC_BIMC, + .buswidth = 8, + .mas_rpm_id = 3, + .slv_rpm_id = -1, + .qos.ap_owned = false, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = -1, + .num_links = ARRAY_SIZE(mas_snoc_bimc_links), + .links = mas_snoc_bimc_links +}; 
+ +static const u16 mas_snoc_cnoc_links[] = { + MSM8996_SLAVE_CLK_CTL, + MSM8996_SLAVE_RBCPR_CX, + MSM8996_SLAVE_A2NOC_SMMU_CFG, + MSM8996_SLAVE_A0NOC_MPU_CFG, + MSM8996_SLAVE_MESSAGE_RAM, + MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG, + MSM8996_SLAVE_PCIE_0_CFG, + MSM8996_SLAVE_TLMM, + MSM8996_SLAVE_MPM, + MSM8996_SLAVE_A0NOC_SMMU_CFG, + MSM8996_SLAVE_EBI1_PHY_CFG, + MSM8996_SLAVE_BIMC_CFG, + MSM8996_SLAVE_PIMEM_CFG, + MSM8996_SLAVE_RBCPR_MX, + MSM8996_SLAVE_PRNG, + MSM8996_SLAVE_PCIE20_AHB2PHY, + MSM8996_SLAVE_A2NOC_MPU_CFG, + MSM8996_SLAVE_QDSS_CFG, + MSM8996_SLAVE_A2NOC_CFG, + MSM8996_SLAVE_A0NOC_CFG, + MSM8996_SLAVE_UFS_CFG, + MSM8996_SLAVE_CRYPTO_0_CFG, + MSM8996_SLAVE_PCIE_1_CFG, + MSM8996_SLAVE_SNOC_CFG, + MSM8996_SLAVE_SNOC_MPU_CFG, + MSM8996_SLAVE_A1NOC_MPU_CFG, + MSM8996_SLAVE_A1NOC_SMMU_CFG, + MSM8996_SLAVE_PCIE_2_CFG, + MSM8996_SLAVE_CNOC_MNOC_CFG, + MSM8996_SLAVE_QDSS_RBCPR_APU_CFG, + MSM8996_SLAVE_PMIC_ARB, + MSM8996_SLAVE_IMEM_CFG, + MSM8996_SLAVE_A1NOC_CFG, + MSM8996_SLAVE_SSC_CFG, + MSM8996_SLAVE_TCSR, + MSM8996_SLAVE_LPASS_SMMU_CFG, + MSM8996_SLAVE_DCC_CFG +}; + +static struct qcom_icc_node mas_snoc_cnoc = { + .name = "mas_snoc_cnoc", + .id = MSM8996_MASTER_SNOC_CNOC, + .buswidth = 8, + .mas_rpm_id = 52, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_snoc_cnoc_links), + .links = mas_snoc_cnoc_links +}; + +static const u16 mas_qdss_dap_links[] = { + MSM8996_SLAVE_QDSS_RBCPR_APU_CFG, + MSM8996_SLAVE_RBCPR_CX, + MSM8996_SLAVE_A2NOC_SMMU_CFG, + MSM8996_SLAVE_A0NOC_MPU_CFG, + MSM8996_SLAVE_MESSAGE_RAM, + MSM8996_SLAVE_PCIE_0_CFG, + MSM8996_SLAVE_TLMM, + MSM8996_SLAVE_MPM, + MSM8996_SLAVE_A0NOC_SMMU_CFG, + MSM8996_SLAVE_EBI1_PHY_CFG, + MSM8996_SLAVE_BIMC_CFG, + MSM8996_SLAVE_PIMEM_CFG, + MSM8996_SLAVE_RBCPR_MX, + MSM8996_SLAVE_CLK_CTL, + MSM8996_SLAVE_PRNG, + MSM8996_SLAVE_PCIE20_AHB2PHY, + MSM8996_SLAVE_A2NOC_MPU_CFG, + MSM8996_SLAVE_QDSS_CFG, + MSM8996_SLAVE_A2NOC_CFG, + MSM8996_SLAVE_A0NOC_CFG, + MSM8996_SLAVE_UFS_CFG, + MSM8996_SLAVE_CRYPTO_0_CFG, + MSM8996_SLAVE_CNOC_A1NOC, + MSM8996_SLAVE_PCIE_1_CFG, + MSM8996_SLAVE_SNOC_CFG, + MSM8996_SLAVE_SNOC_MPU_CFG, + MSM8996_SLAVE_A1NOC_MPU_CFG, + MSM8996_SLAVE_A1NOC_SMMU_CFG, + MSM8996_SLAVE_PCIE_2_CFG, + MSM8996_SLAVE_CNOC_MNOC_CFG, + MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG, + MSM8996_SLAVE_PMIC_ARB, + MSM8996_SLAVE_IMEM_CFG, + MSM8996_SLAVE_A1NOC_CFG, + MSM8996_SLAVE_SSC_CFG, + MSM8996_SLAVE_TCSR, + MSM8996_SLAVE_LPASS_SMMU_CFG, + MSM8996_SLAVE_DCC_CFG +}; + +static struct qcom_icc_node mas_qdss_dap = { + .name = "mas_qdss_dap", + .id = MSM8996_MASTER_QDSS_DAP, + .buswidth = 8, + .mas_rpm_id = 49, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(mas_qdss_dap_links), + .links = mas_qdss_dap_links +}; + +static const u16 mas_cnoc_mnoc_mmss_cfg_links[] = { + MSM8996_SLAVE_MMAGIC_CFG, + MSM8996_SLAVE_DSA_MPU_CFG, + MSM8996_SLAVE_MMSS_CLK_CFG, + MSM8996_SLAVE_CAMERA_THROTTLE_CFG, + MSM8996_SLAVE_VENUS_CFG, + MSM8996_SLAVE_SMMU_VFE_CFG, + MSM8996_SLAVE_MISC_CFG, + MSM8996_SLAVE_SMMU_CPP_CFG, + MSM8996_SLAVE_GRAPHICS_3D_CFG, + MSM8996_SLAVE_DISPLAY_THROTTLE_CFG, + MSM8996_SLAVE_VENUS_THROTTLE_CFG, + MSM8996_SLAVE_CAMERA_CFG, + MSM8996_SLAVE_DISPLAY_CFG, + MSM8996_SLAVE_CPR_CFG, + MSM8996_SLAVE_SMMU_ROTATOR_CFG, + MSM8996_SLAVE_DSA_CFG, + MSM8996_SLAVE_SMMU_VENUS_CFG, + MSM8996_SLAVE_VMEM_CFG, + MSM8996_SLAVE_SMMU_JPEG_CFG, + MSM8996_SLAVE_SMMU_MDP_CFG, + MSM8996_SLAVE_MNOC_MPU_CFG +}; + +static struct qcom_icc_node mas_cnoc_mnoc_mmss_cfg = { + .name = "mas_cnoc_mnoc_mmss_cfg", + .id = 
+	.buswidth = 8,
+	.mas_rpm_id = 4,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_cnoc_mnoc_mmss_cfg_links),
+	.links = mas_cnoc_mnoc_mmss_cfg_links
+};
+
+static const u16 mas_cnoc_mnoc_cfg_links[] = {
+	MSM8996_SLAVE_SERVICE_MNOC
+};
+
+static struct qcom_icc_node mas_cnoc_mnoc_cfg = {
+	.name = "mas_cnoc_mnoc_cfg",
+	.id = MSM8996_MASTER_CNOC_MNOC_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = 5,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_cnoc_mnoc_cfg_links),
+	.links = mas_cnoc_mnoc_cfg_links
+};
+
+static const u16 mas_mnoc_bimc_common_links[] = {
+	MSM8996_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_cpp = {
+	.name = "mas_cpp",
+	.id = MSM8996_MASTER_CPP,
+	.buswidth = 32,
+	.mas_rpm_id = 115,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.qos.areq_prio = 0,
+	.qos.prio_level = 0,
+	.qos.qos_port = 5,
+	.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+	.links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_jpeg = {
+	.name = "mas_jpeg",
+	.id = MSM8996_MASTER_JPEG,
+	.buswidth = 32,
+	.mas_rpm_id = 7,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.qos.areq_prio = 0,
+	.qos.prio_level = 0,
+	.qos.qos_port = 7,
+	.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+	.links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_mdp_p0 = {
+	.name = "mas_mdp_p0",
+	.id = MSM8996_MASTER_MDP_PORT0,
+	.buswidth = 32,
+	.mas_rpm_id = 8,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.qos.areq_prio = 0,
+	.qos.prio_level = 0,
+	.qos.qos_port = 1,
+	.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+	.links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_mdp_p1 = {
+	.name = "mas_mdp_p1",
+	.id = MSM8996_MASTER_MDP_PORT1,
+	.buswidth = 32,
+	.mas_rpm_id = 61,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.qos.areq_prio = 0,
+	.qos.prio_level = 0,
+	.qos.qos_port = 2,
+	.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+	.links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_rotator = {
+	.name = "mas_rotator",
+	.id = MSM8996_MASTER_ROTATOR,
+	.buswidth = 32,
+	.mas_rpm_id = 120,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.qos.areq_prio = 0,
+	.qos.prio_level = 0,
+	.qos.qos_port = 0,
+	.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+	.links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_venus = {
+	.name = "mas_venus",
+	.id = MSM8996_MASTER_VIDEO_P0,
+	.buswidth = 32,
+	.mas_rpm_id = 9,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.qos.areq_prio = 0,
+	.qos.prio_level = 0,
+	.qos.qos_port = 3,
+	.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+	.links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_vfe = {
+	.name = "mas_vfe",
+	.id = MSM8996_MASTER_VFE,
+	.buswidth = 32,
+	.mas_rpm_id = 11,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.qos.areq_prio = 0,
+	.qos.prio_level = 0,
+	.qos.qos_port = 6,
+	.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+	.links = mas_mnoc_bimc_common_links
+};
+
+static const u16 mas_vmem_common_links[] = {
+	MSM8996_SLAVE_VMEM
+};
+
+static struct qcom_icc_node mas_snoc_vmem = {
+	.name = "mas_snoc_vmem",
+	.id = MSM8996_MASTER_SNOC_VMEM,
+	.buswidth = 32,
+	.mas_rpm_id = 114,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_vmem_common_links),
+	.links = mas_vmem_common_links
+};
+
+static struct qcom_icc_node mas_venus_vmem = {
+	.name = "mas_venus_vmem",
+	.id = MSM8996_MASTER_VIDEO_P0_OCMEM,
+	.buswidth = 32,
+	.mas_rpm_id = 121,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_vmem_common_links),
+	.links = mas_vmem_common_links
+};
+
+static const u16 mas_snoc_pnoc_links[] = {
+	MSM8996_SLAVE_BLSP_1,
+	MSM8996_SLAVE_BLSP_2,
+	MSM8996_SLAVE_SDCC_1,
+	MSM8996_SLAVE_SDCC_2,
+	MSM8996_SLAVE_SDCC_4,
+	MSM8996_SLAVE_TSIF,
+	MSM8996_SLAVE_PDM,
+	MSM8996_SLAVE_AHB2PHY
+};
+
+static struct qcom_icc_node mas_snoc_pnoc = {
+	.name = "mas_snoc_pnoc",
+	.id = MSM8996_MASTER_SNOC_PNOC,
+	.buswidth = 8,
+	.mas_rpm_id = 44,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_snoc_pnoc_links),
+	.links = mas_snoc_pnoc_links
+};
+
+static const u16 mas_pnoc_a1noc_common_links[] = {
+	MSM8996_SLAVE_PNOC_A1NOC
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+	.name = "mas_sdcc_1",
+	.id = MSM8996_MASTER_SDCC_1,
+	.buswidth = 8,
+	.mas_rpm_id = 33,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+	.links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+	.name = "mas_sdcc_2",
+	.id = MSM8996_MASTER_SDCC_2,
+	.buswidth = 8,
+	.mas_rpm_id = 35,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+	.links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_sdcc_4 = {
+	.name = "mas_sdcc_4",
+	.id = MSM8996_MASTER_SDCC_4,
+	.buswidth = 8,
+	.mas_rpm_id = 36,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+	.links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_usb_hs = {
+	.name = "mas_usb_hs",
+	.id = MSM8996_MASTER_USB_HS,
+	.buswidth = 8,
+	.mas_rpm_id = 42,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+	.links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+	.name = "mas_blsp_1",
+	.id = MSM8996_MASTER_BLSP_1,
+	.buswidth = 4,
+	.mas_rpm_id = 41,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+	.links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+	.name = "mas_blsp_2",
+	.id = MSM8996_MASTER_BLSP_2,
+	.buswidth = 4,
+	.mas_rpm_id = 39,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+	.links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_tsif = {
+	.name = "mas_tsif",
+	.id = MSM8996_MASTER_TSIF,
+	.buswidth = 4,
+	.mas_rpm_id = 37,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+	.links = mas_pnoc_a1noc_common_links
+};
+
+static const u16 mas_hmss_links[] = {
+	MSM8996_SLAVE_PIMEM,
+	MSM8996_SLAVE_OCIMEM,
+	MSM8996_SLAVE_SNOC_BIMC
+};
+
+static struct qcom_icc_node mas_hmss = {
+	.name = "mas_hmss",
+	.id = MSM8996_MASTER_HMSS,
+	.buswidth = 8,
+	.mas_rpm_id = 118,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_FIXED,
+	.qos.areq_prio = 1,
+	.qos.prio_level = 1,
+	.qos.qos_port = 4,
+	.num_links = ARRAY_SIZE(mas_hmss_links),
+	.links = mas_hmss_links
+};
+
+static const u16 mas_qdss_common_links[] = {
+	MSM8996_SLAVE_PIMEM,
+	MSM8996_SLAVE_USB3,
+	MSM8996_SLAVE_OCIMEM,
+	MSM8996_SLAVE_SNOC_BIMC,
+	MSM8996_SLAVE_SNOC_PNOC
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+	.name = "mas_qdss_bam",
+	.id = MSM8996_MASTER_QDSS_BAM,
+	.buswidth = 16,
+	.mas_rpm_id = 19,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_FIXED,
+	.qos.areq_prio = 1,
+	.qos.prio_level = 1,
+	.qos.qos_port = 2,
+	.num_links = ARRAY_SIZE(mas_qdss_common_links),
+	.links = mas_qdss_common_links
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+	MSM8996_SLAVE_SERVICE_SNOC
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+	.name = "mas_snoc_cfg",
+	.id = MSM8996_MASTER_SNOC_CFG,
+	.buswidth = 16,
+	.mas_rpm_id = 20,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+	.links = mas_snoc_cfg_links
+};
+
+static const u16 mas_bimc_snoc_0_links[] = {
+	MSM8996_SLAVE_SNOC_VMEM,
+	MSM8996_SLAVE_USB3,
+	MSM8996_SLAVE_PIMEM,
+	MSM8996_SLAVE_LPASS,
+	MSM8996_SLAVE_APPSS,
+	MSM8996_SLAVE_SNOC_CNOC,
+	MSM8996_SLAVE_SNOC_PNOC,
+	MSM8996_SLAVE_OCIMEM,
+	MSM8996_SLAVE_QDSS_STM
+};
+
+static struct qcom_icc_node mas_bimc_snoc_0 = {
+	.name = "mas_bimc_snoc_0",
+	.id = MSM8996_MASTER_BIMC_SNOC_0,
+	.buswidth = 16,
+	.mas_rpm_id = 21,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_bimc_snoc_0_links),
+	.links = mas_bimc_snoc_0_links
+};
+
+static const u16 mas_bimc_snoc_1_links[] = {
+	MSM8996_SLAVE_PCIE_2,
+	MSM8996_SLAVE_PCIE_1,
+	MSM8996_SLAVE_PCIE_0
+};
+
+static struct qcom_icc_node mas_bimc_snoc_1 = {
+	.name = "mas_bimc_snoc_1",
+	.id = MSM8996_MASTER_BIMC_SNOC_1,
+	.buswidth = 16,
+	.mas_rpm_id = 109,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_bimc_snoc_1_links),
+	.links = mas_bimc_snoc_1_links
+};
+
+static const u16 mas_a0noc_snoc_links[] = {
+	MSM8996_SLAVE_SNOC_PNOC,
+	MSM8996_SLAVE_OCIMEM,
+	MSM8996_SLAVE_APPSS,
+	MSM8996_SLAVE_SNOC_BIMC,
+	MSM8996_SLAVE_PIMEM
+};
+
+static struct qcom_icc_node mas_a0noc_snoc = {
+	.name = "mas_a0noc_snoc",
+	.id = MSM8996_MASTER_A0NOC_SNOC,
+	.buswidth = 16,
+	.mas_rpm_id = 110,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(mas_a0noc_snoc_links),
+	.links = mas_a0noc_snoc_links
+};
+
+static const u16 mas_a1noc_snoc_links[] = {
+	MSM8996_SLAVE_SNOC_VMEM,
+	MSM8996_SLAVE_USB3,
+	MSM8996_SLAVE_PCIE_0,
+	MSM8996_SLAVE_PIMEM,
+	MSM8996_SLAVE_PCIE_2,
+	MSM8996_SLAVE_LPASS,
+	MSM8996_SLAVE_PCIE_1,
+	MSM8996_SLAVE_APPSS,
+	MSM8996_SLAVE_SNOC_BIMC,
+	MSM8996_SLAVE_SNOC_CNOC,
+	MSM8996_SLAVE_SNOC_PNOC,
+	MSM8996_SLAVE_OCIMEM,
+	MSM8996_SLAVE_QDSS_STM
+};
+
+static struct qcom_icc_node mas_a1noc_snoc = {
+	.name = "mas_a1noc_snoc",
+	.id = MSM8996_MASTER_A1NOC_SNOC,
+	.buswidth = 16,
+	.mas_rpm_id = 111,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_a1noc_snoc_links),
+	.links = mas_a1noc_snoc_links
+};
+
+static const u16 mas_a2noc_snoc_links[] = {
+	MSM8996_SLAVE_SNOC_VMEM,
+	MSM8996_SLAVE_USB3,
+	MSM8996_SLAVE_PCIE_1,
+	MSM8996_SLAVE_PIMEM,
+	MSM8996_SLAVE_PCIE_2,
+	MSM8996_SLAVE_QDSS_STM,
+	MSM8996_SLAVE_LPASS,
+	MSM8996_SLAVE_SNOC_BIMC,
+	MSM8996_SLAVE_SNOC_CNOC,
+	MSM8996_SLAVE_SNOC_PNOC,
+	MSM8996_SLAVE_OCIMEM,
+	MSM8996_SLAVE_PCIE_0
+};
+
+static struct qcom_icc_node mas_a2noc_snoc = {
+	.name = "mas_a2noc_snoc",
+	.id = MSM8996_MASTER_A2NOC_SNOC,
+	.buswidth = 16,
+	.mas_rpm_id = 112,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_a2noc_snoc_links),
+	.links = mas_a2noc_snoc_links
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+	.name = "mas_qdss_etr",
+	.id = MSM8996_MASTER_QDSS_ETR,
+	.buswidth = 16,
+	.mas_rpm_id = 31,
+	.slv_rpm_id = -1,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_FIXED,
+	.qos.areq_prio = 1,
+	.qos.prio_level = 1,
+	.qos.qos_port = 3,
+	.num_links = ARRAY_SIZE(mas_qdss_common_links),
+	.links = mas_qdss_common_links
+};
+
+static const u16 slv_a0noc_snoc_links[] = {
+	MSM8996_MASTER_A0NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a0noc_snoc = {
+	.name = "slv_a0noc_snoc",
+	.id = MSM8996_SLAVE_A0NOC_SNOC,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 141,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_a0noc_snoc_links),
+	.links = slv_a0noc_snoc_links
+};
+
+static const u16 slv_a1noc_snoc_links[] = {
+	MSM8996_MASTER_A1NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a1noc_snoc = {
+	.name = "slv_a1noc_snoc",
+	.id = MSM8996_SLAVE_A1NOC_SNOC,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 142,
+	.num_links = ARRAY_SIZE(slv_a1noc_snoc_links),
+	.links = slv_a1noc_snoc_links
+};
+
+static const u16 slv_a2noc_snoc_links[] = {
+	MSM8996_MASTER_A2NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a2noc_snoc = {
+	.name = "slv_a2noc_snoc",
+	.id = MSM8996_SLAVE_A2NOC_SNOC,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 143,
+	.num_links = ARRAY_SIZE(slv_a2noc_snoc_links),
+	.links = slv_a2noc_snoc_links
+};
+
+static struct qcom_icc_node slv_ebi = {
+	.name = "slv_ebi",
+	.id = MSM8996_SLAVE_EBI_CH0,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 0
+};
+
+static struct qcom_icc_node slv_hmss_l3 = {
+	.name = "slv_hmss_l3",
+	.id = MSM8996_SLAVE_HMSS_L3,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 160
+};
+
+static const u16 slv_bimc_snoc_0_links[] = {
+	MSM8996_MASTER_BIMC_SNOC_0
+};
+
+static struct qcom_icc_node slv_bimc_snoc_0 = {
+	.name = "slv_bimc_snoc_0",
+	.id = MSM8996_SLAVE_BIMC_SNOC_0,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 2,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_bimc_snoc_0_links),
+	.links = slv_bimc_snoc_0_links
+};
+
+static const u16 slv_bimc_snoc_1_links[] = {
+	MSM8996_MASTER_BIMC_SNOC_1
+};
+
+static struct qcom_icc_node slv_bimc_snoc_1 = {
+	.name = "slv_bimc_snoc_1",
+	.id = MSM8996_SLAVE_BIMC_SNOC_1,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 138,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_bimc_snoc_1_links),
+	.links = slv_bimc_snoc_1_links
+};
+
+static const u16 slv_cnoc_a1noc_links[] = {
+	MSM8996_MASTER_CNOC_A1NOC
+};
+
+static struct qcom_icc_node slv_cnoc_a1noc = {
+	.name = "slv_cnoc_a1noc",
+	.id = MSM8996_SLAVE_CNOC_A1NOC,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 75,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_cnoc_a1noc_links),
+	.links = slv_cnoc_a1noc_links
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+	.name = "slv_clk_ctl",
+	.id = MSM8996_SLAVE_CLK_CTL,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 47
+};
+
+static struct qcom_icc_node slv_tcsr = {
+	.name = "slv_tcsr",
+	.id = MSM8996_SLAVE_TCSR,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 50
+};
+
+static struct qcom_icc_node slv_tlmm = {
+	.name = "slv_tlmm",
+	.id = MSM8996_SLAVE_TLMM,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 51
+};
+
+static struct qcom_icc_node slv_crypto0_cfg = {
+	.name = "slv_crypto0_cfg",
+	.id = MSM8996_SLAVE_CRYPTO_0_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 52,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mpm = {
+	.name = "slv_mpm",
+	.id = MSM8996_SLAVE_MPM,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 62,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pimem_cfg = {
+	.name = "slv_pimem_cfg",
+	.id = MSM8996_SLAVE_PIMEM_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 167,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+	.name = "slv_imem_cfg",
+	.id = MSM8996_SLAVE_IMEM_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 54,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_message_ram = {
+	.name = "slv_message_ram",
+	.id = MSM8996_SLAVE_MESSAGE_RAM,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 55
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+	.name = "slv_bimc_cfg",
+	.id = MSM8996_SLAVE_BIMC_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 56,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+	.name = "slv_pmic_arb",
+	.id = MSM8996_SLAVE_PMIC_ARB,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 59
+};
+
+static struct qcom_icc_node slv_prng = {
+	.name = "slv_prng",
+	.id = MSM8996_SLAVE_PRNG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 127,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dcc_cfg = {
+	.name = "slv_dcc_cfg",
+	.id = MSM8996_SLAVE_DCC_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 155,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_rbcpr_mx = {
+	.name = "slv_rbcpr_mx",
+	.id = MSM8996_SLAVE_RBCPR_MX,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 170,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+	.name = "slv_qdss_cfg",
+	.id = MSM8996_SLAVE_QDSS_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 63,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_rbcpr_cx = {
+	.name = "slv_rbcpr_cx",
+	.id = MSM8996_SLAVE_RBCPR_CX,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 169,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_cpu_apu_cfg = {
+	.name = "slv_cpu_apu_cfg",
+	.id = MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 168,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_cnoc_mnoc_cfg_links[] = {
+	MSM8996_MASTER_CNOC_MNOC_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_cfg = {
+	.name = "slv_cnoc_mnoc_cfg",
+	.id = MSM8996_SLAVE_CNOC_MNOC_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 66,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_cnoc_mnoc_cfg_links),
+	.links = slv_cnoc_mnoc_cfg_links
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+	.name = "slv_snoc_cfg",
+	.id = MSM8996_SLAVE_SNOC_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 70,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_snoc_mpu_cfg = {
+	.name = "slv_snoc_mpu_cfg",
"slv_snoc_mpu_cfg", + .id = MSM8996_SLAVE_SNOC_MPU_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 67, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_ebi1_phy_cfg = { + .name = "slv_ebi1_phy_cfg", + .id = MSM8996_SLAVE_EBI1_PHY_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 73, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_a0noc_cfg = { + .name = "slv_a0noc_cfg", + .id = MSM8996_SLAVE_A0NOC_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 144, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_pcie_1_cfg = { + .name = "slv_pcie_1_cfg", + .id = MSM8996_SLAVE_PCIE_1_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 89, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_pcie_2_cfg = { + .name = "slv_pcie_2_cfg", + .id = MSM8996_SLAVE_PCIE_2_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 165, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_pcie_0_cfg = { + .name = "slv_pcie_0_cfg", + .id = MSM8996_SLAVE_PCIE_0_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 88, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_pcie20_ahb2phy = { + .name = "slv_pcie20_ahb2phy", + .id = MSM8996_SLAVE_PCIE20_AHB2PHY, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 163, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_a0noc_mpu_cfg = { + .name = "slv_a0noc_mpu_cfg", + .id = MSM8996_SLAVE_A0NOC_MPU_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 145, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_ufs_cfg = { + .name = "slv_ufs_cfg", + .id = MSM8996_SLAVE_UFS_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 92, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_a1noc_cfg = { + .name = "slv_a1noc_cfg", + .id = MSM8996_SLAVE_A1NOC_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 147, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_a1noc_mpu_cfg = { + .name = "slv_a1noc_mpu_cfg", + .id = MSM8996_SLAVE_A1NOC_MPU_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 148, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_a2noc_cfg = { + .name = "slv_a2noc_cfg", + .id = MSM8996_SLAVE_A2NOC_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 150, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_a2noc_mpu_cfg = { + .name = "slv_a2noc_mpu_cfg", + .id = MSM8996_SLAVE_A2NOC_MPU_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 151, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_ssc_cfg = { + .name = "slv_ssc_cfg", + .id = MSM8996_SLAVE_SSC_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 177, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node slv_a0noc_smmu_cfg = { + .name = "slv_a0noc_smmu_cfg", + .id = MSM8996_SLAVE_A0NOC_SMMU_CFG, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 146, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID +}; + +static struct qcom_icc_node 
+	.name = "slv_a1noc_smmu_cfg",
+	.id = MSM8996_SLAVE_A1NOC_SMMU_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 149,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a2noc_smmu_cfg = {
+	.name = "slv_a2noc_smmu_cfg",
+	.id = MSM8996_SLAVE_A2NOC_SMMU_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 152,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_lpass_smmu_cfg = {
+	.name = "slv_lpass_smmu_cfg",
+	.id = MSM8996_SLAVE_LPASS_SMMU_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 161,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_cnoc_mnoc_mmss_cfg_links[] = {
+	MSM8996_MASTER_CNOC_MNOC_MMSS_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_mmss_cfg = {
+	.name = "slv_cnoc_mnoc_mmss_cfg",
+	.id = MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 58,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_cnoc_mnoc_mmss_cfg_links),
+	.links = slv_cnoc_mnoc_mmss_cfg_links
+};
+
+static struct qcom_icc_node slv_mmagic_cfg = {
+	.name = "slv_mmagic_cfg",
+	.id = MSM8996_SLAVE_MMAGIC_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 162,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_cpr_cfg = {
+	.name = "slv_cpr_cfg",
+	.id = MSM8996_SLAVE_CPR_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 6,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_misc_cfg = {
+	.name = "slv_misc_cfg",
+	.id = MSM8996_SLAVE_MISC_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 8,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_venus_throttle_cfg = {
+	.name = "slv_venus_throttle_cfg",
+	.id = MSM8996_SLAVE_VENUS_THROTTLE_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 178,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+	.name = "slv_venus_cfg",
+	.id = MSM8996_SLAVE_VENUS_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 10,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_vmem_cfg = {
+	.name = "slv_vmem_cfg",
+	.id = MSM8996_SLAVE_VMEM_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 180,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dsa_cfg = {
+	.name = "slv_dsa_cfg",
+	.id = MSM8996_SLAVE_DSA_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 157,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mnoc_clocks_cfg = {
+	.name = "slv_mnoc_clocks_cfg",
+	.id = MSM8996_SLAVE_MMSS_CLK_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 12,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dsa_mpu_cfg = {
+	.name = "slv_dsa_mpu_cfg",
+	.id = MSM8996_SLAVE_DSA_MPU_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 158,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mnoc_mpu_cfg = {
+	.name = "slv_mnoc_mpu_cfg",
+	.id = MSM8996_SLAVE_MNOC_MPU_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 14,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+	.name = "slv_display_cfg",
+	.id = MSM8996_SLAVE_DISPLAY_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 4,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_display_throttle_cfg = {
+	.name = "slv_display_throttle_cfg",
+	.id = MSM8996_SLAVE_DISPLAY_THROTTLE_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 156,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+	.name = "slv_camera_cfg",
+	.id = MSM8996_SLAVE_CAMERA_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 3,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_camera_throttle_cfg = {
+	.name = "slv_camera_throttle_cfg",
+	.id = MSM8996_SLAVE_CAMERA_THROTTLE_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 154,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_oxili_cfg = {
+	.name = "slv_oxili_cfg",
+	.id = MSM8996_SLAVE_GRAPHICS_3D_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 11,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_mdp_cfg = {
+	.name = "slv_smmu_mdp_cfg",
+	.id = MSM8996_SLAVE_SMMU_MDP_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 173,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_rot_cfg = {
+	.name = "slv_smmu_rot_cfg",
+	.id = MSM8996_SLAVE_SMMU_ROTATOR_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 174,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_venus_cfg = {
+	.name = "slv_smmu_venus_cfg",
+	.id = MSM8996_SLAVE_SMMU_VENUS_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 175,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_cpp_cfg = {
+	.name = "slv_smmu_cpp_cfg",
+	.id = MSM8996_SLAVE_SMMU_CPP_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 171,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_jpeg_cfg = {
+	.name = "slv_smmu_jpeg_cfg",
+	.id = MSM8996_SLAVE_SMMU_JPEG_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 172,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_vfe_cfg = {
+	.name = "slv_smmu_vfe_cfg",
+	.id = MSM8996_SLAVE_SMMU_VFE_CFG,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 176,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_mnoc_bimc_links[] = {
+	MSM8996_MASTER_MNOC_BIMC
+};
+
+static struct qcom_icc_node slv_mnoc_bimc = {
+	.name = "slv_mnoc_bimc",
+	.id = MSM8996_SLAVE_MNOC_BIMC,
+	.buswidth = 32,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 16,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_mnoc_bimc_links),
+	.links = slv_mnoc_bimc_links
+};
+
+static struct qcom_icc_node slv_vmem = {
+	.name = "slv_vmem",
+	.id = MSM8996_SLAVE_VMEM,
+	.buswidth = 32,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 179,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_srvc_mnoc = {
+	.name = "slv_srvc_mnoc",
+	.id = MSM8996_SLAVE_SERVICE_MNOC,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 17,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_pnoc_a1noc_links[] = {
+	MSM8996_MASTER_PNOC_A1NOC
+};
+
+static struct qcom_icc_node slv_pnoc_a1noc = {
+	.name = "slv_pnoc_a1noc",
+	.id = MSM8996_SLAVE_PNOC_A1NOC,
+	.buswidth = 8,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 139,
+	.num_links = ARRAY_SIZE(slv_pnoc_a1noc_links),
+	.links = slv_pnoc_a1noc_links
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+	.name = "slv_usb_hs",
+	.id = MSM8996_SLAVE_USB_HS,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 40
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+	.name = "slv_sdcc_2",
+	.id = MSM8996_SLAVE_SDCC_2,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 33
+};
+
+static struct qcom_icc_node slv_sdcc_4 = {
+	.name = "slv_sdcc_4",
+	.id = MSM8996_SLAVE_SDCC_4,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 34
+};
+
+static struct qcom_icc_node slv_tsif = {
+	.name = "slv_tsif",
+	.id = MSM8996_SLAVE_TSIF,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 35
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+	.name = "slv_blsp_2",
+	.id = MSM8996_SLAVE_BLSP_2,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 37
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+	.name = "slv_sdcc_1",
+	.id = MSM8996_SLAVE_SDCC_1,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 31
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+	.name = "slv_blsp_1",
+	.id = MSM8996_SLAVE_BLSP_1,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 39
+};
+
+static struct qcom_icc_node slv_pdm = {
+	.name = "slv_pdm",
+	.id = MSM8996_SLAVE_PDM,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 41
+};
+
+static struct qcom_icc_node slv_ahb2phy = {
+	.name = "slv_ahb2phy",
+	.id = MSM8996_SLAVE_AHB2PHY,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 153,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_hmss = {
+	.name = "slv_hmss",
+	.id = MSM8996_SLAVE_APPSS,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 20,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_lpass = {
+	.name = "slv_lpass",
+	.id = MSM8996_SLAVE_LPASS,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 21,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_usb3 = {
+	.name = "slv_usb3",
+	.id = MSM8996_SLAVE_USB3,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 22,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+	MSM8996_MASTER_SNOC_BIMC
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+	.name = "slv_snoc_bimc",
+	.id = MSM8996_SLAVE_SNOC_BIMC,
+	.buswidth = 32,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 24,
+	.num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+	.links = slv_snoc_bimc_links
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+	MSM8996_MASTER_SNOC_CNOC
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+	.name = "slv_snoc_cnoc",
+	.id = MSM8996_SLAVE_SNOC_CNOC,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 25,
+	.num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+	.links = slv_snoc_cnoc_links
+};
+
+static struct qcom_icc_node slv_imem = {
+	.name = "slv_imem",
+	.id = MSM8996_SLAVE_OCIMEM,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 26
+};
+
+static struct qcom_icc_node slv_pimem = {
+	.name = "slv_pimem",
+	.id = MSM8996_SLAVE_PIMEM,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 166
+};
+
+static const u16 slv_snoc_vmem_links[] = {
+	MSM8996_MASTER_SNOC_VMEM
+};
+
+static struct qcom_icc_node slv_snoc_vmem = {
+	.name = "slv_snoc_vmem",
+	.id = MSM8996_SLAVE_SNOC_VMEM,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 140,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+	.num_links = ARRAY_SIZE(slv_snoc_vmem_links),
+	.links = slv_snoc_vmem_links
+};
+
+static const u16 slv_snoc_pnoc_links[] = {
+	MSM8996_MASTER_SNOC_PNOC
+};
+
+static struct qcom_icc_node slv_snoc_pnoc = {
+	.name = "slv_snoc_pnoc",
+	.id = MSM8996_SLAVE_SNOC_PNOC,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 28,
+	.num_links = ARRAY_SIZE(slv_snoc_pnoc_links),
+	.links = slv_snoc_pnoc_links
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+	.name = "slv_qdss_stm",
+	.id = MSM8996_SLAVE_QDSS_STM,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 30
+};
+
+static struct qcom_icc_node slv_pcie_0 = {
+	.name = "slv_pcie_0",
+	.id = MSM8996_SLAVE_PCIE_0,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 84,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_1 = {
+	.name = "slv_pcie_1",
+	.id = MSM8996_SLAVE_PCIE_1,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 85,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_2 = {
+	.name = "slv_pcie_2",
+	.id = MSM8996_SLAVE_PCIE_2,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 164,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+	.name = "slv_srvc_snoc",
+	.id = MSM8996_SLAVE_SERVICE_SNOC,
+	.buswidth = 16,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 29,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node *a0noc_nodes[] = {
+	[MASTER_PCIE_0] = &mas_pcie_0,
+	[MASTER_PCIE_1] = &mas_pcie_1,
+	[MASTER_PCIE_2] = &mas_pcie_2
+};
+
+static const struct regmap_config msm8996_a0noc_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x9000,
+	.fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a0noc = {
+	.type = QCOM_ICC_NOC,
+	.nodes = a0noc_nodes,
+	.num_nodes = ARRAY_SIZE(a0noc_nodes),
+	.clocks = bus_a0noc_clocks,
+	.num_clocks = ARRAY_SIZE(bus_a0noc_clocks),
+	.has_bus_pd = true,
+	.regmap_cfg = &msm8996_a0noc_regmap_config
+};
+
+static struct qcom_icc_node *a1noc_nodes[] = {
+	[MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc,
+	[MASTER_CRYPTO_CORE0] = &mas_crypto_c0,
+	[MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc
+};
+
+static const struct regmap_config msm8996_a1noc_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7000,
+	.fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a1noc = {
+	.type = QCOM_ICC_NOC,
+	.nodes = a1noc_nodes,
+	.num_nodes = ARRAY_SIZE(a1noc_nodes),
+	.regmap_cfg = &msm8996_a1noc_regmap_config
+};
+
+static struct qcom_icc_node *a2noc_nodes[] = {
+	[MASTER_USB3] = &mas_usb3,
+	[MASTER_IPA] = &mas_ipa,
+	[MASTER_UFS] = &mas_ufs
+};
+
+static const struct regmap_config msm8996_a2noc_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xa000,
+	.fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a2noc = {
+	.type = QCOM_ICC_NOC,
+	.nodes = a2noc_nodes,
+	.num_nodes = ARRAY_SIZE(a2noc_nodes),
+	.regmap_cfg = &msm8996_a2noc_regmap_config
+};
+
+static struct qcom_icc_node *bimc_nodes[] = {
+	[MASTER_AMPSS_M0] = &mas_apps_proc,
+	[MASTER_GRAPHICS_3D] = &mas_oxili,
+	[MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
+	[MASTER_SNOC_BIMC] = &mas_snoc_bimc,
&mas_snoc_bimc, + [SLAVE_EBI_CH0] = &slv_ebi, + [SLAVE_HMSS_L3] = &slv_hmss_l3, + [SLAVE_BIMC_SNOC_0] = &slv_bimc_snoc_0, + [SLAVE_BIMC_SNOC_1] = &slv_bimc_snoc_1 +}; + +static const struct regmap_config msm8996_bimc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x62000, + .fast_io = true +}; + +static const struct qcom_icc_desc msm8996_bimc = { + .type = QCOM_ICC_BIMC, + .nodes = bimc_nodes, + .num_nodes = ARRAY_SIZE(bimc_nodes), + .regmap_cfg = &msm8996_bimc_regmap_config +}; + +static struct qcom_icc_node *cnoc_nodes[] = { + [MASTER_SNOC_CNOC] = &mas_snoc_cnoc, + [MASTER_QDSS_DAP] = &mas_qdss_dap, + [SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc, + [SLAVE_CLK_CTL] = &slv_clk_ctl, + [SLAVE_TCSR] = &slv_tcsr, + [SLAVE_TLMM] = &slv_tlmm, + [SLAVE_CRYPTO_0_CFG] = &slv_crypto0_cfg, + [SLAVE_MPM] = &slv_mpm, + [SLAVE_PIMEM_CFG] = &slv_pimem_cfg, + [SLAVE_IMEM_CFG] = &slv_imem_cfg, + [SLAVE_MESSAGE_RAM] = &slv_message_ram, + [SLAVE_BIMC_CFG] = &slv_bimc_cfg, + [SLAVE_PMIC_ARB] = &slv_pmic_arb, + [SLAVE_PRNG] = &slv_prng, + [SLAVE_DCC_CFG] = &slv_dcc_cfg, + [SLAVE_RBCPR_MX] = &slv_rbcpr_mx, + [SLAVE_QDSS_CFG] = &slv_qdss_cfg, + [SLAVE_RBCPR_CX] = &slv_rbcpr_cx, + [SLAVE_QDSS_RBCPR_APU] = &slv_cpu_apu_cfg, + [SLAVE_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg, + [SLAVE_SNOC_CFG] = &slv_snoc_cfg, + [SLAVE_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg, + [SLAVE_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg, + [SLAVE_A0NOC_CFG] = &slv_a0noc_cfg, + [SLAVE_PCIE_1_CFG] = &slv_pcie_1_cfg, + [SLAVE_PCIE_2_CFG] = &slv_pcie_2_cfg, + [SLAVE_PCIE_0_CFG] = &slv_pcie_0_cfg, + [SLAVE_PCIE20_AHB2PHY] = &slv_pcie20_ahb2phy, + [SLAVE_A0NOC_MPU_CFG] = &slv_a0noc_mpu_cfg, + [SLAVE_UFS_CFG] = &slv_ufs_cfg, + [SLAVE_A1NOC_CFG] = &slv_a1noc_cfg, + [SLAVE_A1NOC_MPU_CFG] = &slv_a1noc_mpu_cfg, + [SLAVE_A2NOC_CFG] = &slv_a2noc_cfg, + [SLAVE_A2NOC_MPU_CFG] = &slv_a2noc_mpu_cfg, + [SLAVE_SSC_CFG] = &slv_ssc_cfg, + [SLAVE_A0NOC_SMMU_CFG] = &slv_a0noc_smmu_cfg, + [SLAVE_A1NOC_SMMU_CFG] = &slv_a1noc_smmu_cfg, + [SLAVE_A2NOC_SMMU_CFG] = &slv_a2noc_smmu_cfg, + [SLAVE_LPASS_SMMU_CFG] = &slv_lpass_smmu_cfg, + [SLAVE_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg +}; + +static const struct regmap_config msm8996_cnoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1000, + .fast_io = true +}; + +static const struct qcom_icc_desc msm8996_cnoc = { + .type = QCOM_ICC_NOC, + .nodes = cnoc_nodes, + .num_nodes = ARRAY_SIZE(cnoc_nodes), + .regmap_cfg = &msm8996_cnoc_regmap_config +}; + +static struct qcom_icc_node *mnoc_nodes[] = { + [MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg, + [MASTER_CPP] = &mas_cpp, + [MASTER_JPEG] = &mas_jpeg, + [MASTER_MDP_PORT0] = &mas_mdp_p0, + [MASTER_MDP_PORT1] = &mas_mdp_p1, + [MASTER_ROTATOR] = &mas_rotator, + [MASTER_VIDEO_P0] = &mas_venus, + [MASTER_VFE] = &mas_vfe, + [MASTER_SNOC_VMEM] = &mas_snoc_vmem, + [MASTER_VIDEO_P0_OCMEM] = &mas_venus_vmem, + [MASTER_CNOC_MNOC_MMSS_CFG] = &mas_cnoc_mnoc_mmss_cfg, + [SLAVE_MNOC_BIMC] = &slv_mnoc_bimc, + [SLAVE_VMEM] = &slv_vmem, + [SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc, + [SLAVE_MMAGIC_CFG] = &slv_mmagic_cfg, + [SLAVE_CPR_CFG] = &slv_cpr_cfg, + [SLAVE_MISC_CFG] = &slv_misc_cfg, + [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg, + [SLAVE_VENUS_CFG] = &slv_venus_cfg, + [SLAVE_VMEM_CFG] = &slv_vmem_cfg, + [SLAVE_DSA_CFG] = &slv_dsa_cfg, + [SLAVE_MMSS_CLK_CFG] = &slv_mnoc_clocks_cfg, + [SLAVE_DSA_MPU_CFG] = &slv_dsa_mpu_cfg, + [SLAVE_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg, + [SLAVE_DISPLAY_CFG] = &slv_display_cfg, + 
+	[SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
+	[SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+	[SLAVE_CAMERA_THROTTLE_CFG] = &slv_camera_throttle_cfg,
+	[SLAVE_GRAPHICS_3D_CFG] = &slv_oxili_cfg,
+	[SLAVE_SMMU_MDP_CFG] = &slv_smmu_mdp_cfg,
+	[SLAVE_SMMU_ROT_CFG] = &slv_smmu_rot_cfg,
+	[SLAVE_SMMU_VENUS_CFG] = &slv_smmu_venus_cfg,
+	[SLAVE_SMMU_CPP_CFG] = &slv_smmu_cpp_cfg,
+	[SLAVE_SMMU_JPEG_CFG] = &slv_smmu_jpeg_cfg,
+	[SLAVE_SMMU_VFE_CFG] = &slv_smmu_vfe_cfg
+};
+
+static const struct regmap_config msm8996_mnoc_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x20000,
+	.fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_mnoc = {
+	.type = QCOM_ICC_NOC,
+	.nodes = mnoc_nodes,
+	.num_nodes = ARRAY_SIZE(mnoc_nodes),
+	.clocks = bus_mm_clocks,
+	.num_clocks = ARRAY_SIZE(bus_mm_clocks),
+	.regmap_cfg = &msm8996_mnoc_regmap_config
+};
+
+static struct qcom_icc_node *pnoc_nodes[] = {
+	[MASTER_SNOC_PNOC] = &mas_snoc_pnoc,
+	[MASTER_SDCC_1] = &mas_sdcc_1,
+	[MASTER_SDCC_2] = &mas_sdcc_2,
+	[MASTER_SDCC_4] = &mas_sdcc_4,
+	[MASTER_USB_HS] = &mas_usb_hs,
+	[MASTER_BLSP_1] = &mas_blsp_1,
+	[MASTER_BLSP_2] = &mas_blsp_2,
+	[MASTER_TSIF] = &mas_tsif,
+	[SLAVE_PNOC_A1NOC] = &slv_pnoc_a1noc,
+	[SLAVE_USB_HS] = &slv_usb_hs,
+	[SLAVE_SDCC_2] = &slv_sdcc_2,
+	[SLAVE_SDCC_4] = &slv_sdcc_4,
+	[SLAVE_TSIF] = &slv_tsif,
+	[SLAVE_BLSP_2] = &slv_blsp_2,
+	[SLAVE_SDCC_1] = &slv_sdcc_1,
+	[SLAVE_BLSP_1] = &slv_blsp_1,
+	[SLAVE_PDM] = &slv_pdm,
+	[SLAVE_AHB2PHY] = &slv_ahb2phy
+};
+
+static const struct regmap_config msm8996_pnoc_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x3000,
+	.fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_pnoc = {
+	.type = QCOM_ICC_NOC,
+	.nodes = pnoc_nodes,
+	.num_nodes = ARRAY_SIZE(pnoc_nodes),
+	.regmap_cfg = &msm8996_pnoc_regmap_config
+};
+
+static struct qcom_icc_node *snoc_nodes[] = {
+	[MASTER_HMSS] = &mas_hmss,
+	[MASTER_QDSS_BAM] = &mas_qdss_bam,
+	[MASTER_SNOC_CFG] = &mas_snoc_cfg,
+	[MASTER_BIMC_SNOC_0] = &mas_bimc_snoc_0,
+	[MASTER_BIMC_SNOC_1] = &mas_bimc_snoc_1,
+	[MASTER_A0NOC_SNOC] = &mas_a0noc_snoc,
+	[MASTER_A1NOC_SNOC] = &mas_a1noc_snoc,
+	[MASTER_A2NOC_SNOC] = &mas_a2noc_snoc,
+	[MASTER_QDSS_ETR] = &mas_qdss_etr,
+	[SLAVE_A0NOC_SNOC] = &slv_a0noc_snoc,
+	[SLAVE_A1NOC_SNOC] = &slv_a1noc_snoc,
+	[SLAVE_A2NOC_SNOC] = &slv_a2noc_snoc,
+	[SLAVE_HMSS] = &slv_hmss,
+	[SLAVE_LPASS] = &slv_lpass,
+	[SLAVE_USB3] = &slv_usb3,
+	[SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
+	[SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
+	[SLAVE_IMEM] = &slv_imem,
+	[SLAVE_PIMEM] = &slv_pimem,
+	[SLAVE_SNOC_VMEM] = &slv_snoc_vmem,
+	[SLAVE_SNOC_PNOC] = &slv_snoc_pnoc,
+	[SLAVE_QDSS_STM] = &slv_qdss_stm,
+	[SLAVE_PCIE_0] = &slv_pcie_0,
+	[SLAVE_PCIE_1] = &slv_pcie_1,
+	[SLAVE_PCIE_2] = &slv_pcie_2,
+	[SLAVE_SERVICE_SNOC] = &slv_srvc_snoc
+};
+
+static const struct regmap_config msm8996_snoc_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x20000,
+	.fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_snoc = {
+	.type = QCOM_ICC_NOC,
+	.nodes = snoc_nodes,
+	.num_nodes = ARRAY_SIZE(snoc_nodes),
+	.regmap_cfg = &msm8996_snoc_regmap_config
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+	{ .compatible = "qcom,msm8996-a0noc", .data = &msm8996_a0noc},
+	{ .compatible = "qcom,msm8996-a1noc", .data = &msm8996_a1noc},
+	{ .compatible = "qcom,msm8996-a2noc", .data = &msm8996_a2noc},
+	{ .compatible = "qcom,msm8996-bimc", .data = &msm8996_bimc},
&msm8996_bimc}, + { .compatible = "qcom,msm8996-cnoc", .data = &msm8996_cnoc}, + { .compatible = "qcom,msm8996-mnoc", .data = &msm8996_mnoc}, + { .compatible = "qcom,msm8996-pnoc", .data = &msm8996_pnoc}, + { .compatible = "qcom,msm8996-snoc", .data = &msm8996_snoc}, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-msm8996", + .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, + } +}; +module_platform_driver(qnoc_driver); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("Qualcomm MSM8996 NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/msm8996.h b/drivers/interconnect/qcom/msm8996.h new file mode 100644 index 000000000000..42b54ffcaa7b --- /dev/null +++ b/drivers/interconnect/qcom/msm8996.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Qualcomm MSM8996 interconnect IDs + * + * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#ifndef __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__ +#define __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__ + +#define MSM8996_MASTER_PCIE_0 1 +#define MSM8996_MASTER_PCIE_1 2 +#define MSM8996_MASTER_PCIE_2 3 +#define MSM8996_MASTER_CNOC_A1NOC 4 +#define MSM8996_MASTER_CRYPTO_CORE0 5 +#define MSM8996_MASTER_PNOC_A1NOC 6 +#define MSM8996_MASTER_USB3 7 +#define MSM8996_MASTER_IPA 8 +#define MSM8996_MASTER_UFS 9 +#define MSM8996_MASTER_AMPSS_M0 10 +#define MSM8996_MASTER_GRAPHICS_3D 11 +#define MSM8996_MASTER_MNOC_BIMC 12 +#define MSM8996_MASTER_SNOC_BIMC 13 +#define MSM8996_MASTER_SNOC_CNOC 14 +#define MSM8996_MASTER_QDSS_DAP 15 +#define MSM8996_MASTER_CNOC_MNOC_MMSS_CFG 16 +#define MSM8996_MASTER_CNOC_MNOC_CFG 17 +#define MSM8996_MASTER_CPP 18 +#define MSM8996_MASTER_JPEG 19 +#define MSM8996_MASTER_MDP_PORT0 20 +#define MSM8996_MASTER_MDP_PORT1 21 +#define MSM8996_MASTER_ROTATOR 22 +#define MSM8996_MASTER_VIDEO_P0 23 +#define MSM8996_MASTER_VFE 24 +#define MSM8996_MASTER_SNOC_VMEM 25 +#define MSM8996_MASTER_VIDEO_P0_OCMEM 26 +#define MSM8996_MASTER_SNOC_PNOC 27 +#define MSM8996_MASTER_SDCC_1 28 +#define MSM8996_MASTER_SDCC_2 29 +#define MSM8996_MASTER_SDCC_4 30 +#define MSM8996_MASTER_USB_HS 31 +#define MSM8996_MASTER_BLSP_1 32 +#define MSM8996_MASTER_BLSP_2 33 +#define MSM8996_MASTER_TSIF 34 +#define MSM8996_MASTER_HMSS 35 +#define MSM8996_MASTER_QDSS_BAM 36 +#define MSM8996_MASTER_SNOC_CFG 37 +#define MSM8996_MASTER_BIMC_SNOC_0 38 +#define MSM8996_MASTER_BIMC_SNOC_1 39 +#define MSM8996_MASTER_A0NOC_SNOC 40 +#define MSM8996_MASTER_A1NOC_SNOC 41 +#define MSM8996_MASTER_A2NOC_SNOC 42 +#define MSM8996_MASTER_QDSS_ETR 43 + +#define MSM8996_SLAVE_A0NOC_SNOC 44 +#define MSM8996_SLAVE_A1NOC_SNOC 45 +#define MSM8996_SLAVE_A2NOC_SNOC 46 +#define MSM8996_SLAVE_EBI_CH0 47 +#define MSM8996_SLAVE_HMSS_L3 48 +#define MSM8996_SLAVE_BIMC_SNOC_0 49 +#define MSM8996_SLAVE_BIMC_SNOC_1 50 +#define MSM8996_SLAVE_CNOC_A1NOC 51 +#define MSM8996_SLAVE_CLK_CTL 52 +#define MSM8996_SLAVE_TCSR 53 +#define MSM8996_SLAVE_TLMM 54 +#define MSM8996_SLAVE_CRYPTO_0_CFG 55 +#define MSM8996_SLAVE_MPM 56 +#define MSM8996_SLAVE_PIMEM_CFG 57 +#define MSM8996_SLAVE_IMEM_CFG 58 +#define MSM8996_SLAVE_MESSAGE_RAM 59 +#define MSM8996_SLAVE_BIMC_CFG 60 +#define MSM8996_SLAVE_PMIC_ARB 61 +#define MSM8996_SLAVE_PRNG 62 +#define MSM8996_SLAVE_DCC_CFG 63 +#define MSM8996_SLAVE_RBCPR_MX 64 +#define MSM8996_SLAVE_QDSS_CFG 65 +#define MSM8996_SLAVE_RBCPR_CX 66 +#define 
+#define MSM8996_SLAVE_QDSS_RBCPR_APU_CFG 67
+#define MSM8996_SLAVE_CNOC_MNOC_CFG 68
+#define MSM8996_SLAVE_SNOC_CFG 69
+#define MSM8996_SLAVE_SNOC_MPU_CFG 70
+#define MSM8996_SLAVE_EBI1_PHY_CFG 71
+#define MSM8996_SLAVE_A0NOC_CFG 72
+#define MSM8996_SLAVE_PCIE_1_CFG 73
+#define MSM8996_SLAVE_PCIE_2_CFG 74
+#define MSM8996_SLAVE_PCIE_0_CFG 75
+#define MSM8996_SLAVE_PCIE20_AHB2PHY 76
+#define MSM8996_SLAVE_A0NOC_MPU_CFG 77
+#define MSM8996_SLAVE_UFS_CFG 78
+#define MSM8996_SLAVE_A1NOC_CFG 79
+#define MSM8996_SLAVE_A1NOC_MPU_CFG 80
+#define MSM8996_SLAVE_A2NOC_CFG 81
+#define MSM8996_SLAVE_A2NOC_MPU_CFG 82
+#define MSM8996_SLAVE_SSC_CFG 83
+#define MSM8996_SLAVE_A0NOC_SMMU_CFG 84
+#define MSM8996_SLAVE_A1NOC_SMMU_CFG 85
+#define MSM8996_SLAVE_A2NOC_SMMU_CFG 86
+#define MSM8996_SLAVE_LPASS_SMMU_CFG 87
+#define MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG 88
+#define MSM8996_SLAVE_MMAGIC_CFG 89
+#define MSM8996_SLAVE_CPR_CFG 90
+#define MSM8996_SLAVE_MISC_CFG 91
+#define MSM8996_SLAVE_VENUS_THROTTLE_CFG 92
+#define MSM8996_SLAVE_VENUS_CFG 93
+#define MSM8996_SLAVE_VMEM_CFG 94
+#define MSM8996_SLAVE_DSA_CFG 95
+#define MSM8996_SLAVE_MMSS_CLK_CFG 96
+#define MSM8996_SLAVE_DSA_MPU_CFG 97
+#define MSM8996_SLAVE_MNOC_MPU_CFG 98
+#define MSM8996_SLAVE_DISPLAY_CFG 99
+#define MSM8996_SLAVE_DISPLAY_THROTTLE_CFG 100
+#define MSM8996_SLAVE_CAMERA_CFG 101
+#define MSM8996_SLAVE_CAMERA_THROTTLE_CFG 102
+#define MSM8996_SLAVE_GRAPHICS_3D_CFG 103
+#define MSM8996_SLAVE_SMMU_MDP_CFG 104
+#define MSM8996_SLAVE_SMMU_ROTATOR_CFG 105
+#define MSM8996_SLAVE_SMMU_VENUS_CFG 106
+#define MSM8996_SLAVE_SMMU_CPP_CFG 107
+#define MSM8996_SLAVE_SMMU_JPEG_CFG 108
+#define MSM8996_SLAVE_SMMU_VFE_CFG 109
+#define MSM8996_SLAVE_MNOC_BIMC 110
+#define MSM8996_SLAVE_VMEM 111
+#define MSM8996_SLAVE_SERVICE_MNOC 112
+#define MSM8996_SLAVE_PNOC_A1NOC 113
+#define MSM8996_SLAVE_USB_HS 114
+#define MSM8996_SLAVE_SDCC_2 115
+#define MSM8996_SLAVE_SDCC_4 116
+#define MSM8996_SLAVE_TSIF 117
+#define MSM8996_SLAVE_BLSP_2 118
+#define MSM8996_SLAVE_SDCC_1 119
+#define MSM8996_SLAVE_BLSP_1 120
+#define MSM8996_SLAVE_PDM 121
+#define MSM8996_SLAVE_AHB2PHY 122
+#define MSM8996_SLAVE_APPSS 123
+#define MSM8996_SLAVE_LPASS 124
+#define MSM8996_SLAVE_USB3 125
+#define MSM8996_SLAVE_SNOC_BIMC 126
+#define MSM8996_SLAVE_SNOC_CNOC 127
+#define MSM8996_SLAVE_OCIMEM 128
+#define MSM8996_SLAVE_PIMEM 129
+#define MSM8996_SLAVE_SNOC_VMEM 130
+#define MSM8996_SLAVE_SNOC_PNOC 131
+#define MSM8996_SLAVE_QDSS_STM 132
+#define MSM8996_SLAVE_PCIE_0 133
+#define MSM8996_SLAVE_PCIE_1 134
+#define MSM8996_SLAVE_PCIE_2 135
+#define MSM8996_SLAVE_SERVICE_SNOC 136
+
+#endif /* __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__ */
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index c7af143980de..eec13099a6a3 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/bitfield.h>
@@ -15,6 +15,7 @@
 #include <dt-bindings/interconnect/qcom,osm-l3.h>
 
 #include "sc7180.h"
+#include "sc7280.h"
 #include "sc8180x.h"
 #include "sdm845.h"
 #include "sm8150.h"
@@ -114,6 +115,22 @@ static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = {
 	.reg_perf_state = OSM_REG_PERF_STATE,
 };
 
+DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3);
+DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32);
+
+static const struct qcom_osm_l3_node *sc7280_epss_l3_nodes[] = {
+	[MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3,
+	[SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3,
+};
+
+static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = {
+	.nodes = sc7280_epss_l3_nodes,
+	.num_nodes = ARRAY_SIZE(sc7280_epss_l3_nodes),
+	.lut_row_size = EPSS_LUT_ROW_SIZE,
+	.reg_freq_lut = EPSS_REG_FREQ_LUT,
+	.reg_perf_state = EPSS_REG_PERF_STATE,
+};
+
 DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3);
 DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32);
 
@@ -326,6 +343,7 @@ err:
 
 static const struct of_device_id osm_l3_of_match[] = {
 	{ .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 },
+	{ .compatible = "qcom,sc7280-epss-l3", .data = &sc7280_icc_epss_l3 },
 	{ .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 },
 	{ .compatible = "qcom,sm8150-osm-l3", .data = &sm8150_icc_osm_l3 },
 	{ .compatible = "qcom,sc8180x-osm-l3", .data = &sc8180x_icc_osm_l3 },
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
new file mode 100644
index 000000000000..74404e0b2080
--- /dev/null
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm QCM2290 Network-on-Chip (NoC) QoS driver
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+
+#include <dt-bindings/interconnect/qcom,qcm2290.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "icc-rpm.h"
+#include "smd-rpm.h"
+
+enum {
+	QCM2290_MASTER_APPSS_PROC = 1,
+	QCM2290_MASTER_SNOC_BIMC_RT,
+	QCM2290_MASTER_SNOC_BIMC_NRT,
+	QCM2290_MASTER_SNOC_BIMC,
+	QCM2290_MASTER_TCU_0,
+	QCM2290_MASTER_GFX3D,
+	QCM2290_MASTER_SNOC_CNOC,
+	QCM2290_MASTER_QDSS_DAP,
+	QCM2290_MASTER_CRYPTO_CORE0,
+	QCM2290_MASTER_SNOC_CFG,
+	QCM2290_MASTER_TIC,
+	QCM2290_MASTER_ANOC_SNOC,
+	QCM2290_MASTER_BIMC_SNOC,
+	QCM2290_MASTER_PIMEM,
+	QCM2290_MASTER_QDSS_BAM,
+	QCM2290_MASTER_QUP_0,
+	QCM2290_MASTER_IPA,
+	QCM2290_MASTER_QDSS_ETR,
+	QCM2290_MASTER_SDCC_1,
+	QCM2290_MASTER_SDCC_2,
+	QCM2290_MASTER_QPIC,
+	QCM2290_MASTER_USB3_0,
+	QCM2290_MASTER_QUP_CORE_0,
+	QCM2290_MASTER_CAMNOC_SF,
+	QCM2290_MASTER_VIDEO_P0,
+	QCM2290_MASTER_VIDEO_PROC,
+	QCM2290_MASTER_CAMNOC_HF,
+	QCM2290_MASTER_MDP0,
+
+	QCM2290_SLAVE_EBI1,
+	QCM2290_SLAVE_BIMC_SNOC,
+	QCM2290_SLAVE_BIMC_CFG,
+	QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+	QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+	QCM2290_SLAVE_CAMERA_CFG,
+	QCM2290_SLAVE_CLK_CTL,
+	QCM2290_SLAVE_CRYPTO_0_CFG,
+	QCM2290_SLAVE_DISPLAY_CFG,
+	QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+	QCM2290_SLAVE_GPU_CFG,
+	QCM2290_SLAVE_HWKM,
+	QCM2290_SLAVE_IMEM_CFG,
+	QCM2290_SLAVE_IPA_CFG,
+	QCM2290_SLAVE_LPASS,
+	QCM2290_SLAVE_MESSAGE_RAM,
+	QCM2290_SLAVE_PDM,
+	QCM2290_SLAVE_PIMEM_CFG,
+	QCM2290_SLAVE_PKA_WRAPPER,
+	QCM2290_SLAVE_PMIC_ARB,
+	QCM2290_SLAVE_PRNG,
+	QCM2290_SLAVE_QDSS_CFG,
+	QCM2290_SLAVE_QM_CFG,
+	QCM2290_SLAVE_QM_MPU_CFG,
+	QCM2290_SLAVE_QPIC,
+	QCM2290_SLAVE_QUP_0,
+	QCM2290_SLAVE_SDCC_1,
+	QCM2290_SLAVE_SDCC_2,
+	QCM2290_SLAVE_SNOC_CFG,
+	QCM2290_SLAVE_TCSR,
+	QCM2290_SLAVE_USB3,
+	QCM2290_SLAVE_VENUS_CFG,
+	QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+	QCM2290_SLAVE_VSENSE_CTRL_CFG,
+	QCM2290_SLAVE_SERVICE_CNOC,
+	QCM2290_SLAVE_APPSS,
+	QCM2290_SLAVE_SNOC_CNOC,
+	QCM2290_SLAVE_IMEM,
+	QCM2290_SLAVE_PIMEM,
+	QCM2290_SLAVE_SNOC_BIMC,
+	QCM2290_SLAVE_SERVICE_SNOC,
+	QCM2290_SLAVE_QDSS_STM,
+	QCM2290_SLAVE_TCU,
+	QCM2290_SLAVE_ANOC_SNOC,
+	QCM2290_SLAVE_QUP_CORE_0,
+	QCM2290_SLAVE_SNOC_BIMC_NRT,
+	QCM2290_SLAVE_SNOC_BIMC_RT,
+};
+
+/* Master nodes */
+static const u16 mas_appss_proc_links[] = {
+	QCM2290_SLAVE_EBI1,
+	QCM2290_SLAVE_BIMC_SNOC,
+};
+
+static struct qcom_icc_node mas_appss_proc = {
+	.id = QCM2290_MASTER_APPSS_PROC,
+	.name = "mas_apps_proc",
+	.buswidth = 16,
+	.qos.ap_owned = true,
+	.qos.qos_port = 0,
+	.qos.qos_mode = NOC_QOS_MODE_FIXED,
+	.qos.prio_level = 0,
+	.qos.areq_prio = 0,
+	.mas_rpm_id = 0,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_appss_proc_links),
+	.links = mas_appss_proc_links,
+};
+
+static const u16 mas_snoc_bimc_rt_links[] = {
+	QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_rt = {
+	.id = QCM2290_MASTER_SNOC_BIMC_RT,
+	.name = "mas_snoc_bimc_rt",
+	.buswidth = 16,
+	.qos.ap_owned = true,
+	.qos.qos_port = 2,
+	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+	.mas_rpm_id = 163,
+	.slv_rpm_id = -1,
+	.num_links = ARRAY_SIZE(mas_snoc_bimc_rt_links),
+	.links = mas_snoc_bimc_rt_links,
+};
+
+static const u16 mas_snoc_bimc_nrt_links[] = {
+	QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_nrt = {
+	.id = QCM2290_MASTER_SNOC_BIMC_NRT,
+	.name = "mas_snoc_bimc_nrt",
"mas_snoc_bimc_nrt", + .buswidth = 16, + .qos.ap_owned = true, + .qos.qos_port = 2, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .mas_rpm_id = 163, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links), + .links = mas_snoc_bimc_nrt_links, +}; + +static const u16 mas_snoc_bimc_links[] = { + QCM2290_SLAVE_EBI1, +}; + +static struct qcom_icc_node mas_snoc_bimc = { + .id = QCM2290_MASTER_SNOC_BIMC, + .name = "mas_snoc_bimc", + .buswidth = 16, + .qos.ap_owned = true, + .qos.qos_port = 2, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .mas_rpm_id = 164, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_snoc_bimc_links), + .links = mas_snoc_bimc_links, +}; + +static const u16 mas_tcu_0_links[] = { + QCM2290_SLAVE_EBI1, + QCM2290_SLAVE_BIMC_SNOC, +}; + +static struct qcom_icc_node mas_tcu_0 = { + .id = QCM2290_MASTER_TCU_0, + .name = "mas_tcu_0", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 4, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.prio_level = 6, + .qos.areq_prio = 6, + .mas_rpm_id = 102, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_tcu_0_links), + .links = mas_tcu_0_links, +}; + +static const u16 mas_snoc_cnoc_links[] = { + QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG, + QCM2290_SLAVE_SDCC_2, + QCM2290_SLAVE_SDCC_1, + QCM2290_SLAVE_QM_CFG, + QCM2290_SLAVE_BIMC_CFG, + QCM2290_SLAVE_USB3, + QCM2290_SLAVE_QM_MPU_CFG, + QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG, + QCM2290_SLAVE_QDSS_CFG, + QCM2290_SLAVE_PDM, + QCM2290_SLAVE_IPA_CFG, + QCM2290_SLAVE_DISPLAY_THROTTLE_CFG, + QCM2290_SLAVE_TCSR, + QCM2290_SLAVE_MESSAGE_RAM, + QCM2290_SLAVE_PMIC_ARB, + QCM2290_SLAVE_LPASS, + QCM2290_SLAVE_DISPLAY_CFG, + QCM2290_SLAVE_VENUS_CFG, + QCM2290_SLAVE_GPU_CFG, + QCM2290_SLAVE_IMEM_CFG, + QCM2290_SLAVE_SNOC_CFG, + QCM2290_SLAVE_SERVICE_CNOC, + QCM2290_SLAVE_VENUS_THROTTLE_CFG, + QCM2290_SLAVE_PKA_WRAPPER, + QCM2290_SLAVE_HWKM, + QCM2290_SLAVE_PRNG, + QCM2290_SLAVE_VSENSE_CTRL_CFG, + QCM2290_SLAVE_CRYPTO_0_CFG, + QCM2290_SLAVE_PIMEM_CFG, + QCM2290_SLAVE_QUP_0, + QCM2290_SLAVE_CAMERA_CFG, + QCM2290_SLAVE_CLK_CTL, + QCM2290_SLAVE_QPIC, +}; + +static struct qcom_icc_node mas_snoc_cnoc = { + .id = QCM2290_MASTER_SNOC_CNOC, + .name = "mas_snoc_cnoc", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = 52, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_snoc_cnoc_links), + .links = mas_snoc_cnoc_links, +}; + +static const u16 mas_qdss_dap_links[] = { + QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG, + QCM2290_SLAVE_SDCC_2, + QCM2290_SLAVE_SDCC_1, + QCM2290_SLAVE_QM_CFG, + QCM2290_SLAVE_BIMC_CFG, + QCM2290_SLAVE_USB3, + QCM2290_SLAVE_QM_MPU_CFG, + QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG, + QCM2290_SLAVE_QDSS_CFG, + QCM2290_SLAVE_PDM, + QCM2290_SLAVE_IPA_CFG, + QCM2290_SLAVE_DISPLAY_THROTTLE_CFG, + QCM2290_SLAVE_TCSR, + QCM2290_SLAVE_MESSAGE_RAM, + QCM2290_SLAVE_PMIC_ARB, + QCM2290_SLAVE_LPASS, + QCM2290_SLAVE_DISPLAY_CFG, + QCM2290_SLAVE_VENUS_CFG, + QCM2290_SLAVE_GPU_CFG, + QCM2290_SLAVE_IMEM_CFG, + QCM2290_SLAVE_SNOC_CFG, + QCM2290_SLAVE_SERVICE_CNOC, + QCM2290_SLAVE_VENUS_THROTTLE_CFG, + QCM2290_SLAVE_PKA_WRAPPER, + QCM2290_SLAVE_HWKM, + QCM2290_SLAVE_PRNG, + QCM2290_SLAVE_VSENSE_CTRL_CFG, + QCM2290_SLAVE_CRYPTO_0_CFG, + QCM2290_SLAVE_PIMEM_CFG, + QCM2290_SLAVE_QUP_0, + QCM2290_SLAVE_CAMERA_CFG, + QCM2290_SLAVE_CLK_CTL, + QCM2290_SLAVE_QPIC, +}; + +static struct qcom_icc_node mas_qdss_dap = { + .id = QCM2290_MASTER_QDSS_DAP, + .name = "mas_qdss_dap", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = 49, + .slv_rpm_id 
= -1, + .num_links = ARRAY_SIZE(mas_qdss_dap_links), + .links = mas_qdss_dap_links, +}; + +static const u16 mas_crypto_core0_links[] = { + QCM2290_SLAVE_ANOC_SNOC +}; + +static struct qcom_icc_node mas_crypto_core0 = { + .id = QCM2290_MASTER_CRYPTO_CORE0, + .name = "mas_crypto_core0", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 22, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 23, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_crypto_core0_links), + .links = mas_crypto_core0_links, +}; + +static const u16 mas_qup_core_0_links[] = { + QCM2290_SLAVE_QUP_CORE_0, +}; + +static struct qcom_icc_node mas_qup_core_0 = { + .id = QCM2290_MASTER_QUP_CORE_0, + .name = "mas_qup_core_0", + .buswidth = 4, + .mas_rpm_id = 170, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_qup_core_0_links), + .links = mas_qup_core_0_links, +}; + +static const u16 mas_camnoc_sf_links[] = { + QCM2290_SLAVE_SNOC_BIMC_NRT, +}; + +static struct qcom_icc_node mas_camnoc_sf = { + .id = QCM2290_MASTER_CAMNOC_SF, + .name = "mas_camnoc_sf", + .buswidth = 32, + .qos.ap_owned = true, + .qos.qos_port = 4, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .mas_rpm_id = 172, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_camnoc_sf_links), + .links = mas_camnoc_sf_links, +}; + +static const u16 mas_camnoc_hf_links[] = { + QCM2290_SLAVE_SNOC_BIMC_RT, +}; + +static struct qcom_icc_node mas_camnoc_hf = { + .id = QCM2290_MASTER_CAMNOC_HF, + .name = "mas_camnoc_hf", + .buswidth = 32, + .qos.ap_owned = true, + .qos.qos_port = 10, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .qos.urg_fwd_en = true, + .mas_rpm_id = 173, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_camnoc_hf_links), + .links = mas_camnoc_hf_links, +}; + +static const u16 mas_mdp0_links[] = { + QCM2290_SLAVE_SNOC_BIMC_RT, +}; + +static struct qcom_icc_node mas_mdp0 = { + .id = QCM2290_MASTER_MDP0, + .name = "mas_mdp0", + .buswidth = 16, + .qos.ap_owned = true, + .qos.qos_port = 5, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .qos.urg_fwd_en = true, + .mas_rpm_id = 8, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_mdp0_links), + .links = mas_mdp0_links, +}; + +static const u16 mas_video_p0_links[] = { + QCM2290_SLAVE_SNOC_BIMC_NRT, +}; + +static struct qcom_icc_node mas_video_p0 = { + .id = QCM2290_MASTER_VIDEO_P0, + .name = "mas_video_p0", + .buswidth = 16, + .qos.ap_owned = true, + .qos.qos_port = 9, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .qos.urg_fwd_en = true, + .mas_rpm_id = 9, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_video_p0_links), + .links = mas_video_p0_links, +}; + +static const u16 mas_video_proc_links[] = { + QCM2290_SLAVE_SNOC_BIMC_NRT, +}; + +static struct qcom_icc_node mas_video_proc = { + .id = QCM2290_MASTER_VIDEO_PROC, + .name = "mas_video_proc", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 13, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 4, + .mas_rpm_id = 168, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_video_proc_links), + .links = mas_video_proc_links, +}; + +static const u16 mas_snoc_cfg_links[] = { + QCM2290_SLAVE_SERVICE_SNOC, +}; + +static struct qcom_icc_node mas_snoc_cfg = { + .id = QCM2290_MASTER_SNOC_CFG, + .name = "mas_snoc_cfg", + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = 20, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_snoc_cfg_links), + .links = mas_snoc_cfg_links, +}; + +static const u16 mas_tic_links[] = { + 
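/* annotation: the TIC (Test Interface Controller) debug master fans out to every SNOC-local slave below */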
QCM2290_SLAVE_PIMEM, + QCM2290_SLAVE_IMEM, + QCM2290_SLAVE_APPSS, + QCM2290_SLAVE_SNOC_BIMC, + QCM2290_SLAVE_SNOC_CNOC, + QCM2290_SLAVE_TCU, + QCM2290_SLAVE_QDSS_STM, +}; + +static struct qcom_icc_node mas_tic = { + .id = QCM2290_MASTER_TIC, + .name = "mas_tic", + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_port = 8, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 51, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_tic_links), + .links = mas_tic_links, +}; + +static const u16 mas_anoc_snoc_links[] = { + QCM2290_SLAVE_PIMEM, + QCM2290_SLAVE_IMEM, + QCM2290_SLAVE_APPSS, + QCM2290_SLAVE_SNOC_BIMC, + QCM2290_SLAVE_SNOC_CNOC, + QCM2290_SLAVE_TCU, + QCM2290_SLAVE_QDSS_STM, +}; + +static struct qcom_icc_node mas_anoc_snoc = { + .id = QCM2290_MASTER_ANOC_SNOC, + .name = "mas_anoc_snoc", + .buswidth = 16, + .mas_rpm_id = 110, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_anoc_snoc_links), + .links = mas_anoc_snoc_links, +}; + +static const u16 mas_bimc_snoc_links[] = { + QCM2290_SLAVE_PIMEM, + QCM2290_SLAVE_IMEM, + QCM2290_SLAVE_APPSS, + QCM2290_SLAVE_SNOC_CNOC, + QCM2290_SLAVE_TCU, + QCM2290_SLAVE_QDSS_STM, +}; + +static struct qcom_icc_node mas_bimc_snoc = { + .id = QCM2290_MASTER_BIMC_SNOC, + .name = "mas_bimc_snoc", + .buswidth = 8, + .mas_rpm_id = 21, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_bimc_snoc_links), + .links = mas_bimc_snoc_links, +}; + +static const u16 mas_pimem_links[] = { + QCM2290_SLAVE_IMEM, + QCM2290_SLAVE_SNOC_BIMC, +}; + +static struct qcom_icc_node mas_pimem = { + .id = QCM2290_MASTER_PIMEM, + .name = "mas_pimem", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 20, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 113, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_pimem_links), + .links = mas_pimem_links, +}; + +static const u16 mas_qdss_bam_links[] = { + QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_qdss_bam = { + .id = QCM2290_MASTER_QDSS_BAM, + .name = "mas_qdss_bam", + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_port = 2, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 19, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_qdss_bam_links), + .links = mas_qdss_bam_links, +}; + +static const u16 mas_qup_0_links[] = { + QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_qup_0 = { + .id = QCM2290_MASTER_QUP_0, + .name = "mas_qup_0", + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_port = 0, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 166, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_qup_0_links), + .links = mas_qup_0_links, +}; + +static const u16 mas_ipa_links[] = { + QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_ipa = { + .id = QCM2290_MASTER_IPA, + .name = "mas_ipa", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 3, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 59, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_ipa_links), + .links = mas_ipa_links, +}; + +static const u16 mas_qdss_etr_links[] = { + QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_qdss_etr = { + .id = QCM2290_MASTER_QDSS_ETR, + .name = "mas_qdss_etr", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 12, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 31, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_qdss_etr_links), + .links = mas_qdss_etr_links, +}; + +static const u16 mas_sdcc_1_links[] = { + 
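/* annotation: SDC1, like QUP, QPIC, USB3 and the other peripheral DMA masters above, funnels into the aggregate NoC */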
QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_sdcc_1 = { + .id = QCM2290_MASTER_SDCC_1, + .name = "mas_sdcc_1", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 17, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 33, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_sdcc_1_links), + .links = mas_sdcc_1_links, +}; + +static const u16 mas_sdcc_2_links[] = { + QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_sdcc_2 = { + .id = QCM2290_MASTER_SDCC_2, + .name = "mas_sdcc_2", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 23, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 35, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_sdcc_2_links), + .links = mas_sdcc_2_links, +}; + +static const u16 mas_qpic_links[] = { + QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_qpic = { + .id = QCM2290_MASTER_QPIC, + .name = "mas_qpic", + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_port = 1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 58, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_qpic_links), + .links = mas_qpic_links, +}; + +static const u16 mas_usb3_0_links[] = { + QCM2290_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node mas_usb3_0 = { + .id = QCM2290_MASTER_USB3_0, + .name = "mas_usb3_0", + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_port = 24, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 32, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_usb3_0_links), + .links = mas_usb3_0_links, +}; + +static const u16 mas_gfx3d_links[] = { + QCM2290_SLAVE_EBI1, +}; + +static struct qcom_icc_node mas_gfx3d = { + .id = QCM2290_MASTER_GFX3D, + .name = "mas_gfx3d", + .buswidth = 32, + .qos.ap_owned = true, + .qos.qos_port = 1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.prio_level = 0, + .qos.areq_prio = 0, + .mas_rpm_id = 6, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_gfx3d_links), + .links = mas_gfx3d_links, +}; + +/* Slave nodes */ +static struct qcom_icc_node slv_ebi1 = { + .name = "slv_ebi1", + .id = QCM2290_SLAVE_EBI1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 0, +}; + +static const u16 slv_bimc_snoc_links[] = { + QCM2290_MASTER_BIMC_SNOC, +}; + +static struct qcom_icc_node slv_bimc_snoc = { + .name = "slv_bimc_snoc", + .id = QCM2290_SLAVE_BIMC_SNOC, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 2, + .num_links = ARRAY_SIZE(slv_bimc_snoc_links), + .links = slv_bimc_snoc_links, +}; + +static struct qcom_icc_node slv_bimc_cfg = { + .name = "slv_bimc_cfg", + .id = QCM2290_SLAVE_BIMC_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 56, +}; + +static struct qcom_icc_node slv_camera_nrt_throttle_cfg = { + .name = "slv_camera_nrt_throttle_cfg", + .id = QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 271, +}; + +static struct qcom_icc_node slv_camera_rt_throttle_cfg = { + .name = "slv_camera_rt_throttle_cfg", + .id = QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 279, +}; + +static struct qcom_icc_node slv_camera_cfg = { + .name = "slv_camera_cfg", + .id = QCM2290_SLAVE_CAMERA_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 3, +}; + 
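/*
 * Annotation, not part of this patch: the slave nodes defined here are the
 * endpoints that consumer drivers vote on through the generic interconnect
 * API from <linux/interconnect.h>. A minimal consumer sketch, assuming a DT
 * "interconnects" entry named "sdhc-ddr" that terminates at
 * QCM2290_SLAVE_EBI1 (the path name is purely illustrative):
 *
 *	struct icc_path *path;
 *	int ret;
 *
 *	path = of_icc_get(dev, "sdhc-ddr");	// look up the path from DT
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	// request 100 MB/s average, 200 MB/s peak along the whole path
 *	ret = icc_set_bw(path, kBps_to_icc(100000), kBps_to_icc(200000));
 *	if (ret)
 *		dev_err(dev, "interconnect vote failed: %d\n", ret);
 *	icc_put(path);	// drop the vote when done
 *
 * of_icc_get(), icc_set_bw(), icc_put() and kBps_to_icc() are the existing
 * kernel interconnect consumer API; the provider below aggregates such
 * votes per node and forwards them to the RPM.
 */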
+static struct qcom_icc_node slv_clk_ctl = { + .name = "slv_clk_ctl", + .id = QCM2290_SLAVE_CLK_CTL, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 47, +}; + +static struct qcom_icc_node slv_crypto_0_cfg = { + .name = "slv_crypto_0_cfg", + .id = QCM2290_SLAVE_CRYPTO_0_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 52, +}; + +static struct qcom_icc_node slv_display_cfg = { + .name = "slv_display_cfg", + .id = QCM2290_SLAVE_DISPLAY_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 4, +}; + +static struct qcom_icc_node slv_display_throttle_cfg = { + .name = "slv_display_throttle_cfg", + .id = QCM2290_SLAVE_DISPLAY_THROTTLE_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 156, +}; + +static struct qcom_icc_node slv_gpu_cfg = { + .name = "slv_gpu_cfg", + .id = QCM2290_SLAVE_GPU_CFG, + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 275, +}; + +static struct qcom_icc_node slv_hwkm = { + .name = "slv_hwkm", + .id = QCM2290_SLAVE_HWKM, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 280, +}; + +static struct qcom_icc_node slv_imem_cfg = { + .name = "slv_imem_cfg", + .id = QCM2290_SLAVE_IMEM_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 54, +}; + +static struct qcom_icc_node slv_ipa_cfg = { + .name = "slv_ipa_cfg", + .id = QCM2290_SLAVE_IPA_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 183, +}; + +static struct qcom_icc_node slv_lpass = { + .name = "slv_lpass", + .id = QCM2290_SLAVE_LPASS, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 21, +}; + +static struct qcom_icc_node slv_message_ram = { + .name = "slv_message_ram", + .id = QCM2290_SLAVE_MESSAGE_RAM, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 55, +}; + +static struct qcom_icc_node slv_pdm = { + .name = "slv_pdm", + .id = QCM2290_SLAVE_PDM, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 41, +}; + +static struct qcom_icc_node slv_pimem_cfg = { + .name = "slv_pimem_cfg", + .id = QCM2290_SLAVE_PIMEM_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 167, +}; + +static struct qcom_icc_node slv_pka_wrapper = { + .name = "slv_pka_wrapper", + .id = QCM2290_SLAVE_PKA_WRAPPER, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 281, +}; + +static struct qcom_icc_node slv_pmic_arb = { + .name = "slv_pmic_arb", + .id = QCM2290_SLAVE_PMIC_ARB, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 59, +}; + +static struct qcom_icc_node slv_prng = { + .name = "slv_prng", + .id = QCM2290_SLAVE_PRNG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 44, +}; + +static struct qcom_icc_node slv_qdss_cfg = { + .name = 
"slv_qdss_cfg", + .id = QCM2290_SLAVE_QDSS_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 63, +}; + +static struct qcom_icc_node slv_qm_cfg = { + .name = "slv_qm_cfg", + .id = QCM2290_SLAVE_QM_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 212, +}; + +static struct qcom_icc_node slv_qm_mpu_cfg = { + .name = "slv_qm_mpu_cfg", + .id = QCM2290_SLAVE_QM_MPU_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 231, +}; + +static struct qcom_icc_node slv_qpic = { + .name = "slv_qpic", + .id = QCM2290_SLAVE_QPIC, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 80, +}; + +static struct qcom_icc_node slv_qup_0 = { + .name = "slv_qup_0", + .id = QCM2290_SLAVE_QUP_0, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 261, +}; + +static struct qcom_icc_node slv_sdcc_1 = { + .name = "slv_sdcc_1", + .id = QCM2290_SLAVE_SDCC_1, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 31, +}; + +static struct qcom_icc_node slv_sdcc_2 = { + .name = "slv_sdcc_2", + .id = QCM2290_SLAVE_SDCC_2, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 33, +}; + +static const u16 slv_snoc_cfg_links[] = { + QCM2290_MASTER_SNOC_CFG, +}; + +static struct qcom_icc_node slv_snoc_cfg = { + .name = "slv_snoc_cfg", + .id = QCM2290_SLAVE_SNOC_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 70, + .num_links = ARRAY_SIZE(slv_snoc_cfg_links), + .links = slv_snoc_cfg_links, +}; + +static struct qcom_icc_node slv_tcsr = { + .name = "slv_tcsr", + .id = QCM2290_SLAVE_TCSR, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 50, +}; + +static struct qcom_icc_node slv_usb3 = { + .name = "slv_usb3", + .id = QCM2290_SLAVE_USB3, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 22, +}; + +static struct qcom_icc_node slv_venus_cfg = { + .name = "slv_venus_cfg", + .id = QCM2290_SLAVE_VENUS_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 10, +}; + +static struct qcom_icc_node slv_venus_throttle_cfg = { + .name = "slv_venus_throttle_cfg", + .id = QCM2290_SLAVE_VENUS_THROTTLE_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 178, +}; + +static struct qcom_icc_node slv_vsense_ctrl_cfg = { + .name = "slv_vsense_ctrl_cfg", + .id = QCM2290_SLAVE_VSENSE_CTRL_CFG, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 263, +}; + +static struct qcom_icc_node slv_service_cnoc = { + .name = "slv_service_cnoc", + .id = QCM2290_SLAVE_SERVICE_CNOC, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 76, +}; + +static struct qcom_icc_node slv_qup_core_0 = { + .name = "slv_qup_core_0", + .id = QCM2290_SLAVE_QUP_CORE_0, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = 
-1, + .slv_rpm_id = 264, +}; + +static const u16 slv_snoc_bimc_nrt_links[] = { + QCM2290_MASTER_SNOC_BIMC_NRT, +}; + +static struct qcom_icc_node slv_snoc_bimc_nrt = { + .name = "slv_snoc_bimc_nrt", + .id = QCM2290_SLAVE_SNOC_BIMC_NRT, + .buswidth = 16, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 259, + .num_links = ARRAY_SIZE(slv_snoc_bimc_nrt_links), + .links = slv_snoc_bimc_nrt_links, +}; + +static const u16 slv_snoc_bimc_rt_links[] = { + QCM2290_MASTER_SNOC_BIMC_RT, +}; + +static struct qcom_icc_node slv_snoc_bimc_rt = { + .name = "slv_snoc_bimc_rt", + .id = QCM2290_SLAVE_SNOC_BIMC_RT, + .buswidth = 16, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 260, + .num_links = ARRAY_SIZE(slv_snoc_bimc_rt_links), + .links = slv_snoc_bimc_rt_links, +}; + +static struct qcom_icc_node slv_appss = { + .name = "slv_appss", + .id = QCM2290_SLAVE_APPSS, + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 20, +}; + +static const u16 slv_snoc_cnoc_links[] = { + QCM2290_MASTER_SNOC_CNOC, +}; + +static struct qcom_icc_node slv_snoc_cnoc = { + .name = "slv_snoc_cnoc", + .id = QCM2290_SLAVE_SNOC_CNOC, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 25, + .num_links = ARRAY_SIZE(slv_snoc_cnoc_links), + .links = slv_snoc_cnoc_links, +}; + +static struct qcom_icc_node slv_imem = { + .name = "slv_imem", + .id = QCM2290_SLAVE_IMEM, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 26, +}; + +static struct qcom_icc_node slv_pimem = { + .name = "slv_pimem", + .id = QCM2290_SLAVE_PIMEM, + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 166, +}; + +static const u16 slv_snoc_bimc_links[] = { + QCM2290_MASTER_SNOC_BIMC, +}; + +static struct qcom_icc_node slv_snoc_bimc = { + .name = "slv_snoc_bimc", + .id = QCM2290_SLAVE_SNOC_BIMC, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 24, + .num_links = ARRAY_SIZE(slv_snoc_bimc_links), + .links = slv_snoc_bimc_links, +}; + +static struct qcom_icc_node slv_service_snoc = { + .name = "slv_service_snoc", + .id = QCM2290_SLAVE_SERVICE_SNOC, + .buswidth = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 29, +}; + +static struct qcom_icc_node slv_qdss_stm = { + .name = "slv_qdss_stm", + .id = QCM2290_SLAVE_QDSS_STM, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 30, +}; + +static struct qcom_icc_node slv_tcu = { + .name = "slv_tcu", + .id = QCM2290_SLAVE_TCU, + .buswidth = 8, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .mas_rpm_id = -1, + .slv_rpm_id = 133, +}; + +static const u16 slv_anoc_snoc_links[] = { + QCM2290_MASTER_ANOC_SNOC, +}; + +static struct qcom_icc_node slv_anoc_snoc = { + .name = "slv_anoc_snoc", + .id = QCM2290_SLAVE_ANOC_SNOC, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 141, + .num_links = ARRAY_SIZE(slv_anoc_snoc_links), + .links = slv_anoc_snoc_links, +}; + +/* NoC descriptors */ +static struct qcom_icc_node *qcm2290_bimc_nodes[] = { + [MASTER_APPSS_PROC] = &mas_appss_proc, + [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt, + [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt, + [MASTER_SNOC_BIMC] = &mas_snoc_bimc, + [MASTER_TCU_0] = &mas_tcu_0, + [MASTER_GFX3D] = &mas_gfx3d, + [SLAVE_EBI1] = &slv_ebi1, + [SLAVE_BIMC_SNOC] = &slv_bimc_snoc, +}; + +static const struct regmap_config qcm2290_bimc_regmap_config = { + .reg_bits = 32, + 
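/* annotation: 32-bit registers on a 4-byte stride, i.e. a densely packed MMIO window */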
.reg_stride = 4, + .val_bits = 32, + .max_register = 0x80000, + .fast_io = true, +}; + +static struct qcom_icc_desc qcm2290_bimc = { + .type = QCOM_ICC_BIMC, + .nodes = qcm2290_bimc_nodes, + .num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes), + .regmap_cfg = &qcm2290_bimc_regmap_config, + /* M_REG_BASE() in vendor msm_bus_bimc_adhoc driver */ + .qos_offset = 0x8000, +}; + +static struct qcom_icc_node *qcm2290_cnoc_nodes[] = { + [MASTER_SNOC_CNOC] = &mas_snoc_cnoc, + [MASTER_QDSS_DAP] = &mas_qdss_dap, + [SLAVE_BIMC_CFG] = &slv_bimc_cfg, + [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &slv_camera_nrt_throttle_cfg, + [SLAVE_CAMERA_RT_THROTTLE_CFG] = &slv_camera_rt_throttle_cfg, + [SLAVE_CAMERA_CFG] = &slv_camera_cfg, + [SLAVE_CLK_CTL] = &slv_clk_ctl, + [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg, + [SLAVE_DISPLAY_CFG] = &slv_display_cfg, + [SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg, + [SLAVE_GPU_CFG] = &slv_gpu_cfg, + [SLAVE_HWKM] = &slv_hwkm, + [SLAVE_IMEM_CFG] = &slv_imem_cfg, + [SLAVE_IPA_CFG] = &slv_ipa_cfg, + [SLAVE_LPASS] = &slv_lpass, + [SLAVE_MESSAGE_RAM] = &slv_message_ram, + [SLAVE_PDM] = &slv_pdm, + [SLAVE_PIMEM_CFG] = &slv_pimem_cfg, + [SLAVE_PKA_WRAPPER] = &slv_pka_wrapper, + [SLAVE_PMIC_ARB] = &slv_pmic_arb, + [SLAVE_PRNG] = &slv_prng, + [SLAVE_QDSS_CFG] = &slv_qdss_cfg, + [SLAVE_QM_CFG] = &slv_qm_cfg, + [SLAVE_QM_MPU_CFG] = &slv_qm_mpu_cfg, + [SLAVE_QPIC] = &slv_qpic, + [SLAVE_QUP_0] = &slv_qup_0, + [SLAVE_SDCC_1] = &slv_sdcc_1, + [SLAVE_SDCC_2] = &slv_sdcc_2, + [SLAVE_SNOC_CFG] = &slv_snoc_cfg, + [SLAVE_TCSR] = &slv_tcsr, + [SLAVE_USB3] = &slv_usb3, + [SLAVE_VENUS_CFG] = &slv_venus_cfg, + [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &slv_vsense_ctrl_cfg, + [SLAVE_SERVICE_CNOC] = &slv_service_cnoc, +}; + +static const struct regmap_config qcm2290_cnoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x8200, + .fast_io = true, +}; + +static struct qcom_icc_desc qcm2290_cnoc = { + .type = QCOM_ICC_NOC, + .nodes = qcm2290_cnoc_nodes, + .num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes), + .regmap_cfg = &qcm2290_cnoc_regmap_config, +}; + +static struct qcom_icc_node *qcm2290_snoc_nodes[] = { + [MASTER_CRYPTO_CORE0] = &mas_crypto_core0, + [MASTER_SNOC_CFG] = &mas_snoc_cfg, + [MASTER_TIC] = &mas_tic, + [MASTER_ANOC_SNOC] = &mas_anoc_snoc, + [MASTER_BIMC_SNOC] = &mas_bimc_snoc, + [MASTER_PIMEM] = &mas_pimem, + [MASTER_QDSS_BAM] = &mas_qdss_bam, + [MASTER_QUP_0] = &mas_qup_0, + [MASTER_IPA] = &mas_ipa, + [MASTER_QDSS_ETR] = &mas_qdss_etr, + [MASTER_SDCC_1] = &mas_sdcc_1, + [MASTER_SDCC_2] = &mas_sdcc_2, + [MASTER_QPIC] = &mas_qpic, + [MASTER_USB3_0] = &mas_usb3_0, + [SLAVE_APPSS] = &slv_appss, + [SLAVE_SNOC_CNOC] = &slv_snoc_cnoc, + [SLAVE_IMEM] = &slv_imem, + [SLAVE_PIMEM] = &slv_pimem, + [SLAVE_SNOC_BIMC] = &slv_snoc_bimc, + [SLAVE_SERVICE_SNOC] = &slv_service_snoc, + [SLAVE_QDSS_STM] = &slv_qdss_stm, + [SLAVE_TCU] = &slv_tcu, + [SLAVE_ANOC_SNOC] = &slv_anoc_snoc, +}; + +static const struct regmap_config qcm2290_snoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x60200, + .fast_io = true, +}; + +static struct qcom_icc_desc qcm2290_snoc = { + .type = QCOM_ICC_QNOC, + .nodes = qcm2290_snoc_nodes, + .num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes), + .regmap_cfg = &qcm2290_snoc_regmap_config, + /* Vendor DT node fab-sys_noc property 'qcom,base-offset' */ + .qos_offset = 0x15000, +}; + +static struct qcom_icc_node *qcm2290_qup_virt_nodes[] = { + [MASTER_QUP_CORE_0] = &mas_qup_core_0, 
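/* annotation: the QUP virtual NoC has no regmap; its single master/slave pair exists only to carry the shared QUP core clock vote */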
+ [SLAVE_QUP_CORE_0] = &slv_qup_core_0 +}; + +static struct qcom_icc_desc qcm2290_qup_virt = { + .type = QCOM_ICC_QNOC, + .nodes = qcm2290_qup_virt_nodes, + .num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes), +}; + +static struct qcom_icc_node *qcm2290_mmnrt_virt_nodes[] = { + [MASTER_CAMNOC_SF] = &mas_camnoc_sf, + [MASTER_VIDEO_P0] = &mas_video_p0, + [MASTER_VIDEO_PROC] = &mas_video_proc, + [SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt, +}; + +static struct qcom_icc_desc qcm2290_mmnrt_virt = { + .type = QCOM_ICC_QNOC, + .nodes = qcm2290_mmnrt_virt_nodes, + .num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes), + .regmap_cfg = &qcm2290_snoc_regmap_config, + .qos_offset = 0x15000, +}; + +static struct qcom_icc_node *qcm2290_mmrt_virt_nodes[] = { + [MASTER_CAMNOC_HF] = &mas_camnoc_hf, + [MASTER_MDP0] = &mas_mdp0, + [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt, +}; + +static struct qcom_icc_desc qcm2290_mmrt_virt = { + .type = QCOM_ICC_QNOC, + .nodes = qcm2290_mmrt_virt_nodes, + .num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes), + .regmap_cfg = &qcm2290_snoc_regmap_config, + .qos_offset = 0x15000, +}; + +static const struct of_device_id qcm2290_noc_of_match[] = { + { .compatible = "qcom,qcm2290-bimc", .data = &qcm2290_bimc }, + { .compatible = "qcom,qcm2290-cnoc", .data = &qcm2290_cnoc }, + { .compatible = "qcom,qcm2290-snoc", .data = &qcm2290_snoc }, + { .compatible = "qcom,qcm2290-qup-virt", .data = &qcm2290_qup_virt }, + { .compatible = "qcom,qcm2290-mmrt-virt", .data = &qcm2290_mmrt_virt }, + { .compatible = "qcom,qcm2290-mmnrt-virt", .data = &qcm2290_mmnrt_virt }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcm2290_noc_of_match); + +static struct platform_driver qcm2290_noc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-qcm2290", + .of_match_table = qcm2290_noc_of_match, + }, +}; +module_platform_driver(qcm2290_noc_driver); + +MODULE_DESCRIPTION("Qualcomm QCM2290 NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/sc7280.h b/drivers/interconnect/qcom/sc7280.h index 175e400305c5..1fb9839b2c14 100644 --- a/drivers/interconnect/qcom/sc7280.h +++ b/drivers/interconnect/qcom/sc7280.h @@ -150,5 +150,7 @@ #define SC7280_SLAVE_PCIE_1 139 #define SC7280_SLAVE_QDSS_STM 140 #define SC7280_SLAVE_TCU 141 +#define SC7280_MASTER_EPSS_L3_APPS 142 +#define SC7280_SLAVE_EPSS_L3 143 #endif diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c index 471bb88f8828..274a7139fe1a 100644 --- a/drivers/interconnect/qcom/sdm660.c +++ b/drivers/interconnect/qcom/sdm660.c @@ -1513,6 +1513,7 @@ static const struct regmap_config sdm660_a2noc_regmap_config = { }; static struct qcom_icc_desc sdm660_a2noc = { + .type = QCOM_ICC_NOC, .nodes = sdm660_a2noc_nodes, .num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes), .clocks = bus_a2noc_clocks, @@ -1540,9 +1541,9 @@ static const struct regmap_config sdm660_bimc_regmap_config = { }; static struct qcom_icc_desc sdm660_bimc = { + .type = QCOM_ICC_BIMC, .nodes = sdm660_bimc_nodes, .num_nodes = ARRAY_SIZE(sdm660_bimc_nodes), - .is_bimc_node = true, .regmap_cfg = &sdm660_bimc_regmap_config, }; @@ -1594,6 +1595,7 @@ static const struct regmap_config sdm660_cnoc_regmap_config = { }; static struct qcom_icc_desc sdm660_cnoc = { + .type = QCOM_ICC_NOC, .nodes = sdm660_cnoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes), .regmap_cfg = &sdm660_cnoc_regmap_config, @@ -1614,6 +1616,7 @@ static const struct regmap_config sdm660_gnoc_regmap_config = { }; static struct qcom_icc_desc sdm660_gnoc = { + .type = 
QCOM_ICC_NOC, .nodes = sdm660_gnoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes), .regmap_cfg = &sdm660_gnoc_regmap_config, @@ -1653,6 +1656,7 @@ static const struct regmap_config sdm660_mnoc_regmap_config = { }; static struct qcom_icc_desc sdm660_mnoc = { + .type = QCOM_ICC_NOC, .nodes = sdm660_mnoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes), .clocks = bus_mm_clocks, @@ -1689,6 +1693,7 @@ static const struct regmap_config sdm660_snoc_regmap_config = { }; static struct qcom_icc_desc sdm660_snoc = { + .type = QCOM_ICC_NOC, .nodes = sdm660_snoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_snoc_nodes), .regmap_cfg = &sdm660_snoc_regmap_config, diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c index 2a85f53802b5..745e3c36a61a 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -535,7 +535,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8150", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index 8dfb5dea562a..aa707582ea01 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -551,7 +551,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8250", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c index 3e26a2175b28..c79f93a1ac73 100644 --- a/drivers/interconnect/qcom/sm8350.c +++ b/drivers/interconnect/qcom/sm8350.c @@ -531,7 +531,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8350", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c new file mode 100644 index 000000000000..8d99ee6421df --- /dev/null +++ b/drivers/interconnect/qcom/sm8450.c @@ -0,0 +1,1987 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2021, Linaro Limited + */ + +#include <linux/device.h> +#include <linux/interconnect.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <dt-bindings/interconnect/qcom,sm8450.h> + +#include "bcm-voter.h" +#include "icc-rpmh.h" +#include "sm8450.h" + +static struct qcom_icc_node qhm_qspi = { + .name = "qhm_qspi", + .id = SM8450_MASTER_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup1 = { + .name = "qhm_qup1", + .id = SM8450_MASTER_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qnm_a1noc_cfg = { + .name = "qnm_a1noc_cfg", + .id = SM8450_MASTER_A1NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_SERVICE_A1NOC }, +}; + +static struct qcom_icc_node xm_sdc4 = { + .name = "xm_sdc4", + .id = SM8450_MASTER_SDCC_4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_ufs_mem = { + .name = "xm_ufs_mem", + .id = SM8450_MASTER_UFS_MEM, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb3_0 = { + .name = "xm_usb3_0", + .id = SM8450_MASTER_USB3_0, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qdss_bam = { + .name = "qhm_qdss_bam", + .id = SM8450_MASTER_QDSS_BAM, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup0 = { + .name = "qhm_qup0", + .id = SM8450_MASTER_QUP_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup2 = { + .name = "qhm_qup2", + .id = SM8450_MASTER_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qnm_a2noc_cfg = { + .name = "qnm_a2noc_cfg", + .id = SM8450_MASTER_A2NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_SERVICE_A2NOC }, +}; + +static struct qcom_icc_node qxm_crypto = { + .name = "qxm_crypto", + .id = SM8450_MASTER_CRYPTO, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_ipa = { + .name = "qxm_ipa", + .id = SM8450_MASTER_IPA, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_sensorss_q6 = { + .name = "qxm_sensorss_q6", + .id = SM8450_MASTER_SENSORS_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_sp = { + .name = "qxm_sp", + .id = SM8450_MASTER_SP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_qdss_etr_0 = { + .name = "xm_qdss_etr_0", + .id = SM8450_MASTER_QDSS_ETR, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_qdss_etr_1 = { + .name = "xm_qdss_etr_1", + .id = SM8450_MASTER_QDSS_ETR_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_sdc2 = { + .name = "xm_sdc2", + .id = 
SM8450_MASTER_SDCC_2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qup0_core_master = { + .name = "qup0_core_master", + .id = SM8450_MASTER_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_QUP_CORE_0 }, +}; + +static struct qcom_icc_node qup1_core_master = { + .name = "qup1_core_master", + .id = SM8450_MASTER_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_QUP_CORE_1 }, +}; + +static struct qcom_icc_node qup2_core_master = { + .name = "qup2_core_master", + .id = SM8450_MASTER_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_QUP_CORE_2 }, +}; + +static struct qcom_icc_node qnm_gemnoc_cnoc = { + .name = "qnm_gemnoc_cnoc", + .id = SM8450_MASTER_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 51, + .links = { SM8450_SLAVE_AHB2PHY_SOUTH, SM8450_SLAVE_AHB2PHY_NORTH, + SM8450_SLAVE_AOSS, SM8450_SLAVE_CAMERA_CFG, + SM8450_SLAVE_CLK_CTL, SM8450_SLAVE_CDSP_CFG, + SM8450_SLAVE_RBCPR_CX_CFG, SM8450_SLAVE_RBCPR_MMCX_CFG, + SM8450_SLAVE_RBCPR_MXA_CFG, SM8450_SLAVE_RBCPR_MXC_CFG, + SM8450_SLAVE_CRYPTO_0_CFG, SM8450_SLAVE_CX_RDPM, + SM8450_SLAVE_DISPLAY_CFG, SM8450_SLAVE_GFX3D_CFG, + SM8450_SLAVE_IMEM_CFG, SM8450_SLAVE_IPA_CFG, + SM8450_SLAVE_IPC_ROUTER_CFG, SM8450_SLAVE_LPASS, + SM8450_SLAVE_CNOC_MSS, SM8450_SLAVE_MX_RDPM, + SM8450_SLAVE_PCIE_0_CFG, SM8450_SLAVE_PCIE_1_CFG, + SM8450_SLAVE_PDM, SM8450_SLAVE_PIMEM_CFG, + SM8450_SLAVE_PRNG, SM8450_SLAVE_QDSS_CFG, + SM8450_SLAVE_QSPI_0, SM8450_SLAVE_QUP_0, + SM8450_SLAVE_QUP_1, SM8450_SLAVE_QUP_2, + SM8450_SLAVE_SDCC_2, SM8450_SLAVE_SDCC_4, + SM8450_SLAVE_SPSS_CFG, SM8450_SLAVE_TCSR, + SM8450_SLAVE_TLMM, SM8450_SLAVE_TME_CFG, + SM8450_SLAVE_UFS_MEM_CFG, SM8450_SLAVE_USB3_0, + SM8450_SLAVE_VENUS_CFG, SM8450_SLAVE_VSENSE_CTRL_CFG, + SM8450_SLAVE_A1NOC_CFG, SM8450_SLAVE_A2NOC_CFG, + SM8450_SLAVE_DDRSS_CFG, SM8450_SLAVE_CNOC_MNOC_CFG, + SM8450_SLAVE_PCIE_ANOC_CFG, SM8450_SLAVE_SNOC_CFG, + SM8450_SLAVE_IMEM, SM8450_SLAVE_PIMEM, + SM8450_SLAVE_SERVICE_CNOC, SM8450_SLAVE_QDSS_STM, + SM8450_SLAVE_TCU }, +}; + +static struct qcom_icc_node qnm_gemnoc_pcie = { + .name = "qnm_gemnoc_pcie", + .id = SM8450_MASTER_GEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SM8450_SLAVE_PCIE_0, SM8450_SLAVE_PCIE_1 }, +}; + +static struct qcom_icc_node alm_gpu_tcu = { + .name = "alm_gpu_tcu", + .id = SM8450_MASTER_GPU_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node alm_sys_tcu = { + .name = "alm_sys_tcu", + .id = SM8450_MASTER_SYS_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node chm_apps = { + .name = "chm_apps", + .id = SM8450_MASTER_APPSS_PROC, + .channels = 3, + .buswidth = 32, + .num_links = 3, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC, + SM8450_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_gpu = { + .name = "qnm_gpu", + .id = SM8450_MASTER_GFX3D, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_mdsp = { + .name = "qnm_mdsp", + .id = SM8450_MASTER_MSS_PROC, + .channels = 1, + .buswidth = 16, + .num_links = 3, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC, + 
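/* annotation: the third link below additionally gives the modem DSP a path towards the PCIe gateway */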
SM8450_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_mnoc_hf = { + .name = "qnm_mnoc_hf", + .id = SM8450_MASTER_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_mnoc_sf = { + .name = "qnm_mnoc_sf", + .id = SM8450_MASTER_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_nsp_gemnoc = { + .name = "qnm_nsp_gemnoc", + .id = SM8450_MASTER_COMPUTE_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_pcie = { + .name = "qnm_pcie", + .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 2, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_snoc_gc = { + .name = "qnm_snoc_gc", + .id = SM8450_MASTER_SNOC_GC_MEM_NOC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_snoc_sf = { + .name = "qnm_snoc_sf", + .id = SM8450_MASTER_SNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 3, + .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC, + SM8450_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qhm_config_noc = { + .name = "qhm_config_noc", + .id = SM8450_MASTER_CNOC_LPASS_AG_NOC, + .channels = 1, + .buswidth = 4, + .num_links = 6, + .links = { SM8450_SLAVE_LPASS_CORE_CFG, SM8450_SLAVE_LPASS_LPI_CFG, + SM8450_SLAVE_LPASS_MPU_CFG, SM8450_SLAVE_LPASS_TOP_CFG, + SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node qxm_lpass_dsp = { + .name = "qxm_lpass_dsp", + .id = SM8450_MASTER_LPASS_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 4, + .links = { SM8450_SLAVE_LPASS_TOP_CFG, SM8450_SLAVE_LPASS_SNOC, + SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node llcc_mc = { + .name = "llcc_mc", + .id = SM8450_MASTER_LLCC, + .channels = 4, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_EBI1 }, +}; + +static struct qcom_icc_node qnm_camnoc_hf = { + .name = "qnm_camnoc_hf", + .id = SM8450_MASTER_CAMNOC_HF, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_camnoc_icp = { + .name = "qnm_camnoc_icp", + .id = SM8450_MASTER_CAMNOC_ICP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_camnoc_sf = { + .name = "qnm_camnoc_sf", + .id = SM8450_MASTER_CAMNOC_SF, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mdp = { + .name = "qnm_mdp", + .id = SM8450_MASTER_MDP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mnoc_cfg = { + .name = "qnm_mnoc_cfg", + .id = SM8450_MASTER_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_SERVICE_MNOC }, +}; + +static struct qcom_icc_node qnm_rot = { + .name = "qnm_rot", + .id = SM8450_MASTER_ROTATOR, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_vapss_hcp = { + .name = 
"qnm_vapss_hcp", + .id = SM8450_MASTER_CDSP_HCP, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video = { + .name = "qnm_video", + .id = SM8450_MASTER_VIDEO, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_cv_cpu = { + .name = "qnm_video_cv_cpu", + .id = SM8450_MASTER_VIDEO_CV_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_cvp = { + .name = "qnm_video_cvp", + .id = SM8450_MASTER_VIDEO_PROC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_v_cpu = { + .name = "qnm_video_v_cpu", + .id = SM8450_MASTER_VIDEO_V_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qhm_nsp_noc_config = { + .name = "qhm_nsp_noc_config", + .id = SM8450_MASTER_CDSP_NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_SERVICE_NSP_NOC }, +}; + +static struct qcom_icc_node qxm_nsp = { + .name = "qxm_nsp", + .id = SM8450_MASTER_CDSP_PROC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_CDSP_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_pcie_anoc_cfg = { + .name = "qnm_pcie_anoc_cfg", + .id = SM8450_MASTER_PCIE_ANOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_SERVICE_PCIE_ANOC }, +}; + +static struct qcom_icc_node xm_pcie3_0 = { + .name = "xm_pcie3_0", + .id = SM8450_MASTER_PCIE_0, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_1 = { + .name = "xm_pcie3_1", + .id = SM8450_MASTER_PCIE_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node qhm_gic = { + .name = "qhm_gic", + .id = SM8450_MASTER_GIC_AHB, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_aggre1_noc = { + .name = "qnm_aggre1_noc", + .id = SM8450_MASTER_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_aggre2_noc = { + .name = "qnm_aggre2_noc", + .id = SM8450_MASTER_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_lpass_noc = { + .name = "qnm_lpass_noc", + .id = SM8450_MASTER_LPASS_ANOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_snoc_cfg = { + .name = "qnm_snoc_cfg", + .id = SM8450_MASTER_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_SERVICE_SNOC }, +}; + +static struct qcom_icc_node qxm_pimem = { + .name = "qxm_pimem", + .id = SM8450_MASTER_PIMEM, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC }, +}; + +static struct qcom_icc_node xm_gic = { + .name = "xm_gic", + .id = SM8450_MASTER_GIC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC }, +}; + +static struct qcom_icc_node qnm_mnoc_hf_disp = { + .name = 
"qnm_mnoc_hf_disp", + .id = SM8450_MASTER_MNOC_HF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_LLCC_DISP }, +}; + +static struct qcom_icc_node qnm_mnoc_sf_disp = { + .name = "qnm_mnoc_sf_disp", + .id = SM8450_MASTER_MNOC_SF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_LLCC_DISP }, +}; + +static struct qcom_icc_node qnm_pcie_disp = { + .name = "qnm_pcie_disp", + .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_SLAVE_LLCC_DISP }, +}; + +static struct qcom_icc_node llcc_mc_disp = { + .name = "llcc_mc_disp", + .id = SM8450_MASTER_LLCC_DISP, + .channels = 4, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_SLAVE_EBI1_DISP }, +}; + +static struct qcom_icc_node qnm_mdp_disp = { + .name = "qnm_mdp_disp", + .id = SM8450_MASTER_MDP_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_node qnm_rot_disp = { + .name = "qnm_rot_disp", + .id = SM8450_MASTER_ROTATOR_DISP, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_node qns_a1noc_snoc = { + .name = "qns_a1noc_snoc", + .id = SM8450_SLAVE_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_A1NOC_SNOC }, +}; + +static struct qcom_icc_node srvc_aggre1_noc = { + .name = "srvc_aggre1_noc", + .id = SM8450_SLAVE_SERVICE_A1NOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_a2noc_snoc = { + .name = "qns_a2noc_snoc", + .id = SM8450_SLAVE_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_A2NOC_SNOC }, +}; + +static struct qcom_icc_node srvc_aggre2_noc = { + .name = "srvc_aggre2_noc", + .id = SM8450_SLAVE_SERVICE_A2NOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup0_core_slave = { + .name = "qup0_core_slave", + .id = SM8450_SLAVE_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup1_core_slave = { + .name = "qup1_core_slave", + .id = SM8450_SLAVE_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup2_core_slave = { + .name = "qup2_core_slave", + .id = SM8450_SLAVE_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ahb2phy0 = { + .name = "qhs_ahb2phy0", + .id = SM8450_SLAVE_AHB2PHY_SOUTH, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ahb2phy1 = { + .name = "qhs_ahb2phy1", + .id = SM8450_SLAVE_AHB2PHY_NORTH, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_aoss = { + .name = "qhs_aoss", + .id = SM8450_SLAVE_AOSS, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_camera_cfg = { + .name = "qhs_camera_cfg", + .id = SM8450_SLAVE_CAMERA_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_clk_ctl = { + .name = "qhs_clk_ctl", + .id = SM8450_SLAVE_CLK_CTL, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_compute_cfg = { + .name = "qhs_compute_cfg", + .id = SM8450_SLAVE_CDSP_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { MASTER_CDSP_NOC_CFG }, +}; + +static struct qcom_icc_node qhs_cpr_cx = 
{ + .name = "qhs_cpr_cx", + .id = SM8450_SLAVE_RBCPR_CX_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mmcx = { + .name = "qhs_cpr_mmcx", + .id = SM8450_SLAVE_RBCPR_MMCX_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mxa = { + .name = "qhs_cpr_mxa", + .id = SM8450_SLAVE_RBCPR_MXA_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mxc = { + .name = "qhs_cpr_mxc", + .id = SM8450_SLAVE_RBCPR_MXC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_crypto0_cfg = { + .name = "qhs_crypto0_cfg", + .id = SM8450_SLAVE_CRYPTO_0_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cx_rdpm = { + .name = "qhs_cx_rdpm", + .id = SM8450_SLAVE_CX_RDPM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_display_cfg = { + .name = "qhs_display_cfg", + .id = SM8450_SLAVE_DISPLAY_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_gpuss_cfg = { + .name = "qhs_gpuss_cfg", + .id = SM8450_SLAVE_GFX3D_CFG, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_imem_cfg = { + .name = "qhs_imem_cfg", + .id = SM8450_SLAVE_IMEM_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ipa = { + .name = "qhs_ipa", + .id = SM8450_SLAVE_IPA_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ipc_router = { + .name = "qhs_ipc_router", + .id = SM8450_SLAVE_IPC_ROUTER_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_cfg = { + .name = "qhs_lpass_cfg", + .id = SM8450_SLAVE_LPASS, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { MASTER_CNOC_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node qhs_mss_cfg = { + .name = "qhs_mss_cfg", + .id = SM8450_SLAVE_CNOC_MSS, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_mx_rdpm = { + .name = "qhs_mx_rdpm", + .id = SM8450_SLAVE_MX_RDPM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie0_cfg = { + .name = "qhs_pcie0_cfg", + .id = SM8450_SLAVE_PCIE_0_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie1_cfg = { + .name = "qhs_pcie1_cfg", + .id = SM8450_SLAVE_PCIE_1_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pdm = { + .name = "qhs_pdm", + .id = SM8450_SLAVE_PDM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pimem_cfg = { + .name = "qhs_pimem_cfg", + .id = SM8450_SLAVE_PIMEM_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_prng = { + .name = "qhs_prng", + .id = SM8450_SLAVE_PRNG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qdss_cfg = { + .name = "qhs_qdss_cfg", + .id = SM8450_SLAVE_QDSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qspi = { + .name = "qhs_qspi", + .id = SM8450_SLAVE_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup0 = { + .name = "qhs_qup0", + .id = SM8450_SLAVE_QUP_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct 
qcom_icc_node qhs_qup1 = { + .name = "qhs_qup1", + .id = SM8450_SLAVE_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup2 = { + .name = "qhs_qup2", + .id = SM8450_SLAVE_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_sdc2 = { + .name = "qhs_sdc2", + .id = SM8450_SLAVE_SDCC_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_sdc4 = { + .name = "qhs_sdc4", + .id = SM8450_SLAVE_SDCC_4, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_spss_cfg = { + .name = "qhs_spss_cfg", + .id = SM8450_SLAVE_SPSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tcsr = { + .name = "qhs_tcsr", + .id = SM8450_SLAVE_TCSR, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tlmm = { + .name = "qhs_tlmm", + .id = SM8450_SLAVE_TLMM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tme_cfg = { + .name = "qhs_tme_cfg", + .id = SM8450_SLAVE_TME_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ufs_mem_cfg = { + .name = "qhs_ufs_mem_cfg", + .id = SM8450_SLAVE_UFS_MEM_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb3_0 = { + .name = "qhs_usb3_0", + .id = SM8450_SLAVE_USB3_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_venus_cfg = { + .name = "qhs_venus_cfg", + .id = SM8450_SLAVE_VENUS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_vsense_ctrl_cfg = { + .name = "qhs_vsense_ctrl_cfg", + .id = SM8450_SLAVE_VSENSE_CTRL_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_a1_noc_cfg = { + .name = "qns_a1_noc_cfg", + .id = SM8450_SLAVE_A1NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_MASTER_A1NOC_CFG }, +}; + +static struct qcom_icc_node qns_a2_noc_cfg = { + .name = "qns_a2_noc_cfg", + .id = SM8450_SLAVE_A2NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_MASTER_A2NOC_CFG }, +}; + +static struct qcom_icc_node qns_ddrss_cfg = { + .name = "qns_ddrss_cfg", + .id = SM8450_SLAVE_DDRSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + //FIXME where is link + .links = { SM8450_SLAVE_DDRSS_CFG }, +}; + +static struct qcom_icc_node qns_mnoc_cfg = { + .name = "qns_mnoc_cfg", + .id = SM8450_SLAVE_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_MASTER_CNOC_MNOC_CFG }, +}; + +static struct qcom_icc_node qns_pcie_anoc_cfg = { + .name = "qns_pcie_anoc_cfg", + .id = SM8450_SLAVE_PCIE_ANOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_MASTER_PCIE_ANOC_CFG }, +}; + +static struct qcom_icc_node qns_snoc_cfg = { + .name = "qns_snoc_cfg", + .id = SM8450_SLAVE_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8450_MASTER_SNOC_CFG }, +}; + +static struct qcom_icc_node qxs_imem = { + .name = "qxs_imem", + .id = SM8450_SLAVE_IMEM, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qxs_pimem = { + .name = "qxs_pimem", + .id = SM8450_SLAVE_PIMEM, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node srvc_cnoc = { + .name = "srvc_cnoc", + .id = SM8450_SLAVE_SERVICE_CNOC, + .channels = 1, + .buswidth = 4, + .num_links
= 0, +}; + +static struct qcom_icc_node xs_pcie_0 = { + .name = "xs_pcie_0", + .id = SM8450_SLAVE_PCIE_0, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_1 = { + .name = "xs_pcie_1", + .id = SM8450_SLAVE_PCIE_1, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node xs_qdss_stm = { + .name = "xs_qdss_stm", + .id = SM8450_SLAVE_QDSS_STM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node xs_sys_tcu_cfg = { + .name = "xs_sys_tcu_cfg", + .id = SM8450_SLAVE_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qns_gem_noc_cnoc = { + .name = "qns_gem_noc_cnoc", + .id = SM8450_SLAVE_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_GEM_NOC_CNOC }, +}; + +static struct qcom_icc_node qns_llcc = { + .name = "qns_llcc", + .id = SM8450_SLAVE_LLCC, + .channels = 4, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_LLCC }, +}; + +static struct qcom_icc_node qns_pcie = { + .name = "qns_pcie", + .id = SM8450_SLAVE_MEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_MASTER_GEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qhs_lpass_core = { + .name = "qhs_lpass_core", + .id = SM8450_SLAVE_LPASS_CORE_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_lpi = { + .name = "qhs_lpass_lpi", + .id = SM8450_SLAVE_LPASS_LPI_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_mpu = { + .name = "qhs_lpass_mpu", + .id = SM8450_SLAVE_LPASS_MPU_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_top = { + .name = "qhs_lpass_top", + .id = SM8450_SLAVE_LPASS_TOP_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_sysnoc = { + .name = "qns_sysnoc", + .id = SM8450_SLAVE_LPASS_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_LPASS_ANOC }, +}; + +static struct qcom_icc_node srvc_niu_aml_noc = { + .name = "srvc_niu_aml_noc", + .id = SM8450_SLAVE_SERVICES_LPASS_AML_NOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node srvc_niu_lpass_agnoc = { + .name = "srvc_niu_lpass_agnoc", + .id = SM8450_SLAVE_SERVICE_LPASS_AG_NOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node ebi = { + .name = "ebi", + .id = SM8450_SLAVE_EBI1, + .channels = 4, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mem_noc_hf = { + .name = "qns_mem_noc_hf", + .id = SM8450_SLAVE_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_MASTER_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qns_mem_noc_sf = { + .name = "qns_mem_noc_sf", + .id = SM8450_SLAVE_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_MASTER_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_mnoc = { + .name = "srvc_mnoc", + .id = SM8450_SLAVE_SERVICE_MNOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_nsp_gemnoc = { + .name = "qns_nsp_gemnoc", + .id = SM8450_SLAVE_CDSP_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_MASTER_COMPUTE_NOC }, +}; + +static struct qcom_icc_node service_nsp_noc = { + .name = "service_nsp_noc", + .id = 
SM8450_SLAVE_SERVICE_NSP_NOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_pcie_mem_noc = { + .name = "qns_pcie_mem_noc", + .id = SM8450_SLAVE_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node srvc_pcie_aggre_noc = { + .name = "srvc_pcie_aggre_noc", + .id = SM8450_SLAVE_SERVICE_PCIE_ANOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_gemnoc_gc = { + .name = "qns_gemnoc_gc", + .id = SM8450_SLAVE_SNOC_GEM_NOC_GC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8450_MASTER_SNOC_GC_MEM_NOC }, +}; + +static struct qcom_icc_node qns_gemnoc_sf = { + .name = "qns_gemnoc_sf", + .id = SM8450_SLAVE_SNOC_GEM_NOC_SF, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_SNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_snoc = { + .name = "srvc_snoc", + .id = SM8450_SLAVE_SERVICE_SNOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_llcc_disp = { + .name = "qns_llcc_disp", + .id = SM8450_SLAVE_LLCC_DISP, + .channels = 4, + .buswidth = 16, + .num_links = 1, + .links = { SM8450_MASTER_LLCC_DISP }, +}; + +static struct qcom_icc_node ebi_disp = { + .name = "ebi_disp", + .id = SM8450_SLAVE_EBI1_DISP, + .channels = 4, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mem_noc_hf_disp = { + .name = "qns_mem_noc_hf_disp", + .id = SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_MASTER_MNOC_HF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_node qns_mem_noc_sf_disp = { + .name = "qns_mem_noc_sf_disp", + .id = SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8450_MASTER_MNOC_SF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_bcm bcm_acv = { + .name = "ACV", + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_ce0 = { + .name = "CE0", + .num_nodes = 1, + .nodes = { &qxm_crypto }, +}; + +static struct qcom_icc_bcm bcm_cn0 = { + .name = "CN0", + .keepalive = true, + .num_nodes = 55, + .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie, + &qhs_ahb2phy0, &qhs_ahb2phy1, + &qhs_aoss, &qhs_camera_cfg, + &qhs_clk_ctl, &qhs_compute_cfg, + &qhs_cpr_cx, &qhs_cpr_mmcx, + &qhs_cpr_mxa, &qhs_cpr_mxc, + &qhs_crypto0_cfg, &qhs_cx_rdpm, + &qhs_display_cfg, &qhs_gpuss_cfg, + &qhs_imem_cfg, &qhs_ipa, + &qhs_ipc_router, &qhs_lpass_cfg, + &qhs_mss_cfg, &qhs_mx_rdpm, + &qhs_pcie0_cfg, &qhs_pcie1_cfg, + &qhs_pdm, &qhs_pimem_cfg, + &qhs_prng, &qhs_qdss_cfg, + &qhs_qspi, &qhs_qup0, + &qhs_qup1, &qhs_qup2, + &qhs_sdc2, &qhs_sdc4, + &qhs_spss_cfg, &qhs_tcsr, + &qhs_tlmm, &qhs_tme_cfg, + &qhs_ufs_mem_cfg, &qhs_usb3_0, + &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, + &qns_a1_noc_cfg, &qns_a2_noc_cfg, + &qns_ddrss_cfg, &qns_mnoc_cfg, + &qns_pcie_anoc_cfg, &qns_snoc_cfg, + &qxs_imem, &qxs_pimem, + &srvc_cnoc, &xs_pcie_0, + &xs_pcie_1, &xs_qdss_stm, + &xs_sys_tcu_cfg }, +}; + +static struct qcom_icc_bcm bcm_co0 = { + .name = "CO0", + .num_nodes = 2, + .nodes = { &qxm_nsp, &qns_nsp_gemnoc }, +}; + +static struct qcom_icc_bcm bcm_mc0 = { + .name = "MC0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_mm0 = { + .name = "MM0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_mem_noc_hf }, +}; + +static struct qcom_icc_bcm bcm_mm1 = { + .name = "MM1", + .num_nodes 
= 12, + .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp, + &qnm_camnoc_sf, &qnm_mdp, + &qnm_mnoc_cfg, &qnm_rot, + &qnm_vapss_hcp, &qnm_video, + &qnm_video_cv_cpu, &qnm_video_cvp, + &qnm_video_v_cpu, &qns_mem_noc_sf }, +}; + +static struct qcom_icc_bcm bcm_qup0 = { + .name = "QUP0", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup0_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup1 = { + .name = "QUP1", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup1_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup2 = { + .name = "QUP2", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup2_core_slave }, +}; + +static struct qcom_icc_bcm bcm_sh0 = { + .name = "SH0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_llcc }, +}; + +static struct qcom_icc_bcm bcm_sh1 = { + .name = "SH1", + .num_nodes = 7, + .nodes = { &alm_gpu_tcu, &alm_sys_tcu, + &qnm_nsp_gemnoc, &qnm_pcie, + &qnm_snoc_gc, &qns_gem_noc_cnoc, + &qns_pcie }, +}; + +static struct qcom_icc_bcm bcm_sn0 = { + .name = "SN0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_gemnoc_sf }, +}; + +static struct qcom_icc_bcm bcm_sn1 = { + .name = "SN1", + .num_nodes = 4, + .nodes = { &qhm_gic, &qxm_pimem, + &xm_gic, &qns_gemnoc_gc }, +}; + +static struct qcom_icc_bcm bcm_sn2 = { + .name = "SN2", + .num_nodes = 1, + .nodes = { &qnm_aggre1_noc }, +}; + +static struct qcom_icc_bcm bcm_sn3 = { + .name = "SN3", + .num_nodes = 1, + .nodes = { &qnm_aggre2_noc }, +}; + +static struct qcom_icc_bcm bcm_sn4 = { + .name = "SN4", + .num_nodes = 1, + .nodes = { &qnm_lpass_noc }, +}; + +static struct qcom_icc_bcm bcm_sn7 = { + .name = "SN7", + .num_nodes = 1, + .nodes = { &qns_pcie_mem_noc }, +}; + +static struct qcom_icc_bcm bcm_acv_disp = { + .name = "ACV", + .num_nodes = 1, + .nodes = { &ebi_disp }, +}; + +static struct qcom_icc_bcm bcm_mc0_disp = { + .name = "MC0", + .num_nodes = 1, + .nodes = { &ebi_disp }, +}; + +static struct qcom_icc_bcm bcm_mm0_disp = { + .name = "MM0", + .num_nodes = 1, + .nodes = { &qns_mem_noc_hf_disp }, +}; + +static struct qcom_icc_bcm bcm_mm1_disp = { + .name = "MM1", + .num_nodes = 3, + .nodes = { &qnm_mdp_disp, &qnm_rot_disp, + &qns_mem_noc_sf_disp }, +}; + +static struct qcom_icc_bcm bcm_sh0_disp = { + .name = "SH0", + .num_nodes = 1, + .nodes = { &qns_llcc_disp }, +}; + +static struct qcom_icc_bcm bcm_sh1_disp = { + .name = "SH1", + .num_nodes = 1, + .nodes = { &qnm_pcie_disp }, +}; + +static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +}; + +static struct qcom_icc_node *aggre1_noc_nodes[] = { + [MASTER_QSPI_0] = &qhm_qspi, + [MASTER_QUP_1] = &qhm_qup1, + [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg, + [MASTER_SDCC_4] = &xm_sdc4, + [MASTER_UFS_MEM] = &xm_ufs_mem, + [MASTER_USB3_0] = &xm_usb3_0, + [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc, + [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, +}; + +static struct qcom_icc_desc sm8450_aggre1_noc = { + .nodes = aggre1_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), + .bcms = aggre1_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), +}; + +static struct qcom_icc_bcm *aggre2_noc_bcms[] = { + &bcm_ce0, +}; + +static struct qcom_icc_node *aggre2_noc_nodes[] = { + [MASTER_QDSS_BAM] = &qhm_qdss_bam, + [MASTER_QUP_0] = &qhm_qup0, + [MASTER_QUP_2] = &qhm_qup2, + [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg, + [MASTER_CRYPTO] = &qxm_crypto, + [MASTER_IPA] = &qxm_ipa, + [MASTER_SENSORS_PROC] = &qxm_sensorss_q6, + [MASTER_SP] = &qxm_sp, + [MASTER_QDSS_ETR] = &xm_qdss_etr_0, + [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1, + 
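A note on two fields used in the BCM definitions above: keepalive marks BCMs (CN0, MC0, MM0, QUP0-2, SH0, SN0) whose RPMh vote must never drop to zero even when no client requests bandwidth, and vote_scale = 1 on the QUP core BCMs overrides the default scaling, since those channels vote for clock levels rather than bus bandwidth. A minimal sketch of the aggregation idea, with an invented helper and invented vote struct (scaling is omitted; the real logic lives in drivers/interconnect/qcom/bcm-voter.c):

/*
 * Toy model of BCM vote aggregation: take the largest average and
 * peak bandwidth request across all member nodes, then pin keepalive
 * BCMs to a non-zero floor so RPMh never powers the resource off.
 * Everything except num_nodes/nodes/keepalive is illustrative only.
 */
struct example_bcm_vote {
	u64 avg;
	u64 peak;
};

static void example_bcm_aggregate(const struct qcom_icc_bcm *bcm,
				  const u64 *node_avg, const u64 *node_peak,
				  struct example_bcm_vote *vote)
{
	size_t i;

	vote->avg = 0;
	vote->peak = 0;

	for (i = 0; i < bcm->num_nodes; i++) {
		vote->avg = max(vote->avg, node_avg[i]);
		vote->peak = max(vote->peak, node_peak[i]);
	}

	/* keepalive BCMs must keep at least a minimal vote */
	if (bcm->keepalive && !vote->avg && !vote->peak)
		vote->avg = vote->peak = 1;
}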
[MASTER_SDCC_2] = &xm_sdc2, + [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc, + [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, +}; + +static struct qcom_icc_desc sm8450_aggre2_noc = { + .nodes = aggre2_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), + .bcms = aggre2_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), +}; + +static struct qcom_icc_bcm *clk_virt_bcms[] = { + &bcm_qup0, + &bcm_qup1, + &bcm_qup2, +}; + +static struct qcom_icc_node *clk_virt_nodes[] = { + [MASTER_QUP_CORE_0] = &qup0_core_master, + [MASTER_QUP_CORE_1] = &qup1_core_master, + [MASTER_QUP_CORE_2] = &qup2_core_master, + [SLAVE_QUP_CORE_0] = &qup0_core_slave, + [SLAVE_QUP_CORE_1] = &qup1_core_slave, + [SLAVE_QUP_CORE_2] = &qup2_core_slave, +}; + +static struct qcom_icc_desc sm8450_clk_virt = { + .nodes = clk_virt_nodes, + .num_nodes = ARRAY_SIZE(clk_virt_nodes), + .bcms = clk_virt_bcms, + .num_bcms = ARRAY_SIZE(clk_virt_bcms), +}; + +static struct qcom_icc_bcm *config_noc_bcms[] = { + &bcm_cn0, +}; + +static struct qcom_icc_node *config_noc_nodes[] = { + [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, + [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, + [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0, + [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1, + [SLAVE_AOSS] = &qhs_aoss, + [SLAVE_CAMERA_CFG] = &qhs_camera_cfg, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_CDSP_CFG] = &qhs_compute_cfg, + [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx, + [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx, + [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa, + [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_CX_RDPM] = &qhs_cx_rdpm, + [SLAVE_DISPLAY_CFG] = &qhs_display_cfg, + [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_IPA_CFG] = &qhs_ipa, + [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router, + [SLAVE_LPASS] = &qhs_lpass_cfg, + [SLAVE_CNOC_MSS] = &qhs_mss_cfg, + [SLAVE_MX_RDPM] = &qhs_mx_rdpm, + [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg, + [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg, + [SLAVE_PRNG] = &qhs_prng, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_QSPI_0] = &qhs_qspi, + [SLAVE_QUP_0] = &qhs_qup0, + [SLAVE_QUP_1] = &qhs_qup1, + [SLAVE_QUP_2] = &qhs_qup2, + [SLAVE_SDCC_2] = &qhs_sdc2, + [SLAVE_SDCC_4] = &qhs_sdc4, + [SLAVE_SPSS_CFG] = &qhs_spss_cfg, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM] = &qhs_tlmm, + [SLAVE_TME_CFG] = &qhs_tme_cfg, + [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg, + [SLAVE_USB3_0] = &qhs_usb3_0, + [SLAVE_VENUS_CFG] = &qhs_venus_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg, + [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg, + [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg, + [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg, + [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg, + [SLAVE_PCIE_ANOC_CFG] = &qns_pcie_anoc_cfg, + [SLAVE_SNOC_CFG] = &qns_snoc_cfg, + [SLAVE_IMEM] = &qxs_imem, + [SLAVE_PIMEM] = &qxs_pimem, + [SLAVE_SERVICE_CNOC] = &srvc_cnoc, + [SLAVE_PCIE_0] = &xs_pcie_0, + [SLAVE_PCIE_1] = &xs_pcie_1, + [SLAVE_QDSS_STM] = &xs_qdss_stm, + [SLAVE_TCU] = &xs_sys_tcu_cfg, +}; + +static struct qcom_icc_desc sm8450_config_noc = { + .nodes = config_noc_nodes, + .num_nodes = ARRAY_SIZE(config_noc_nodes), + .bcms = config_noc_bcms, + .num_bcms = ARRAY_SIZE(config_noc_bcms), +}; + +static struct qcom_icc_bcm *gem_noc_bcms[] = { + &bcm_sh0, + &bcm_sh1, + &bcm_sh0_disp, + &bcm_sh1_disp, +}; + +static struct qcom_icc_node *gem_noc_nodes[] = { + [MASTER_GPU_TCU] = &alm_gpu_tcu, + [MASTER_SYS_TCU] = &alm_sys_tcu, + [MASTER_APPSS_PROC] = &chm_apps, + [MASTER_GFX3D] = &qnm_gpu, + [MASTER_MSS_PROC] = &qnm_mdsp, + 
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf, + [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf, + [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc, + [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie, + [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc, + [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf, + [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc, + [SLAVE_LLCC] = &qns_llcc, + [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie, + [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp, + [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp, + [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp, + [SLAVE_LLCC_DISP] = &qns_llcc_disp, +}; + +static struct qcom_icc_desc sm8450_gem_noc = { + .nodes = gem_noc_nodes, + .num_nodes = ARRAY_SIZE(gem_noc_nodes), + .bcms = gem_noc_bcms, + .num_bcms = ARRAY_SIZE(gem_noc_bcms), +}; + +static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = { +}; + +static struct qcom_icc_node *lpass_ag_noc_nodes[] = { + [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc, + [MASTER_LPASS_PROC] = &qxm_lpass_dsp, + [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core, + [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi, + [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu, + [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top, + [SLAVE_LPASS_SNOC] = &qns_sysnoc, + [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc, + [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc, +}; + +static struct qcom_icc_desc sm8450_lpass_ag_noc = { + .nodes = lpass_ag_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), + .bcms = lpass_ag_noc_bcms, + .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms), +}; + +static struct qcom_icc_bcm *mc_virt_bcms[] = { + &bcm_acv, + &bcm_mc0, + &bcm_acv_disp, + &bcm_mc0_disp, +}; + +static struct qcom_icc_node *mc_virt_nodes[] = { + [MASTER_LLCC] = &llcc_mc, + [SLAVE_EBI1] = &ebi, + [MASTER_LLCC_DISP] = &llcc_mc_disp, + [SLAVE_EBI1_DISP] = &ebi_disp, +}; + +static struct qcom_icc_desc sm8450_mc_virt = { + .nodes = mc_virt_nodes, + .num_nodes = ARRAY_SIZE(mc_virt_nodes), + .bcms = mc_virt_bcms, + .num_bcms = ARRAY_SIZE(mc_virt_bcms), +}; + +static struct qcom_icc_bcm *mmss_noc_bcms[] = { + &bcm_mm0, + &bcm_mm1, + &bcm_mm0_disp, + &bcm_mm1_disp, +}; + +static struct qcom_icc_node *mmss_noc_nodes[] = { + [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, + [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp, + [MASTER_CAMNOC_SF] = &qnm_camnoc_sf, + [MASTER_MDP] = &qnm_mdp, + [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg, + [MASTER_ROTATOR] = &qnm_rot, + [MASTER_CDSP_HCP] = &qnm_vapss_hcp, + [MASTER_VIDEO] = &qnm_video, + [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu, + [MASTER_VIDEO_PROC] = &qnm_video_cvp, + [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu, + [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf, + [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf, + [SLAVE_SERVICE_MNOC] = &srvc_mnoc, + [MASTER_MDP_DISP] = &qnm_mdp_disp, + [MASTER_ROTATOR_DISP] = &qnm_rot_disp, + [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp, + [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp, +}; + +static struct qcom_icc_desc sm8450_mmss_noc = { + .nodes = mmss_noc_nodes, + .num_nodes = ARRAY_SIZE(mmss_noc_nodes), + .bcms = mmss_noc_bcms, + .num_bcms = ARRAY_SIZE(mmss_noc_bcms), +}; + +static struct qcom_icc_bcm *nsp_noc_bcms[] = { + &bcm_co0, +}; + +static struct qcom_icc_node *nsp_noc_nodes[] = { + [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config, + [MASTER_CDSP_PROC] = &qxm_nsp, + [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, + [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc, +}; + +static struct qcom_icc_desc sm8450_nsp_noc = { + .nodes = nsp_noc_nodes, + .num_nodes = ARRAY_SIZE(nsp_noc_nodes), + .bcms = nsp_noc_bcms, + .num_bcms = ARRAY_SIZE(nsp_noc_bcms), +}; + +static struct 
qcom_icc_bcm *pcie_anoc_bcms[] = { + &bcm_sn7, +}; + +static struct qcom_icc_node *pcie_anoc_nodes[] = { + [MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg, + [MASTER_PCIE_0] = &xm_pcie3_0, + [MASTER_PCIE_1] = &xm_pcie3_1, + [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc, + [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc, +}; + +static struct qcom_icc_desc sm8450_pcie_anoc = { + .nodes = pcie_anoc_nodes, + .num_nodes = ARRAY_SIZE(pcie_anoc_nodes), + .bcms = pcie_anoc_bcms, + .num_bcms = ARRAY_SIZE(pcie_anoc_bcms), +}; + +static struct qcom_icc_bcm *system_noc_bcms[] = { + &bcm_sn0, + &bcm_sn1, + &bcm_sn2, + &bcm_sn3, + &bcm_sn4, +}; + +static struct qcom_icc_node *system_noc_nodes[] = { + [MASTER_GIC_AHB] = &qhm_gic, + [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, + [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, + [MASTER_LPASS_ANOC] = &qnm_lpass_noc, + [MASTER_SNOC_CFG] = &qnm_snoc_cfg, + [MASTER_PIMEM] = &qxm_pimem, + [MASTER_GIC] = &xm_gic, + [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc, + [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf, + [SLAVE_SERVICE_SNOC] = &srvc_snoc, +}; + +static struct qcom_icc_desc sm8450_system_noc = { + .nodes = system_noc_nodes, + .num_nodes = ARRAY_SIZE(system_noc_nodes), + .bcms = system_noc_bcms, + .num_bcms = ARRAY_SIZE(system_noc_bcms), +}; + +static int qnoc_probe(struct platform_device *pdev) +{ + const struct qcom_icc_desc *desc; + struct icc_onecell_data *data; + struct icc_provider *provider; + struct qcom_icc_node **qnodes; + struct qcom_icc_provider *qp; + struct icc_node *node; + size_t num_nodes, i; + int ret; + + desc = device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + qnodes = desc->nodes; + num_nodes = desc->num_nodes; + + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); + if (!qp) + return -ENOMEM; + + data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); + if (!data) + return -ENOMEM; + + provider = &qp->provider; + provider->dev = &pdev->dev; + provider->set = qcom_icc_set; + provider->pre_aggregate = qcom_icc_pre_aggregate; + provider->aggregate = qcom_icc_aggregate; + provider->xlate_extended = qcom_icc_xlate_extended; + INIT_LIST_HEAD(&provider->nodes); + provider->data = data; + + qp->dev = &pdev->dev; + qp->bcms = desc->bcms; + qp->num_bcms = desc->num_bcms; + + qp->voter = of_bcm_voter_get(qp->dev, NULL); + if (IS_ERR(qp->voter)) + return PTR_ERR(qp->voter); + + ret = icc_provider_add(provider); + if (ret) { + dev_err(&pdev->dev, "error adding interconnect provider\n"); + return ret; + } + + for (i = 0; i < qp->num_bcms; i++) + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); + + for (i = 0; i < num_nodes; i++) { + size_t j; + + if (!qnodes[i]) + continue; + + node = icc_node_create(qnodes[i]->id); + if (IS_ERR(node)) { + ret = PTR_ERR(node); + goto err; + } + + node->name = qnodes[i]->name; + node->data = qnodes[i]; + icc_node_add(node, provider); + + for (j = 0; j < qnodes[i]->num_links; j++) + icc_link_create(node, qnodes[i]->links[j]); + + data->nodes[i] = node; + } + data->num_nodes = num_nodes; + + platform_set_drvdata(pdev, qp); + + return 0; +err: + icc_nodes_remove(provider); + icc_provider_del(provider); + return ret; +} + +static int qnoc_remove(struct platform_device *pdev) +{ + struct qcom_icc_provider *qp = platform_get_drvdata(pdev); + + icc_nodes_remove(&qp->provider); + return icc_provider_del(&qp->provider); +} + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,sm8450-aggre1-noc", + .data = &sm8450_aggre1_noc}, + { .compatible = "qcom,sm8450-aggre2-noc", + .data = 
&sm8450_aggre2_noc}, + { .compatible = "qcom,sm8450-clk-virt", + .data = &sm8450_clk_virt}, + { .compatible = "qcom,sm8450-config-noc", + .data = &sm8450_config_noc}, + { .compatible = "qcom,sm8450-gem-noc", + .data = &sm8450_gem_noc}, + { .compatible = "qcom,sm8450-lpass-ag-noc", + .data = &sm8450_lpass_ag_noc}, + { .compatible = "qcom,sm8450-mc-virt", + .data = &sm8450_mc_virt}, + { .compatible = "qcom,sm8450-mmss-noc", + .data = &sm8450_mmss_noc}, + { .compatible = "qcom,sm8450-nsp-noc", + .data = &sm8450_nsp_noc}, + { .compatible = "qcom,sm8450-pcie-anoc", + .data = &sm8450_pcie_anoc}, + { .compatible = "qcom,sm8450-system-noc", + .data = &sm8450_system_noc}, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-sm8450", + .of_match_table = qnoc_of_match, + }, +}; + +static int __init qnoc_driver_init(void) +{ + return platform_driver_register(&qnoc_driver); +} +core_initcall(qnoc_driver_init); + +static void __exit qnoc_driver_exit(void) +{ + platform_driver_unregister(&qnoc_driver); +} +module_exit(qnoc_driver_exit); + +MODULE_DESCRIPTION("sm8450 NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/sm8450.h b/drivers/interconnect/qcom/sm8450.h new file mode 100644 index 000000000000..a5790ec6767b --- /dev/null +++ b/drivers/interconnect/qcom/sm8450.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SM8450 interconnect IDs + * + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Linaro Limited + */ + +#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8450_H +#define __DRIVERS_INTERCONNECT_QCOM_SM8450_H + +#define SM8450_MASTER_GPU_TCU 0 +#define SM8450_MASTER_SYS_TCU 1 +#define SM8450_MASTER_APPSS_PROC 2 +#define SM8450_MASTER_LLCC 3 +#define SM8450_MASTER_CNOC_LPASS_AG_NOC 4 +#define SM8450_MASTER_GIC_AHB 5 +#define SM8450_MASTER_CDSP_NOC_CFG 6 +#define SM8450_MASTER_QDSS_BAM 7 +#define SM8450_MASTER_QSPI_0 8 +#define SM8450_MASTER_QUP_0 9 +#define SM8450_MASTER_QUP_1 10 +#define SM8450_MASTER_QUP_2 11 +#define SM8450_MASTER_A1NOC_CFG 12 +#define SM8450_MASTER_A2NOC_CFG 13 +#define SM8450_MASTER_A1NOC_SNOC 14 +#define SM8450_MASTER_A2NOC_SNOC 15 +#define SM8450_MASTER_CAMNOC_HF 16 +#define SM8450_MASTER_CAMNOC_ICP 17 +#define SM8450_MASTER_CAMNOC_SF 18 +#define SM8450_MASTER_GEM_NOC_CNOC 19 +#define SM8450_MASTER_GEM_NOC_PCIE_SNOC 20 +#define SM8450_MASTER_GFX3D 21 +#define SM8450_MASTER_LPASS_ANOC 22 +#define SM8450_MASTER_MDP 23 +#define SM8450_MASTER_MDP0 SM8450_MASTER_MDP +#define SM8450_MASTER_MDP1 SM8450_MASTER_MDP +#define SM8450_MASTER_MSS_PROC 24 +#define SM8450_MASTER_CNOC_MNOC_CFG 25 +#define SM8450_MASTER_MNOC_HF_MEM_NOC 26 +#define SM8450_MASTER_MNOC_SF_MEM_NOC 27 +#define SM8450_MASTER_COMPUTE_NOC 28 +#define SM8450_MASTER_ANOC_PCIE_GEM_NOC 29 +#define SM8450_MASTER_PCIE_ANOC_CFG 30 +#define SM8450_MASTER_ROTATOR 31 +#define SM8450_MASTER_SNOC_CFG 32 +#define SM8450_MASTER_SNOC_GC_MEM_NOC 33 +#define SM8450_MASTER_SNOC_SF_MEM_NOC 34 +#define SM8450_MASTER_CDSP_HCP 35 +#define SM8450_MASTER_VIDEO 36 +#define SM8450_MASTER_VIDEO_P0 SM8450_MASTER_VIDEO +#define SM8450_MASTER_VIDEO_P1 SM8450_MASTER_VIDEO +#define SM8450_MASTER_VIDEO_CV_PROC 37 +#define SM8450_MASTER_VIDEO_PROC 38 +#define SM8450_MASTER_VIDEO_V_PROC 39 +#define SM8450_MASTER_QUP_CORE_0 40 +#define SM8450_MASTER_QUP_CORE_1 41 +#define SM8450_MASTER_QUP_CORE_2 42 +#define SM8450_MASTER_CRYPTO 43 +#define 
SM8450_MASTER_IPA 44 +#define SM8450_MASTER_LPASS_PROC 45 +#define SM8450_MASTER_CDSP_PROC 46 +#define SM8450_MASTER_PIMEM 47 +#define SM8450_MASTER_SENSORS_PROC 48 +#define SM8450_MASTER_SP 49 +#define SM8450_MASTER_GIC 50 +#define SM8450_MASTER_PCIE_0 51 +#define SM8450_MASTER_PCIE_1 52 +#define SM8450_MASTER_QDSS_ETR 53 +#define SM8450_MASTER_QDSS_ETR_1 54 +#define SM8450_MASTER_SDCC_2 55 +#define SM8450_MASTER_SDCC_4 56 +#define SM8450_MASTER_UFS_MEM 57 +#define SM8450_MASTER_USB3_0 58 +#define SM8450_SLAVE_EBI1 512 +#define SM8450_SLAVE_AHB2PHY_SOUTH 513 +#define SM8450_SLAVE_AHB2PHY_NORTH 514 +#define SM8450_SLAVE_AOSS 515 +#define SM8450_SLAVE_CAMERA_CFG 516 +#define SM8450_SLAVE_CLK_CTL 517 +#define SM8450_SLAVE_CDSP_CFG 518 +#define SM8450_SLAVE_RBCPR_CX_CFG 519 +#define SM8450_SLAVE_RBCPR_MMCX_CFG 520 +#define SM8450_SLAVE_RBCPR_MXA_CFG 521 +#define SM8450_SLAVE_RBCPR_MXC_CFG 522 +#define SM8450_SLAVE_CRYPTO_0_CFG 523 +#define SM8450_SLAVE_CX_RDPM 524 +#define SM8450_SLAVE_DISPLAY_CFG 525 +#define SM8450_SLAVE_GFX3D_CFG 526 +#define SM8450_SLAVE_IMEM_CFG 527 +#define SM8450_SLAVE_IPA_CFG 528 +#define SM8450_SLAVE_IPC_ROUTER_CFG 529 +#define SM8450_SLAVE_LPASS 530 +#define SM8450_SLAVE_LPASS_CORE_CFG 531 +#define SM8450_SLAVE_LPASS_LPI_CFG 532 +#define SM8450_SLAVE_LPASS_MPU_CFG 533 +#define SM8450_SLAVE_LPASS_TOP_CFG 534 +#define SM8450_SLAVE_CNOC_MSS 535 +#define SM8450_SLAVE_MX_RDPM 536 +#define SM8450_SLAVE_PCIE_0_CFG 537 +#define SM8450_SLAVE_PCIE_1_CFG 538 +#define SM8450_SLAVE_PDM 539 +#define SM8450_SLAVE_PIMEM_CFG 540 +#define SM8450_SLAVE_PRNG 541 +#define SM8450_SLAVE_QDSS_CFG 542 +#define SM8450_SLAVE_QSPI_0 543 +#define SM8450_SLAVE_QUP_0 544 +#define SM8450_SLAVE_QUP_1 545 +#define SM8450_SLAVE_QUP_2 546 +#define SM8450_SLAVE_SDCC_2 547 +#define SM8450_SLAVE_SDCC_4 548 +#define SM8450_SLAVE_SPSS_CFG 549 +#define SM8450_SLAVE_TCSR 550 +#define SM8450_SLAVE_TLMM 551 +#define SM8450_SLAVE_TME_CFG 552 +#define SM8450_SLAVE_UFS_MEM_CFG 553 +#define SM8450_SLAVE_USB3_0 554 +#define SM8450_SLAVE_VENUS_CFG 555 +#define SM8450_SLAVE_VSENSE_CTRL_CFG 556 +#define SM8450_SLAVE_A1NOC_CFG 557 +#define SM8450_SLAVE_A1NOC_SNOC 558 +#define SM8450_SLAVE_A2NOC_CFG 559 +#define SM8450_SLAVE_A2NOC_SNOC 560 +#define SM8450_SLAVE_DDRSS_CFG 561 +#define SM8450_SLAVE_GEM_NOC_CNOC 562 +#define SM8450_SLAVE_SNOC_GEM_NOC_GC 563 +#define SM8450_SLAVE_SNOC_GEM_NOC_SF 564 +#define SM8450_SLAVE_LLCC 565 +#define SM8450_SLAVE_MNOC_HF_MEM_NOC 566 +#define SM8450_SLAVE_MNOC_SF_MEM_NOC 567 +#define SM8450_SLAVE_CNOC_MNOC_CFG 568 +#define SM8450_SLAVE_CDSP_MEM_NOC 569 +#define SM8450_SLAVE_MEM_NOC_PCIE_SNOC 570 +#define SM8450_SLAVE_PCIE_ANOC_CFG 571 +#define SM8450_SLAVE_ANOC_PCIE_GEM_NOC 572 +#define SM8450_SLAVE_SNOC_CFG 573 +#define SM8450_SLAVE_LPASS_SNOC 574 +#define SM8450_SLAVE_QUP_CORE_0 575 +#define SM8450_SLAVE_QUP_CORE_1 576 +#define SM8450_SLAVE_QUP_CORE_2 577 +#define SM8450_SLAVE_IMEM 578 +#define SM8450_SLAVE_PIMEM 579 +#define SM8450_SLAVE_SERVICE_NSP_NOC 580 +#define SM8450_SLAVE_SERVICE_A1NOC 581 +#define SM8450_SLAVE_SERVICE_A2NOC 582 +#define SM8450_SLAVE_SERVICE_CNOC 583 +#define SM8450_SLAVE_SERVICE_MNOC 584 +#define SM8450_SLAVE_SERVICES_LPASS_AML_NOC 585 +#define SM8450_SLAVE_SERVICE_LPASS_AG_NOC 586 +#define SM8450_SLAVE_SERVICE_PCIE_ANOC 587 +#define SM8450_SLAVE_SERVICE_SNOC 588 +#define SM8450_SLAVE_PCIE_0 589 +#define SM8450_SLAVE_PCIE_1 590 +#define SM8450_SLAVE_QDSS_STM 591 +#define SM8450_SLAVE_TCU 592 +#define SM8450_MASTER_LLCC_DISP 1000 +#define 
SM8450_MASTER_MDP_DISP 1001 +#define SM8450_MASTER_MDP0_DISP SM8450_MASTER_MDP_DISP +#define SM8450_MASTER_MDP1_DISP SM8450_MASTER_MDP_DISP +#define SM8450_MASTER_MNOC_HF_MEM_NOC_DISP 1002 +#define SM8450_MASTER_MNOC_SF_MEM_NOC_DISP 1003 +#define SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP 1004 +#define SM8450_MASTER_ROTATOR_DISP 1005 +#define SM8450_SLAVE_EBI1_DISP 1512 +#define SM8450_SLAVE_LLCC_DISP 1513 +#define SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP 1514 +#define SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP 1515 + +#endif diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index f5848b351b19..6dc6d8b6b368 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3142,7 +3142,7 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) phys_addr_t doorbell; struct device *dev = msi_desc_to_dev(desc); struct arm_smmu_device *smmu = dev_get_drvdata(dev); - phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index]; + phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index]; doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; doorbell &= MSI_CFG0_ADDR_MASK; @@ -3154,7 +3154,6 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) { - struct msi_desc *desc; int ret, nvec = ARM_SMMU_MAX_MSIS; struct device *dev = smmu->dev; @@ -3170,7 +3169,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) if (!(smmu->features & ARM_SMMU_FEAT_MSI)) return; - if (!dev->msi_domain) { + if (!dev->msi.domain) { dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n"); return; } @@ -3182,21 +3181,9 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) return; } - for_each_msi_entry(desc, dev) { - switch (desc->platform.msi_index) { - case EVTQ_MSI_INDEX: - smmu->evtq.q.irq = desc->irq; - break; - case GERROR_MSI_INDEX: - smmu->gerr_irq = desc->irq; - break; - case PRIQ_MSI_INDEX: - smmu->priq.q.irq = desc->irq; - break; - default: /* Unknown */ - continue; - } - } + smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX); + smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX); + smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX); /* Add callback to free MSIs on teardown */ devm_add_action(dev, arm_smmu_free_msis, dev); diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 1f793219cac6..f2aa34f57454 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1189,7 +1189,7 @@ static void viommu_remove(struct virtio_device *vdev) iommu_device_unregister(&viommu->iommu); /* Stop all virtqueues */ - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); dev_info(&vdev->dev, "device removed\n"); diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 9349fc68b81a..b249d4df899e 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -88,7 +88,6 @@ static struct irq_chip gicv2m_msi_irq_chip = { .irq_mask = gicv2m_mask_msi_irq, .irq_unmask = gicv2m_unmask_msi_irq, .irq_eoi = irq_chip_eoi_parent, - .irq_write_msi_msg = pci_msi_domain_write_msg, }; static struct msi_domain_info gicv2m_msi_domain_info = { @@ -405,7 +404,7 @@ err_free_v2m: return ret; } -static struct of_device_id gicv2m_device_id[] = { +static const struct of_device_id gicv2m_device_id[] = { { .compatible = "arm,gic-v2m-frame", }, {}, }; diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c 
b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index ad2810c017ed..93f77a8196da 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -28,7 +28,6 @@ static struct irq_chip its_msi_irq_chip = { .irq_unmask = its_unmask_msi_irq, .irq_mask = its_mask_msi_irq, .irq_eoi = irq_chip_eoi_parent, - .irq_write_msi_msg = pci_msi_domain_write_msg, }; static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 0cb584d9815b..d25b7a864bbb 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -46,6 +46,10 @@ #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) +#define RD_LOCAL_LPI_ENABLED BIT(0) +#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) +#define RD_LOCAL_MEMRESERVE_DONE BIT(2) + static u32 lpi_id_bits; /* @@ -3044,7 +3048,7 @@ static void its_cpu_init_lpis(void) phys_addr_t paddr; u64 val, tmp; - if (gic_data_rdist()->lpi_enabled) + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) return; val = readl_relaxed(rbase + GICR_CTLR); @@ -3063,15 +3067,13 @@ static void its_cpu_init_lpis(void) paddr &= GENMASK_ULL(51, 16); WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); - its_free_pending_table(gic_data_rdist()->pend_page); - gic_data_rdist()->pend_page = NULL; + gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; goto out; } pend_page = gic_data_rdist()->pend_page; paddr = page_to_phys(pend_page); - WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); /* set PROPBASE */ val = (gic_rdists->prop_table_pa | @@ -3158,10 +3160,11 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen the above */ dsb(sy); out: - gic_data_rdist()->lpi_enabled = true; + gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", smp_processor_id(), - gic_data_rdist()->pend_page ? "allocated" : "reserved", + gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ? + "reserved" : "allocated", &paddr); } @@ -5138,7 +5141,7 @@ static int redist_disable_lpis(void) * * If running with preallocated tables, there is nothing to do. */ - if (gic_data_rdist()->lpi_enabled || + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) return 0; @@ -5200,6 +5203,51 @@ int its_cpu_init(void) return 0; } +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +static int its_cpu_memreserve_lpi(unsigned int cpu) +{ + struct page *pend_page; + int ret = 0; + + /* This gets to run exactly once per CPU */ + if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE) + return 0; + + pend_page = gic_data_rdist()->pend_page; + if (WARN_ON(!pend_page)) { + ret = -ENOMEM; + goto out; + } + /* + * If the pending table was pre-programmed, free the memory we + * preemptively allocated. Otherwise, reserve that memory for + * later kexecs. 
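One structural point worth calling out in its_cpu_memreserve_lpi() and its_lpi_memreserve_init() (the latter follows below): a CPU hotplug callback cannot safely tear down its own hotplug state from inside the callback, which is why the last CPU to come up defers the cpuhp_remove_state_nocalls() call to a workqueue. The general shape of that pattern, sketched with hypothetical names:

/* Hypothetical example of a self-removing dynamic CPU hotplug state. */
static int example_cpuhp_state = CPUHP_INVALID;

static void example_cpuhp_cleanup_workfn(struct work_struct *work)
{
	/* Safe here: no longer running in cpuhp callback context. */
	cpuhp_remove_state_nocalls(example_cpuhp_state);
	example_cpuhp_state = CPUHP_INVALID;
}
static DECLARE_WORK(example_cpuhp_cleanup_work, example_cpuhp_cleanup_workfn);

static int example_cpu_online(unsigned int cpu)
{
	/* ... one-shot per-CPU work goes here ... */

	/* Once every possible CPU has booted at least once, self-destruct. */
	if (cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
		schedule_work(&example_cpuhp_cleanup_work);
	return 0;
}

static int __init example_init(void)
{
	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				      "example/subsys:online",
				      example_cpu_online, NULL);
	if (state < 0)
		return state;
	example_cpuhp_state = state;
	return 0;
}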
+ */ + if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) { + its_free_pending_table(pend_page); + gic_data_rdist()->pend_page = NULL; + } else { + phys_addr_t paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + } + +out: + /* Last CPU being brought up gets to issue the cleanup */ + if (cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask)) + schedule_work(&rdist_memreserve_cpuhp_cleanup_work); + + gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE; + return ret; +} + static const struct of_device_id its_device_id[] = { { .compatible = "arm,gic-v3-its", }, {}, @@ -5383,6 +5431,26 @@ static void __init its_acpi_probe(void) static void __init its_acpi_probe(void) { } #endif +int __init its_lpi_memreserve_init(void) +{ + int state; + + if (!efi_enabled(EFI_CONFIG_TABLES)) + return 0; + + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; + state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "irqchip/arm/gicv3/memreserve:online", + its_cpu_memreserve_lpi, + NULL); + if (state < 0) + return state; + + gic_rdists->cpuhp_memreserve_state = state; + + return 0; +} + int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *parent_domain) { diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index b84c9c2eccdc..a2163d32f17d 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -171,7 +171,6 @@ static struct irq_chip mbi_msi_irq_chip = { .irq_unmask = mbi_unmask_msi_irq, .irq_eoi = irq_chip_eoi_parent, .irq_compose_msi_msg = mbi_compose_msi_msg, - .irq_write_msi_msg = pci_msi_domain_write_msg, }; static struct msi_domain_info mbi_msi_domain_info = { diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index daec3309b014..5e935d97207d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -920,6 +920,22 @@ static int __gic_update_rdist_properties(struct redist_region *region, { u64 typer = gic_read_typer(ptr + GICR_TYPER); + /* Boot-time cleanup */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); /* RVPEID implies some form of DirectLPI, no matter what the doc says...
:-/ */ @@ -1802,6 +1818,7 @@ static int __init gic_init_bases(void __iomem *dist_base, if (gic_dist_supports_lpis()) { its_init(handle, &gic_data.rdists, gic_data.domain); its_cpu_init(); + its_lpi_memreserve_init(); } else { if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) gicv2m_init(handle, gic_data.domain); diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 5b5a365dbd5e..b9c22f764b4d 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -26,7 +26,7 @@ struct gpcv2_irqchip_data { u32 cpu2wakeup; }; -static struct gpcv2_irqchip_data *imx_gpcv2_instance; +static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init; static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i) { diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index 34a7d261b710..3363f83bd7e9 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -28,6 +28,7 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc) struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); struct regmap *map = gc->private; uint32_t irq_reg, irq_mask; + unsigned long bits; unsigned int i; regmap_read(map, TCU_REG_TFR, &irq_reg); @@ -36,8 +37,9 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc) chained_irq_enter(irq_chip, desc); irq_reg &= ~irq_mask; + bits = irq_reg; - for_each_set_bit(i, (unsigned long *)&irq_reg, 32) + for_each_set_bit(i, &bits, 32) generic_handle_domain_irq(domain, i); chained_irq_exit(irq_chip, desc); diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 12df2162108e..f3faf5c99770 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -207,7 +207,7 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, if (err) return err; - err = platform_msi_domain_alloc(domain, virq, nr_irqs); + err = platform_msi_device_domain_alloc(domain, virq, nr_irqs); if (err) return err; @@ -223,7 +223,7 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { - platform_msi_domain_free(domain, virq, nr_irqs); + platform_msi_device_domain_free(domain, virq, nr_irqs); } static const struct irq_domain_ops mbigen_domain_ops = { diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c index 3e7297fc5948..497da344717c 100644 --- a/drivers/irqchip/irq-mvebu-icu.c +++ b/drivers/irqchip/irq-mvebu-icu.c @@ -221,7 +221,7 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, icu_irqd->icu_group = msi_data->subset_data->icu_group; icu_irqd->icu = icu; - err = platform_msi_domain_alloc(domain, virq, nr_irqs); + err = platform_msi_device_domain_alloc(domain, virq, nr_irqs); if (err) { dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n"); goto free_irqd; @@ -245,7 +245,7 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, return 0; free_msi: - platform_msi_domain_free(domain, virq, nr_irqs); + platform_msi_device_domain_free(domain, virq, nr_irqs); free_irqd: kfree(icu_irqd); return err; @@ -260,7 +260,7 @@ mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq, kfree(icu_irqd); - platform_msi_domain_free(domain, virq, nr_irqs); + platform_msi_device_domain_free(domain, virq, nr_irqs); } static const struct irq_domain_ops mvebu_icu_domain_ops = { @@ -314,12 +314,12 @@ static int mvebu_icu_subset_probe(struct 
platform_device *pdev) msi_data->subset_data = of_device_get_match_data(dev); } - dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI); - if (!dev->msi_domain) + if (!dev->msi.domain) return -EPROBE_DEFER; - msi_parent_dn = irq_domain_get_of_node(dev->msi_domain); + msi_parent_dn = irq_domain_get_of_node(dev->msi.domain); if (!msi_parent_dn) return -ENODEV; diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index cb7f60b3b4a9..37f9a4499fdb 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -375,7 +375,6 @@ static int intc_irqpin_probe(struct platform_device *pdev) struct intc_irqpin_priv *p; struct intc_irqpin_iomem *i; struct resource *io[INTC_IRQPIN_REG_NR]; - struct resource *irq; struct irq_chip *irq_chip; void (*enable_fn)(struct irq_data *d); void (*disable_fn)(struct irq_data *d); @@ -418,12 +417,14 @@ static int intc_irqpin_probe(struct platform_device *pdev) /* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */ for (k = 0; k < INTC_IRQPIN_MAX; k++) { - irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); - if (!irq) + ret = platform_get_irq_optional(pdev, k); + if (ret == -ENXIO) break; + if (ret < 0) + goto err0; p->irq[k].p = p; - p->irq[k].requested_irq = irq->start; + p->irq[k].requested_irq = ret; } nirqs = k; diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 07a6d8b42b63..909325f88239 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -126,7 +126,6 @@ static int irqc_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const char *name = dev_name(dev); struct irqc_priv *p; - struct resource *irq; int ret; int k; @@ -142,13 +141,15 @@ static int irqc_probe(struct platform_device *pdev) /* allow any number of IRQs between 1 and IRQC_IRQ_MAX */ for (k = 0; k < IRQC_IRQ_MAX; k++) { - irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); - if (!irq) + ret = platform_get_irq_optional(pdev, k); + if (ret == -ENXIO) break; + if (ret < 0) + goto err_runtime_pm_disable; p->irq[k].p = p; p->irq[k].hw_irq = k; - p->irq[k].requested_irq = irq->start; + p->irq[k].requested_irq = ret; } p->number_of_irqs = k; diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index 8eba08db33b2..5fdbb4358dd0 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -595,7 +595,7 @@ static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg, struct platform_device *pdev = to_platform_device(desc->dev); arg->desc = desc; - arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index); + arg->hwirq = TO_HWIRQ(pdev->id, desc->msi_index); } static struct msi_domain_ops ti_sci_inta_msi_ops = { diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 1518ba31a80c..7c17a6f643ef 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -149,6 +149,8 @@ static struct spear_shirq spear320_shirq_ras3 = { .offset = 0, .nr_irqs = 7, .mask = ((0x1 << 7) - 1) << 0, + .irq_chip = &dummy_irq_chip, + .status_reg = SPEAR320_INT_STS_MASK_REG, }; static struct spear_shirq spear320_shirq_ras1 = { diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index eab7e83c11c4..b17660c022eb 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c @@ -703,7 +703,7 @@ static const struct mb_ops keylargo_mb_ops = 
{ * Therefore we do it all by polling the media bay once each tick. */ -static struct of_device_id media_bay_match[] = +static const struct of_device_id media_bay_match[] = { { .name = "media-bay", diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c index 72942002a54a..496c4951ccb1 100644 --- a/drivers/mailbox/apple-mailbox.c +++ b/drivers/mailbox/apple-mailbox.c @@ -364,8 +364,8 @@ static const struct apple_mbox_hw apple_mbox_m3_hw = { }; static const struct of_device_id apple_mbox_of_match[] = { - { .compatible = "apple,t8103-asc-mailbox", .data = &apple_mbox_asc_hw }, - { .compatible = "apple,t8103-m3-mailbox", .data = &apple_mbox_m3_hw }, + { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw }, + { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw }, {} }; MODULE_DEVICE_TABLE(of, apple_mbox_of_match); diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index 78073ad1f2f1..22acb51531cb 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1298,7 +1298,7 @@ static int flexrm_startup(struct mbox_chan *chan) val = (num_online_cpus() < val) ? val / num_online_cpus() : 1; cpumask_set_cpu((ring->num / val) % num_online_cpus(), &ring->irq_aff_hint); - ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint); + ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint); if (ret) { dev_err(ring->mbox->dev, "failed to set IRQ affinity hint for ring%d\n", @@ -1425,7 +1425,7 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Release IRQ */ if (ring->irq_requested) { - irq_set_affinity_hint(ring->irq, NULL); + irq_update_affinity_hint(ring->irq, NULL); free_irq(ring->irq, ring); ring->irq_requested = false; } @@ -1484,7 +1484,7 @@ static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg) { struct device *dev = msi_desc_to_dev(desc); struct flexrm_mbox *mbox = dev_get_drvdata(dev); - struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index]; + struct flexrm_ring *ring = &mbox->rings[desc->msi_index]; /* Configure per-Ring MSI registers */ writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS); @@ -1497,7 +1497,6 @@ static int flexrm_mbox_probe(struct platform_device *pdev) int index, ret = 0; void __iomem *regs; void __iomem *regs_end; - struct msi_desc *desc; struct resource *iomem; struct flexrm_ring *ring; struct flexrm_mbox *mbox; @@ -1608,10 +1607,8 @@ static int flexrm_mbox_probe(struct platform_device *pdev) goto fail_destroy_cmpl_pool; /* Save alloced IRQ numbers for each ring */ - for_each_msi_entry(desc, dev) { - ring = &mbox->rings[desc->platform.msi_index]; - ring->irq = desc->irq; - } + for (index = 0; index < mbox->num_rings; index++) + mbox->rings[index].irq = msi_get_virq(dev, index); /* Check availability of debugfs */ if (!debugfs_initialized()) diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c index e41bd2f5ea46..ab24e731a782 100644 --- a/drivers/mailbox/hi3660-mailbox.c +++ b/drivers/mailbox/hi3660-mailbox.c @@ -44,14 +44,13 @@ #define MBOX_MSG_LEN 8 /** - * Hi3660 mailbox channel information + * struct hi3660_chan_info - Hi3660 mailbox channel information + * @dst_irq: Interrupt vector for remote processor + * @ack_irq: Interrupt vector for local processor * * A channel can be used for TX or RX, it can trigger remote * processor interrupt to notify remote processor and can receive - * interrupt if has incoming message. 
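The hi3660 hunk in progress here is a kernel-doc fix: scripts/kernel-doc expects the opening line to name the struct, the @member descriptions to follow immediately, and any free-form prose to come after them. For reference, the canonical shape (illustrative struct and text, not taken from the driver):

/**
 * struct example_chan_info - per-channel interrupt bookkeeping
 * @dst_irq: interrupt vector for the remote processor
 * @ack_irq: interrupt vector for the local processor
 *
 * Free-form description belongs after the member list; putting it
 * first, as the old comments did, makes kernel-doc warn and drop the
 * member documentation.
 */
struct example_chan_info {
	unsigned int dst_irq;
	unsigned int ack_irq;
};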
- * - * @dst_irq: Interrupt vector for remote processor - * @ack_irq: Interrupt vector for local processor + * interrupt if it has an incoming message. */ struct hi3660_chan_info { unsigned int dst_irq; @@ -59,16 +58,15 @@ struct hi3660_chan_info { }; /** - * Hi3660 mailbox controller data - * - * Mailbox controller includes 32 channels and can allocate - * channel for message transferring. - * + * struct hi3660_mbox - Hi3660 mailbox controller data * @dev: Device to which it is attached * @base: Base address of the register mapping region * @chan: Representation of channels in mailbox controller * @mchan: Representation of channel info * @controller: Representation of a communication channel controller + * + * Mailbox controller includes 32 channels and can allocate + * channel for message transferring. */ struct hi3660_mbox { struct device *dev; diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index ffe36a6bef9e..544de2db6453 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -563,8 +563,8 @@ static int imx_mu_probe(struct platform_device *pdev) size = sizeof(struct imx_sc_rpc_msg_max); priv->msg = devm_kzalloc(dev, size, GFP_KERNEL); - if (IS_ERR(priv->msg)) - return PTR_ERR(priv->msg); + if (!priv->msg) + return -ENOMEM; priv->clk = devm_clk_get(dev, NULL); if (IS_ERR(priv->clk)) { diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c index 0d6e2231a2c7..4e34854d1238 100644 --- a/drivers/mailbox/mailbox-mpfs.c +++ b/drivers/mailbox/mailbox-mpfs.c @@ -232,7 +232,7 @@ static int mpfs_mbox_probe(struct platform_device *pdev) } static const struct of_device_id mpfs_mbox_of_match[] = { - {.compatible = "microchip,polarfire-soc-mailbox", }, + {.compatible = "microchip,mpfs-mailbox", }, {}, }; MODULE_DEVICE_TABLE(of, mpfs_mbox_of_match); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index a8845b162dbf..2578e5aaa935 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -573,8 +573,11 @@ static int cmdq_probe(struct platform_device *pdev) cmdq->clocks[alias_id].id = clk_names[alias_id]; cmdq->clocks[alias_id].clk = of_clk_get(node, 0); if (IS_ERR(cmdq->clocks[alias_id].clk)) { - dev_err(dev, "failed to get gce clk: %d\n", alias_id); - return PTR_ERR(cmdq->clocks[alias_id].clk); + of_node_put(node); + return dev_err_probe(dev, + PTR_ERR(cmdq->clocks[alias_id].clk), + "failed to get gce clk: %d\n", + alias_id); } } } @@ -582,8 +585,8 @@ static int cmdq_probe(struct platform_device *pdev) cmdq->clocks[alias_id].id = clk_name; cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(cmdq->clocks[alias_id].clk)) { - dev_err(dev, "failed to get gce clk\n"); - return PTR_ERR(cmdq->clocks[alias_id].clk); + return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk), + "failed to get gce clk\n"); } } @@ -658,13 +661,13 @@ static const struct gce_plat gce_plat_v5 = { .thread_nr = 24, .shift = 3, .control_by_sw = true, - .gce_num = 2 + .gce_num = 1 }; static const struct gce_plat gce_plat_v6 = { .thread_nr = 24, .shift = 3, - .control_by_sw = false, + .control_by_sw = true, .gce_num = 2 }; diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 887a3704c12e..ed18936b8ce6 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -241,9 +241,11 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p) if (ret) return IRQ_NONE; - val &= pchan->cmd_complete.status_mask; - if (!val) - return IRQ_NONE; + if (val) { /* Ensure 
GAS exists and value is non-zero */ + val &= pchan->cmd_complete.status_mask; + if (!val) + return IRQ_NONE; + } ret = pcc_chan_reg_read(&pchan->error, &val); if (ret) @@ -289,7 +291,7 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id) pchan = chan_info + subspace_id; chan = pchan->chan.mchan; if (IS_ERR(chan) || chan->cl) { - dev_err(dev, "Channel not found for idx: %d\n", subspace_id); + pr_err("Channel not found for idx: %d\n", subspace_id); return ERR_PTR(-EBUSY); } dev = chan->mbox->dev; diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c index f1d4f4679b17..c5d963222014 100644 --- a/drivers/mailbox/qcom-ipcc.c +++ b/drivers/mailbox/qcom-ipcc.c @@ -13,8 +13,6 @@ #include <dt-bindings/mailbox/qcom-ipcc.h> -#define IPCC_MBOX_MAX_CHAN 48 - /* IPCC Register offsets */ #define IPCC_REG_SEND_ID 0x0c #define IPCC_REG_RECV_ID 0x10 @@ -52,9 +50,10 @@ struct qcom_ipcc { struct device *dev; void __iomem *base; struct irq_domain *irq_domain; - struct mbox_chan chan[IPCC_MBOX_MAX_CHAN]; - struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN]; + struct mbox_chan *chans; + struct qcom_ipcc_chan_info *mchan; struct mbox_controller mbox; + int num_chans; int irq; }; @@ -166,25 +165,37 @@ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox, struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox); struct qcom_ipcc_chan_info *mchan; struct mbox_chan *chan; - unsigned int i; + struct device *dev; + int chan_id; + + dev = ipcc->dev; if (ph->args_count != 2) return ERR_PTR(-EINVAL); - for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) { - chan = &ipcc->chan[i]; - if (!chan->con_priv) { - mchan = &ipcc->mchan[i]; - mchan->client_id = ph->args[0]; - mchan->signal_id = ph->args[1]; - chan->con_priv = mchan; - break; - } + for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) { + chan = &ipcc->chans[chan_id]; + mchan = chan->con_priv; - chan = NULL; + if (!mchan) + break; + else if (mchan->client_id == ph->args[0] && + mchan->signal_id == ph->args[1]) + return ERR_PTR(-EBUSY); } - return chan ?: ERR_PTR(-EBUSY); + if (chan_id >= mbox->num_chans) + return ERR_PTR(-EBUSY); + + mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL); + if (!mchan) + return ERR_PTR(-ENOMEM); + + mchan->client_id = ph->args[0]; + mchan->signal_id = ph->args[1]; + chan->con_priv = mchan; + + return chan; } static const struct mbox_chan_ops ipcc_mbox_chan_ops = { @@ -192,15 +203,49 @@ static const struct mbox_chan_ops ipcc_mbox_chan_ops = { .shutdown = qcom_ipcc_mbox_shutdown, }; -static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc) +static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc, + struct device_node *controller_dn) { + struct of_phandle_args curr_ph; + struct device_node *client_dn; struct mbox_controller *mbox; struct device *dev = ipcc->dev; + int i, j, ret; + + /* + * Find out the number of clients interested in this mailbox + * and create channels accordingly. 
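The comment being added here describes a two-step discovery pass: walk every device-tree node carrying an "mboxes" property, parse each phandle entry, and count the clients that point back at this controller. Seen in isolation, the pattern is roughly the following hypothetical helper (error handling trimmed; the loop that follows in the driver is the real version):

/*
 * Sketch of the counting pass described above; equivalent in spirit
 * to the qcom_ipcc_setup_mbox() loop below.
 */
static int example_count_mbox_users(struct device_node *controller)
{
	struct of_phandle_args ph;
	struct device_node *np;
	int count = 0, i;

	for_each_node_with_property(np, "mboxes") {
		if (!of_device_is_available(np))
			continue;
		for (i = 0; !of_parse_phandle_with_args(np, "mboxes",
							"#mbox-cells",
							i, &ph); i++) {
			/*
			 * Drop the reference right away; the node
			 * pointer is only compared, never dereferenced.
			 */
			of_node_put(ph.np);
			if (ph.np == controller) {
				count++;
				break;
			}
		}
	}
	return count;
}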
+ */ + ipcc->num_chans = 0; + for_each_node_with_property(client_dn, "mboxes") { + if (!of_device_is_available(client_dn)) + continue; + i = of_count_phandle_with_args(client_dn, + "mboxes", "#mbox-cells"); + for (j = 0; j < i; j++) { + ret = of_parse_phandle_with_args(client_dn, "mboxes", + "#mbox-cells", j, &curr_ph); + of_node_put(curr_ph.np); + if (!ret && curr_ph.np == controller_dn) { + ipcc->num_chans++; + break; + } + } + } + + /* If no clients are found, skip registering as a mbox controller */ + if (!ipcc->num_chans) + return 0; + + ipcc->chans = devm_kcalloc(dev, ipcc->num_chans, + sizeof(struct mbox_chan), GFP_KERNEL); + if (!ipcc->chans) + return -ENOMEM; mbox = &ipcc->mbox; mbox->dev = dev; - mbox->num_chans = IPCC_MBOX_MAX_CHAN; - mbox->chans = ipcc->chan; + mbox->num_chans = ipcc->num_chans; + mbox->chans = ipcc->chans; mbox->ops = &ipcc_mbox_chan_ops; mbox->of_xlate = qcom_ipcc_mbox_xlate; mbox->txdone_irq = false; @@ -212,6 +257,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc) static int qcom_ipcc_probe(struct platform_device *pdev) { struct qcom_ipcc *ipcc; + static int id; + char *name; int ret; ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL); @@ -228,27 +275,33 @@ static int qcom_ipcc_probe(struct platform_device *pdev) if (ipcc->irq < 0) return ipcc->irq; + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ipcc_%d", id++); + if (!name) + return -ENOMEM; + ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node, &qcom_ipcc_irq_ops, ipcc); if (!ipcc->irq_domain) return -ENOMEM; - ret = qcom_ipcc_setup_mbox(ipcc); + ret = qcom_ipcc_setup_mbox(ipcc, pdev->dev.of_node); if (ret) goto err_mbox; ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn, - IRQF_TRIGGER_HIGH, "ipcc", ipcc); + IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, name, ipcc); if (ret < 0) { dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret); - goto err_mbox; + goto err_req_irq; } - enable_irq_wake(ipcc->irq); platform_set_drvdata(pdev, ipcc); return 0; +err_req_irq: + if (ipcc->num_chans) + mbox_controller_unregister(&ipcc->mbox); err_mbox: irq_domain_remove(ipcc->irq_domain); diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index f44079d62b1a..31a0fa914274 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -655,6 +655,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev) mbox->pdata = pdata; ret = zynqmp_ipi_mbox_probe(mbox, nc); if (ret) { + of_node_put(nc); dev_err(dev, "failed to probe subdev.\n"); ret = -EINVAL; goto free_mbox_dev; diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index b94d5e4fdc23..24a4532053e4 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1274,8 +1274,6 @@ mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag) { - int r = 0; - /* return if in use */ if (CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE) @@ -1289,9 +1287,9 @@ mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int slee (access_control_value<<12))); /* Wait for IOC to clear Doorbell Status bit */ - if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) { + if (WaitForDoorbellAck(ioc, 5, sleepFlag) < 0) return -2; - }else + else return 0; } diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig index 51aecafdcbdf..5efc4151bf58 100644 --- a/drivers/misc/cxl/Kconfig 
+++ b/drivers/misc/cxl/Kconfig @@ -6,6 +6,7 @@ config CXL_BASE bool select PPC_COPRO_BASE + select PPC_64S_HASH_MMU config CXL tristate "Support for IBM Coherent Accelerators (CXL)" diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index c173a5e88c91..315c43f17dd3 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -570,6 +570,7 @@ static struct attribute *afu_cr_attrs[] = { &class_attribute.attr, NULL, }; +ATTRIBUTE_GROUPS(afu_cr); static void release_afu_config_record(struct kobject *kobj) { @@ -581,7 +582,7 @@ static void release_afu_config_record(struct kobject *kobj) static struct kobj_type afu_config_record_type = { .sysfs_ops = &kobj_sysfs_ops, .release = release_afu_config_record, - .default_attrs = afu_cr_attrs, + .default_groups = afu_cr_groups, }; static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx) diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 49ab656e8a96..633e1cf08d6e 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -68,11 +68,6 @@ * which won't work on pure SMBus systems. */ -struct at24_client { - struct i2c_client *client; - struct regmap *regmap; -}; - struct at24_data { /* * Lock protects against activities from other Linux tasks, @@ -94,9 +89,10 @@ struct at24_data { /* * Some chips tie up multiple I2C addresses; dummy devices reserve - * them for us, and we'll use them with SMBus calls. + * them for us. */ - struct at24_client client[]; + u8 bank_addr_shift; + struct regmap *client_regmaps[]; }; /* @@ -123,6 +119,7 @@ MODULE_PARM_DESC(at24_write_timeout, "Time (in ms) to try writes (default 25)"); struct at24_chip_data { u32 byte_len; u8 flags; + u8 bank_addr_shift; void (*read_post)(unsigned int off, char *buf, size_t count); }; @@ -137,6 +134,12 @@ struct at24_chip_data { .read_post = _read_post, \ } +#define AT24_CHIP_DATA_BS(_name, _len, _flags, _bank_addr_shift) \ + static const struct at24_chip_data _name = { \ + .byte_len = _len, .flags = _flags, \ + .bank_addr_shift = _bank_addr_shift \ + } + static void at24_read_post_vaio(unsigned int off, char *buf, size_t count) { int i; @@ -197,6 +200,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA_BS(at24_data_24c1025, 1048576 / 8, AT24_FLAG_ADDR16, 2); AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16); /* identical to 24c08 ? 
*/ AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0); @@ -225,6 +229,7 @@ static const struct i2c_device_id at24_ids[] = { { "24c256", (kernel_ulong_t)&at24_data_24c256 }, { "24c512", (kernel_ulong_t)&at24_data_24c512 }, { "24c1024", (kernel_ulong_t)&at24_data_24c1024 }, + { "24c1025", (kernel_ulong_t)&at24_data_24c1025 }, { "24c2048", (kernel_ulong_t)&at24_data_24c2048 }, { "at24", 0 }, { /* END OF LIST */ } @@ -254,6 +259,7 @@ static const struct of_device_id at24_of_match[] = { { .compatible = "atmel,24c256", .data = &at24_data_24c256 }, { .compatible = "atmel,24c512", .data = &at24_data_24c512 }, { .compatible = "atmel,24c1024", .data = &at24_data_24c1024 }, + { .compatible = "atmel,24c1025", .data = &at24_data_24c1025 }, { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 }, { /* END OF LIST */ }, }; @@ -275,8 +281,8 @@ MODULE_DEVICE_TABLE(acpi, at24_acpi_ids); * set the byte address; on a multi-master board, another master * may have changed the chip's "current" address pointer. */ -static struct at24_client *at24_translate_offset(struct at24_data *at24, - unsigned int *offset) +static struct regmap *at24_translate_offset(struct at24_data *at24, + unsigned int *offset) { unsigned int i; @@ -288,12 +294,12 @@ static struct at24_client *at24_translate_offset(struct at24_data *at24, *offset &= 0xff; } - return &at24->client[i]; + return at24->client_regmaps[i]; } static struct device *at24_base_client_dev(struct at24_data *at24) { - return &at24->client[0].client->dev; + return regmap_get_device(at24->client_regmaps[0]); } static size_t at24_adjust_read_count(struct at24_data *at24, @@ -324,14 +330,10 @@ static ssize_t at24_regmap_read(struct at24_data *at24, char *buf, unsigned int offset, size_t count) { unsigned long timeout, read_time; - struct at24_client *at24_client; - struct i2c_client *client; struct regmap *regmap; int ret; - at24_client = at24_translate_offset(at24, &offset); - regmap = at24_client->regmap; - client = at24_client->client; + regmap = at24_translate_offset(at24, &offset); count = at24_adjust_read_count(at24, offset, count); /* adjust offset for mac and serial read ops */ @@ -346,7 +348,7 @@ static ssize_t at24_regmap_read(struct at24_data *at24, char *buf, read_time = jiffies; ret = regmap_bulk_read(regmap, offset, buf, count); - dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", + dev_dbg(regmap_get_device(regmap), "read %zu@%d --> %d (%ld)\n", count, offset, ret, jiffies); if (!ret) return count; @@ -387,14 +389,10 @@ static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf, unsigned int offset, size_t count) { unsigned long timeout, write_time; - struct at24_client *at24_client; - struct i2c_client *client; struct regmap *regmap; int ret; - at24_client = at24_translate_offset(at24, &offset); - regmap = at24_client->regmap; - client = at24_client->client; + regmap = at24_translate_offset(at24, &offset); count = at24_adjust_write_count(at24, offset, count); timeout = jiffies + msecs_to_jiffies(at24_write_timeout); @@ -406,7 +404,7 @@ static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf, write_time = jiffies; ret = regmap_bulk_write(regmap, offset, buf, count); - dev_dbg(&client->dev, "write %zu@%d --> %d (%ld)\n", + dev_dbg(regmap_get_device(regmap), "write %zu@%d --> %d (%ld)\n", count, offset, ret, jiffies); if (!ret) return count; @@ -538,17 +536,16 @@ static const struct at24_chip_data *at24_get_chip_data(struct device *dev) } static int at24_make_dummy_client(struct at24_data *at24, unsigned int index, + struct 
i2c_client *base_client,
+				 struct regmap_config *regmap_config)
 {
-	struct i2c_client *base_client, *dummy_client;
+	struct i2c_client *dummy_client;
 	struct regmap *regmap;
-	struct device *dev;
-
-	base_client = at24->client[0].client;
-	dev = &base_client->dev;
-	dummy_client = devm_i2c_new_dummy_device(dev, base_client->adapter,
-						 base_client->addr + index);
+	dummy_client = devm_i2c_new_dummy_device(&base_client->dev,
+						 base_client->adapter,
+						 base_client->addr +
+						 (index << at24->bank_addr_shift));
 	if (IS_ERR(dummy_client))
 		return PTR_ERR(dummy_client);
 
@@ -556,8 +553,7 @@ static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
 
-	at24->client[index].client = dummy_client;
-	at24->client[index].regmap = regmap;
+	at24->client_regmaps[index] = regmap;
 
 	return 0;
 }
@@ -680,7 +676,7 @@ static int at24_probe(struct i2c_client *client)
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
 
-	at24 = devm_kzalloc(dev, struct_size(at24, client, num_addresses),
+	at24 = devm_kzalloc(dev, struct_size(at24, client_regmaps, num_addresses),
 			    GFP_KERNEL);
 	if (!at24)
 		return -ENOMEM;
@@ -690,10 +686,10 @@ static int at24_probe(struct i2c_client *client)
 	at24->page_size = page_size;
 	at24->flags = flags;
 	at24->read_post = cdata->read_post;
+	at24->bank_addr_shift = cdata->bank_addr_shift;
 	at24->num_addresses = num_addresses;
 	at24->offset_adj = at24_get_offset_adj(flags, byte_len);
-	at24->client[0].client = client;
-	at24->client[0].regmap = regmap;
+	at24->client_regmaps[0] = regmap;
 
 	at24->vcc_reg = devm_regulator_get(dev, "vcc");
 	if (IS_ERR(at24->vcc_reg))
@@ -709,7 +705,7 @@ static int at24_probe(struct i2c_client *client)
 
 	/* use dummy devices for multiple-address chips */
 	for (i = 1; i < num_addresses; i++) {
-		err = at24_make_dummy_client(at24, i, &regmap_config);
+		err = at24_make_dummy_client(at24, i, client, &regmap_config);
 		if (err)
 			return err;
 	}
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index b38978a3b3ff..c3305bdda69c 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -1,28 +1,27 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * at25.c -- support most SPI EEPROMs, such as Atmel AT25 models
- * and Cypress FRAMs FM25 models
+ * Driver for most of the SPI EEPROMs, such as Atmel AT25 models
+ * and Cypress FRAMs FM25 models.
  *
  * Copyright (C) 2006 David Brownell
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/bits.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
-#include <linux/nvmem-provider.h>
-#include <linux/spi/spi.h>
 #include <linux/spi/eeprom.h>
-#include <linux/property.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/math.h>
+#include <linux/spi/spi.h>
+
+#include <linux/nvmem-provider.h>
 
 /*
- * NOTE: this is an *EEPROM* driver.  The vagaries of product naming
+ * NOTE: this is an *EEPROM* driver. The vagaries of product naming
  * mean that some AT25 products are EEPROMs, and others are FLASH.
  * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
  * not this one!
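Before continuing with at25.c, a worked example of the at24 bank addressing introduced above may help. For the new 24c1025 entry (bank_addr_shift = 2), the chip exposes two 64 KiB banks on two I2C addresses, with the bank-select bit landing on bit 2 of the slave address rather than bit 0 as on a 24c1024. The 0x50 base address below is an assumed example wiring, not something the patch specifies; only the arithmetic mirrors at24_translate_offset() and at24_make_dummy_client():

#include <stdio.h>

/*
 * Stand-alone model of the bank selection math above; all names here
 * are ours, not the driver's.
 */
#define BASE_I2C_ADDR	0x50
#define BANK_ADDR_SHIFT	2	/* 24c1025: bank bit lands on A2 */

int main(void)
{
	unsigned int offset = 0x1ABCD;		/* linear EEPROM offset */
	unsigned int bank = offset >> 16;	/* AT24_FLAG_ADDR16 path */
	unsigned int in_bank = offset & 0xffff;	/* offset handed to regmap */
	unsigned int addr = BASE_I2C_ADDR + (bank << BANK_ADDR_SHIFT);

	/* A 24c1024 (shift 0) would answer at 0x51 instead of 0x54. */
	printf("bank=%u i2c addr=0x%02x in-bank offset=0x%04x\n",
	       bank, addr, in_bank);
	return 0;
}

So a read at linear offset 0x1ABCD goes to the dummy client at 0x54 with in-bank offset 0xABCD, i.e. exactly the regmap selected by client_regmaps[1].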
@@ -33,9 +32,9 @@ #define FM25_SN_LEN 8 /* serial number length */ struct at25_data { + struct spi_eeprom chip; struct spi_device *spi; struct mutex lock; - struct spi_eeprom chip; unsigned addrlen; struct nvmem_config nvmem_config; struct nvmem_device *nvmem; @@ -58,13 +57,14 @@ struct at25_data { #define AT25_SR_BP1 0x08 #define AT25_SR_WPEN 0x80 /* writeprotect enable */ -#define AT25_INSTR_BIT3 0x08 /* Additional address bit in instr */ +#define AT25_INSTR_BIT3 0x08 /* additional address bit in instr */ #define FM25_ID_LEN 9 /* ID length */ #define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ -/* Specs often allow 5 msec for a page write, sometimes 20 msec; +/* + * Specs often allow 5ms for a page write, sometimes 20ms; * it's important to recover from write timeouts. */ #define EE_TIMEOUT 25 @@ -96,7 +96,7 @@ static int at25_ee_read(void *priv, unsigned int offset, instr = AT25_READ; if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) - if (offset >= (1U << (at25->addrlen * 8))) + if (offset >= BIT(at25->addrlen * 8)) instr |= AT25_INSTR_BIT3; *cp++ = instr; @@ -109,7 +109,7 @@ static int at25_ee_read(void *priv, unsigned int offset, *cp++ = offset >> 8; fallthrough; case 1: - case 0: /* can't happen: for better codegen */ + case 0: /* can't happen: for better code generation */ *cp++ = offset >> 0; } @@ -126,11 +126,12 @@ static int at25_ee_read(void *priv, unsigned int offset, mutex_lock(&at25->lock); - /* Read it all at once. + /* + * Read it all at once. * * REVISIT that's potentially a problem with large chips, if * other devices on the bus need to be accessed regularly or - * this chip is clocked very slowly + * this chip is clocked very slowly. */ status = spi_sync(at25->spi, &m); dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n", @@ -140,9 +141,7 @@ static int at25_ee_read(void *priv, unsigned int offset, return status; } -/* - * read extra registers as ID or serial number - */ +/* Read extra registers as ID or serial number */ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command, int len) { @@ -208,7 +207,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) if (!bounce) return -ENOMEM; - /* For write, rollover is within the page ... so we write at + /* + * For write, rollover is within the page ... so we write at * most one page, then manually roll over to the next page. */ mutex_lock(&at25->lock); @@ -229,7 +229,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) instr = AT25_WRITE; if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) - if (offset >= (1U << (at25->addrlen * 8))) + if (offset >= BIT(at25->addrlen * 8)) instr |= AT25_INSTR_BIT3; *cp++ = instr; @@ -242,7 +242,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) *cp++ = offset >> 8; fallthrough; case 1: - case 0: /* can't happen: for better codegen */ + case 0: /* can't happen: for better code generation */ *cp++ = offset >> 0; } @@ -258,8 +258,9 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) if (status < 0) break; - /* REVISIT this should detect (or prevent) failed writes - * to readonly sections of the EEPROM... + /* + * REVISIT this should detect (or prevent) failed writes + * to read-only sections of the EEPROM... 
*/ /* Wait for non-busy status */ @@ -306,34 +307,37 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) { u32 val; + int err; - memset(chip, 0, sizeof(*chip)); strncpy(chip->name, "at25", sizeof(chip->name)); - if (device_property_read_u32(dev, "size", &val) == 0 || - device_property_read_u32(dev, "at25,byte-len", &val) == 0) { - chip->byte_len = val; - } else { + err = device_property_read_u32(dev, "size", &val); + if (err) + err = device_property_read_u32(dev, "at25,byte-len", &val); + if (err) { dev_err(dev, "Error: missing \"size\" property\n"); - return -ENODEV; + return err; } + chip->byte_len = val; - if (device_property_read_u32(dev, "pagesize", &val) == 0 || - device_property_read_u32(dev, "at25,page-size", &val) == 0) { - chip->page_size = val; - } else { + err = device_property_read_u32(dev, "pagesize", &val); + if (err) + err = device_property_read_u32(dev, "at25,page-size", &val); + if (err) { dev_err(dev, "Error: missing \"pagesize\" property\n"); - return -ENODEV; + return err; } + chip->page_size = val; - if (device_property_read_u32(dev, "at25,addr-mode", &val) == 0) { + err = device_property_read_u32(dev, "address-width", &val); + if (err) { + err = device_property_read_u32(dev, "at25,addr-mode", &val); + if (err) { + dev_err(dev, "Error: missing \"address-width\" property\n"); + return err; + } chip->flags = (u16)val; } else { - if (device_property_read_u32(dev, "address-width", &val)) { - dev_err(dev, - "Error: missing \"address-width\" property\n"); - return -ENODEV; - } switch (val) { case 9: chip->flags |= EE_INSTR_BIT3_IS_ADDR; @@ -359,16 +363,54 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) return 0; } +static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip) +{ + struct at25_data *at25 = container_of(chip, struct at25_data, chip); + u8 sernum[FM25_SN_LEN]; + u8 id[FM25_ID_LEN]; + int i; + + strncpy(chip->name, "fm25", sizeof(chip->name)); + + /* Get ID of chip */ + fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN); + if (id[6] != 0xc2) { + dev_err(dev, "Error: no Cypress FRAM (id %02x)\n", id[6]); + return -ENODEV; + } + /* Set size found in ID */ + if (id[7] < 0x21 || id[7] > 0x26) { + dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]); + return -ENODEV; + } + + chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024; + if (chip->byte_len > 64 * 1024) + chip->flags |= EE_ADDR3; + else + chip->flags |= EE_ADDR2; + + if (id[8]) { + fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN); + /* Swap byte order */ + for (i = 0; i < FM25_SN_LEN; i++) + at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i]; + } + + chip->page_size = PAGE_SIZE; + return 0; +} + static const struct of_device_id at25_of_match[] = { - { .compatible = "atmel,at25",}, - { .compatible = "cypress,fm25",}, + { .compatible = "atmel,at25" }, + { .compatible = "cypress,fm25" }, { } }; MODULE_DEVICE_TABLE(of, at25_of_match); static const struct spi_device_id at25_spi_ids[] = { - { .name = "at25",}, - { .name = "fm25",}, + { .name = "at25" }, + { .name = "fm25" }, { } }; MODULE_DEVICE_TABLE(spi, at25_spi_ids); @@ -378,31 +420,18 @@ static int at25_probe(struct spi_device *spi) struct at25_data *at25 = NULL; int err; int sr; - u8 id[FM25_ID_LEN]; - u8 sernum[FM25_SN_LEN]; - int i; - const struct of_device_id *match; - bool is_fram = 0; - - match = of_match_device(of_match_ptr(at25_of_match), &spi->dev); - if (match && !strcmp(match->compatible, "cypress,fm25")) - 
is_fram = 1; - - at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL); - if (!at25) - return -ENOMEM; - - /* Chip description */ - if (spi->dev.platform_data) { - memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip)); - } else if (!is_fram) { - err = at25_fw_to_chip(&spi->dev, &at25->chip); - if (err) - return err; - } - - /* Ping the chip ... the status register is pretty portable, - * unlike probing manufacturer IDs. We do expect that system + struct spi_eeprom *pdata; + bool is_fram; + + err = device_property_match_string(&spi->dev, "compatible", "cypress,fm25"); + if (err >= 0) + is_fram = true; + else + is_fram = false; + + /* + * Ping the chip ... the status register is pretty portable, + * unlike probing manufacturer IDs. We do expect that system * firmware didn't write it in the past few milliseconds! */ sr = spi_w8r8(spi, AT25_RDSR); @@ -415,35 +444,17 @@ static int at25_probe(struct spi_device *spi) at25->spi = spi; spi_set_drvdata(spi, at25); - if (is_fram) { - /* Get ID of chip */ - fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN); - if (id[6] != 0xc2) { - dev_err(&spi->dev, - "Error: no Cypress FRAM (id %02x)\n", id[6]); - return -ENODEV; - } - /* set size found in ID */ - if (id[7] < 0x21 || id[7] > 0x26) { - dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]); - return -ENODEV; - } - at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024; - - if (at25->chip.byte_len > 64 * 1024) - at25->chip.flags |= EE_ADDR3; + /* Chip description */ + pdata = dev_get_platdata(&spi->dev); + if (pdata) { + at25->chip = *pdata; + } else { + if (is_fram) + err = at25_fram_to_chip(&spi->dev, &at25->chip); else - at25->chip.flags |= EE_ADDR2; - - if (id[8]) { - fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN); - /* swap byte order */ - for (i = 0; i < FM25_SN_LEN; i++) - at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i]; - } - - at25->chip.page_size = PAGE_SIZE; - strncpy(at25->chip.name, "fm25", sizeof(at25->chip.name)); + err = at25_fw_to_chip(&spi->dev, &at25->chip); + if (err) + return err; } /* For now we only support 8/16/24 bit addressing */ diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index 8132a84698d5..3c0ae07a2d80 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -57,7 +57,7 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) } va_block->start = virt_addr; - va_block->end = virt_addr + page_size; + va_block->end = virt_addr + page_size - 1; va_block->size = page_size; list_add_tail(&va_block->node, &cb->va_block_list); } @@ -80,13 +80,13 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) offset += va_block->size; } - hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR); + rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV); mutex_unlock(&ctx->mmu_lock); cb->is_mmu_mapped = true; - return 0; + return rc; err_va_umap: list_for_each_entry(va_block, &cb->va_block_list, node) { @@ -97,7 +97,7 @@ err_va_umap: offset -= va_block->size; } - hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR); mutex_unlock(&ctx->mmu_lock); @@ -126,7 +126,7 @@ static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) "Failed to unmap CB's va 0x%llx\n", va_block->start); - hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + hl_mmu_invalidate_cache(hdev, true, 
MMU_OP_USERPTR); mutex_unlock(&ctx->mmu_lock); @@ -250,8 +250,7 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, * Can't use generic function to check this because of special case * where we create a CB as part of the reset process */ - if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) && - (ctx_id != HL_KERNEL_ASID_ID))) { + if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) { dev_warn_ratelimited(hdev->dev, "Device is disabled or in reset. Can't create new CBs\n"); rc = -EBUSY; @@ -380,8 +379,9 @@ int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) } static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr, - u64 cb_handle, u32 *usage_cnt) + u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va) { + struct hl_vm_va_block *va_block; struct hl_cb *cb; u32 handle; int rc = 0; @@ -402,7 +402,18 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr, goto out; } - *usage_cnt = atomic_read(&cb->cs_cnt); + if (flags & HL_CB_FLAGS_GET_DEVICE_VA) { + va_block = list_first_entry(&cb->va_block_list, struct hl_vm_va_block, node); + if (va_block) { + *device_va = va_block->start; + } else { + dev_err(hdev->dev, "CB is not mapped to the device's MMU\n"); + rc = -EINVAL; + goto out; + } + } else { + *usage_cnt = atomic_read(&cb->cs_cnt); + } out: spin_unlock(&mgr->cb_lock); @@ -414,7 +425,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) union hl_cb_args *args = data; struct hl_device *hdev = hpriv->hdev; enum hl_device_status status; - u64 handle = 0; + u64 handle = 0, device_va; u32 usage_cnt = 0; int rc; @@ -450,13 +461,20 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) case HL_CB_OP_INFO: rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle, - &usage_cnt); - memset(args, 0, sizeof(*args)); - args->out.usage_cnt = usage_cnt; + args->in.flags, + &usage_cnt, + &device_va); + + memset(&args->out, 0, sizeof(args->out)); + + if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA) + args->out.device_va = device_va; + else + args->out.usage_cnt = usage_cnt; break; default: - rc = -ENOTTY; + rc = -EINVAL; break; } diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 4c8000fd246c..0a4ef13d9ac4 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. 
*/ @@ -533,8 +533,8 @@ static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs) mcs_compl->stream_master_qid_map)) { /* extract the timestamp only of first completed CS */ if (!mcs_compl->timestamp) - mcs_compl->timestamp = - ktime_to_ns(fence->timestamp); + mcs_compl->timestamp = ktime_to_ns(fence->timestamp); + complete_all(&mcs_compl->completion); /* @@ -733,6 +733,14 @@ static void cs_timedout(struct work_struct *work) hdev = cs->ctx->hdev; + /* Save only the first CS timeout parameters */ + rc = atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1); + if (!rc) { + hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime; + hdev->last_error.cs_timeout_timestamp = ktime_get(); + hdev->last_error.cs_timeout_seq = cs->sequence; + } + switch (cs->type) { case CS_TYPE_SIGNAL: dev_err(hdev->dev, @@ -767,9 +775,9 @@ static void cs_timedout(struct work_struct *work) if (likely(!skip_reset_on_timeout)) { if (hdev->reset_on_lockup) - hl_device_reset(hdev, HL_RESET_TDR); + hl_device_reset(hdev, HL_DRV_RESET_TDR); else - hdev->needs_reset = true; + hdev->reset_info.needs_reset = true; } } @@ -806,7 +814,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); cs->timeout_jiffies = timeout; cs->skip_reset_on_timeout = - hdev->skip_reset_on_timeout || + hdev->reset_info.skip_reset_on_timeout || !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT); cs->submission_time_jiffies = jiffies; INIT_LIST_HEAD(&cs->job_list); @@ -1131,9 +1139,6 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) enum hl_cs_type cs_type; if (!hl_device_operational(hdev, &status)) { - dev_warn_ratelimited(hdev->dev, - "Device is %s. Can't submit new CS\n", - hdev->status[status]); return -EBUSY; } @@ -1262,7 +1267,8 @@ static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid) static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, u32 num_chunks, u64 *cs_seq, u32 flags, - u32 encaps_signals_handle, u32 timeout) + u32 encaps_signals_handle, u32 timeout, + u16 *signal_initial_sob_count) { bool staged_mid, int_queues_only = true; struct hl_device *hdev = hpriv->hdev; @@ -1429,6 +1435,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, goto free_cs_object; } + *signal_initial_sob_count = cs->initial_sob_count; + rc = HL_CS_STATUS_SUCCESS; goto put_cs; @@ -1457,6 +1465,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, int rc = 0, do_ctx_switch; void __user *chunks; u32 num_chunks, tmp; + u16 sob_count; int ret; do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); @@ -1497,7 +1506,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, rc = 0; } else { rc = cs_ioctl_default(hpriv, chunks, num_chunks, - cs_seq, 0, 0, hdev->timeout_jiffies); + cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count); } mutex_unlock(&hpriv->restore_phase_mutex); @@ -1813,6 +1822,9 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv, } handle->count = count; + + hl_ctx_get(hdev, hpriv->ctx); + handle->ctx = hpriv->ctx; mgr = &hpriv->ctx->sig_mgr; spin_lock(&mgr->lock); @@ -1822,7 +1834,7 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv, if (hdl_id < 0) { dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n"); rc = -EINVAL; - goto out; + goto put_ctx; } handle->id = hdl_id; @@ -1875,7 +1887,10 @@ remove_idr: idr_remove(&mgr->handles, hdl_id); spin_unlock(&mgr->lock); 
+put_ctx:
+	hl_ctx_put(handle->ctx);
 	kfree(handle);
+
 out:
 	return rc;
 }
@@ -1935,6 +1950,7 @@ static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
 
 		/* Release the id and free allocated memory of the handle */
 		idr_remove(&mgr->handles, handle_id);
+		hl_ctx_put(encaps_sig_hdl->ctx);
 		kfree(encaps_sig_hdl);
 	} else {
 		rc = -EINVAL;
@@ -1948,7 +1964,8 @@ out:
 
 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 				void __user *chunks, u32 num_chunks,
-				u64 *cs_seq, u32 flags, u32 timeout)
+				u64 *cs_seq, u32 flags, u32 timeout,
+				u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
 {
 	struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
 	bool handle_found = false, is_wait_cs = false,
@@ -2180,6 +2197,9 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 		goto free_cs_object;
 	}
 
+	*signal_sob_addr_offset = cs->sob_addr_offset;
+	*signal_initial_sob_count = cs->initial_sob_count;
+
 	rc = HL_CS_STATUS_SUCCESS;
 	if (is_wait_cs)
 		wait_cs_submitted = true;
@@ -2210,6 +2230,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 	void __user *chunks;
 	u32 num_chunks, flags, timeout,
 		signals_count = 0, sob_addr = 0, handle_id = 0;
+	u16 sob_initial_count = 0;
 	int rc;
 
 	rc = hl_cs_sanity_checks(hpriv, args);
@@ -2240,7 +2261,8 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 	case CS_TYPE_WAIT:
 	case CS_TYPE_COLLECTIVE_WAIT:
 		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
-					&cs_seq, args->in.cs_flags, timeout);
+					&cs_seq, args->in.cs_flags, timeout,
+					&sob_addr, &sob_initial_count);
 		break;
 	case CS_RESERVE_SIGNALS:
 		rc = cs_ioctl_reserve_signals(hpriv,
@@ -2256,20 +2278,33 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 		rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
 						args->in.cs_flags,
 						args->in.encaps_sig_handle_id,
-						timeout);
+						timeout, &sob_initial_count);
 		break;
 	}
 out:
 	if (rc != -EAGAIN) {
 		memset(args, 0, sizeof(*args));
 
-		if (cs_type == CS_RESERVE_SIGNALS) {
+		switch (cs_type) {
+		case CS_RESERVE_SIGNALS:
 			args->out.handle_id = handle_id;
 			args->out.sob_base_addr_offset = sob_addr;
 			args->out.count = signals_count;
-		} else {
+			break;
+		case CS_TYPE_SIGNAL:
+			args->out.sob_base_addr_offset = sob_addr;
+			args->out.sob_count_before_submission = sob_initial_count;
+			args->out.seq = cs_seq;
+			break;
+		case CS_TYPE_DEFAULT:
+			args->out.sob_count_before_submission = sob_initial_count;
 			args->out.seq = cs_seq;
+			break;
+		default:
+			args->out.seq = cs_seq;
+			break;
 		}
+
 		args->out.status = rc;
 	}
@@ -2334,16 +2369,18 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence
  * hl_cs_poll_fences - iterate CS fences to check for CS completion
  *
  * @mcs_data: multi-CS internal data
+ * @mcs_compl: multi-CS completion structure
  *
  * @return 0 on success, otherwise non 0 error code
  *
  * The function iterates on all CS sequence in the list and set bit in
  * completion_bitmap for each completed CS.
- * while iterating, the function can extracts the stream map to be later
- * used by the waiting function.
- * this function shall be called after taking context ref
+ * While iterating, the function adds the stream map of each fence in the
+ * fence array to the completion's QID stream map, which completing CSs
+ * use to signal the multi-CS context.
+ * This function shall be called after taking a context reference.
  */
-static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
+static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
 {
 	struct hl_fence **fence_ptr = mcs_data->fence_arr;
 	struct hl_device *hdev = mcs_data->ctx->hdev;
@@ -2360,6 +2397,15 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
 		return rc;
 
 	/*
+	 * Re-initialize the completion here to handle two possible cases:
+	 * 1. The CS completes the multi-CS prior to clearing the completion,
+	 *    in which case the fence iteration is guaranteed to catch the CS
+	 *    completion.
+	 * 2. The completion occurs after the re-init, in which case we will
+	 *    wake up immediately in wait_for_completion.
+	 */
+	reinit_completion(&mcs_compl->completion);
+
+	/*
 	 * set to maximum time to verify timestamp is valid: if at the end
 	 * this value is maintained- no timestamp was updated
 	 */
@@ -2370,6 +2416,21 @@
 		struct hl_fence *fence = *fence_ptr;
 
 		/*
+		 * In order to prevent the case where we wait until timeout even
+		 * though a CS associated with the multi-CS actually completed,
+		 * we do things in the below order:
+		 * 1. For each fence, set its QID map in the multi-CS completion
+		 *    QID map. This way any CS can, potentially, complete the
+		 *    multi-CS for the specific QID (note that once a completion
+		 *    is initialized, calling complete* and then waiting on the
+		 *    completion will cause it to return at once).
+		 * 2. Only after allowing multi-CS completion for the specific
+		 *    QID do we check whether the specific CS already completed
+		 *    (and thus the wait-for-completion part will be skipped).
+		 *    If the CS has not completed, the completing CS is
+		 *    guaranteed to wake up the completion.
+		 */
+		if (fence)
+			mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
+
+		/*
 		 * function won't sleep as it is called with timeout 0 (i.e.
 		 * poll the fence)
 		 */
@@ -2384,9 +2445,7 @@
 		switch (status) {
 		case CS_WAIT_STATUS_BUSY:
-			/* CS did not finished, keep waiting on its QID*/
-			mcs_data->stream_master_qid_map |=
-					fence->stream_master_qid_map;
+			/* CS did not finish; the QID to wait on is already stored */
 			break;
 		case CS_WAIT_STATUS_COMPLETED:
 			/*
 			 * returns to user indicating CS completed before it finished
 			 * all of its mcs handling, to avoid race the next time the
 			 * user waits for mcs.
+			 * Note: when reaching this case, fence is definitely not NULL,
+			 * but the NULL check was added to satisfy static analysis.
 			 */
-			if (!fence->mcs_handling_done)
+			if (fence && !fence->mcs_handling_done) {
+				/*
+				 * In case the multi-CS completed but the MCS handling is
+				 * not done, we "complete" the multi-CS to prevent it from
+				 * waiting until time-out; the "multi-CS handling done"
+				 * check will get another chance at the next iteration.
+				 */
+				complete_all(&mcs_compl->completion);
 				break;
+			}
 
 			mcs_data->completion_bitmap |= BIT(i);
 
 			/*
@@ -2456,6 +2525,21 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
 	return rc;
 }
 
+static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
+{
+	if (usecs <= U32_MAX)
+		return usecs_to_jiffies(usecs);
+
+	/*
+	 * If the value in nanoseconds would overflow 64 bits, use the
+	 * largest 64 bit value.
+ */ + if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC))) + return nsecs_to_jiffies(U64_MAX); + + return nsecs_to_jiffies(usecs * NSEC_PER_USEC); +} + /* * hl_wait_multi_cs_completion_init - init completion structure * @@ -2469,9 +2553,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, * the function gets the first available completion (by marking it "used") * and initialize its values. */ -static struct multi_cs_completion *hl_wait_multi_cs_completion_init( - struct hl_device *hdev, - u8 stream_master_bitmap) +static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev) { struct multi_cs_completion *mcs_compl; int i; @@ -2483,8 +2565,11 @@ static struct multi_cs_completion *hl_wait_multi_cs_completion_init( if (!mcs_compl->used) { mcs_compl->used = 1; mcs_compl->timestamp = 0; - mcs_compl->stream_master_qid_map = stream_master_bitmap; - reinit_completion(&mcs_compl->completion); + /* + * init QID map to 0 to avoid completion by CSs. the actual QID map + * to multi-CS CSs will be set incrementally at a later stage + */ + mcs_compl->stream_master_qid_map = 0; spin_unlock(&mcs_compl->lock); break; } @@ -2492,8 +2577,7 @@ static struct multi_cs_completion *hl_wait_multi_cs_completion_init( } if (i == MULTI_CS_MAX_USER_CTX) { - dev_err(hdev->dev, - "no available multi-CS completion structure\n"); + dev_err(hdev->dev, "no available multi-CS completion structure\n"); return ERR_PTR(-ENOMEM); } return mcs_compl; @@ -2524,27 +2608,18 @@ static void hl_wait_multi_cs_completion_fini( * * @return 0 on success, otherwise non 0 error code */ -static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data) +static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data, + struct multi_cs_completion *mcs_compl) { - struct hl_device *hdev = mcs_data->ctx->hdev; - struct multi_cs_completion *mcs_compl; long completion_rc; - mcs_compl = hl_wait_multi_cs_completion_init(hdev, - mcs_data->stream_master_qid_map); - if (IS_ERR(mcs_compl)) - return PTR_ERR(mcs_compl); - - completion_rc = wait_for_completion_interruptible_timeout( - &mcs_compl->completion, - usecs_to_jiffies(mcs_data->timeout_us)); + completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion, + mcs_data->timeout_jiffies); /* update timestamp */ if (completion_rc > 0) mcs_data->timestamp = mcs_compl->timestamp; - hl_wait_multi_cs_completion_fini(mcs_compl); - mcs_data->wait_status = completion_rc; return 0; @@ -2577,6 +2652,7 @@ void hl_multi_cs_completion_init(struct hl_device *hdev) */ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) { + struct multi_cs_completion *mcs_compl; struct hl_device *hdev = hpriv->hdev; struct multi_cs_data mcs_data = {0}; union hl_wait_cs_args *args = data; @@ -2631,9 +2707,17 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) hl_ctx_get(hdev, ctx); + /* wait (with timeout) for the first CS to be completed */ + mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us); + mcs_compl = hl_wait_multi_cs_completion_init(hdev); + if (IS_ERR(mcs_compl)) { + rc = PTR_ERR(mcs_compl); + goto put_ctx; + } + /* poll all CS fences, extract timestamp */ mcs_data.update_ts = true; - rc = hl_cs_poll_fences(&mcs_data); + rc = hl_cs_poll_fences(&mcs_data, mcs_compl); /* * skip wait for CS completion when one of the below is true: * - an error on the poll function @@ -2641,34 +2725,39 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) * - the user called ioctl with timeout 
0 */ if (rc || mcs_data.completion_bitmap || !args->in.timeout_us) - goto put_ctx; + goto completion_fini; - /* wait (with timeout) for the first CS to be completed */ - mcs_data.timeout_us = args->in.timeout_us; - rc = hl_wait_multi_cs_completion(&mcs_data); - if (rc) - goto put_ctx; + while (true) { + rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl); + if (rc || (mcs_data.wait_status == 0)) + break; - if (mcs_data.wait_status > 0) { /* * poll fences once again to update the CS map. * no timestamp should be updated this time. */ mcs_data.update_ts = false; - rc = hl_cs_poll_fences(&mcs_data); + rc = hl_cs_poll_fences(&mcs_data, mcs_compl); + + if (mcs_data.completion_bitmap) + break; /* * if hl_wait_multi_cs_completion returned before timeout (i.e. - * it got a completion) we expect to see at least one CS - * completed after the poll function. + * it got a completion) it either got completed by CS in the multi CS list + * (in which case the indication will be non empty completion_bitmap) or it + * got completed by CS submitted to one of the shared stream master but + * not in the multi CS list (in which case we should wait again but modify + * the timeout and set timestamp as zero to let a CS related to the current + * multi-CS set a new, relevant, timestamp) */ - if (!mcs_data.completion_bitmap) { - dev_warn_ratelimited(hdev->dev, - "Multi-CS got completion on wait but no CS completed\n"); - rc = -EFAULT; - } + mcs_data.timeout_jiffies = mcs_data.wait_status; + mcs_compl->timestamp = 0; } +completion_fini: + hl_wait_multi_cs_completion_fini(mcs_compl); + put_ctx: hl_ctx_put(ctx); kfree(fence_arr); @@ -2699,7 +2788,7 @@ free_seq_arr: } /* update if some CS was gone */ - if (mcs_data.timestamp) + if (!mcs_data.timestamp) args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; } else { args->out.status = HL_WAIT_CS_STATUS_BUSY; @@ -2766,37 +2855,129 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) } static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, - u32 timeout_us, u64 user_address, - u64 target_value, u16 interrupt_offset, - enum hl_cs_wait_status *status, + struct hl_cb_mgr *cb_mgr, u64 timeout_us, + u64 cq_counters_handle, u64 cq_counters_offset, + u64 target_value, struct hl_user_interrupt *interrupt, + u32 *status, u64 *timestamp) { struct hl_user_pending_interrupt *pend; - struct hl_user_interrupt *interrupt; unsigned long timeout, flags; - u64 completion_value; long completion_rc; + struct hl_cb *cb; int rc = 0; + u32 handle; - if (timeout_us == U32_MAX) - timeout = timeout_us; - else - timeout = usecs_to_jiffies(timeout_us); + timeout = hl_usecs64_to_jiffies(timeout_us); hl_ctx_get(hdev, ctx); - pend = kmalloc(sizeof(*pend), GFP_KERNEL); + cq_counters_handle >>= PAGE_SHIFT; + handle = (u32) cq_counters_handle; + + cb = hl_cb_get(hdev, cb_mgr, handle); + if (!cb) { + hl_ctx_put(ctx); + return -EINVAL; + } + + pend = kzalloc(sizeof(*pend), GFP_KERNEL); if (!pend) { + hl_cb_put(cb); hl_ctx_put(ctx); return -ENOMEM; } hl_fence_init(&pend->fence, ULONG_MAX); - if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID) - interrupt = &hdev->common_user_interrupt; - else - interrupt = &hdev->user_interrupt[interrupt_offset]; + pend->cq_kernel_addr = (u64 *) cb->kernel_address + cq_counters_offset; + pend->cq_target_value = target_value; + + /* We check for completion value as interrupt could have been received + * before we added the node to the wait list + */ + if (*pend->cq_kernel_addr >= target_value) { + *status = HL_WAIT_CS_STATUS_COMPLETED; + 
/* There was no interrupt, we assume the completion is now. */
+		pend->fence.timestamp = ktime_get();
+	}
+
+	if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
+		goto set_timestamp;
+
+	/* Add pending user interrupt to relevant list for the interrupt
+	 * handler to monitor
+	 */
+	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+	list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+	/* Wait for interrupt handler to signal completion */
+	completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
+								  timeout);
+	if (completion_rc > 0) {
+		*status = HL_WAIT_CS_STATUS_COMPLETED;
+	} else {
+		if (completion_rc == -ERESTARTSYS) {
+			dev_err_ratelimited(hdev->dev,
+				"user process got signal while waiting for interrupt ID %d\n",
+				interrupt->interrupt_id);
+			rc = -EINTR;
+			*status = HL_WAIT_CS_STATUS_ABORTED;
+		} else {
+			if (pend->fence.error == -EIO) {
+				dev_err_ratelimited(hdev->dev,
+					"interrupt based wait ioctl aborted (error: %d) due to a reset cycle being initiated\n",
+					pend->fence.error);
+				rc = -EIO;
+				*status = HL_WAIT_CS_STATUS_ABORTED;
+			} else {
+				dev_err_ratelimited(hdev->dev, "Waiting for interrupt ID %d timed out\n",
+					interrupt->interrupt_id);
+				rc = -ETIMEDOUT;
+			}
+			*status = HL_WAIT_CS_STATUS_BUSY;
+		}
+	}
+
+	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+	list_del(&pend->wait_list_node);
+	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+set_timestamp:
+	*timestamp = ktime_to_ns(pend->fence.timestamp);
+
+	kfree(pend);
+	hl_cb_put(cb);
+	hl_ctx_put(ctx);
+
+	return rc;
+}
+
+static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
+				u64 timeout_us, u64 user_address,
+				u64 target_value, struct hl_user_interrupt *interrupt,
+				u32 *status, u64 *timestamp)
+{
+	struct hl_user_pending_interrupt *pend;
+	unsigned long timeout, flags;
+	u64 completion_value;
+	long completion_rc;
+	int rc = 0;
+
+	timeout = hl_usecs64_to_jiffies(timeout_us);
+
+	hl_ctx_get(hdev, ctx);
+
+	pend = kzalloc(sizeof(*pend), GFP_KERNEL);
+	if (!pend) {
+		hl_ctx_put(ctx);
+		return -ENOMEM;
+	}
+
+	hl_fence_init(&pend->fence, ULONG_MAX);
 
 	/* Add pending user interrupt to relevant list for the interrupt
 	 * handler to monitor
@@ -2815,13 +2996,14 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
 	}
 
 	if (completion_value >= target_value) {
-		*status = CS_WAIT_STATUS_COMPLETED;
+		*status = HL_WAIT_CS_STATUS_COMPLETED;
 		/* There was no interrupt, we assume the completion is now. */
 		pend->fence.timestamp = ktime_get();
-	} else
-		*status = CS_WAIT_STATUS_BUSY;
+	} else {
+		*status = HL_WAIT_CS_STATUS_BUSY;
+	}
 
-	if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
+	if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
 		goto remove_pending_user_interrupt;
 
 wait_again:
@@ -2850,7 +3032,13 @@ wait_again:
 	}
 
 	if (completion_value >= target_value) {
-		*status = CS_WAIT_STATUS_COMPLETED;
+		*status = HL_WAIT_CS_STATUS_COMPLETED;
+	} else if (pend->fence.error) {
+		dev_err_ratelimited(hdev->dev,
+			"interrupt based wait ioctl aborted (error: %d) due to a reset cycle being initiated\n",
+			pend->fence.error);
+		/* set the command completion status as ABORTED */
+		*status = HL_WAIT_CS_STATUS_ABORTED;
 	} else {
 		timeout = completion_rc;
 		goto wait_again;
@@ -2861,7 +3049,7 @@ wait_again:
 			interrupt->interrupt_id);
 		rc = -EINTR;
 	} else {
-		*status = CS_WAIT_STATUS_BUSY;
+		*status = HL_WAIT_CS_STATUS_BUSY;
 	}
 
 remove_pending_user_interrupt:
@@ -2879,11 +3067,12 @@ remove_pending_user_interrupt:
 
 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 {
-	u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
+	u16 interrupt_id, first_interrupt, last_interrupt;
 	struct hl_device *hdev = hpriv->hdev;
 	struct asic_fixed_properties *prop;
+	struct hl_user_interrupt *interrupt;
 	union hl_wait_cs_args *args = data;
-	enum hl_cs_wait_status status;
+	u32 status = HL_WAIT_CS_STATUS_BUSY;
 	u64 timestamp;
 	int rc;
 
@@ -2894,8 +3083,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 		return -EPERM;
 	}
 
-	interrupt_id =
-		FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
+	interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
 
 	first_interrupt = prop->first_available_user_msix_interrupt;
 	last_interrupt = prop->first_available_user_msix_interrupt +
@@ -2908,15 +3096,21 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 	}
 
 	if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
-		interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
+		interrupt = &hdev->common_user_interrupt;
 	else
-		interrupt_offset = interrupt_id - first_interrupt;
+		interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
 
-	rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
+	if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
+		rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->cb_mgr,
+				args->in.interrupt_timeout_us, args->in.cq_counters_handle,
+				args->in.cq_counters_offset,
+				args->in.target, interrupt, &status,
+				&timestamp);
+	else
+		rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
 				args->in.interrupt_timeout_us, args->in.addr,
-				args->in.target, interrupt_offset, &status,
+				args->in.target, interrupt, &status,
 				&timestamp);
-
 	if (rc) {
 		if (rc != -EINTR)
 			dev_err_ratelimited(hdev->dev,
@@ -2926,22 +3120,13 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 	}
 
 	memset(args, 0, sizeof(*args));
+	args->out.status = status;
 
 	if (timestamp) {
 		args->out.timestamp_nsec = timestamp;
 		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
 	}
 
-	switch (status) {
-	case CS_WAIT_STATUS_COMPLETED:
-		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
-		break;
-	case CS_WAIT_STATUS_BUSY:
-	default:
-		args->out.status = HL_WAIT_CS_STATUS_BUSY;
-		break;
-	}
-
 	return 0;
 }
 
@@ -2955,7 +3140,7 @@ int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 	 * user interrupt
 	 */
 	if (!hl_device_operational(hpriv->hdev, NULL))
-		return -EPERM;
+		return -EBUSY;
 
 	if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
 		rc = hl_interrupt_wait_ioctl(hpriv, data);
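The two wait helpers above share one subtle ordering rule: the target value is checked once before the waiter is armed, and only then does the thread block, so a completion that fires in between is never lost (the interrupt handler's complete-side call simply makes the subsequent wait return immediately). A rough user-space analogue of that pattern, with hypothetical names and a condition variable standing in for the fence completion, assuming nothing beyond POSIX threads:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* All names here are ours; this is not the driver's code. */
struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	const volatile uint64_t *counter;	/* models pend->cq_kernel_addr */
	uint64_t target;			/* models pend->cq_target_value */
};

/* Returns true once *counter reached target, false on timeout. */
static bool wait_for_counter(struct waiter *w, const struct timespec *deadline)
{
	bool done;

	/* 1. Poll first: the "interrupt" may have fired already. */
	if (*w->counter >= w->target)
		return true;

	/*
	 * 2. Only then block. The producer bumps the counter and broadcasts
	 *    under the same lock, so a wakeup between the poll above and the
	 *    wait below cannot be lost.
	 */
	pthread_mutex_lock(&w->lock);
	while (*w->counter < w->target) {
		if (pthread_cond_timedwait(&w->cond, &w->lock, deadline))
			break;	/* ETIMEDOUT or other error */
	}
	done = *w->counter >= w->target;
	pthread_mutex_unlock(&w->lock);

	return done;
}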
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index d0aaccd4df2c..c6360e33bce8 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. */ @@ -13,13 +13,13 @@ void hl_encaps_handle_do_release(struct kref *ref) { struct hl_cs_encaps_sig_handle *handle = container_of(ref, struct hl_cs_encaps_sig_handle, refcount); - struct hl_ctx *ctx = handle->hdev->compute_ctx; - struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr; + struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr; spin_lock(&mgr->lock); idr_remove(&mgr->handles, handle->id); spin_unlock(&mgr->lock); + hl_ctx_put(handle->ctx); kfree(handle); } @@ -27,8 +27,7 @@ static void hl_encaps_handle_do_release_sob(struct kref *ref) { struct hl_cs_encaps_sig_handle *handle = container_of(ref, struct hl_cs_encaps_sig_handle, refcount); - struct hl_ctx *ctx = handle->hdev->compute_ctx; - struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr; + struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr; /* if we're here, then there was a signals reservation but cs with * encaps signals wasn't submitted, so need to put refcount @@ -40,6 +39,7 @@ static void hl_encaps_handle_do_release_sob(struct kref *ref) idr_remove(&mgr->handles, handle->id); spin_unlock(&mgr->lock); + hl_ctx_put(handle->ctx); kfree(handle); } @@ -97,11 +97,9 @@ static void hl_ctx_fini(struct hl_ctx *ctx) /* The engines are stopped as there is no executing CS, but the * Coresight might be still working by accessing addresses * related to the stopped engines. Hence stop it explicitly. - * Stop only if this is the compute context, as there can be - * only one compute context */ - if ((hdev->in_debug) && (hdev->compute_ctx == ctx)) - hl_device_set_debug_mode(hdev, false); + if (hdev->in_debug) + hl_device_set_debug_mode(hdev, ctx, false); hdev->asic_funcs->ctx_fini(ctx); hl_cb_va_pool_fini(ctx); @@ -167,7 +165,7 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv) hpriv->ctx = ctx; /* TODO: remove the following line for multiple process support */ - hdev->compute_ctx = ctx; + hdev->is_compute_ctx_active = true; return 0; @@ -274,6 +272,27 @@ int hl_ctx_put(struct hl_ctx *ctx) return kref_put(&ctx->refcount, hl_ctx_do_release); } +struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev) +{ + struct hl_ctx *ctx = NULL; + struct hl_fpriv *hpriv; + + mutex_lock(&hdev->fpriv_list_lock); + + list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) { + /* There can only be a single user which has opened the compute device, so exit + * immediately once we find him + */ + ctx = hpriv->ctx; + hl_ctx_get(hdev, ctx); + break; + } + + mutex_unlock(&hdev->fpriv_list_lock); + + return ctx; +} + /* * hl_ctx_get_fence_locked - get CS fence under CS lock * diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 1f2a3dc6c4e2..fc084ee5106e 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. 
*/ @@ -15,19 +15,25 @@ #define MMU_ADDR_BUF_SIZE 40 #define MMU_ASID_BUF_SIZE 10 #define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE) +#define I2C_MAX_TRANSACTION_LEN 8 static struct dentry *hl_debug_root; static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, - u8 i2c_reg, long *val) + u8 i2c_reg, u8 i2c_len, u64 *val) { struct cpucp_packet pkt; - u64 result; int rc; if (!hl_device_operational(hdev, NULL)) return -EBUSY; + if (i2c_len > I2C_MAX_TRANSACTION_LEN) { + dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n", + i2c_len, I2C_MAX_TRANSACTION_LEN); + return -EINVAL; + } + memset(&pkt, 0, sizeof(pkt)); pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD << @@ -35,12 +41,10 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_bus = i2c_bus; pkt.i2c_addr = i2c_addr; pkt.i2c_reg = i2c_reg; + pkt.i2c_len = i2c_len; rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, &result); - - *val = (long) result; - + 0, val); if (rc) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); @@ -48,7 +52,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, } static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, - u8 i2c_reg, u32 val) + u8 i2c_reg, u8 i2c_len, u64 val) { struct cpucp_packet pkt; int rc; @@ -56,6 +60,12 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, if (!hl_device_operational(hdev, NULL)) return -EBUSY; + if (i2c_len > I2C_MAX_TRANSACTION_LEN) { + dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n", + i2c_len, I2C_MAX_TRANSACTION_LEN); + return -EINVAL; + } + memset(&pkt, 0, sizeof(pkt)); pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR << @@ -63,6 +73,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_bus = i2c_bus; pkt.i2c_addr = i2c_addr; pkt.i2c_reg = i2c_reg; + pkt.i2c_len = i2c_len; pkt.value = cpu_to_le64(val); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), @@ -235,6 +246,8 @@ static int vm_show(struct seq_file *s, void *data) struct hl_vm_hash_node *hnode; struct hl_userptr *userptr; struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + struct hl_va_range *va_range; + struct hl_vm_va_block *va_block; enum vm_type *vm_type; bool once = true; u64 j; @@ -314,6 +327,25 @@ static int vm_show(struct seq_file *s, void *data) spin_unlock(&dev_entry->ctx_mem_hash_spinlock); + ctx = hl_get_compute_ctx(dev_entry->hdev); + if (ctx) { + seq_puts(s, "\nVA ranges:\n\n"); + for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) { + va_range = ctx->va_range[i]; + seq_printf(s, " va_range %d\n", i); + seq_puts(s, "---------------------\n"); + mutex_lock(&va_range->lock); + list_for_each_entry(va_block, &va_range->list, node) { + seq_printf(s, "%#16llx - %#16llx (%#llx)\n", + va_block->start, va_block->end, + va_block->size); + } + mutex_unlock(&va_range->lock); + seq_puts(s, "\n"); + } + hl_ctx_put(ctx); + } + if (!once) seq_puts(s, "\n"); @@ -407,7 +439,7 @@ static int mmu_show(struct seq_file *s, void *data) if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID) ctx = hdev->kernel_ctx; else - ctx = hdev->compute_ctx; + ctx = hl_get_compute_ctx(hdev); if (!ctx) { dev_err(hdev->dev, "no ctx available\n"); @@ -495,7 +527,7 @@ static int engines_show(struct seq_file *s, void *data) struct hl_dbg_device_entry *dev_entry = entry->dev_entry; struct hl_device *hdev = dev_entry->hdev; - if (atomic_read(&hdev->in_reset)) 
{ + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't check device idle during reset\n"); return 0; @@ -560,7 +592,7 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size, u64 *phys_addr) { struct hl_vm_phys_pg_pack *phys_pg_pack; - struct hl_ctx *ctx = hdev->compute_ctx; + struct hl_ctx *ctx; struct hl_vm_hash_node *hnode; u64 end_address, range_size; struct hl_userptr *userptr; @@ -568,6 +600,8 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size, bool valid = false; int i, rc = 0; + ctx = hl_get_compute_ctx(hdev); + if (!ctx) { dev_err(hdev->dev, "no ctx available\n"); return -EINVAL; @@ -624,7 +658,7 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf, ssize_t rc; u32 val; - if (atomic_read(&hdev->in_reset)) { + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't read during reset\n"); return 0; } @@ -660,7 +694,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf, u32 value; ssize_t rc; - if (atomic_read(&hdev->in_reset)) { + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't write during reset\n"); return 0; } @@ -697,7 +731,7 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf, ssize_t rc; u64 val; - if (atomic_read(&hdev->in_reset)) { + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't read during reset\n"); return 0; } @@ -733,7 +767,7 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf, u64 value; ssize_t rc; - if (atomic_read(&hdev->in_reset)) { + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't write during reset\n"); return 0; } @@ -768,7 +802,7 @@ static ssize_t hl_dma_size_write(struct file *f, const char __user *buf, ssize_t rc; u32 size; - if (atomic_read(&hdev->in_reset)) { + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n"); return 0; } @@ -874,22 +908,22 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf, struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; char tmp_buf[32]; - long val; + u64 val; ssize_t rc; if (*ppos) return 0; rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr, - entry->i2c_reg, &val); + entry->i2c_reg, entry->i2c_len, &val); if (rc) { dev_err(hdev->dev, - "Failed to read from I2C bus %d, addr %d, reg %d\n", - entry->i2c_bus, entry->i2c_addr, entry->i2c_reg); + "Failed to read from I2C bus %d, addr %d, reg %d, len %d\n", + entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len); return rc; } - sprintf(tmp_buf, "0x%02lx\n", val); + sprintf(tmp_buf, "%#02llx\n", val); rc = simple_read_from_buffer(buf, count, ppos, tmp_buf, strlen(tmp_buf)); @@ -901,19 +935,19 @@ static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf, { struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; - u32 value; + u64 value; ssize_t rc; - rc = kstrtouint_from_user(buf, count, 16, &value); + rc = kstrtou64_from_user(buf, count, 16, &value); if (rc) return rc; rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr, - entry->i2c_reg, value); + entry->i2c_reg, entry->i2c_len, value); if (rc) { dev_err(hdev->dev, - "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n", - value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg); + "Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n", + value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, 
entry->i2c_len); return rc; } @@ -1043,7 +1077,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, u64 value; ssize_t rc; - if (atomic_read(&hdev->in_reset)) { + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't change clock gating during reset\n"); return 0; @@ -1085,7 +1119,7 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf, u32 value; ssize_t rc; - if (atomic_read(&hdev->in_reset)) { + if (hdev->reset_info.in_reset) { dev_warn_ratelimited(hdev->dev, "Can't change stop on error during reset\n"); return 0; @@ -1396,6 +1430,11 @@ void hl_debugfs_add_device(struct hl_device *hdev) dev_entry->root, &dev_entry->i2c_reg); + debugfs_create_u8("i2c_len", + 0644, + dev_entry->root, + &dev_entry->i2c_len); + debugfs_create_file("i2c_data", 0644, dev_entry->root, @@ -1458,7 +1497,7 @@ void hl_debugfs_add_device(struct hl_device *hdev) debugfs_create_x8("skip_reset_on_timeout", 0644, dev_entry->root, - &hdev->skip_reset_on_timeout); + &hdev->reset_info.skip_reset_on_timeout); debugfs_create_file("state_dump", 0600, diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 2022e5d7b3ad..733338ab6f1d 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. */ @@ -17,9 +17,9 @@ enum hl_device_status hl_device_status(struct hl_device *hdev) { enum hl_device_status status; - if (atomic_read(&hdev->in_reset)) + if (hdev->reset_info.in_reset) status = HL_DEVICE_STATUS_IN_RESET; - else if (hdev->needs_reset) + else if (hdev->reset_info.needs_reset) status = HL_DEVICE_STATUS_NEEDS_RESET; else if (hdev->disabled) status = HL_DEVICE_STATUS_MALFUNCTION; @@ -95,14 +95,14 @@ static void hpriv_release(struct kref *ref) if ((hdev->reset_if_device_not_idle && !device_is_idle) || hdev->reset_upon_device_release) - hl_device_reset(hdev, HL_RESET_DEVICE_RELEASE); + hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE); - /* Now we can mark the compute_ctx as empty. Even if a reset is running in a different + /* Now we can mark the compute_ctx as not active. Even if a reset is running in a different * thread, we don't care because the in_reset is marked so if a user will try to open - * the device it will fail on that, even if compute_ctx is NULL. + * the device it will fail on that, even if compute_ctx is false. 
*/ mutex_lock(&hdev->fpriv_list_lock); - hdev->compute_ctx = NULL; + hdev->is_compute_ctx_active = false; mutex_unlock(&hdev->fpriv_list_lock); kfree(hpriv); @@ -169,9 +169,9 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp) goto out; } - mutex_lock(&hdev->fpriv_list_lock); + mutex_lock(&hdev->fpriv_ctrl_list_lock); list_del(&hpriv->dev_node); - mutex_unlock(&hdev->fpriv_list_lock); + mutex_unlock(&hdev->fpriv_ctrl_list_lock); out: put_pid(hpriv->taskpid); @@ -324,16 +324,12 @@ put_devices: static void device_hard_reset_pending(struct work_struct *work) { struct hl_device_reset_work *device_reset_work = - container_of(work, struct hl_device_reset_work, - reset_work.work); + container_of(work, struct hl_device_reset_work, reset_work.work); struct hl_device *hdev = device_reset_work->hdev; u32 flags; int rc; - flags = HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD; - - if (device_reset_work->fw_reset) - flags |= HL_RESET_FW; + flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR; rc = hl_device_reset(hdev, flags); if ((rc == -EBUSY) && !hdev->device_fini_pending) { @@ -452,9 +448,12 @@ static int device_early_init(struct hl_device *hdev) mutex_init(&hdev->debug_lock); INIT_LIST_HEAD(&hdev->cs_mirror_list); spin_lock_init(&hdev->cs_mirror_lock); + spin_lock_init(&hdev->reset_info.lock); INIT_LIST_HEAD(&hdev->fpriv_list); + INIT_LIST_HEAD(&hdev->fpriv_ctrl_list); mutex_init(&hdev->fpriv_list_lock); - atomic_set(&hdev->in_reset, 0); + mutex_init(&hdev->fpriv_ctrl_list_lock); + mutex_init(&hdev->clk_throttling.lock); return 0; @@ -494,6 +493,9 @@ static void device_early_fini(struct hl_device *hdev) mutex_destroy(&hdev->send_cpu_message_lock); mutex_destroy(&hdev->fpriv_list_lock); + mutex_destroy(&hdev->fpriv_ctrl_list_lock); + + mutex_destroy(&hdev->clk_throttling.lock); hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); @@ -513,22 +515,6 @@ static void device_early_fini(struct hl_device *hdev) hdev->asic_funcs->early_fini(hdev); } -static void set_freq_to_low_job(struct work_struct *work) -{ - struct hl_device *hdev = container_of(work, struct hl_device, - work_freq.work); - - mutex_lock(&hdev->fpriv_list_lock); - - if (!hdev->compute_ctx) - hl_device_set_frequency(hdev, PLL_LOW); - - mutex_unlock(&hdev->fpriv_list_lock); - - schedule_delayed_work(&hdev->work_freq, - usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC)); -} - static void hl_device_heartbeat(struct work_struct *work) { struct hl_device *hdev = container_of(work, struct hl_device, @@ -540,8 +526,10 @@ static void hl_device_heartbeat(struct work_struct *work) if (!hdev->asic_funcs->send_heartbeat(hdev)) goto reschedule; - dev_err(hdev->dev, "Device heartbeat failed!\n"); - hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_HEARTBEAT); + if (hl_device_operational(hdev, NULL)) + dev_err(hdev->dev, "Device heartbeat failed!\n"); + + hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT); return; @@ -552,12 +540,12 @@ reschedule: * If control reached here, then at least one heartbeat work has been * scheduled since last reset/init cycle. * So if the device is not already in reset cycle, reset the flag - * prev_reset_trigger as no reset occurred with HL_RESET_FW_FATAL_ERR + * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR * status for at least one heartbeat. From this point driver restarts * tracking future consecutive fatal errors. 
*/ - if (!(atomic_read(&hdev->in_reset))) - hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT; + if (!hdev->reset_info.in_reset) + hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT; schedule_delayed_work(&hdev->work_heartbeat, usecs_to_jiffies(HL_HEARTBEAT_PER_USEC)); @@ -586,18 +574,6 @@ static int device_late_init(struct hl_device *hdev) hdev->high_pll = hdev->asic_prop.high_pll; - /* force setting to low frequency */ - hdev->curr_pll_profile = PLL_LOW; - - if (hdev->pm_mng_profile == PM_AUTO) - hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW); - else - hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST); - - INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job); - schedule_delayed_work(&hdev->work_freq, - usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC)); - if (hdev->heartbeat) { INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat); schedule_delayed_work(&hdev->work_heartbeat, @@ -620,7 +596,6 @@ static void device_late_fini(struct hl_device *hdev) if (!hdev->late_init_done) return; - cancel_delayed_work_sync(&hdev->work_freq); if (hdev->heartbeat) cancel_delayed_work_sync(&hdev->work_heartbeat); @@ -650,36 +625,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization) return 0; } -/* - * hl_device_set_frequency - set the frequency of the device - * - * @hdev: pointer to habanalabs device structure - * @freq: the new frequency value - * - * Change the frequency if needed. This function has no protection against - * concurrency, therefore it is assumed that the calling function has protected - * itself against the case of calling this function from multiple threads with - * different values - * - * Returns 0 if no change was done, otherwise returns 1 - */ -int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq) -{ - if ((hdev->pm_mng_profile == PM_MANUAL) || - (hdev->curr_pll_profile == freq)) - return 0; - - dev_dbg(hdev->dev, "Changing device frequency to %s\n", - freq == PLL_HIGH ? 
"high" : "low"); - - hdev->asic_funcs->set_pll_profile(hdev, freq); - - hdev->curr_pll_profile = freq; - - return 1; -} - -int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) +int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable) { int rc = 0; @@ -693,12 +639,12 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) goto out; } - if (!hdev->hard_reset_pending) - hdev->asic_funcs->halt_coresight(hdev); + if (!hdev->reset_info.hard_reset_pending) + hdev->asic_funcs->halt_coresight(hdev, ctx); hdev->in_debug = 0; - if (!hdev->hard_reset_pending) + if (!hdev->reset_info.hard_reset_pending) hdev->asic_funcs->set_clock_gating(hdev); goto out; @@ -735,6 +681,8 @@ static void take_release_locks(struct hl_device *hdev) /* Flush anyone that is inside device open */ mutex_lock(&hdev->fpriv_list_lock); mutex_unlock(&hdev->fpriv_list_lock); + mutex_lock(&hdev->fpriv_ctrl_list_lock); + mutex_unlock(&hdev->fpriv_ctrl_list_lock); } static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset) @@ -774,11 +722,14 @@ int hl_device_suspend(struct hl_device *hdev) pci_save_state(hdev->pdev); /* Block future CS/VM/JOB completion operations */ - rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); - if (rc) { + spin_lock(&hdev->reset_info.lock); + if (hdev->reset_info.in_reset) { + spin_unlock(&hdev->reset_info.lock); dev_err(hdev->dev, "Can't suspend while in reset\n"); return -EIO; } + hdev->reset_info.in_reset = 1; + spin_unlock(&hdev->reset_info.lock); /* This blocks all other stuff that is not blocked by in_reset */ hdev->disabled = true; @@ -828,10 +779,12 @@ int hl_device_resume(struct hl_device *hdev) } - hdev->disabled = false; - atomic_set(&hdev->in_reset, 0); + /* 'in_reset' was set to true during suspend, now we must clear it in order + * for hard reset to be performed + */ + hdev->reset_info.in_reset = 0; - rc = hl_device_reset(hdev, HL_RESET_HARD); + rc = hl_device_reset(hdev, HL_DRV_RESET_HARD); if (rc) { dev_err(hdev->dev, "Failed to reset device during resume\n"); goto disable_device; @@ -846,17 +799,21 @@ disable_device: return rc; } -static int device_kill_open_processes(struct hl_device *hdev, u32 timeout) +static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev) { - struct hl_fpriv *hpriv; struct task_struct *task = NULL; + struct list_head *fd_list; + struct hl_fpriv *hpriv; + struct mutex *fd_lock; u32 pending_cnt; + fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock; + fd_list = control_dev ? 
&hdev->fpriv_ctrl_list : &hdev->fpriv_list; /* Giving time for user to close FD, and for processes that are inside * hl_device_open to finish */ - if (!list_empty(&hdev->fpriv_list)) + if (!list_empty(fd_list)) ssleep(1); if (timeout) { @@ -872,12 +829,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout) } } - mutex_lock(&hdev->fpriv_list_lock); + mutex_lock(fd_lock); /* This section must be protected because we are dereferencing * pointers that are freed if the process exits */ - list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) { + list_for_each_entry(hpriv, fd_list, dev_node) { task = get_pid_task(hpriv->taskpid, PIDTYPE_PID); if (task) { dev_info(hdev->dev, "Killing user process pid=%d\n", @@ -889,12 +846,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout) } else { dev_warn(hdev->dev, "Can't get task struct for PID so giving up on killing process\n"); - mutex_unlock(&hdev->fpriv_list_lock); + mutex_unlock(fd_lock); return -ETIME; } } - mutex_unlock(&hdev->fpriv_list_lock); + mutex_unlock(fd_lock); /* * We killed the open users, but that doesn't mean they are closed. @@ -906,7 +863,7 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout) */ wait_for_processes: - while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) { + while ((!list_empty(fd_list)) && (pending_cnt)) { dev_dbg(hdev->dev, "Waiting for all unmap operations to finish before hard reset\n"); @@ -916,7 +873,7 @@ wait_for_processes: } /* All processes exited successfully */ - if (list_empty(&hdev->fpriv_list)) + if (list_empty(fd_list)) return 0; /* Give up waiting for processes to exit */ @@ -928,14 +885,19 @@ wait_for_processes: return -EBUSY; } -static void device_disable_open_processes(struct hl_device *hdev) +static void device_disable_open_processes(struct hl_device *hdev, bool control_dev) { + struct list_head *fd_list; struct hl_fpriv *hpriv; + struct mutex *fd_lock; - mutex_lock(&hdev->fpriv_list_lock); - list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) + fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock; + fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list; + + mutex_lock(fd_lock); + list_for_each_entry(hpriv, fd_list, dev_node) hpriv->hdev = NULL; - mutex_unlock(&hdev->fpriv_list_lock); + mutex_unlock(fd_lock); } static void handle_reset_trigger(struct hl_device *hdev, u32 flags) @@ -948,17 +910,17 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags) * ('in_reset' makes sure of it). This makes sure that * 'reset_cause' will continue holding its 1st recorded reason! 
*/ - if (flags & HL_RESET_HEARTBEAT) { - hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT; - cur_reset_trigger = HL_RESET_HEARTBEAT; - } else if (flags & HL_RESET_TDR) { - hdev->curr_reset_cause = HL_RESET_CAUSE_TDR; - cur_reset_trigger = HL_RESET_TDR; - } else if (flags & HL_RESET_FW_FATAL_ERR) { - hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; - cur_reset_trigger = HL_RESET_FW_FATAL_ERR; + if (flags & HL_DRV_RESET_HEARTBEAT) { + hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT; + cur_reset_trigger = HL_DRV_RESET_HEARTBEAT; + } else if (flags & HL_DRV_RESET_TDR) { + hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR; + cur_reset_trigger = HL_DRV_RESET_TDR; + } else if (flags & HL_DRV_RESET_FW_FATAL_ERR) { + hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; + cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR; } else { - hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; + hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; } /* @@ -966,11 +928,11 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags) * is set and if this reset is due to a fatal FW error * device is set to an unstable state. */ - if (hdev->prev_reset_trigger != cur_reset_trigger) { - hdev->prev_reset_trigger = cur_reset_trigger; - hdev->reset_trigger_repeated = 0; + if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) { + hdev->reset_info.prev_reset_trigger = cur_reset_trigger; + hdev->reset_info.reset_trigger_repeated = 0; } else { - hdev->reset_trigger_repeated = 1; + hdev->reset_info.reset_trigger_repeated = 1; } /* If reset is due to heartbeat, device CPU is not responsive in @@ -979,8 +941,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags) * If F/W is performing the reset, no need to send it a message to disable * PCI access */ - if ((flags & HL_RESET_HARD) && - !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) { + if ((flags & HL_DRV_RESET_HARD) && + !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) { /* Disable PCI access from device F/W so it won't send * us additional interrupts. We disable MSI/MSI-X at * the halt_engines function and we can't have the F/W @@ -1015,34 +977,39 @@ */ int hl_device_reset(struct hl_device *hdev, u32 flags) { + bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false, + reset_upon_device_release = false, schedule_hard_reset = false; u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0}; - bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false; + struct hl_ctx *ctx; int i, rc; if (!hdev->init_done) { - dev_err(hdev->dev, - "Can't reset before initialization is done\n"); + dev_err(hdev->dev, "Can't reset before initialization is done\n"); return 0; } - hard_reset = !!(flags & HL_RESET_HARD); - from_hard_reset_thread = !!(flags & HL_RESET_FROM_RESET_THREAD); - fw_reset = !!(flags & HL_RESET_FW); + hard_reset = !!(flags & HL_DRV_RESET_HARD); + from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR); + fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW); - if (!hard_reset && !hdev->supports_soft_reset) { + if (!hard_reset && !hdev->asic_prop.supports_soft_reset) { hard_instead_soft = true; hard_reset = true; } - if (hdev->reset_upon_device_release && - (flags & HL_RESET_DEVICE_RELEASE)) { - dev_dbg(hdev->dev, - "Perform %s-reset upon device release\n", - hard_reset ?
"hard" : "soft"); + if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) { + if (hard_reset) { + dev_crit(hdev->dev, + "Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n"); + return -EINVAL; + } + + reset_upon_device_release = true; + goto do_reset; } - if (!hard_reset && !hdev->allow_inference_soft_reset) { + if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) { hard_instead_soft = true; hard_reset = true; } @@ -1062,12 +1029,22 @@ do_reset: */ if (!from_hard_reset_thread) { /* Block future CS/VM/JOB completion operations */ - rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); - if (rc) + spin_lock(&hdev->reset_info.lock); + if (hdev->reset_info.in_reset) { + /* We only allow scheduling of a hard reset during soft reset */ + if (hard_reset && hdev->reset_info.is_in_soft_reset) + hdev->reset_info.hard_reset_schedule_flags = flags; + spin_unlock(&hdev->reset_info.lock); return 0; + } + hdev->reset_info.in_reset = 1; + spin_unlock(&hdev->reset_info.lock); handle_reset_trigger(hdev, flags); + /* This still allows the completion of some KDMA ops */ + hdev->reset_info.is_in_soft_reset = !hard_reset; + /* This also blocks future CS/VM/JOB completion operations */ hdev->disabled = true; @@ -1075,21 +1052,19 @@ do_reset: if (hard_reset) dev_info(hdev->dev, "Going to reset device\n"); - else if (flags & HL_RESET_DEVICE_RELEASE) - dev_info(hdev->dev, - "Going to reset device after it was released by user\n"); + else if (reset_upon_device_release) + dev_info(hdev->dev, "Going to reset device after release by user\n"); else - dev_info(hdev->dev, - "Going to reset compute engines of inference device\n"); + dev_info(hdev->dev, "Going to reset engines of inference device\n"); } again: if ((hard_reset) && (!from_hard_reset_thread)) { - hdev->hard_reset_pending = true; + hdev->reset_info.hard_reset_pending = true; hdev->process_kill_trial_cnt = 0; - hdev->device_reset_work.fw_reset = fw_reset; + hdev->device_reset_work.flags = flags; /* * Because the reset function can't run from heartbeat work, @@ -1109,7 +1084,7 @@ kill_processes: * process can't really exit until all its CSs are done, which * is what we do in cs rollback */ - rc = device_kill_open_processes(hdev, 0); + rc = device_kill_open_processes(hdev, 0, false); if (rc == -EBUSY) { if (hdev->device_fini_pending) { @@ -1138,7 +1113,7 @@ kill_processes: hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset); if (hard_reset) { - hdev->fw_loader.linux_loaded = false; + hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE; /* Release kernel context */ if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1) @@ -1154,24 +1129,23 @@ kill_processes: for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) hl_cq_reset(hdev, &hdev->completion_queue[i]); - mutex_lock(&hdev->fpriv_list_lock); - /* Make sure the context switch phase will run again */ - if (hdev->compute_ctx) { - atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1); - hdev->compute_ctx->thread_ctx_switch_wait_token = 0; + ctx = hl_get_compute_ctx(hdev); + if (ctx) { + atomic_set(&ctx->thread_ctx_switch_token, 1); + ctx->thread_ctx_switch_wait_token = 0; + hl_ctx_put(ctx); } - mutex_unlock(&hdev->fpriv_list_lock); - /* Finished tear-down, starting to re-initialize */ if (hard_reset) { hdev->device_cpu_disabled = false; - hdev->hard_reset_pending = false; + hdev->reset_info.hard_reset_pending = false; - if (hdev->reset_trigger_repeated && - (hdev->prev_reset_trigger == HL_RESET_FW_FATAL_ERR)) { + if 
(hdev->reset_info.reset_trigger_repeated && + (hdev->reset_info.prev_reset_trigger == + HL_DRV_RESET_FW_FATAL_ERR)) { /* if there are 2 back-to-back resets from FW, * ensure the driver puts the device in an unusable state */ @@ -1204,7 +1178,7 @@ kill_processes: goto out_err; } - hdev->compute_ctx = NULL; + hdev->is_compute_ctx_active = false; rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); if (rc) { @@ -1225,16 +1199,14 @@ kill_processes: rc = hdev->asic_funcs->hw_init(hdev); if (rc) { - dev_err(hdev->dev, - "failed to initialize the H/W after reset\n"); + dev_err(hdev->dev, "failed to initialize the H/W after reset\n"); goto out_err; } /* If device is not idle fail the reset process */ if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask, HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) { - dev_err(hdev->dev, - "device is not idle (mask 0x%llx_%llx) after reset\n", + dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n", idle_mask[1], idle_mask[0]); rc = -EIO; goto out_err; @@ -1243,43 +1215,56 @@ kill_processes: /* Check that the communication with the device is working */ rc = hdev->asic_funcs->test_queues(hdev); if (rc) { - dev_err(hdev->dev, - "Failed to detect if device is alive after reset\n"); + dev_err(hdev->dev, "Failed to detect if device is alive after reset\n"); goto out_err; } if (hard_reset) { rc = device_late_init(hdev); if (rc) { - dev_err(hdev->dev, - "Failed late init after hard reset\n"); + dev_err(hdev->dev, "Failed late init after hard reset\n"); goto out_err; } rc = hl_vm_init(hdev); if (rc) { - dev_err(hdev->dev, - "Failed to init memory module after hard reset\n"); + dev_err(hdev->dev, "Failed to init memory module after hard reset\n"); goto out_err; } hl_set_max_power(hdev); } else { - rc = hdev->asic_funcs->soft_reset_late_init(hdev); + rc = hdev->asic_funcs->non_hard_reset_late_init(hdev); if (rc) { - dev_err(hdev->dev, - "Failed late init after soft reset\n"); + if (reset_upon_device_release) + dev_err(hdev->dev, + "Failed late init in reset after device release\n"); + else + dev_err(hdev->dev, "Failed late init after soft reset\n"); goto out_err; } } - atomic_set(&hdev->in_reset, 0); - hdev->needs_reset = false; + spin_lock(&hdev->reset_info.lock); + hdev->reset_info.is_in_soft_reset = false; + + /* Schedule hard reset only if requested and if not already in hard reset. + * We keep 'in_reset' enabled, so no other reset can go in during the hard + * reset schedule + */ + if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags) + schedule_hard_reset = true; + else + hdev->reset_info.in_reset = 0; + + spin_unlock(&hdev->reset_info.lock); + + hdev->reset_info.needs_reset = false; dev_notice(hdev->dev, "Successfully finished resetting the device\n"); if (hard_reset) { - hdev->hard_reset_cnt++; + hdev->reset_info.hard_reset_cnt++; /* After reset is done, we are ready to receive events from * the F/W.
We can't do it before because we will ignore events @@ -1287,28 +1272,41 @@ kill_processes: * the device will be operational although it shouldn't be */ hdev->asic_funcs->enable_events_from_fw(hdev); - } else { - hdev->soft_reset_cnt++; + } else if (!reset_upon_device_release) { + hdev->reset_info.soft_reset_cnt++; + } + + if (schedule_hard_reset) { + dev_info(hdev->dev, "Performing hard reset scheduled during soft reset\n"); + flags = hdev->reset_info.hard_reset_schedule_flags; + hdev->reset_info.hard_reset_schedule_flags = 0; + hdev->disabled = true; + hard_reset = true; + handle_reset_trigger(hdev, flags); + goto again; } return 0; out_err: hdev->disabled = true; + hdev->reset_info.is_in_soft_reset = false; if (hard_reset) { - dev_err(hdev->dev, - "Failed to reset! Device is NOT usable\n"); - hdev->hard_reset_cnt++; + dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n"); + hdev->reset_info.hard_reset_cnt++; + } else if (reset_upon_device_release) { + dev_err(hdev->dev, "Failed to reset device after user release\n"); + hard_reset = true; + goto again; } else { - dev_err(hdev->dev, - "Failed to do soft-reset, trying hard reset\n"); - hdev->soft_reset_cnt++; + dev_err(hdev->dev, "Failed to do soft-reset\n"); + hdev->reset_info.soft_reset_cnt++; hard_reset = true; goto again; } - atomic_set(&hdev->in_reset, 0); + hdev->reset_info.in_reset = 0; return rc; } @@ -1455,7 +1453,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) goto mmu_fini; } - hdev->compute_ctx = NULL; + hdev->is_compute_ctx_active = false; hdev->asic_funcs->state_dump_init(hdev); @@ -1619,6 +1617,7 @@ out_disabled: */ void hl_device_fini(struct hl_device *hdev) { + bool device_in_reset; ktime_t timeout; u64 reset_sec; int i, rc; @@ -1642,10 +1641,22 @@ void hl_device_fini(struct hl_device *hdev) */ timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000); - rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); - while (rc) { + + spin_lock(&hdev->reset_info.lock); + device_in_reset = !!hdev->reset_info.in_reset; + if (!device_in_reset) + hdev->reset_info.in_reset = 1; + spin_unlock(&hdev->reset_info.lock); + + while (device_in_reset) { usleep_range(50, 200); - rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + + spin_lock(&hdev->reset_info.lock); + device_in_reset = !!hdev->reset_info.in_reset; + if (!device_in_reset) + hdev->reset_info.in_reset = 1; + spin_unlock(&hdev->reset_info.lock); + if (ktime_compare(ktime_get(), timeout) > 0) { dev_crit(hdev->dev, "Failed to remove device because reset function did not finish\n"); @@ -1667,7 +1678,7 @@ void hl_device_fini(struct hl_device *hdev) take_release_locks(hdev); - hdev->hard_reset_pending = true; + hdev->reset_info.hard_reset_pending = true; hl_hwmon_fini(hdev); @@ -1681,10 +1692,16 @@ void hl_device_fini(struct hl_device *hdev) "Waiting for all processes to exit (timeout of %u seconds)", HL_PENDING_RESET_LONG_SEC); - rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC); + rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false); if (rc) { dev_crit(hdev->dev, "Failed to kill all open processes\n"); - device_disable_open_processes(hdev); + device_disable_open_processes(hdev, false); + } + + rc = device_kill_open_processes(hdev, 0, true); + if (rc) { + dev_crit(hdev->dev, "Failed to kill all control device open processes\n"); + device_disable_open_processes(hdev, true); } hl_cb_pool_fini(hdev); @@ -1692,7 +1709,7 @@ void hl_device_fini(struct hl_device *hdev) /* Reset the H/W. 
It will be in idle state after this returns */ hdev->asic_funcs->hw_fini(hdev, true, false); - hdev->fw_loader.linux_loaded = false; + hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE; /* Release kernel context */ if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1)) diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 4e68fb9d2a6b..6775c5c3166b 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -15,8 +15,6 @@ #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ -#define FW_CPU_STATUS_POLL_INTERVAL_USEC 10000 - static char *extract_fw_ver_from_str(const char *fw_str) { char *str, *fw_ver, *whitespace; @@ -214,7 +212,8 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, struct asic_fixed_properties *prop = &hdev->asic_prop; struct cpucp_packet *pkt; dma_addr_t pkt_dma_addr; - u32 tmp, expected_ack_val; + struct hl_bd *sent_bd; + u32 tmp, expected_ack_val, pi; int rc = 0; pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len, @@ -239,6 +238,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, /* set fence to a non valid value */ pkt->fence = cpu_to_le32(UINT_MAX); + pi = queue->pi; /* * The CPU queue is a synchronous queue with an effective depth of @@ -248,7 +248,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, * Which means that we don't need to lock the access to the entire H/W * queues module when submitting a JOB to the CPU queue. */ - hl_hw_queue_submit_bd(hdev, queue, 0, len, pkt_dma_addr); + hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr); if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN) expected_ack_val = queue->pi; @@ -280,6 +280,14 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, *result = le64_to_cpu(pkt->result); } + /* Scrub previous buffer descriptor 'ctl' field which contains the + * previous PI value written during packet submission. + * We must do this or else F/W can read an old value upon queue wraparound. 
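The scrub comment above addresses a classic ring hazard: every buffer descriptor carries the PI that was current when it was submitted, so after a full lap around the queue a slot still holds a PI from the previous lap, and a PI-based acknowledge check could match that stale value. A minimal model of the poisoning fix; the 4-entry ring and the single ctl field are assumptions for illustration, not the driver's layout:

#include <stdint.h>
#include <stdio.h>

#define Q_LEN 4U                    /* power-of-two ring length */

struct bd { uint32_t ctl; };        /* only the field relevant here */

static struct bd ring[Q_LEN];
static uint32_t pi;                 /* free-running producer index */

static void submit(void)
{
	ring[pi & (Q_LEN - 1)].ctl = pi;   /* PI recorded at submission */
	pi++;
}

/* on completion, poison the slot so a wrapped-around reader can never
 * mistake the stale PI for a fresh one */
static void complete_and_scrub(uint32_t old_pi)
{
	ring[old_pi & (Q_LEN - 1)].ctl = UINT32_MAX;
}

int main(void)
{
	uint32_t my_pi = pi;

	submit();
	complete_and_scrub(my_pi);
	printf("slot 0 ctl after scrub: 0x%x\n", ring[0].ctl);
	return 0;
}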
+ */ + sent_bd = queue->kernel_address; + sent_bd += hl_pi_2_offset(pi); + sent_bd->ctl = cpu_to_le32(UINT_MAX); + out: mutex_unlock(&hdev->send_cpu_message_lock); @@ -445,15 +453,6 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val, err_exists = true; } - if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) { - dev_warn(hdev->dev, - "Device boot warning - Skipped DRAM initialization\n"); - /* This is a warning so we don't want it to disable the - * device - */ - err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED; - } - if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) { if (hdev->bmc_enable) { dev_err(hdev->dev, @@ -497,15 +496,6 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val, err_exists = true; } - if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) { - dev_warn(hdev->dev, - "Device boot warning - Failed to load preboot primary image\n"); - /* This is a warning so we don't want it to disable the - * device as we have a secondary preboot image - */ - err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL; - } - if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) { dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n"); err_exists = true; @@ -525,6 +515,34 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val, if (sts_val & CPU_BOOT_DEV_STS0_ENABLED) dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val); + /* All warnings should go here in order not to reach the unknown error validation */ + if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) { + dev_warn(hdev->dev, + "Device boot warning - Skipped DRAM initialization\n"); + /* This is a warning so we don't want it to disable the + * device + */ + err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED; + } + + if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) { + dev_warn(hdev->dev, + "Device boot warning - Failed to load preboot primary image\n"); + /* This is a warning so we don't want it to disable the + * device as we have a secondary preboot image + */ + err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL; + } + + if (err_val & CPU_BOOT_ERR0_TPM_FAIL) { + dev_warn(hdev->dev, + "Device boot warning - TPM failure\n"); + /* This is a warning so we don't want it to disable the + * device + */ + err_val &= ~CPU_BOOT_ERR0_TPM_FAIL; + } + if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) { dev_err(hdev->dev, "Device boot error - unknown ERR0 error 0x%08x\n", err_val); @@ -961,6 +979,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power) pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.type = cpu_to_le16(CPUCP_POWER_INPUT); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), HL_CPUCP_INFO_TIMEOUT_USEC, &result); @@ -974,6 +993,92 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power) return rc; } +int hl_fw_dram_replaced_row_get(struct hl_device *hdev, + struct cpucp_hbm_row_info *info) +{ + struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr; + dma_addr_t cpucp_repl_rows_info_dma_addr; + struct cpucp_packet pkt = {}; + u64 result; + int rc; + + cpucp_repl_rows_info_cpu_addr = + hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, + sizeof(struct cpucp_hbm_row_info), + &cpucp_repl_rows_info_dma_addr); + if (!cpucp_repl_rows_info_cpu_addr) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for CPU-CP replaced rows info packet\n"); + return -ENOMEM; + } + + memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.addr = 
cpu_to_le64(cpucp_repl_rows_info_dma_addr); + pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info)); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) { + dev_err(hdev->dev, + "Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc); + goto out; + } + + memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info)); + +out: + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, + sizeof(struct cpucp_hbm_row_info), + cpucp_repl_rows_info_cpu_addr); + + return rc; +} + +int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num) +{ + struct cpucp_packet pkt; + u64 result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result); + if (rc) { + dev_err(hdev->dev, + "Failed to handle CPU-CP pending rows info pkt, error %d\n", rc); + goto out; + } + + *pend_rows_num = (u32) result; +out: + return rc; +} + +int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.value = cpu_to_le64(asid); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, NULL); + if (rc) + dev_err(hdev->dev, + "Failed on ASID configuration request for engine core, error %d\n", + rc); + + return rc; +} + void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev) { struct static_fw_load_mgr *static_loader = @@ -1028,7 +1133,7 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) switch (status) { case CPU_BOOT_STATUS_NA: dev_err(hdev->dev, - "Device boot progress - BTL did NOT run\n"); + "Device boot progress - BTL/ROM did NOT run\n"); break; case CPU_BOOT_STATUS_IN_WFE: dev_err(hdev->dev, @@ -1101,9 +1206,8 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev, (status == CPU_BOOT_STATUS_DRAM_RDY) || (status == CPU_BOOT_STATUS_NIC_FW_RDY) || (status == CPU_BOOT_STATUS_READY_TO_BOOT) || - (status == CPU_BOOT_STATUS_SRAM_AVAIL) || (status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT), - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, timeout); if (rc) { @@ -1250,8 +1354,7 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev) * 3. FW application - a. Fetch fw application security status * b. 
Check whether hard reset is done by fw app */ - prop->hard_reset_done_by_fw = - !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN); + prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN); dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n", cpu_boot_dev_sts0); @@ -1287,11 +1390,7 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg, { int rc; - /* pldm was added for cases in which we use preboot on pldm and want - * to load boot fit, but we can't wait for preboot because it runs - * very slowly - */ - if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) || hdev->pldm) + if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU)) return 0; /* @@ -1437,7 +1536,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev, le32_to_cpu(dyn_regs->cpu_cmd_status_to_host), status, FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status, - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, timeout); if (rc) { @@ -1703,6 +1802,9 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev, return rc; } + /* here we can mark the descriptor as valid as the content has been validated */ + fw_loader->dynamic_loader.fw_desc_valid = true; + return 0; } @@ -1759,7 +1861,13 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev, return rc; } - /* extract address copy the descriptor from */ + /* + * extract address to copy the descriptor from + * in addition, as the descriptor value is going to be overridden by new data, we mark it + * as invalid. + * It will be marked as valid again once validated. + */ + fw_loader->dynamic_loader.fw_desc_valid = false; src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar + response->ram_offset; memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc)); @@ -1920,17 +2028,15 @@ static void hl_fw_boot_fit_update_state(struct hl_device *hdev, { struct asic_fixed_properties *prop = &hdev->asic_prop; - /* Clear reset status since we need to read it again from boot CPU */ - prop->hard_reset_done_by_fw = false; + hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU; /* Read boot_cpu status bits */ if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) { prop->fw_bootfit_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg); - if (prop->fw_bootfit_cpu_boot_dev_sts0 & - CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) - prop->hard_reset_done_by_fw = true; + prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 & + CPU_BOOT_DEV_STS0_FW_HARD_RST_EN); dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n", prop->fw_bootfit_cpu_boot_dev_sts0); @@ -2055,14 +2161,21 @@ static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev, dyn_loader = &fw_loader->dynamic_loader; - /* Make sure CPU boot-loader is running */ + /* + * Make sure CPU boot-loader is running + * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux + * yet there is a debug scenario in which we load uboot (without Linux) + * which at a later stage is relocated to DRAM.
In this case we expect + * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the + * poll flags + */ rc = hl_poll_timeout( hdev, le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status), status, - (status == CPU_BOOT_STATUS_NIC_FW_RDY) || - (status == CPU_BOOT_STATUS_READY_TO_BOOT), - FW_CPU_STATUS_POLL_INTERVAL_USEC, + (status == CPU_BOOT_STATUS_READY_TO_BOOT) || + (status == CPU_BOOT_STATUS_SRAM_AVAIL), + hdev->fw_poll_interval_usec, dyn_loader->wait_for_bl_timeout); if (rc) { dev_err(hdev->dev, "failed to wait for boot\n"); @@ -2082,14 +2195,14 @@ static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev, dyn_loader = &fw_loader->dynamic_loader; - /* Make sure CPU boot-loader is running */ + /* Make sure CPU linux is running */ rc = hl_poll_timeout( hdev, le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status), status, (status == CPU_BOOT_STATUS_SRAM_AVAIL), - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, fw_loader->cpu_timeout); if (rc) { dev_err(hdev->dev, "failed to wait for Linux\n"); @@ -2121,18 +2234,14 @@ static void hl_fw_linux_update_state(struct hl_device *hdev, { struct asic_fixed_properties *prop = &hdev->asic_prop; - hdev->fw_loader.linux_loaded = true; - - /* Clear reset status since we need to read again from app */ - prop->hard_reset_done_by_fw = false; + hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX; /* Read FW application security bits */ if (prop->fw_cpu_boot_dev_sts0_valid) { prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg); - if (prop->fw_app_cpu_boot_dev_sts0 & - CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) - prop->hard_reset_done_by_fw = true; + prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 & + CPU_BOOT_DEV_STS0_FW_HARD_RST_EN); if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN) @@ -2247,6 +2356,9 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, dev_info(hdev->dev, "Loading firmware to device, may take some time...\n"); + /* initialize FW descriptor as invalid */ + fw_loader->dynamic_loader.fw_desc_valid = false; + /* * In this stage, "cpu_dyn_regs" contains only LKD's hard coded values! * It will be updated from FW after hl_fw_dynamic_request_descriptor(). @@ -2259,14 +2371,14 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, if (rc) goto protocol_err; - if (hdev->curr_reset_cause) { + if (hdev->reset_info.curr_reset_cause) { rc = hl_fw_dynamic_send_msg(hdev, fw_loader, - HL_COMMS_RESET_CAUSE_TYPE, &hdev->curr_reset_cause); + HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause); if (rc) goto protocol_err; /* Clear current reset cause */ - hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; + hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; } if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) { @@ -2288,6 +2400,15 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, goto protocol_err; } + /* + * when testing FW load (without Linux) on PLDM we don't want to + * wait until boot fit is active as it may take several hours. + * instead, we load the bootfit and let it do all initializations in + * the background. 
+ */ + if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX)) + return 0; + rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader); if (rc) goto protocol_err; @@ -2333,7 +2454,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, return 0; protocol_err: - fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0), + if (fw_loader->dynamic_loader.fw_desc_valid) + fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0), le32_to_cpu(dyn_regs->cpu_boot_err1), le32_to_cpu(dyn_regs->cpu_boot_dev_sts0), le32_to_cpu(dyn_regs->cpu_boot_dev_sts1)); @@ -2380,7 +2502,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, cpu_boot_status_reg, status, status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT, - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, fw_loader->boot_fit_timeout); if (rc) { @@ -2403,7 +2525,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, cpu_msg_status_reg, status, status == CPU_MSG_OK, - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, fw_loader->boot_fit_timeout); if (rc) { @@ -2416,7 +2538,14 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, WREG32(msg_to_cpu_reg, KMD_MSG_NA); } - /* Make sure CPU boot-loader is running */ + /* + * Make sure CPU boot-loader is running + * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux + * yet there is a debug scenario in which we loading uboot (without Linux) + * which at later stage is relocated to DRAM. In this case we expect + * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the + * poll flags + */ rc = hl_poll_timeout( hdev, cpu_boot_status_reg, @@ -2425,7 +2554,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, (status == CPU_BOOT_STATUS_NIC_FW_RDY) || (status == CPU_BOOT_STATUS_READY_TO_BOOT) || (status == CPU_BOOT_STATUS_SRAM_AVAIL), - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, cpu_timeout); dev_dbg(hdev->dev, "uboot status = %d\n", status); @@ -2474,7 +2603,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, cpu_boot_status_reg, status, (status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED), - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, cpu_timeout); if (rc) { @@ -2494,7 +2623,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev, cpu_boot_status_reg, status, (status == CPU_BOOT_STATUS_SRAM_AVAIL), - FW_CPU_STATUS_POLL_INTERVAL_USEC, + hdev->fw_poll_interval_usec, cpu_timeout); /* Clear message */ diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index a2002cbf794b..cb710fd478b6 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -61,6 +61,8 @@ #define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */ #define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */ +#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */ + #define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */ #define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */ @@ -117,37 +119,37 @@ enum hl_mmu_page_table_location { /* * Reset Flags * - * - HL_RESET_HARD + * - HL_DRV_RESET_HARD * If set do hard reset to all engines. If not set reset just * compute/DMA engines. 
* - * - HL_RESET_FROM_RESET_THREAD + * - HL_DRV_RESET_FROM_RESET_THR * Set if the caller is the hard-reset thread * - * - HL_RESET_HEARTBEAT + * - HL_DRV_RESET_HEARTBEAT * Set if reset is due to heartbeat * - * - HL_RESET_TDR + * - HL_DRV_RESET_TDR * Set if reset is due to TDR * - * - HL_RESET_DEVICE_RELEASE + * - HL_DRV_RESET_DEV_RELEASE * Set if reset is due to device release * - * - HL_RESET_FW + * - HL_DRV_RESET_BYPASS_REQ_TO_FW * F/W will perform the reset. No need to ask it to reset the device. This is relevant * only when running with secured f/w * - * - HL_RESET_FW_FATAL_ERR + * - HL_DRV_RESET_FW_FATAL_ERR * Set if reset is due to a fatal error from FW */ -#define HL_RESET_HARD (1 << 0) -#define HL_RESET_FROM_RESET_THREAD (1 << 1) -#define HL_RESET_HEARTBEAT (1 << 2) -#define HL_RESET_TDR (1 << 3) -#define HL_RESET_DEVICE_RELEASE (1 << 4) -#define HL_RESET_FW (1 << 5) -#define HL_RESET_FW_FATAL_ERR (1 << 6) +#define HL_DRV_RESET_HARD (1 << 0) +#define HL_DRV_RESET_FROM_RESET_THR (1 << 1) +#define HL_DRV_RESET_HEARTBEAT (1 << 2) +#define HL_DRV_RESET_TDR (1 << 3) +#define HL_DRV_RESET_DEV_RELEASE (1 << 4) +#define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5) +#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6) #define HL_MAX_SOBS_PER_MONITOR 8 @@ -219,6 +221,7 @@ enum hl_fw_component { /** * enum hl_fw_types - F/W types present in the system + * @FW_TYPE_NONE: no FW component indication * @FW_TYPE_LINUX: Linux image for device CPU * @FW_TYPE_BOOT_CPU: Boot image for device CPU * @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system @@ -226,6 +229,7 @@ enum hl_fw_component { * @FW_TYPE_ALL_TYPES: Mask for all types */ enum hl_fw_types { + FW_TYPE_NONE = 0x0, FW_TYPE_LINUX = 0x1, FW_TYPE_BOOT_CPU = 0x2, FW_TYPE_PREBOOT_CPU = 0x4, @@ -353,6 +357,21 @@ enum vm_type { }; /** + * enum mmu_op_flags - mmu operation relevant information. + * @MMU_OP_USERPTR: operation on user memory (host resident). + * @MMU_OP_PHYS_PACK: operation on DRAM (device resident). + * @MMU_OP_CLEAR_MEMCACHE: operation has to clear memcache. + * @MMU_OP_SKIP_LOW_CACHE_INV: operation is allowed to skip parts of cache invalidation. + */ +enum mmu_op_flags { + MMU_OP_USERPTR = 0x1, + MMU_OP_PHYS_PACK = 0x2, + MMU_OP_CLEAR_MEMCACHE = 0x4, + MMU_OP_SKIP_LOW_CACHE_INV = 0x8, +}; + + +/** * enum hl_device_hw_state - H/W device state. use this to understand whether * to do reset before hw_init or not * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset @@ -382,6 +401,7 @@ enum hl_device_hw_state { * @hop3_mask: mask to get the PTE address in hop 3. * @hop4_mask: mask to get the PTE address in hop 4. * @hop5_mask: mask to get the PTE address in hop 5. + * @last_mask: mask to get the bit indicating this is the last hop. * @page_size: default page size used to allocate memory. * @num_hops: The amount of hops supported by the translation table. * @host_resident: Should the MMU page table reside in host memory or in the @@ -402,6 +422,7 @@ struct hl_mmu_properties { u64 hop3_mask; u64 hop4_mask; u64 hop5_mask; + u64 last_mask; u32 page_size; u32 num_hops; u8 host_resident; @@ -524,6 +545,15 @@ struct hl_hints_range { * @dynamic_fw_load: is dynamic FW load is supported. * @gic_interrupts_enable: true if FW is not blocking GIC controller, * false otherwise. + * @use_get_power_for_reset_history: To support backward compatibility for Goya + * and Gaudi + * @supports_soft_reset: is soft reset supported. + * @allow_inference_soft_reset: true if the ASIC supports soft reset that is + * initiated by user or TDR. 
This is only true + * in inference ASICs, as there is no real-world + * use-case of doing soft-reset in training (due + * to the fact that training runs on multiple + * devices) */ struct asic_fixed_properties { struct hw_queue_properties *hw_queues_props; @@ -604,6 +634,9 @@ struct asic_fixed_properties { u8 iatu_done_by_fw; u8 dynamic_fw_load; u8 gic_interrupts_enable; + u8 use_get_power_for_reset_history; + u8 supports_soft_reset; + u8 allow_inference_soft_reset; }; /** @@ -852,10 +885,15 @@ struct hl_user_interrupt { * pending on an interrupt * @wait_list_node: node in the list of user threads pending on an interrupt * @fence: hl fence object for interrupt completion + * @cq_target_value: CQ target value + * @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt + * handler for taget value comparison */ struct hl_user_pending_interrupt { struct list_head wait_list_node; struct hl_fence fence; + u64 cq_target_value; + u64 *cq_kernel_addr; }; /** @@ -1010,6 +1048,7 @@ struct fw_response { * @image_region: region to copy the FW image to * @fw_image_size: size of FW image to load * @wait_for_bl_timeout: timeout for waiting for boot loader to respond + * @fw_desc_valid: true if FW descriptor has been validated and hence the data can be used */ struct dynamic_fw_load_mgr { struct fw_response response; @@ -1017,6 +1056,7 @@ struct dynamic_fw_load_mgr { struct pci_mem_region *image_region; size_t fw_image_size; u32 wait_for_bl_timeout; + bool fw_desc_valid; }; /** @@ -1042,7 +1082,8 @@ struct fw_image_props { * @skip_bmc: should BMC be skipped * @sram_bar_id: SRAM bar ID * @dram_bar_id: DRAM bar ID - * @linux_loaded: true if linux was loaded so far + * @fw_comp_loaded: bitmask of loaded FW components. set bit meaning loaded + * component. values are set according to enum hl_fw_types. */ struct fw_load_mgr { union { @@ -1056,7 +1097,7 @@ struct fw_load_mgr { u8 skip_bmc; u8 sram_bar_id; u8 dram_bar_id; - u8 linux_loaded; + u8 fw_comp_loaded; }; /** @@ -1128,7 +1169,7 @@ struct fw_load_mgr { * @disable_clock_gating: disable clock gating completely * @debug_coresight: perform certain actions on Coresight for debugging. * @is_device_idle: return true if device is idle, false otherwise. - * @soft_reset_late_init: perform certain actions needed after soft reset. + * @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset * @hw_queues_lock: acquire H/W queues lock. * @hw_queues_unlock: release H/W queues lock. * @get_pci_id: retrieve PCI ID. 
@@ -1261,10 +1302,10 @@ struct hl_asic_funcs { int (*send_heartbeat)(struct hl_device *hdev); void (*set_clock_gating)(struct hl_device *hdev); void (*disable_clock_gating)(struct hl_device *hdev); - int (*debug_coresight)(struct hl_device *hdev, void *data); + int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data); bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, struct seq_file *s); - int (*soft_reset_late_init)(struct hl_device *hdev); + int (*non_hard_reset_late_init)(struct hl_device *hdev); void (*hw_queues_lock)(struct hl_device *hdev); void (*hw_queues_unlock)(struct hl_device *hdev); u32 (*get_pci_id)(struct hl_device *hdev); @@ -1276,7 +1317,7 @@ struct hl_asic_funcs { int (*init_iatu)(struct hl_device *hdev); u32 (*rreg)(struct hl_device *hdev, u32 reg); void (*wreg)(struct hl_device *hdev, u32 reg, u32 val); - void (*halt_coresight)(struct hl_device *hdev); + void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx); int (*ctx_init)(struct hl_ctx *ctx); void (*ctx_fini)(struct hl_ctx *ctx); int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); @@ -1518,6 +1559,9 @@ struct hl_userptr { * @submission_time_jiffies: submission time of the cs * @type: CS_TYPE_*. * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs. + * @sob_addr_offset: sob offset from the configuration base address. + * @initial_sob_count: count of completed signals in SOB before current submission of signal or + * cs with encaps signals. * @submitted: true if CS was submitted to H/W. * @completed: true if CS was completed by device. * @timedout : true if CS was timedout. @@ -1553,6 +1597,8 @@ struct hl_cs { u64 submission_time_jiffies; enum hl_cs_type type; u32 encaps_sig_hdl_id; + u32 sob_addr_offset; + u16 initial_sob_count; u8 submitted; u8 completed; u8 timedout; @@ -1792,7 +1838,6 @@ struct hl_debug_params { * @dev_node: node in the device list of file private data * @refcount: number of related contexts. * @restore_phase_mutex: lock for context switch and restore phase. - * @is_control: true for control device, false otherwise */ struct hl_fpriv { struct hl_device *hdev; @@ -1805,7 +1850,6 @@ struct hl_fpriv { struct list_head dev_node; struct kref refcount; struct mutex restore_phase_mutex; - u8 is_control; }; @@ -1864,6 +1908,7 @@ struct hl_debugfs_entry { * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read. * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read. * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read. + * @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read. */ struct hl_dbg_device_entry { struct dentry *root; @@ -1892,6 +1937,7 @@ struct hl_dbg_device_entry { u8 i2c_bus; u8 i2c_addr; u8 i2c_reg; + u8 i2c_len; }; /** @@ -2180,13 +2226,13 @@ struct hwmon_chip_info; * @wq: work queue for device reset procedure. * @reset_work: reset work to be done. * @hdev: habanalabs device structure. - * @fw_reset: whether f/w will do the reset without us sending them a message to do it. + * @flags: reset flags. 
*/ struct hl_device_reset_work { struct workqueue_struct *wq; struct delayed_work reset_work; struct hl_device *hdev; - bool fw_reset; + u32 flags; }; /** @@ -2328,12 +2374,10 @@ struct multi_cs_completion { * @ctx: pointer to the context structure * @fence_arr: array of fences of all CSs * @seq_arr: array of CS sequence numbers - * @timeout_us: timeout in usec for waiting for CS to complete + * @timeout_jiffies: timeout in jiffies for waiting for CS to complete * @timestamp: timestamp of first completed CS * @wait_status: wait for CS status * @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0) - * @stream_master_qid_map: bitmap of all stream master QIDs on which the - * multi-CS is waiting * @arr_len: fence_arr and seq_arr array length * @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0) * @update_ts: update timestamp. 1- update the timestamp, otherwise 0. @@ -2342,17 +2386,114 @@ struct multi_cs_data { struct hl_ctx *ctx; struct hl_fence **fence_arr; u64 *seq_arr; - s64 timeout_us; + s64 timeout_jiffies; s64 timestamp; long wait_status; u32 completion_bitmap; - u32 stream_master_qid_map; u8 arr_len; u8 gone_cs; u8 update_ts; }; /** + * struct hl_clk_throttle_timestamp - current/last clock throttling timestamp + * @start: timestamp taken when 'start' event is received in driver + * @end: timestamp taken when 'end' event is received in driver + */ +struct hl_clk_throttle_timestamp { + ktime_t start; + ktime_t end; +}; + +/** + * struct hl_clk_throttle - keeps current/last clock throttling timestamps + * @timestamp: timestamp taken by driver and firmware, index 0 refers to POWER + * index 1 refers to THERMAL + * @lock: protects this structure as it can be accessed from both event queue + * context and info_ioctl context + * @current_reason: bitmask represents the current clk throttling reasons + * @aggregated_reason: bitmask represents aggregated clk throttling reasons since driver load + */ +struct hl_clk_throttle { + struct hl_clk_throttle_timestamp timestamp[HL_CLK_THROTTLE_TYPE_MAX]; + struct mutex lock; + u32 current_reason; + u32 aggregated_reason; +}; + +/** + * struct last_error_session_info - info about last session in which CS timeout or + * razwi error occurred. + * @open_dev_timestamp: device open timestamp. + * @cs_timeout_timestamp: CS timeout timestamp. + * @razwi_timestamp: razwi timestamp. + * @cs_write_disable: if set writing to CS parameters in the structure is disabled so the + * first (root cause) CS timeout will not be overwritten. + * @razwi_write_disable: if set writing to razwi parameters in the structure is disabled so the + * first (root cause) razwi will not be overwritten. + * @cs_timeout_seq: CS timeout sequence number. + * @razwi_addr: address that caused razwi. + * @razwi_engine_id_1: engine id of the razwi initiator, if it was initiated by an engine that + * does not have an engine id it will be set to U16_MAX. + * @razwi_engine_id_2: second engine id of the razwi initiator. It might happen that a razwi has + * 2 possible initiator engines, one of which caused it. In that case, it will + * contain the second possible engine id, otherwise it will be set to U16_MAX. + * @razwi_non_engine_initiator: set in case the initiator of the razwi does not have an engine id. + * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
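struct hl_clk_throttle above is written from the event-queue path and read from the INFO ioctl, which is why it carries its own mutex. A small userspace model of recording a throttling 'start' event; time() and the bit positions are illustrative stand-ins for ktime_get() and the driver's reason bits:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

enum { THROTTLE_POWER, THROTTLE_THERMAL, THROTTLE_TYPE_MAX };

struct throttle_ts { time_t start, end; };

static struct {
	struct throttle_ts ts[THROTTLE_TYPE_MAX];
	pthread_mutex_t lock;
	unsigned int current_reason;
	unsigned int aggregated_reason;    /* never cleared: history since load */
} clk_throttle = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* event-queue side: record a throttling 'start' event */
static void throttle_start(int type)
{
	pthread_mutex_lock(&clk_throttle.lock);
	clk_throttle.ts[type].start = time(NULL);
	clk_throttle.ts[type].end = 0;
	clk_throttle.current_reason |= 1u << type;
	clk_throttle.aggregated_reason |= 1u << type;
	pthread_mutex_unlock(&clk_throttle.lock);
}

int main(void)
{
	throttle_start(THROTTLE_THERMAL);
	printf("current=0x%x aggregated=0x%x\n",
	       clk_throttle.current_reason, clk_throttle.aggregated_reason);
	return 0;
}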
+ */ +struct last_error_session_info { + ktime_t open_dev_timestamp; + ktime_t cs_timeout_timestamp; + ktime_t razwi_timestamp; + atomic_t cs_write_disable; + atomic_t razwi_write_disable; + u64 cs_timeout_seq; + u64 razwi_addr; + u16 razwi_engine_id_1; + u16 razwi_engine_id_2; + u8 razwi_non_engine_initiator; + u8 razwi_type; +}; + +/** + * struct hl_reset_info - holds current device reset information. + * @lock: lock to protect critical reset flows. + * @soft_reset_cnt: number of soft reset since the driver was loaded. + * @hard_reset_cnt: number of hard reset since the driver was loaded. + * @hard_reset_schedule_flags: hard reset is scheduled to run after the current soft reset; + * here we hold the hard reset flags. + * @in_reset: is device in reset flow. + * @is_in_soft_reset: Device is currently in soft reset process. + * @needs_reset: true if reset_on_lockup is false and device should be reset + * due to lockup. + * @hard_reset_pending: is there a hard reset work pending. + * @curr_reset_cause: saves an enumerated reset cause when a hard reset is + * triggered, and cleared after it is shared with preboot. + * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden + * with a new value on next reset + * @reset_trigger_repeated: set if device reset is triggered more than once with + * same cause. + * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to + * complete instead. + */ +struct hl_reset_info { + spinlock_t lock; + u32 soft_reset_cnt; + u32 hard_reset_cnt; + u32 hard_reset_schedule_flags; + u8 in_reset; + u8 is_in_soft_reset; + u8 needs_reset; + u8 hard_reset_pending; + + u8 curr_reset_cause; + u8 prev_reset_trigger; + u8 reset_trigger_repeated; + + u8 skip_reset_on_timeout; +}; + +/** * struct hl_device - habanalabs device structure. * @pdev: pointer to PCI device, can be NULL in case of simulator device. * @pcie_bar_phys: array of available PCIe bars physical addresses. @@ -2363,7 +2504,6 @@ struct multi_cs_data { * @cdev_ctrl: char device for control operations only (INFO IOCTL) * @dev: related kernel basic device structure. * @dev_ctrl: related kernel device structure for the control device - * @work_freq: delayed work to lower device frequency if possible. * @work_heartbeat: delayed work for CPU-CP is-alive check. * @device_reset_work: delayed work which performs hard reset * @asic_name: ASIC specific name. @@ -2398,7 +2538,6 @@ struct multi_cs_data { * @asic_specific: ASIC specific information to use only from ASIC files. * @vm: virtual memory manager for MMU. * @hwmon_dev: H/W monitor device. - * @pm_mng_profile: current power management profile. * @hl_chip_info: ASIC's sensors information. * @device_status_description: device status description. * @hl_debugfs: device's debugfs manager. @@ -2410,8 +2549,10 @@ struct multi_cs_data { * @internal_cb_va_base: internal cb pool mmu virtual address base * @fpriv_list: list of file private data structures. Each structure is created * when a user opens the device + * @fpriv_ctrl_list: list of file private data structures. Each structure is created + * when a user opens the control device * @fpriv_list_lock: protects the fpriv_list - * @compute_ctx: current compute context executing. + * @fpriv_ctrl_list_lock: protects the fpriv_ctrl_list * @aggregated_cs_counters: aggregated cs counters among all contexts * @mmu_priv: device-specific MMU data. * @mmu_func: device-related MMU functions.
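struct hl_reset_info above replaces the old in_reset atomic with a flag guarded by a spinlock. The lock buys more than a test-and-set: a hard-reset request that arrives while a soft reset owns the flag can be parked in hard_reset_schedule_flags instead of being dropped, as the scheduling branch in hl_device_reset() does. A runnable userspace model of that protocol, with a pthread spinlock as a stand-in:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t reset_lock;
static bool in_reset, is_in_soft_reset;
static unsigned int hard_reset_schedule_flags;

/* returns true when the caller now owns the reset flow; a hard request
 * arriving during a soft reset is remembered instead of dropped */
static bool try_enter_reset(bool hard, unsigned int flags)
{
	bool entered = false;

	pthread_spin_lock(&reset_lock);
	if (in_reset) {
		if (hard && is_in_soft_reset)
			hard_reset_schedule_flags = flags;
	} else {
		in_reset = true;
		is_in_soft_reset = !hard;
		entered = true;
	}
	pthread_spin_unlock(&reset_lock);

	return entered;
}

int main(void)
{
	pthread_spin_init(&reset_lock, PTHREAD_PROCESS_PRIVATE);

	bool soft = try_enter_reset(false, 0);      /* owns the flow */
	bool hard = try_enter_reset(true, 1 << 0);  /* parked for later */

	printf("soft=%d hard=%d parked=0x%x\n", soft, hard,
	       hard_reset_schedule_flags);
	return 0;
}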
@@ -2419,6 +2560,10 @@ struct multi_cs_data { * @pci_mem_region: array of memory regions in the PCI * @state_dump_specs: constants and dictionaries needed to dump system state. * @multi_cs_completion: array of multi-CS completion. + * @clk_throttling: holds information about current/previous clock throttling events + * @reset_info: holds current device reset information. + * @last_error: holds information about last session in which CS timeout or razwi error occurred. + * @stream_master_qid_arr: pointer to array with QIDs of master streams. * @dram_used_mem: current DRAM memory consumption. * @timeout_jiffies: device CS timeout value. * @max_power: the max power of the device, as configured by the sysadmin. This @@ -2434,20 +2579,17 @@ struct multi_cs_data { * device initialization. Mainly used to debug and * workaround firmware bugs * @dram_pci_bar_start: start bus address of PCIe bar towards DRAM. + * @last_successful_open_ktime: timestamp (ktime) of the last successful device open. * @last_successful_open_jif: timestamp (jiffies) of the last successful * device open. * @last_open_session_duration_jif: duration (jiffies) of the last device open * session. * @open_counter: number of successful device open operations. - * @in_reset: is device in reset flow. - * @curr_pll_profile: current PLL profile. + * @fw_poll_interval_usec: FW status poll interval in usec. * @card_type: Various ASICs have several card types. This indicates the card * type of the current device. * @major: habanalabs kernel driver major. * @high_pll: high PLL profile frequency. - * @soft_reset_cnt: number of soft reset since the driver was loaded. - * @hard_reset_cnt: number of hard reset since the driver was loaded. - * @clk_throttling_reason: bitmask represents the current clk throttling reasons * @id: device minor. * @id_control: minor of the control device * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit @@ -2455,7 +2597,6 @@ struct multi_cs_data { * @disabled: is device disabled. * @late_init_done: is late init stage was done during initialization. * @hwmon_initialized: is H/W monitor sensors was initialized. - * @hard_reset_pending: is there a hard reset work pending. * @heartbeat: is heartbeat sanity check towards CPU-CP enabled. * @reset_on_lockup: true if a reset should be done in case of stuck CS, false * otherwise. @@ -2467,8 +2608,9 @@ struct multi_cs_data { * @init_done: is the initialization of the device done. * @device_cpu_disabled: is the device CPU disabled (due to timeouts) * @dma_mask: the dma mask that was set for this device - * @in_debug: is device under debug. This, together with fpriv_list, enforces - * that only a single user is configuring the debug infrastructure. + * @in_debug: whether the device is in a state where the profiling/tracing infrastructure + * can be used. This indication is needed because in some ASICs we need to do + * specific operations to enable that infrastructure. * @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant * only to POWER9 machines. * @cdev_sysfs_created: were char devices and sysfs nodes created. @@ -2477,34 +2619,18 @@ struct multi_cs_data { * @sync_stream_queue_idx: helper index for sync stream queues initialization. * @collective_mon_idx: helper index for collective initialization * @supports_coresight: is CoreSight supported. - * @supports_soft_reset: is soft reset supported. - * @allow_inference_soft_reset: true if the ASIC supports soft reset that is - * initiated by user or TDR. 
This is only true - * in inference ASICs, as there is no real-world - * use-case of doing soft-reset in training (due - * to the fact that training runs on multiple - * devices) * @supports_cb_mapping: is mapping a CB to the device's MMU supported. - * @needs_reset: true if reset_on_lockup is false and device should be reset - * due to lockup. * @process_kill_trial_cnt: number of trials reset thread tried killing * user processes * @device_fini_pending: true if device_fini was called and might be * waiting for the reset thread to finish * @supports_staged_submission: true if staged submissions are supported - * @curr_reset_cause: saves an enumerated reset cause when a hard reset is - * triggered, and cleared after it is shared with preboot. - * @prev_reset_trigger: saves the previous trigger which caused a reset, overidden - * with a new value on next reset - * @reset_trigger_repeated: set if device reset is triggered more than once with - * same cause. - * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to - * complete instead. * @device_cpu_is_halted: Flag to indicate whether the device CPU was already * halted. We can't halt it again because the COMMS * protocol will throw an error. Relevant only for * cases where Linux was not loaded to device CPU * @supports_wait_for_multi_cs: true if wait for multi CS is supported + * @is_compute_ctx_active: Whether there is an active compute context executing. */ struct hl_device { struct pci_dev *pdev; @@ -2515,7 +2641,6 @@ struct hl_device { struct cdev cdev_ctrl; struct device *dev; struct device *dev_ctrl; - struct delayed_work work_freq; struct delayed_work work_heartbeat; struct hl_device_reset_work device_reset_work; char asic_name[HL_STR_MAX]; @@ -2546,7 +2671,6 @@ struct hl_device { void *asic_specific; struct hl_vm vm; struct device *hwmon_dev; - enum hl_pm_mng_profile pm_mng_profile; struct hwmon_chip_info *hl_chip_info; struct hl_dbg_device_entry hl_debugfs; @@ -2560,9 +2684,9 @@ struct hl_device { u64 internal_cb_va_base; struct list_head fpriv_list; + struct list_head fpriv_ctrl_list; struct mutex fpriv_list_lock; - - struct hl_ctx *compute_ctx; + struct mutex fpriv_ctrl_list_lock; struct hl_cs_counters_atomic aggregated_cs_counters; @@ -2577,6 +2701,11 @@ struct hl_device { struct multi_cs_completion multi_cs_completion[ MULTI_CS_MAX_USER_CTX]; + struct hl_clk_throttle clk_throttling; + struct last_error_session_info last_error; + + struct hl_reset_info reset_info; + u32 *stream_master_qid_arr; atomic64_t dram_used_mem; u64 timeout_jiffies; @@ -2587,21 +2716,17 @@ struct hl_device { u64 last_successful_open_jif; u64 last_open_session_duration_jif; u64 open_counter; - atomic_t in_reset; - enum hl_pll_frequency curr_pll_profile; + u64 fw_poll_interval_usec; + ktime_t last_successful_open_ktime; enum cpucp_card_types card_type; u32 major; u32 high_pll; - u32 soft_reset_cnt; - u32 hard_reset_cnt; - u32 clk_throttling_reason; u16 id; u16 id_control; u16 cpu_pci_msb_addr; u8 disabled; u8 late_init_done; u8 hwmon_initialized; - u8 hard_reset_pending; u8 heartbeat; u8 reset_on_lockup; u8 dram_default_page_mapping; @@ -2618,20 +2743,14 @@ struct hl_device { u8 sync_stream_queue_idx; u8 collective_mon_idx; u8 supports_coresight; - u8 supports_soft_reset; - u8 allow_inference_soft_reset; u8 supports_cb_mapping; - u8 needs_reset; u8 process_kill_trial_cnt; u8 device_fini_pending; u8 supports_staged_submission; - u8 curr_reset_cause; - u8 prev_reset_trigger; - u8 reset_trigger_repeated; - u8 skip_reset_on_timeout; u8 
device_cpu_is_halted; u8 supports_wait_for_multi_cs; u8 stream_master_qid_arr_size; + u8 is_compute_ctx_active; /* Parameters for bring-up */ u64 nic_ports_mask; @@ -2659,6 +2778,7 @@ struct hl_device { * wait cs are used to wait of the reserved encaps signals. * @hdev: pointer to habanalabs device structure. * @hw_sob: pointer to H/W SOB used in the reservation. + * @ctx: pointer to the user's context data structure * @cs_seq: staged cs sequence which contains encapsulated signals * @id: idr handler id to be used to fetch the handler info * @q_idx: stream queue index @@ -2669,6 +2789,7 @@ struct hl_cs_encaps_sig_handle { struct kref refcount; struct hl_device *hdev; struct hl_hw_sob *hw_sob; + struct hl_ctx *ctx; u64 cs_seq; u32 id; u32 q_idx; @@ -2757,21 +2878,9 @@ static inline bool hl_mem_area_inside_range(u64 address, u64 size, static inline bool hl_mem_area_crosses_range(u64 address, u32 size, u64 range_start_address, u64 range_end_address) { - u64 end_address = address + size; + u64 end_address = address + size - 1; - if ((address >= range_start_address) && - (address < range_end_address)) - return true; - - if ((end_address >= range_start_address) && - (end_address < range_end_address)) - return true; - - if ((address < range_start_address) && - (end_address >= range_end_address)) - return true; - - return false; + return ((address <= range_end_address) && (range_start_address <= end_address)); } int hl_device_open(struct inode *inode, struct file *filp); @@ -2779,10 +2888,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp); bool hl_device_operational(struct hl_device *hdev, enum hl_device_status *status); enum hl_device_status hl_device_status(struct hl_device *hdev); -int hl_device_set_debug_mode(struct hl_device *hdev, bool enable); -int create_hdev(struct hl_device **dev, struct pci_dev *pdev, - enum hl_asic_type asic_type, int minor); -void destroy_hdev(struct hl_device *hdev); +int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable); int hl_hw_queues_create(struct hl_device *hdev); void hl_hw_queues_destroy(struct hl_device *hdev); int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, @@ -2821,6 +2927,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx); void hl_ctx_do_release(struct kref *ref); void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx); int hl_ctx_put(struct hl_ctx *ctx); +struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev); struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq); int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr, struct hl_fence **fence, u32 arr_len); @@ -2834,7 +2941,6 @@ int hl_device_resume(struct hl_device *hdev); int hl_device_reset(struct hl_device *hdev, u32 flags); void hl_hpriv_get(struct hl_fpriv *hpriv); int hl_hpriv_put(struct hl_fpriv *hpriv); -int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq); int hl_device_utilization(struct hl_device *hdev, u32 *utilization); int hl_build_hwmon_channel_info(struct hl_device *hdev, @@ -2915,6 +3021,9 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 size); int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size); +int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags); +int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard, + u32 flags, u32 asid, u64 va, u64 size); void 
hl_mmu_swap_out(struct hl_ctx *ctx); void hl_mmu_swap_in(struct hl_ctx *ctx); int hl_mmu_if_set_funcs(struct hl_device *hdev); @@ -2969,6 +3078,10 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev, struct fw_load_mgr *fw_loader, enum comms_cmd cmd, unsigned int size, bool wait_ok, u32 timeout); +int hl_fw_dram_replaced_row_get(struct hl_device *hdev, + struct cpucp_hbm_row_info *info); +int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num); +int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid); int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], bool is_wc[3]); int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data); diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index 949d1b5c5c41..690b763c7a95 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -153,15 +153,7 @@ int hl_device_open(struct inode *inode, struct file *filp) goto out_err; } - if (hdev->in_debug) { - dev_err_ratelimited(hdev->dev, - "Can't open %s because it is being debugged by another user\n", - dev_name(hdev->dev)); - rc = -EPERM; - goto out_err; - } - - if (hdev->compute_ctx) { + if (hdev->is_compute_ctx_active) { dev_dbg_ratelimited(hdev->dev, "Can't open %s because another user is working on it\n", dev_name(hdev->dev)); @@ -175,20 +167,17 @@ int hl_device_open(struct inode *inode, struct file *filp) goto out_err; } - /* Device is IDLE at this point so it is legal to change PLLs. - * There is no need to check anything because if the PLL is - * already HIGH, the set function will return without doing - * anything - */ - hl_device_set_frequency(hdev, PLL_HIGH); - list_add(&hpriv->dev_node, &hdev->fpriv_list); mutex_unlock(&hdev->fpriv_list_lock); hl_debugfs_add_file(hpriv); + atomic_set(&hdev->last_error.cs_write_disable, 0); + atomic_set(&hdev->last_error.razwi_write_disable, 0); + hdev->open_counter++; hdev->last_successful_open_jif = jiffies; + hdev->last_successful_open_ktime = ktime_get(); return 0; @@ -231,12 +220,11 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp) hpriv->hdev = hdev; filp->private_data = hpriv; hpriv->filp = filp; - hpriv->is_control = true; nonseekable_open(inode, filp); hpriv->taskpid = find_get_pid(current->pid); - mutex_lock(&hdev->fpriv_list_lock); + mutex_lock(&hdev->fpriv_ctrl_list_lock); if (!hl_device_operational(hdev, NULL)) { dev_err_ratelimited(hdev->dev_ctrl, @@ -246,13 +234,13 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp) goto out_err; } - list_add(&hpriv->dev_node, &hdev->fpriv_list); - mutex_unlock(&hdev->fpriv_list_lock); + list_add(&hpriv->dev_node, &hdev->fpriv_ctrl_list); + mutex_unlock(&hdev->fpriv_ctrl_list_lock); return 0; out_err: - mutex_unlock(&hdev->fpriv_list_lock); + mutex_unlock(&hdev->fpriv_ctrl_list_lock); filp->private_data = NULL; put_pid(hpriv->taskpid); @@ -263,6 +251,7 @@ out_err: static void set_driver_behavior_per_device(struct hl_device *hdev) { + hdev->pldm = 0; hdev->fw_components = FW_TYPE_ALL_TYPES; hdev->cpu_queues_enable = 1; hdev->heartbeat = 1; @@ -279,23 +268,53 @@ static void set_driver_behavior_per_device(struct hl_device *hdev) hdev->axi_drain = 0; } -/* +static void copy_kernel_module_params_to_device(struct hl_device *hdev) +{ + hdev->major 
= hl_major; + hdev->memory_scrub = memory_scrub; + hdev->reset_on_lockup = reset_on_lockup; + hdev->boot_error_status_mask = boot_error_status_mask; + + if (timeout_locked) + hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000); + else + hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT; + +} + +static int fixup_device_params(struct hl_device *hdev) +{ + hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type); + + hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC; + + hdev->stop_on_err = true; + hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; + hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT; + + /* Enable only after the initialization of the device */ + hdev->disabled = true; + + /* Set default DMA mask to 32 bits */ + hdev->dma_mask = 32; + + return 0; +} + +/** * create_hdev - create habanalabs device instance * * @dev: will hold the pointer to the new habanalabs device structure * @pdev: pointer to the pci device - * @asic_type: in case of simulator device, which device is it - * @minor: in case of simulator device, the minor of the device * * Allocate memory for habanalabs device and initialize basic fields * Identify the ASIC type * Allocate ID (minor) for the device (only for real devices) */ -int create_hdev(struct hl_device **dev, struct pci_dev *pdev, - enum hl_asic_type asic_type, int minor) +static int create_hdev(struct hl_device **dev, struct pci_dev *pdev) { + int main_id, ctrl_id = 0, rc = 0; struct hl_device *hdev; - int rc, main_id, ctrl_id = 0; *dev = NULL; @@ -303,69 +322,39 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, if (!hdev) return -ENOMEM; - /* First, we must find out which ASIC are we handling. This is needed - * to configure the behavior of the driver (kernel parameters) - */ - if (pdev) { - hdev->asic_type = get_asic_type(pdev->device); - if (hdev->asic_type == ASIC_INVALID) { - dev_err(&pdev->dev, "Unsupported ASIC\n"); - rc = -ENODEV; - goto free_hdev; - } - } else { - hdev->asic_type = asic_type; - } - - if (pdev) - hdev->asic_prop.fw_security_enabled = - is_asic_secured(hdev->asic_type); - else - hdev->asic_prop.fw_security_enabled = false; + /* can be NULL in case of simulator device */ + hdev->pdev = pdev; /* Assign status description string */ - strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL], - "operational", HL_STR_MAX); - strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], - "in reset", HL_STR_MAX); - strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], - "disabled", HL_STR_MAX); - strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], - "needs reset", HL_STR_MAX); + strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL], "operational", HL_STR_MAX); + strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], "in reset", HL_STR_MAX); + strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], "disabled", HL_STR_MAX); + strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX); strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION], "in device creation", HL_STR_MAX); - hdev->major = hl_major; - hdev->reset_on_lockup = reset_on_lockup; - hdev->memory_scrub = memory_scrub; - hdev->boot_error_status_mask = boot_error_status_mask; - hdev->stop_on_err = true; + /* First, we must find out which ASIC we are handling.
This is needed + * to configure the behavior of the driver (kernel parameters) + */ + hdev->asic_type = get_asic_type(pdev->device); + if (hdev->asic_type == ASIC_INVALID) { + dev_err(&pdev->dev, "Unsupported ASIC\n"); + rc = -ENODEV; + goto free_hdev; + } - hdev->pldm = 0; + copy_kernel_module_params_to_device(hdev); set_driver_behavior_per_device(hdev); - hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; - hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT; - - if (timeout_locked) - hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000); - else - hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT; - - hdev->disabled = true; - hdev->pdev = pdev; /* can be NULL in case of simulator device */ - - /* Set default DMA mask to 32 bits */ - hdev->dma_mask = 32; + fixup_device_params(hdev); mutex_lock(&hl_devs_idr_lock); /* Always save 2 numbers, 1 for main device and 1 for control. * They must be consecutive */ - main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, - GFP_KERNEL); + main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, GFP_KERNEL); if (main_id >= 0) ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1, @@ -405,7 +394,7 @@ free_hdev: * @dev: pointer to the habanalabs device structure * */ -void destroy_hdev(struct hl_device *hdev) +static void destroy_hdev(struct hl_device *hdev) { /* Remove device from the device list */ mutex_lock(&hl_devs_idr_lock); @@ -444,7 +433,7 @@ static int hl_pmops_resume(struct device *dev) return hl_device_resume(hdev); } -/* +/** * hl_pci_probe - probe PCI habanalabs devices * * @pdev: pointer to pci device @@ -454,8 +443,7 @@ static int hl_pmops_resume(struct device *dev) * Create a new habanalabs device and initialize it according to the * device's type */ -static int hl_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hl_device *hdev; int rc; @@ -464,7 +452,7 @@ static int hl_pci_probe(struct pci_dev *pdev, " device found [%04x:%04x] (rev %x)\n", (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); - rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1); + rc = create_hdev(&hdev, pdev); if (rc) return rc; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 86c3257d9ae1..3ba3a8ffda3e 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -158,7 +158,7 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) min((size_t) max_size, sizeof(hw_idle))) ? 
-EFAULT : 0; } -static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args) +static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args) { struct hl_debug_params *params; void *input = NULL, *output = NULL; @@ -200,7 +200,7 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args) params->output_size = args->output_size; } - rc = hdev->asic_funcs->debug_coresight(hdev, params); + rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params); if (rc) { dev_err(hdev->dev, "debug coresight operation failed %d\n", rc); @@ -269,8 +269,8 @@ static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args) if ((!max_size) || (!out)) return -EINVAL; - reset_count.hard_reset_cnt = hdev->hard_reset_cnt; - reset_count.soft_reset_cnt = hdev->soft_reset_cnt; + reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt; + reset_count.soft_reset_cnt = hdev->reset_info.soft_reset_cnt; return copy_to_user(out, &reset_count, min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0; @@ -313,15 +313,38 @@ static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { + void __user *out = (void __user *) (uintptr_t) args->return_pointer; struct hl_device *hdev = hpriv->hdev; struct hl_info_clk_throttle clk_throttle = {0}; + ktime_t end_time, zero_time = ktime_set(0, 0); u32 max_size = args->return_size; - void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int i; if ((!max_size) || (!out)) return -EINVAL; - clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason; + mutex_lock(&hdev->clk_throttling.lock); + + clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason; + + for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) { + if (!(hdev->clk_throttling.aggregated_reason & BIT(i))) + continue; + + clk_throttle.clk_throttling_timestamp_us[i] = + ktime_to_us(hdev->clk_throttling.timestamp[i].start); + + if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time)) + end_time = hdev->clk_throttling.timestamp[i].end; + else + end_time = ktime_get(); + + clk_throttle.clk_throttling_duration_ns[i] = + ktime_to_ns(ktime_sub(end_time, + hdev->clk_throttling.timestamp[i].start)); + + } + mutex_unlock(&hdev->clk_throttling.lock); return copy_to_user(out, &clk_throttle, min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0; @@ -480,6 +503,94 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args) min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0; } +static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + u32 max_size = args->return_size; + u32 pend_rows_num = 0; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int rc; + + if ((!max_size) || (!out)) + return -EINVAL; + + rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num); + if (rc) + return rc; + + return copy_to_user(out, &pend_rows_num, + min_t(size_t, max_size, sizeof(pend_rows_num))) ? 
-EFAULT : 0; +} + +static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + u32 max_size = args->return_size; + struct cpucp_hbm_row_info info = {0}; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int rc; + + if ((!max_size) || (!out)) + return -EINVAL; + + rc = hl_fw_dram_replaced_row_get(hdev, &info); + if (rc) + return rc; + + return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; +} + +static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_info_last_err_open_dev_time info = {0}; + struct hl_device *hdev = hpriv->hdev; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + info.timestamp = ktime_to_ns(hdev->last_error.open_dev_timestamp); + + return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; +} + +static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_info_cs_timeout_event info = {0}; + struct hl_device *hdev = hpriv->hdev; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + info.seq = hdev->last_error.cs_timeout_seq; + info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout_timestamp); + + return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; +} + +static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + u32 max_size = args->return_size; + struct hl_info_razwi_event info = {0}; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + info.timestamp = ktime_to_ns(hdev->last_error.razwi_timestamp); + info.addr = hdev->last_error.razwi_addr; + info.engine_id_1 = hdev->last_error.razwi_engine_id_1; + info.engine_id_2 = hdev->last_error.razwi_engine_id_2; + info.no_engine_id = hdev->last_error.razwi_non_engine_initiator; + info.error_type = hdev->last_error.razwi_type; + + return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? 
-EFAULT : 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { @@ -503,6 +614,33 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_RESET_COUNT: return get_reset_count(hdev, args); + case HL_INFO_HW_EVENTS: + return hw_events_info(hdev, false, args); + + case HL_INFO_HW_EVENTS_AGGREGATE: + return hw_events_info(hdev, true, args); + + case HL_INFO_CS_COUNTERS: + return cs_counters_info(hpriv, args); + + case HL_INFO_CLK_THROTTLE_REASON: + return clk_throttle_info(hpriv, args); + + case HL_INFO_SYNC_MANAGER: + return sync_manager_info(hpriv, args); + + case HL_INFO_OPEN_STATS: + return open_stats_info(hpriv, args); + + case HL_INFO_LAST_ERR_OPEN_DEV_TIME: + return last_err_open_dev_info(hpriv, args); + + case HL_INFO_CS_TIMEOUT_EVENT: + return cs_timeout_info(hpriv, args); + + case HL_INFO_RAZWI_EVENT: + return razwi_info(hpriv, args); + default: break; } @@ -515,10 +653,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, } switch (args->op) { - case HL_INFO_HW_EVENTS: - rc = hw_events_info(hdev, false, args); - break; - case HL_INFO_DRAM_USAGE: rc = dram_usage_info(hpriv, args); break; @@ -531,10 +665,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, rc = device_utilization(hdev, args); break; - case HL_INFO_HW_EVENTS_AGGREGATE: - rc = hw_events_info(hdev, true, args); - break; - case HL_INFO_CLK_RATE: rc = get_clk_rate(hdev, args); break; @@ -542,18 +672,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_TIME_SYNC: return time_sync_info(hdev, args); - case HL_INFO_CS_COUNTERS: - return cs_counters_info(hpriv, args); - case HL_INFO_PCI_COUNTERS: return pci_counters_info(hpriv, args); - case HL_INFO_CLK_THROTTLE_REASON: - return clk_throttle_info(hpriv, args); - - case HL_INFO_SYNC_MANAGER: - return sync_manager_info(hpriv, args); - case HL_INFO_TOTAL_ENERGY: return total_energy_consumption_info(hpriv, args); @@ -563,12 +684,16 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_POWER: return power_info(hpriv, args); - case HL_INFO_OPEN_STATS: - return open_stats_info(hpriv, args); + + case HL_INFO_DRAM_REPLACED_ROWS: + return dram_replaced_rows_info(hpriv, args); + + case HL_INFO_DRAM_PENDING_ROWS: + return dram_pending_rows_info(hpriv, args); default: dev_err(dev, "Invalid request %d\n", args->op); - rc = -ENOTTY; + rc = -EINVAL; break; } @@ -613,16 +738,17 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data) "Rejecting debug configuration request because device not in debug mode\n"); return -EFAULT; } - args->input_size = - min(args->input_size, hl_debug_struct_size[args->op]); - rc = debug_coresight(hdev, args); + args->input_size = min(args->input_size, hl_debug_struct_size[args->op]); + rc = debug_coresight(hdev, hpriv->ctx, args); break; + case HL_DEBUG_OP_SET_MODE: - rc = hl_device_set_debug_mode(hdev, (bool) args->enable); + rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable); break; + default: dev_err(hdev->dev, "Invalid request %d\n", args->op); - rc = -ENOTTY; + rc = -EINVAL; break; } @@ -649,7 +775,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg, const struct hl_ioctl_desc *ioctl, struct device *dev) { struct hl_fpriv *hpriv = filep->private_data; - struct hl_device *hdev = hpriv->hdev; unsigned int nr = _IOC_NR(cmd); char stack_kdata[128] = {0}; char *kdata = NULL; @@ -658,12 +783,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg, 
u32 hl_size; int retcode; - if (hdev->hard_reset_pending) { - dev_crit_ratelimited(dev, - "Device HARD reset pending! Please close FD\n"); - return -ENODEV; - } - /* Do not trust userspace, use our own definition */ func = ioctl->func; diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index 0743319b10c7..6103e479e855 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -429,6 +429,9 @@ static int init_signal_cs(struct hl_device *hdev, rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1, false); + job->cs->sob_addr_offset = hw_sob->sob_addr; + job->cs->initial_sob_count = prop->next_sob_val - 1; + return rc; } @@ -571,7 +574,7 @@ static int encaps_sig_first_staged_cs_handler struct hl_encaps_signals_mgr *mgr; int rc = 0; - mgr = &hdev->compute_ctx->sig_mgr; + mgr = &cs->ctx->sig_mgr; spin_lock(&mgr->lock); encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id); diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c index e33f65be8a00..57f5d2c48330 100644 --- a/drivers/misc/habanalabs/common/hwmon.c +++ b/drivers/misc/habanalabs/common/hwmon.c @@ -10,17 +10,148 @@ #include <linux/pci.h> #include <linux/hwmon.h> -#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1) +#define HWMON_NR_SENSOR_TYPES (hwmon_max) -int hl_build_hwmon_channel_info(struct hl_device *hdev, - struct cpucp_sensor *sensors_arr) +#ifdef _HAS_HWMON_HWMON_T_ENABLE + +static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type, + u32 cpucp_flags) { - u32 counts[HWMON_NR_SENSOR_TYPES] = {0}; - u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL}; + u32 flags; + + switch (type) { + case hwmon_temp: + flags = (cpucp_flags << 1) | HWMON_T_ENABLE; + break; + + case hwmon_in: + flags = (cpucp_flags << 1) | HWMON_I_ENABLE; + break; + + case hwmon_curr: + flags = (cpucp_flags << 1) | HWMON_C_ENABLE; + break; + + case hwmon_fan: + flags = (cpucp_flags << 1) | HWMON_F_ENABLE; + break; + + case hwmon_power: + flags = (cpucp_flags << 1) | HWMON_P_ENABLE; + break; + + case hwmon_pwm: + /* enable bit was here from day 1, so no need to adjust */ + flags = cpucp_flags; + break; + + default: + dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type); + flags = cpucp_flags; + break; + } + + return flags; +} + +static u32 fixup_attr_legacy_fw(u32 attr) +{ + return (attr - 1); +} + +#else + +static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type, + u32 cpucp_flags) +{ + return cpucp_flags; +} + +static u32 fixup_attr_legacy_fw(u32 attr) +{ + return attr; +} + +#endif /* !_HAS_HWMON_HWMON_T_ENABLE */ + +static u32 adjust_hwmon_flags(struct hl_device *hdev, enum hwmon_sensor_types type, u32 cpucp_flags) +{ + u32 flags, cpucp_input_val; + bool use_cpucp_enum; + + use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & + CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false; + + /* If f/w is using its own enum, we need to check if the property values are aligned. + * If not, it means we need to adjust the values to the new format that is used in the + * kernel since 5.6 (enum values were incremented by 1 by adding a new enable value).
+ */ + if (use_cpucp_enum) { + switch (type) { + case hwmon_temp: + cpucp_input_val = cpucp_temp_input; + if (cpucp_input_val == hwmon_temp_input) + flags = cpucp_flags; + else + flags = (cpucp_flags << 1) | HWMON_T_ENABLE; + break; + + case hwmon_in: + cpucp_input_val = cpucp_in_input; + if (cpucp_input_val == hwmon_in_input) + flags = cpucp_flags; + else + flags = (cpucp_flags << 1) | HWMON_I_ENABLE; + break; + + case hwmon_curr: + cpucp_input_val = cpucp_curr_input; + if (cpucp_input_val == hwmon_curr_input) + flags = cpucp_flags; + else + flags = (cpucp_flags << 1) | HWMON_C_ENABLE; + break; + + case hwmon_fan: + cpucp_input_val = cpucp_fan_input; + if (cpucp_input_val == hwmon_fan_input) + flags = cpucp_flags; + else + flags = (cpucp_flags << 1) | HWMON_F_ENABLE; + break; + + case hwmon_pwm: + /* enable bit was here from day 1, so no need to adjust */ + flags = cpucp_flags; + break; + + case hwmon_power: + cpucp_input_val = CPUCP_POWER_INPUT; + if (cpucp_input_val == hwmon_power_input) + flags = cpucp_flags; + else + flags = (cpucp_flags << 1) | HWMON_P_ENABLE; + break; + + default: + dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type); + flags = cpucp_flags; + break; + } + } else { + flags = fixup_flags_legacy_fw(hdev, type, cpucp_flags); + } + + return flags; +} + +int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sensors_arr) +{ + u32 num_sensors_for_type, flags, num_active_sensor_types = 0, arr_size = 0, *curr_arr; u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0}; + u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL}; struct hwmon_channel_info **channels_info; - u32 num_sensors_for_type, num_active_sensor_types = 0, - arr_size = 0, *curr_arr; + u32 counts[HWMON_NR_SENSOR_TYPES] = {0}; enum hwmon_sensor_types type; int rc, i, j; @@ -31,8 +162,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, break; if (type >= HWMON_NR_SENSOR_TYPES) { - dev_err(hdev->dev, - "Got wrong sensor type %d from device\n", type); + dev_err(hdev->dev, "Got wrong sensor type %d from device\n", type); return -EINVAL; } @@ -45,8 +175,9 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, continue; num_sensors_for_type = counts[i] + 1; - curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr), - GFP_KERNEL); + dev_dbg(hdev->dev, "num_sensors_for_type %d = %d\n", i, num_sensors_for_type); + + curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr), GFP_KERNEL); if (!curr_arr) { rc = -ENOMEM; goto sensors_type_err; @@ -59,20 +190,18 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, for (i = 0 ; i < arr_size ; i++) { type = le32_to_cpu(sensors_arr[i].type); curr_arr = sensors_by_type[type]; - curr_arr[sensors_by_type_next_index[type]++] = - le32_to_cpu(sensors_arr[i].flags); + flags = adjust_hwmon_flags(hdev, type, le32_to_cpu(sensors_arr[i].flags)); + curr_arr[sensors_by_type_next_index[type]++] = flags; } - channels_info = kcalloc(num_active_sensor_types + 1, - sizeof(*channels_info), GFP_KERNEL); + channels_info = kcalloc(num_active_sensor_types + 1, sizeof(*channels_info), GFP_KERNEL); if (!channels_info) { rc = -ENOMEM; goto channels_info_array_err; } for (i = 0 ; i < num_active_sensor_types ; i++) { - channels_info[i] = kzalloc(sizeof(*channels_info[i]), - GFP_KERNEL); + channels_info[i] = kzalloc(sizeof(*channels_info[i]), GFP_KERNEL); if (!channels_info[i]) { rc = -ENOMEM; goto channel_info_err; @@ -88,18 +217,19 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, j++; } - hdev->hl_chip_info->info = - (const struct 
hwmon_channel_info **)channels_info; + hdev->hl_chip_info->info = (const struct hwmon_channel_info **)channels_info; return 0; channel_info_err: - for (i = 0 ; i < num_active_sensor_types ; i++) + for (i = 0 ; i < num_active_sensor_types ; i++) { if (channels_info[i]) { kfree(channels_info[i]->config); kfree(channels_info[i]); } + } kfree(channels_info); + channels_info_array_err: sensors_type_err: for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) @@ -112,14 +242,16 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct hl_device *hdev = dev_get_drvdata(dev); - int rc; + bool use_cpucp_enum; u32 cpucp_attr; - bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & - CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false; + int rc; if (!hl_device_operational(hdev, NULL)) return -ENODEV; + use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & + CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false; + switch (type) { case hwmon_temp: switch (attr) { @@ -151,7 +283,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) rc = hl_get_temperature(hdev, channel, cpucp_attr, val); else - rc = hl_get_temperature(hdev, channel, attr, val); + rc = hl_get_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val); break; case hwmon_in: switch (attr) { @@ -174,7 +306,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) rc = hl_get_voltage(hdev, channel, cpucp_attr, val); else - rc = hl_get_voltage(hdev, channel, attr, val); + rc = hl_get_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val); break; case hwmon_curr: switch (attr) { @@ -197,7 +329,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) rc = hl_get_current(hdev, channel, cpucp_attr, val); else - rc = hl_get_current(hdev, channel, attr, val); + rc = hl_get_current(hdev, channel, fixup_attr_legacy_fw(attr), val); break; case hwmon_fan: switch (attr) { @@ -217,7 +349,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) rc = hl_get_fan_speed(hdev, channel, cpucp_attr, val); else - rc = hl_get_fan_speed(hdev, channel, attr, val); + rc = hl_get_fan_speed(hdev, channel, fixup_attr_legacy_fw(attr), val); break; case hwmon_pwm: switch (attr) { @@ -234,6 +366,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) rc = hl_get_pwm_info(hdev, channel, cpucp_attr, val); else + /* no need for fixup as pwm was aligned from day 1 */ rc = hl_get_pwm_info(hdev, channel, attr, val); break; case hwmon_power: @@ -251,7 +384,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) rc = hl_get_power(hdev, channel, cpucp_attr, val); else - rc = hl_get_power(hdev, channel, attr, val); + rc = hl_get_power(hdev, channel, fixup_attr_legacy_fw(attr), val); break; default: return -EINVAL; @@ -286,7 +419,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) hl_set_temperature(hdev, channel, cpucp_attr, val); else - hl_set_temperature(hdev, channel, attr, val); + hl_set_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val); break; case hwmon_pwm: switch (attr) { @@ -303,6 +436,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) hl_set_pwm_info(hdev, channel, cpucp_attr, val); else + /* no need for fixup as pwm was aligned from day 1 */ hl_set_pwm_info(hdev, channel, attr, val); break; case 
hwmon_in: @@ -317,7 +451,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) hl_set_voltage(hdev, channel, cpucp_attr, val); else - hl_set_voltage(hdev, channel, attr, val); + hl_set_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val); break; case hwmon_curr: switch (attr) { @@ -331,7 +465,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) hl_set_current(hdev, channel, cpucp_attr, val); else - hl_set_current(hdev, channel, attr, val); + hl_set_current(hdev, channel, fixup_attr_legacy_fw(attr), val); break; case hwmon_power: switch (attr) { @@ -345,7 +479,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type, if (use_cpucp_enum) hl_set_power(hdev, channel, cpucp_attr, val); else - hl_set_power(hdev, channel, attr, val); + hl_set_power(hdev, channel, fixup_attr_legacy_fw(attr), val); break; default: return -EINVAL; @@ -444,6 +578,9 @@ int hl_get_temperature(struct hl_device *hdev, pkt.sensor_index = __cpu_to_le16(sensor_index); pkt.type = __cpu_to_le16(attr); + dev_dbg(hdev->dev, "get temp, ctl 0x%x, sensor %d, type %d\n", + pkt.ctl, pkt.sensor_index, pkt.type); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result); @@ -677,12 +814,18 @@ int hl_set_power(struct hl_device *hdev, int sensor_index, u32 attr, long value) { struct cpucp_packet pkt; + struct asic_fixed_properties *prop = &hdev->asic_prop; int rc; memset(&pkt, 0, sizeof(pkt)); - pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET << + if (prop->use_get_power_for_reset_history) + pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT); + else + pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); pkt.type = __cpu_to_le16(attr); pkt.value = __cpu_to_le64(value); diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c index 96d82b682674..1b6bdc900c26 100644 --- a/drivers/misc/habanalabs/common/irq.c +++ b/drivers/misc/habanalabs/common/irq.c @@ -145,8 +145,12 @@ static void handle_user_cq(struct hl_device *hdev, spin_lock(&user_cq->wait_list_lock); list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node) { - pend->fence.timestamp = now; - complete_all(&pend->fence.completion); + if ((pend->cq_kernel_addr && + *(pend->cq_kernel_addr) >= pend->cq_target_value) || + !pend->cq_kernel_addr) { + pend->fence.timestamp = now; + complete_all(&pend->fence.completion); + } } spin_unlock(&user_cq->wait_list_lock); } @@ -245,10 +249,8 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg) */ dma_rmb(); - if (hdev->disabled) { - dev_warn(hdev->dev, - "Device disabled but received IRQ %d for EQ\n", - irq); + if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) { + dev_warn(hdev->dev, "Device disabled but received an EQ event\n"); goto skip_irq; } diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 9bd626a00de3..c1eefaebacb6 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -316,7 +316,7 @@ static int free_phys_pg_pack(struct hl_device *hdev, } if (rc && !hdev->disabled) - hl_device_reset(hdev, HL_RESET_HARD); + hl_device_reset(hdev, HL_DRV_RESET_HARD); end: kvfree(phys_pg_pack->pages); @@ -477,7 +477,7 @@ static int add_va_block_locked(struct hl_device *hdev, struct list_head *va_list, u64 start, u64 end) { struct hl_vm_va_block *va_block, *res = NULL; - u64 size = end - 
start; + u64 size = end - start + 1; print_va_list_locked(hdev, va_list); @@ -518,7 +518,7 @@ static int add_va_block_locked(struct hl_device *hdev, /** * add_va_block() - wrapper for add_va_block_locked. * @hdev: pointer to the habanalabs device structure. - * @va_list: pointer to the virtual addresses block list. + * @va_range: pointer to the virtual addresses range object. * @start: start virtual address. * @end: end virtual address. * @@ -538,8 +538,11 @@ static inline int add_va_block(struct hl_device *hdev, } /** - * is_hint_crossing_range() - check if hint address crossing specified reserved - * range. + * is_hint_crossing_range() - check if hint address crossing specified reserved. + * @range_type: virtual space range type. + * @start_addr: start virtual address. + * @size: block size. + * @prop: asic properties structure to retrieve reserved ranges from. */ static inline bool is_hint_crossing_range(enum hl_va_range_type range_type, u64 start_addr, u32 size, struct asic_fixed_properties *prop) { @@ -644,7 +647,7 @@ static u64 get_va_block(struct hl_device *hdev, continue; } - valid_size = va_block->end - valid_start; + valid_size = va_block->end - valid_start + 1; if (valid_size < size) continue; @@ -707,7 +710,7 @@ static u64 get_va_block(struct hl_device *hdev, if (new_va_block->size > size) { new_va_block->start += size; - new_va_block->size = new_va_block->end - new_va_block->start; + new_va_block->size = new_va_block->end - new_va_block->start + 1; } else { list_del(&new_va_block->node); kfree(new_va_block); @@ -749,6 +752,7 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, /** * hl_get_va_range_type() - get va_range type for the given address and size. + * @ctx: context to fetch va_range from. * @address: the start address of the area we want to validate. * @size: the size in bytes of the area we want to validate. * @type: returned va_range type. @@ -776,8 +780,8 @@ static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size, * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block. * @hdev: pointer to the habanalabs device structure * @ctx: pointer to the context structure. - * @start: start virtual address. - * @end: end virtual address. + * @start_addr: start virtual address. + * @size: number of bytes to unreserve. * * This function does the following: * - Takes the list lock and calls add_va_block_locked. 
@@ -1201,17 +1205,13 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, goto map_err; } - rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false, - *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size); + rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV, + ctx->asid, ret_vaddr, phys_pg_pack->total_size); mutex_unlock(&ctx->mmu_lock); - if (rc) { - dev_err(hdev->dev, - "mapping handle %u failed due to MMU cache invalidation\n", - handle); + if (rc) goto map_err; - } ret_vaddr += phys_pg_pack->offset; @@ -1349,9 +1349,8 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, * at the loop end rather than for each iteration */ if (!ctx_free) - rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true, - *vm_type, ctx->asid, vaddr, - phys_pg_pack->total_size); + rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr, + phys_pg_pack->total_size); mutex_unlock(&ctx->mmu_lock); @@ -1364,11 +1363,6 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, if (!ctx_free) { int tmp_rc; - if (rc) - dev_err(hdev->dev, - "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n", - vaddr); - tmp_rc = add_va_block(hdev, va_range, vaddr, vaddr + phys_pg_pack->total_size - 1); if (tmp_rc) { @@ -2037,7 +2031,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args) default: dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); - rc = -ENOTTY; + rc = -EINVAL; break; } @@ -2162,7 +2156,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) default: dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); - rc = -ENOTTY; + rc = -EINVAL; break; } @@ -2339,6 +2333,8 @@ void hl_userptr_delete_list(struct hl_device *hdev, /** * hl_userptr_is_pinned() - returns whether the given userptr is pinned. * @hdev: pointer to the habanalabs device structure. + * @addr: user address to check. + * @size: user block size to check. * @userptr_list: pointer to the list to clear. * @userptr: pointer to userptr to check. * @@ -2361,9 +2357,10 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, /** * va_range_init() - initialize virtual addresses range. * @hdev: pointer to the habanalabs device structure. - * @va_range: pointer to the range to initialize. + * @va_ranges: pointer to va_ranges array. * @start: range start address. * @end: range end address. + * @page_size: page size for this va_range. * * This function does the following: * - Initializes the virtual addresses list of the given range with the given @@ -2388,8 +2385,14 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, start += PAGE_SIZE; } - if (end & (PAGE_SIZE - 1)) - end &= PAGE_MASK; + /* + * The end of the range is inclusive, hence we need to align it + * to the end of the last full page in the range. For example if + * end = 0x3ff5 with page size 0x1000, we need to align it to + * 0x2fff. The remaining 0xff5 bytes do not form a full page. + */ + if ((end + 1) & (PAGE_SIZE - 1)) + end = ((end + 1) & PAGE_MASK) - 1; } if (start >= end) { @@ -2414,7 +2417,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, /** * va_range_fini() - clear a virtual addresses range. * @hdev: pointer to the habanalabs structure. - * va_range: pointer to virtual addresses rang.e + * @va_range: pointer to virtual addresses range. * * This function does the following: * - Frees the virtual addresses block list and its lock.
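The inclusive-end alignment added to va_range_init() above is easy to verify standalone: round start up to the next page boundary, then pull the inclusive end back so that end + 1 is page aligned. The sketch below (hypothetical names, userspace-only constants) reproduces the 0x3ff5 -> 0x2fff example from the comment:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	0x1000ULL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

static void clamp_va_range(uint64_t *start, uint64_t *end)
{
	/* round start up to the next page boundary */
	if (*start & (DEMO_PAGE_SIZE - 1)) {
		*start &= DEMO_PAGE_MASK;
		*start += DEMO_PAGE_SIZE;
	}

	/* end is inclusive: if end + 1 is not page aligned, the tail
	 * bytes do not form a full page, so drop them */
	if ((*end + 1) & (DEMO_PAGE_SIZE - 1))
		*end = ((*end + 1) & DEMO_PAGE_MASK) - 1;
}

int main(void)
{
	uint64_t start = 0x1234, end = 0x3ff5;

	clamp_va_range(&start, &end);
	assert(start == 0x2000);
	assert(end == 0x2fff);		/* 0xff5 tail bytes dropped */
	printf("range 0x%llx..0x%llx (%llu full pages)\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)((end - start + 1) / DEMO_PAGE_SIZE));
	return 0;
}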
@@ -2434,12 +2437,15 @@ static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range) * @ctx: pointer to the habanalabs context structure. * @host_range_start: host virtual addresses range start. * @host_range_end: host virtual addresses range end. + * @host_page_size: host page size. * @host_huge_range_start: host virtual addresses range start for memory * allocated with huge pages. * @host_huge_range_end: host virtual addresses range end for memory allocated * with huge pages. + * @host_huge_page_size: host huge page size. * @dram_range_start: dram virtual addresses range start. * @dram_range_end: dram virtual addresses range end. + * @dram_page_size: dram page size. * * This function initializes the following: * - MMU for context. @@ -2564,14 +2570,14 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) return 0; dram_range_start = prop->dmmu.start_addr; - dram_range_end = prop->dmmu.end_addr; + dram_range_end = prop->dmmu.end_addr - 1; dram_page_size = prop->dram_page_size ? prop->dram_page_size : prop->dmmu.page_size; host_range_start = prop->pmmu.start_addr; - host_range_end = prop->pmmu.end_addr; + host_range_end = prop->pmmu.end_addr - 1; host_page_size = prop->pmmu.page_size; host_huge_range_start = prop->pmmu_huge.start_addr; - host_huge_range_end = prop->pmmu_huge.end_addr; + host_huge_range_end = prop->pmmu_huge.end_addr - 1; host_huge_page_size = prop->pmmu_huge.page_size; return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, @@ -2618,7 +2624,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) * Clearly something went wrong on hard reset so no point in printing * another side effect error */ - if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash)) + if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash)) dev_dbg(hdev->dev, "user released device without removing its memory mappings\n"); @@ -2633,8 +2639,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) mutex_lock(&ctx->mmu_lock); /* invalidate the cache once after the unmapping loop */ - hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); - hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK); + hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR); + hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK); mutex_unlock(&ctx->mmu_lock); diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c index aa96917f62e5..9153a1f55175 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -637,3 +637,28 @@ u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr) { return addr; } + +int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags) +{ + int rc; + + rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags); + if (rc) + dev_err_ratelimited(hdev->dev, "MMU cache invalidation failed\n"); + + return rc; +} + +int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard, + u32 flags, u32 asid, u64 va, u64 size) +{ + int rc; + + rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags, + asid, va, size); + if (rc) + dev_err_ratelimited(hdev->dev, "MMU cache range invalidation failed\n"); + + return rc; +} + diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c index 0f536f79dd9c..6134b6ae7615 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c @@ -269,7 +269,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx) num_of_hop3 = 
prop->dram_size_for_default_page_mapping; do_div(num_of_hop3, prop->dram_page_size); - do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + do_div(num_of_hop3, HOP_PTE_ENTRIES_512); /* add hop1 and hop2 */ total_hops = num_of_hop3 + 2; @@ -330,7 +330,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx) for (i = 0 ; i < num_of_hop3 ; i++) { hop3_pte_addr = ctx->dram_default_hops[i]; - for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { + for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) { write_final_pte(ctx, hop3_pte_addr, pte_val); get_pte(ctx, ctx->dram_default_hops[i]); hop3_pte_addr += HL_PTE_SIZE; @@ -369,7 +369,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx) num_of_hop3 = prop->dram_size_for_default_page_mapping; do_div(num_of_hop3, prop->dram_page_size); - do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + do_div(num_of_hop3, HOP_PTE_ENTRIES_512); hop0_addr = get_hop0_addr(ctx); /* add hop1 and hop2 */ @@ -379,7 +379,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx) for (i = 0 ; i < num_of_hop3 ; i++) { hop3_pte_addr = ctx->dram_default_hops[i]; - for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { + for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) { clear_pte(ctx, hop3_pte_addr); put_pte(ctx, ctx->dram_default_hops[i]); hop3_pte_addr += HL_PTE_SIZE; @@ -573,7 +573,7 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; - is_huge = curr_pte & LAST_MASK; + is_huge = curr_pte & mmu_prop->last_mask; if (is_dram_addr && !is_huge) { dev_err(hdev->dev, @@ -597,7 +597,7 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, if (hdev->dram_default_page_mapping && is_dram_addr) { u64 default_pte = (prop->mmu_dram_default_page_addr & - HOP_PHYS_ADDR_MASK) | LAST_MASK | + HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask | PAGE_PRESENT_MASK; if (curr_pte == default_pte) { dev_err(hdev->dev, @@ -729,7 +729,7 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, if (hdev->dram_default_page_mapping && is_dram_addr) { u64 default_pte = (prop->mmu_dram_default_page_addr & - HOP_PHYS_ADDR_MASK) | LAST_MASK | + HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask | PAGE_PRESENT_MASK; if (curr_pte != default_pte) { @@ -769,7 +769,7 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, goto err; } - curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK + curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask | PAGE_PRESENT_MASK; if (is_huge) @@ -930,7 +930,7 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) return -EFAULT; - if (hops->hop_info[i].hop_pte_val & LAST_MASK) + if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask) break; } diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c index 42c1769ad25d..45c715325e2a 100644 --- a/drivers/misc/habanalabs/common/sysfs.c +++ b/drivers/misc/habanalabs/common/sysfs.c @@ -139,7 +139,7 @@ static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); return sprintf(buf, "0x%08x\n", - hdev->asic_prop.cpucp_info.cpld_version); + le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version)); } static ssize_t cpucp_kernel_ver_show(struct device *dev, @@ -163,8 +163,13 @@ static ssize_t infineon_ver_show(struct device *dev, { struct hl_device *hdev = dev_get_drvdata(dev); - return sprintf(buf, "0x%04x\n", - hdev->asic_prop.cpucp_info.infineon_version); + if 
(hdev->asic_prop.cpucp_info.infineon_second_stage_version) + return sprintf(buf, "%#04x %#04x\n", + le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_version), + le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_second_stage_version)); + else + return sprintf(buf, "%#04x\n", + le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_version)); } static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr, @@ -206,7 +211,7 @@ static ssize_t soft_reset_store(struct device *dev, goto out; } - if (!hdev->allow_inference_soft_reset) { + if (!hdev->asic_prop.allow_inference_soft_reset) { dev_err(hdev->dev, "Device does not support inference soft-reset\n"); goto out; } @@ -236,7 +241,7 @@ static ssize_t hard_reset_store(struct device *dev, dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n"); - hl_device_reset(hdev, HL_RESET_HARD); + hl_device_reset(hdev, HL_DRV_RESET_HARD); out: return count; @@ -298,7 +303,7 @@ static ssize_t soft_reset_cnt_show(struct device *dev, { struct hl_device *hdev = dev_get_drvdata(dev); - return sprintf(buf, "%d\n", hdev->soft_reset_cnt); + return sprintf(buf, "%d\n", hdev->reset_info.soft_reset_cnt); } static ssize_t hard_reset_cnt_show(struct device *dev, @@ -306,7 +311,7 @@ static ssize_t hard_reset_cnt_show(struct device *dev, { struct hl_device *hdev = dev_get_drvdata(dev); - return sprintf(buf, "%d\n", hdev->hard_reset_cnt); + return sprintf(buf, "%d\n", hdev->reset_info.hard_reset_cnt); } static ssize_t max_power_show(struct device *dev, struct device_attribute *attr, @@ -419,8 +424,6 @@ static struct attribute *hl_dev_attrs[] = { &dev_attr_max_power.attr, &dev_attr_pci_addr.attr, &dev_attr_preboot_btl_ver.attr, - &dev_attr_soft_reset.attr, - &dev_attr_soft_reset_cnt.attr, &dev_attr_status.attr, &dev_attr_thermal_ver.attr, &dev_attr_uboot_ver.attr, @@ -445,15 +448,25 @@ static const struct attribute_group *hl_dev_attr_groups[] = { NULL, }; +static struct attribute *hl_dev_inference_attrs[] = { + &dev_attr_soft_reset.attr, + &dev_attr_soft_reset_cnt.attr, + NULL, +}; + +static struct attribute_group hl_dev_inference_attr_group = { + .attrs = hl_dev_inference_attrs, +}; + +static const struct attribute_group *hl_dev_inference_attr_groups[] = { + &hl_dev_inference_attr_group, + NULL, +}; + int hl_sysfs_init(struct hl_device *hdev) { int rc; - if (hdev->asic_type == ASIC_GOYA) - hdev->pm_mng_profile = PM_AUTO; - else - hdev->pm_mng_profile = PM_MANUAL; - hdev->max_power = hdev->asic_prop.max_power_default; hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group); @@ -465,10 +478,25 @@ int hl_sysfs_init(struct hl_device *hdev) return rc; } + if (!hdev->asic_prop.allow_inference_soft_reset) + return 0; + + rc = device_add_groups(hdev->dev, hl_dev_inference_attr_groups); + if (rc) { + dev_err(hdev->dev, + "Failed to add groups to device, error %d\n", rc); + return rc; + } + return 0; } void hl_sysfs_fini(struct hl_device *hdev) { device_remove_groups(hdev->dev, hl_dev_attr_groups); + + if (!hdev->asic_prop.allow_inference_soft_reset) + return; + + device_remove_groups(hdev->dev, hl_dev_inference_attr_groups); } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 825737dfe381..013c6da2e3ca 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2020 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. 
*/ @@ -593,26 +593,27 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev) else prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE; prop->mmu_pte_size = HL_PTE_SIZE; - prop->mmu_hop_table_size = HOP_TABLE_SIZE; - prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; + prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE; + prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE; prop->dram_page_size = PAGE_SIZE_2MB; prop->dram_supports_virtual_memory = false; - prop->pmmu.hop0_shift = HOP0_SHIFT; - prop->pmmu.hop1_shift = HOP1_SHIFT; - prop->pmmu.hop2_shift = HOP2_SHIFT; - prop->pmmu.hop3_shift = HOP3_SHIFT; - prop->pmmu.hop4_shift = HOP4_SHIFT; - prop->pmmu.hop0_mask = HOP0_MASK; - prop->pmmu.hop1_mask = HOP1_MASK; - prop->pmmu.hop2_mask = HOP2_MASK; - prop->pmmu.hop3_mask = HOP3_MASK; - prop->pmmu.hop4_mask = HOP4_MASK; + prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT; + prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT; + prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT; + prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT; + prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT; + prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK; + prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK; + prop->pmmu.hop2_mask = MMU_V1_1_HOP2_MASK; + prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK; + prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK; prop->pmmu.start_addr = VA_HOST_SPACE_START; prop->pmmu.end_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1; prop->pmmu.page_size = PAGE_SIZE_4KB; prop->pmmu.num_hops = MMU_ARCH_5_HOPS; + prop->pmmu.last_mask = LAST_MASK; /* PMMU and HPMMU are the same except of page size */ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu)); @@ -664,6 +665,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev) prop->clk_pll_index = HL_GAUDI_MME_PLL; prop->max_freq_value = GAUDI_MAX_CLK_FREQ; + prop->use_get_power_for_reset_history = true; + return 0; } @@ -878,6 +881,11 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev) int rc; if (hdev->asic_prop.fw_security_enabled) { + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr); if (rc) @@ -1273,6 +1281,7 @@ static int gaudi_collective_wait_init_cs(struct hl_cs *cs) container_of(cs->signal_fence, struct hl_cs_compl, base_fence); struct hl_cs_compl *cs_cmpl = container_of(cs->fence, struct hl_cs_compl, base_fence); + struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl; struct gaudi_collective_properties *cprop; u32 stream, queue_id, sob_group_offset; struct gaudi_device *gaudi; @@ -1285,10 +1294,16 @@ static int gaudi_collective_wait_init_cs(struct hl_cs *cs) gaudi = hdev->asic_specific; cprop = &gaudi->collective_props; - /* In encaps signals case the SOB info will be retrieved from - * the handle in gaudi_collective_slave_init_job. - */ - if (!cs->encaps_signals) { + if (cs->encaps_signals) { + cs_cmpl->hw_sob = handle->hw_sob; + /* at this checkpoint we only need the hw_sob pointer + * for the completion check before we start going over the jobs + * of the master/slaves; the sob_value will be taken later on + * in gaudi_collective_slave_init_job, depending on each + * job's wait offset value.
+ */ + cs_cmpl->sob_val = 0; + } else { /* copy the SOB id and value of the signal CS */ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; cs_cmpl->sob_val = signal_cs_cmpl->sob_val; @@ -1621,6 +1636,8 @@ static int gaudi_late_init(struct hl_device *hdev) */ gaudi_mmu_prepare(hdev, 1); + hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST); + return 0; disable_pci_access: @@ -4006,7 +4023,7 @@ static void gaudi_init_firmware_loader(struct hl_device *hdev) struct fw_load_mgr *fw_loader = &hdev->fw_loader; /* fill common fields */ - fw_loader->linux_loaded = false; + fw_loader->fw_comp_loaded = FW_TYPE_NONE; fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE; fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE; fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC; @@ -4289,13 +4306,31 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset * via the GIC. Otherwise, we need to use COMMS or the MSG_TO_CPU * registers in case of old F/Ws */ - if (hdev->fw_loader.linux_loaded) { + if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) { irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR : le32_to_cpu(dyn_regs->gic_host_halt_irq); WREG32(irq_handler_offset, gaudi_irq_map_table[GAUDI_EVENT_HALT_MACHINE].cpu_id); + + /* This is a hail-mary attempt to revive the card on the small chance that the + * f/w has experienced a watchdog event, which caused it to return to preboot. + * In that case, triggering reset through GIC won't help. We need to trigger the + * reset as if Linux wasn't loaded. + * + * We do it only if the reset cause was HB, because that would be the indication + * of such an event. + * + * In case the watchdog hasn't expired but we still got HB, this won't do any + * damage. 
+ */ + if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) { + if (hdev->asic_prop.hard_reset_done_by_fw) + hl_fw_ask_hard_reset_without_linux(hdev); + else + hl_fw_ask_halt_machine_without_linux(hdev); + } } else { if (hdev->asic_prop.hard_reset_done_by_fw) hl_fw_ask_hard_reset_without_linux(hdev); @@ -6412,6 +6447,7 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, { u32 dma_core_sts0, err_cause, cfg1, size_left, pos, size_to_dma; struct gaudi_device *gaudi = hdev->asic_specific; + u32 qm_glbl_sts0, qm_cgm_sts; u64 dma_offset, qm_offset; dma_addr_t dma_addr; void *kernel_addr; @@ -6436,14 +6472,20 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, dma_offset = dma_id * DMA_CORE_OFFSET; qm_offset = dma_id * DMA_QMAN_OFFSET; dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset); - is_eng_idle = IS_DMA_IDLE(dma_core_sts0); + qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset); + qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset); + is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) && + IS_DMA_IDLE(dma_core_sts0); if (!is_eng_idle) { dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2]; dma_offset = dma_id * DMA_CORE_OFFSET; qm_offset = dma_id * DMA_QMAN_OFFSET; dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset); - is_eng_idle = IS_DMA_IDLE(dma_core_sts0); + qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset); + qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset); + is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) && + IS_DMA_IDLE(dma_core_sts0); if (!is_eng_idle) { dev_err_ratelimited(hdev->dev, @@ -6522,7 +6564,7 @@ static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr) { struct gaudi_device *gaudi = hdev->asic_specific; - if (hdev->hard_reset_pending) + if (hdev->reset_info.hard_reset_pending) return U64_MAX; return readq(hdev->pcie_bar[HBM_BAR_ID] + @@ -6533,7 +6575,7 @@ static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val) { struct gaudi_device *gaudi = hdev->asic_specific; - if (hdev->hard_reset_pending) + if (hdev->reset_info.hard_reset_pending) return; writeq(val, hdev->pcie_bar[HBM_BAR_ID] + @@ -6935,8 +6977,9 @@ event_not_supported: snprintf(desc, size, "N/A"); } -static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, - u32 x_y, bool is_write) +static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y, + bool is_write, s32 *engine_id_1, + s32 *engine_id_2) { u32 dma_id[2], dma_offset, err_cause[2], mask, i; @@ -6976,44 +7019,64 @@ static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, switch (x_y) { case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0: case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1: - if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_0; return "DMA0"; - else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_2; return "DMA2"; - else + } else { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_0; + *engine_id_2 = GAUDI_ENGINE_ID_DMA_2; return "DMA0 or DMA2"; + } case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0: case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1: - if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_1; return "DMA1"; - else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + } else if (!(err_cause[0] & mask) && (err_cause[1] & 
mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_3; return "DMA3"; - else + } else { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_1; + *engine_id_2 = GAUDI_ENGINE_ID_DMA_3; return "DMA1 or DMA3"; + } case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0: case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1: - if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_4; return "DMA4"; - else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_6; return "DMA6"; - else + } else { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_4; + *engine_id_2 = GAUDI_ENGINE_ID_DMA_6; return "DMA4 or DMA6"; + } case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0: case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1: - if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_5; return "DMA5"; - else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_7; return "DMA7"; - else + } else { + *engine_id_1 = GAUDI_ENGINE_ID_DMA_5; + *engine_id_2 = GAUDI_ENGINE_ID_DMA_7; return "DMA5 or DMA7"; + } } unknown_initiator: return "unknown initiator"; } -static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, - bool is_write) +static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write, + u32 *engine_id_1, u32 *engine_id_2) { u32 val, x_y, axi_id; @@ -7026,24 +7089,35 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, switch (x_y) { case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0: - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) { + *engine_id_1 = GAUDI_ENGINE_ID_TPC_0; return "TPC0"; - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) + } + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) { + *engine_id_1 = GAUDI_ENGINE_ID_NIC_0; return "NIC0"; + } break; case RAZWI_INITIATOR_ID_X_Y_TPC1: + *engine_id_1 = GAUDI_ENGINE_ID_TPC_1; return "TPC1"; case RAZWI_INITIATOR_ID_X_Y_MME0_0: case RAZWI_INITIATOR_ID_X_Y_MME0_1: + *engine_id_1 = GAUDI_ENGINE_ID_MME_0; return "MME0"; case RAZWI_INITIATOR_ID_X_Y_MME1_0: case RAZWI_INITIATOR_ID_X_Y_MME1_1: + *engine_id_1 = GAUDI_ENGINE_ID_MME_1; return "MME1"; case RAZWI_INITIATOR_ID_X_Y_TPC2: + *engine_id_1 = GAUDI_ENGINE_ID_TPC_2; return "TPC2"; case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC: - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) { + *engine_id_1 = GAUDI_ENGINE_ID_TPC_3; return "TPC3"; + } + /* PCI, CPU or PSOC does not have engine id */ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI)) return "PCI"; if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU)) @@ -7059,32 +7133,49 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1: case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0: case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1: - return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write); + return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write, + engine_id_1, engine_id_2); case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2: - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) { + *engine_id_1 = GAUDI_ENGINE_ID_TPC_4; return "TPC4"; - if (axi_id == 
RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) + } + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) { + *engine_id_1 = GAUDI_ENGINE_ID_NIC_1; return "NIC1"; - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) + } + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) { + *engine_id_1 = GAUDI_ENGINE_ID_NIC_2; return "NIC2"; + } break; case RAZWI_INITIATOR_ID_X_Y_TPC5: + *engine_id_1 = GAUDI_ENGINE_ID_TPC_5; return "TPC5"; case RAZWI_INITIATOR_ID_X_Y_MME2_0: case RAZWI_INITIATOR_ID_X_Y_MME2_1: + *engine_id_1 = GAUDI_ENGINE_ID_MME_2; return "MME2"; case RAZWI_INITIATOR_ID_X_Y_MME3_0: case RAZWI_INITIATOR_ID_X_Y_MME3_1: + *engine_id_1 = GAUDI_ENGINE_ID_MME_3; return "MME3"; case RAZWI_INITIATOR_ID_X_Y_TPC6: + *engine_id_1 = GAUDI_ENGINE_ID_TPC_6; return "TPC6"; case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5: - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) { + *engine_id_1 = GAUDI_ENGINE_ID_TPC_7; return "TPC7"; - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) + } + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) { + *engine_id_1 = GAUDI_ENGINE_ID_NIC_4; return "NIC4"; - if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) + } + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) { + *engine_id_1 = GAUDI_ENGINE_ID_NIC_5; return "NIC5"; + } break; default: break; @@ -7101,27 +7192,28 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, return "unknown initiator"; } -static void gaudi_print_razwi_info(struct hl_device *hdev) +static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_id_1, + u32 *engine_id_2) { + if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) { dev_err_ratelimited(hdev->dev, "RAZWI event caused by illegal write of %s\n", - gaudi_get_razwi_initiator_name(hdev, true)); + gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2)); WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0); } if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) { dev_err_ratelimited(hdev->dev, "RAZWI event caused by illegal read of %s\n", - gaudi_get_razwi_initiator_name(hdev, false)); + gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2)); WREG32(mmMMU_UP_RAZWI_READ_VLD, 0); } } -static void gaudi_print_mmu_error_info(struct hl_device *hdev) +static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u8 *type) { struct gaudi_device *gaudi = hdev->asic_specific; - u64 addr; u32 val; if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) @@ -7129,24 +7221,24 @@ static void gaudi_print_mmu_error_info(struct hl_device *hdev) val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE); if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) { - addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK; - addr <<= 32; - addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA); + *addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK; + *addr <<= 32; + *addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA); - dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", - addr); + dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr); + *type = HL_RAZWI_PAGE_FAULT; WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0); } val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE); if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) { - addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK; - addr <<= 32; - addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA); + *addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK; + *addr <<= 32; + *addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA); - dev_err_ratelimited(hdev->dev, - "MMU access error 
on va 0x%llx\n", addr); + dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr); + *type = HL_RAZWI_MMU_ACCESS_ERROR; WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0); } @@ -7665,15 +7757,46 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type) static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type, bool razwi) { + u32 engine_id_1, engine_id_2; char desc[64] = ""; + u64 razwi_addr = 0; + u8 razwi_type; + int rc; + + /* + * Init engine id by default as not valid and only if razwi initiated from engine with + * engine id it will get valid value. + * Init razwi type to default, will be changed only if razwi caused by page fault of + * MMU access error + */ + engine_id_1 = U16_MAX; + engine_id_2 = U16_MAX; + razwi_type = U8_MAX; gaudi_get_event_desc(event_type, desc, sizeof(desc)); dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", event_type, desc); if (razwi) { - gaudi_print_razwi_info(hdev); - gaudi_print_mmu_error_info(hdev); + gaudi_print_and_get_razwi_info(hdev, &engine_id_1, &engine_id_2); + gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type); + + /* In case it's the first razwi, save its parameters*/ + rc = atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1); + if (!rc) { + hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime; + hdev->last_error.razwi_timestamp = ktime_get(); + hdev->last_error.razwi_addr = razwi_addr; + hdev->last_error.razwi_engine_id_1 = engine_id_1; + hdev->last_error.razwi_engine_id_2 = engine_id_2; + /* + * If first engine id holds non valid value the razwi initiator + * does not have engine id + */ + hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX); + hdev->last_error.razwi_type = razwi_type; + + } } } @@ -7696,14 +7819,10 @@ static void gaudi_print_fw_alive_info(struct hl_device *hdev, fw_alive->thread_id, fw_alive->uptime_seconds); } -static int gaudi_soft_reset_late_init(struct hl_device *hdev) +static int gaudi_non_hard_reset_late_init(struct hl_device *hdev) { - struct gaudi_device *gaudi = hdev->asic_specific; - - /* Unmask all IRQs since some could have been received - * during the soft reset - */ - return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events)); + /* GAUDI doesn't support any reset except hard-reset */ + return -EPERM; } static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device, @@ -7897,27 +8016,39 @@ static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type) static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type) { + ktime_t zero_time = ktime_set(0, 0); + + mutex_lock(&hdev->clk_throttling.lock); + switch (event_type) { case GAUDI_EVENT_FIX_POWER_ENV_S: - hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; dev_info_ratelimited(hdev->dev, "Clock throttling due to power consumption\n"); break; case GAUDI_EVENT_FIX_POWER_ENV_E: - hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); dev_info_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n"); break; case GAUDI_EVENT_FIX_THERMAL_ENV_S: - 
hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n"); break; case GAUDI_EVENT_FIX_THERMAL_ENV_E: - hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n"); break; @@ -7927,6 +8058,8 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, event_type); break; } + + mutex_unlock(&hdev->clk_throttling.lock); } static void gaudi_handle_eqe(struct hl_device *hdev, @@ -7975,7 +8108,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR: gaudi_print_irq_info(hdev, event_type, true); gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); - fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR; + fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR; goto reset_device; case GAUDI_EVENT_GIC500: @@ -7983,7 +8116,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_L2_RAM_ECC: case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17: gaudi_print_irq_info(hdev, event_type, false); - fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR; + fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR; goto reset_device; case GAUDI_EVENT_HBM0_SPI_0: @@ -7994,7 +8127,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, gaudi_hbm_read_interrupts(hdev, gaudi_hbm_event_to_dev(event_type), &eq_entry->hbm_ecc_data); - fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR; + fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR; goto reset_device; case GAUDI_EVENT_HBM0_SPI_1: @@ -8177,9 +8310,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev, reset_device: if (hdev->asic_prop.fw_security_enabled) - hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW | fw_fatal_err_flag); + hl_device_reset(hdev, HL_DRV_RESET_HARD + | HL_DRV_RESET_BYPASS_REQ_TO_FW + | fw_fatal_err_flag); else if (hdev->hard_reset_on_fw_events) - hl_device_reset(hdev, HL_RESET_HARD | fw_fatal_err_flag); + hl_device_reset(hdev, HL_DRV_RESET_HARD | fw_fatal_err_flag); else hl_fw_unmask_irq(hdev, event_type); } @@ -8206,7 +8341,7 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, int rc; if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) || - hdev->hard_reset_pending) + hdev->reset_info.hard_reset_pending) return 0; if (hdev->pldm) @@ -8229,12 +8364,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, WREG32(mmSTLB_INV_SET, 0); - if (rc) { - dev_err_ratelimited(hdev->dev, - "MMU cache invalidation timeout\n"); - hl_device_reset(hdev, HL_RESET_HARD); - } - return rc; } @@ -8662,7 +8791,7 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev, hdev->internal_cb_pool_dma_addr, HOST_SPACE_INTERNAL_CB_SZ); - hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR); + hdev->asic_funcs->mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR); mutex_unlock(&ctx->mmu_lock); if (rc) @@ -8697,7 +8826,7 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev, HOST_SPACE_INTERNAL_CB_SZ); hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, 
HOST_SPACE_INTERNAL_CB_SZ); - hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR); mutex_unlock(&ctx->mmu_lock); gen_pool_destroy(hdev->internal_cb_pool); @@ -9458,7 +9587,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .disable_clock_gating = gaudi_disable_clock_gating, .debug_coresight = gaudi_debug_coresight, .is_device_idle = gaudi_is_device_idle, - .soft_reset_late_init = gaudi_soft_reset_late_init, + .non_hard_reset_late_init = gaudi_non_hard_reset_late_init, .hw_queues_lock = gaudi_hw_queues_lock, .hw_queues_unlock = gaudi_hw_queues_unlock, .get_pci_id = gaudi_get_pci_id, diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index f325e36a71e6..8ac16a9b7d15 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -357,8 +357,8 @@ void gaudi_init_security(struct hl_device *hdev); void gaudi_ack_protection_bits_errors(struct hl_device *hdev); void gaudi_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_attr_grp); -int gaudi_debug_coresight(struct hl_device *hdev, void *data); -void gaudi_halt_coresight(struct hl_device *hdev); +int gaudi_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data); +void gaudi_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx); void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid); #endif /* GAUDIP_H_ */ diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c index 5349c1be13f9..08108f5fed67 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c @@ -848,7 +848,7 @@ static int gaudi_config_spmu(struct hl_device *hdev, return 0; } -int gaudi_debug_coresight(struct hl_device *hdev, void *data) +int gaudi_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data) { struct hl_debug_params *params = data; int rc = 0; @@ -887,7 +887,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data) return rc; } -void gaudi_halt_coresight(struct hl_device *hdev) +void gaudi_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx) { struct hl_debug_params params = {}; int i, rc; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 5536e8c27bd5..fbcc7bbf44b3 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. 
*/ @@ -410,25 +410,26 @@ int goya_set_fixed_properties(struct hl_device *hdev) else prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE; prop->mmu_pte_size = HL_PTE_SIZE; - prop->mmu_hop_table_size = HOP_TABLE_SIZE; - prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; + prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE; + prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE; prop->dram_page_size = PAGE_SIZE_2MB; prop->dram_supports_virtual_memory = true; - prop->dmmu.hop0_shift = HOP0_SHIFT; - prop->dmmu.hop1_shift = HOP1_SHIFT; - prop->dmmu.hop2_shift = HOP2_SHIFT; - prop->dmmu.hop3_shift = HOP3_SHIFT; - prop->dmmu.hop4_shift = HOP4_SHIFT; - prop->dmmu.hop0_mask = HOP0_MASK; - prop->dmmu.hop1_mask = HOP1_MASK; - prop->dmmu.hop2_mask = HOP2_MASK; - prop->dmmu.hop3_mask = HOP3_MASK; - prop->dmmu.hop4_mask = HOP4_MASK; + prop->dmmu.hop0_shift = MMU_V1_0_HOP0_SHIFT; + prop->dmmu.hop1_shift = MMU_V1_0_HOP1_SHIFT; + prop->dmmu.hop2_shift = MMU_V1_0_HOP2_SHIFT; + prop->dmmu.hop3_shift = MMU_V1_0_HOP3_SHIFT; + prop->dmmu.hop4_shift = MMU_V1_0_HOP4_SHIFT; + prop->dmmu.hop0_mask = MMU_V1_0_HOP0_MASK; + prop->dmmu.hop1_mask = MMU_V1_0_HOP1_MASK; + prop->dmmu.hop2_mask = MMU_V1_0_HOP2_MASK; + prop->dmmu.hop3_mask = MMU_V1_0_HOP3_MASK; + prop->dmmu.hop4_mask = MMU_V1_0_HOP4_MASK; prop->dmmu.start_addr = VA_DDR_SPACE_START; prop->dmmu.end_addr = VA_DDR_SPACE_END; prop->dmmu.page_size = PAGE_SIZE_2MB; prop->dmmu.num_hops = MMU_ARCH_5_HOPS; + prop->dmmu.last_mask = LAST_MASK; /* shifts and masks are the same in PMMU and DMMU */ memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu)); @@ -436,6 +437,7 @@ int goya_set_fixed_properties(struct hl_device *hdev) prop->pmmu.end_addr = VA_HOST_SPACE_END; prop->pmmu.page_size = PAGE_SIZE_4KB; prop->pmmu.num_hops = MMU_ARCH_5_HOPS; + prop->pmmu.last_mask = LAST_MASK; /* PMMU and HPMMU are the same except of page size */ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu)); @@ -473,6 +475,8 @@ int goya_set_fixed_properties(struct hl_device *hdev) prop->clk_pll_index = HL_GOYA_MME_PLL; + prop->use_get_power_for_reset_history = true; + return 0; } @@ -735,6 +739,11 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev) int rc; if (hdev->asic_prop.fw_security_enabled) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return; + rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL, pll_freq_arr); @@ -778,9 +787,59 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev) prop->psoc_pci_pll_div_factor = div_fctr; } +/* + * goya_set_frequency - set the frequency of the device + * + * @hdev: pointer to habanalabs device structure + * @freq: the new frequency value + * + * Change the frequency if needed. This function has no protection against + * concurrency, therefore it is assumed that the calling function has protected + * itself against the case of calling this function from multiple threads with + * different values + * + * Returns 0 if no change was done, otherwise returns 1 + */ +int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq) +{ + struct goya_device *goya = hdev->asic_specific; + + if ((goya->pm_mng_profile == PM_MANUAL) || + (goya->curr_pll_profile == freq)) + return 0; + + dev_dbg(hdev->dev, "Changing device frequency to %s\n", + freq == PLL_HIGH ? 
"high" : "low"); + + goya_set_pll_profile(hdev, freq); + + goya->curr_pll_profile = freq; + + return 1; +} + +static void goya_set_freq_to_low_job(struct work_struct *work) +{ + struct goya_work_freq *goya_work = container_of(work, + struct goya_work_freq, + work_freq.work); + struct hl_device *hdev = goya_work->hdev; + + mutex_lock(&hdev->fpriv_list_lock); + + if (!hdev->is_compute_ctx_active) + goya_set_frequency(hdev, PLL_LOW); + + mutex_unlock(&hdev->fpriv_list_lock); + + schedule_delayed_work(&goya_work->work_freq, + usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC)); +} + int goya_late_init(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; int rc; goya_fetch_psoc_frequency(hdev); @@ -829,6 +888,16 @@ int goya_late_init(struct hl_device *hdev) return rc; } + /* force setting to low frequency */ + goya->curr_pll_profile = PLL_LOW; + + goya->pm_mng_profile = PM_AUTO; + + hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW); + + schedule_delayed_work(&goya->goya_work->work_freq, + usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC)); + return 0; } @@ -842,8 +911,11 @@ int goya_late_init(struct hl_device *hdev) void goya_late_fini(struct hl_device *hdev) { const struct hwmon_channel_info **channel_info_arr; + struct goya_device *goya = hdev->asic_specific; int i = 0; + cancel_delayed_work_sync(&goya->goya_work->work_freq); + if (!hdev->hl_chip_info->info) return; @@ -961,12 +1033,21 @@ static int goya_sw_init(struct hl_device *hdev) spin_lock_init(&goya->hw_queues_lock); hdev->supports_coresight = true; - hdev->supports_soft_reset = true; - hdev->allow_inference_soft_reset = true; + hdev->asic_prop.supports_soft_reset = true; + hdev->asic_prop.allow_inference_soft_reset = true; hdev->supports_wait_for_multi_cs = false; hdev->asic_funcs->set_pci_memory_regions(hdev); + goya->goya_work = kmalloc(sizeof(struct goya_work_freq), GFP_KERNEL); + if (!goya->goya_work) { + rc = -ENOMEM; + goto free_cpu_accessible_dma_pool; + } + + goya->goya_work->hdev = hdev; + INIT_DELAYED_WORK(&goya->goya_work->work_freq, goya_set_freq_to_low_job); + return 0; free_cpu_accessible_dma_pool: @@ -1003,6 +1084,7 @@ static int goya_sw_fini(struct hl_device *hdev) dma_pool_destroy(hdev->dma_pool); + kfree(goya->goya_work); kfree(goya); return 0; @@ -2502,7 +2584,7 @@ static void goya_init_firmware_loader(struct hl_device *hdev) struct fw_load_mgr *fw_loader = &hdev->fw_loader; /* fill common fields */ - fw_loader->linux_loaded = false; + fw_loader->fw_comp_loaded = FW_TYPE_NONE; fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE; fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE; fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC; @@ -2619,7 +2701,7 @@ int goya_mmu_init(struct hl_device *hdev) (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK)); hdev->asic_funcs->mmu_invalidate_cache(hdev, true, - VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK); + MMU_OP_USERPTR | MMU_OP_PHYS_PACK); WREG32(mmMMU_MMU_ENABLE, 1); WREG32(mmMMU_SPI_MASK, 0xF); @@ -4395,7 +4477,7 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr) { struct goya_device *goya = hdev->asic_specific; - if (hdev->hard_reset_pending) + if (hdev->reset_info.hard_reset_pending) return U64_MAX; return readq(hdev->pcie_bar[DDR_BAR_ID] + @@ -4406,7 +4488,7 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val) { struct goya_device *goya = hdev->asic_specific; - if (hdev->hard_reset_pending) + if (hdev->reset_info.hard_reset_pending) return; writeq(val, hdev->pcie_bar[DDR_BAR_ID] + @@ -4731,7 
+4813,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, return rc; } -static int goya_soft_reset_late_init(struct hl_device *hdev) +static int goya_non_hard_reset_late_init(struct hl_device *hdev) { /* * Unmask all IRQs since some could have been received @@ -4764,24 +4846,39 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type) { + ktime_t zero_time = ktime_set(0, 0); + + mutex_lock(&hdev->clk_throttling.lock); + switch (event_type) { case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S: - hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; dev_info_ratelimited(hdev->dev, "Clock throttling due to power consumption\n"); break; + case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E: - hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); dev_info_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n"); break; + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S: - hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n"); break; + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E: - hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; + hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n"); break; @@ -4791,6 +4888,8 @@ static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type) event_type); break; } + + mutex_unlock(&hdev->clk_throttling.lock); } void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) @@ -4834,14 +4933,14 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: goya_print_irq_info(hdev, event_type, false); if (hdev->hard_reset_on_fw_events) - hl_device_reset(hdev, (HL_RESET_HARD | - HL_RESET_FW_FATAL_ERR)); + hl_device_reset(hdev, (HL_DRV_RESET_HARD | + HL_DRV_RESET_FW_FATAL_ERR)); break; case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: goya_print_irq_info(hdev, event_type, false); if (hdev->hard_reset_on_fw_events) - hl_device_reset(hdev, HL_RESET_HARD); + hl_device_reset(hdev, HL_DRV_RESET_HARD); break; case GOYA_ASYNC_EVENT_ID_PCIE_DEC: @@ -4901,7 +5000,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) goya_print_irq_info(hdev, event_type, false); goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err); if (hdev->hard_reset_on_fw_events) - hl_device_reset(hdev, HL_RESET_HARD); + hl_device_reset(hdev, HL_DRV_RESET_HARD); else hl_fw_unmask_irq(hdev, event_type); break; @@ -5209,7 +5308,7 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, int rc; if 
(!(goya->hw_cap_initialized & HW_CAP_MMU) || - hdev->hard_reset_pending) + hdev->reset_info.hard_reset_pending) return 0; /* no need in L1 only invalidation in Goya */ @@ -5232,12 +5331,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, 1000, timeout_usec); - if (rc) { - dev_err_ratelimited(hdev->dev, - "MMU cache invalidation timeout\n"); - hl_device_reset(hdev, HL_RESET_HARD); - } - return rc; } @@ -5645,7 +5738,7 @@ static const struct hl_asic_funcs goya_funcs = { .disable_clock_gating = goya_disable_clock_gating, .debug_coresight = goya_debug_coresight, .is_device_idle = goya_is_device_idle, - .soft_reset_late_init = goya_soft_reset_late_init, + .non_hard_reset_late_init = goya_non_hard_reset_late_init, .hw_queues_lock = goya_hw_queues_lock, .hw_queues_unlock = goya_hw_queues_unlock, .get_pci_id = goya_get_pci_id, diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index 97add7b04f82..3740fd25bf84 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -153,9 +153,15 @@ #define HW_CAP_GOLDEN 0x00000400 #define HW_CAP_TPC 0x00000800 +struct goya_work_freq { + struct hl_device *hdev; + struct delayed_work work_freq; +}; + struct goya_device { /* TODO: remove hw_queues_lock after moving to scheduler code */ spinlock_t hw_queues_lock; + struct goya_work_freq *goya_work; u64 mme_clk; u64 tpc_clk; @@ -166,6 +172,9 @@ struct goya_device { u32 events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE]; u32 hw_cap_initialized; u8 device_cpu_mmu_mappings_done; + + enum hl_pll_frequency curr_pll_profile; + enum hl_pm_mng_profile pm_mng_profile; }; int goya_set_fixed_properties(struct hl_device *hdev); @@ -211,8 +220,8 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq); void goya_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_attr_grp); int goya_cpucp_info_get(struct hl_device *hdev); -int goya_debug_coresight(struct hl_device *hdev, void *data); -void goya_halt_coresight(struct hl_device *hdev); +int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data); +void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx); int goya_suspend(struct hl_device *hdev); int goya_resume(struct hl_device *hdev); @@ -237,5 +246,6 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev); u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx); u64 goya_get_device_time(struct hl_device *hdev); +int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq); #endif /* GOYAP_H_ */ diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c index c55c100fdd24..2c5133cfae65 100644 --- a/drivers/misc/habanalabs/goya/goya_coresight.c +++ b/drivers/misc/habanalabs/goya/goya_coresight.c @@ -652,7 +652,7 @@ static int goya_config_spmu(struct hl_device *hdev, return 0; } -int goya_debug_coresight(struct hl_device *hdev, void *data) +int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data) { struct hl_debug_params *params = data; int rc = 0; @@ -691,7 +691,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data) return rc; } -void goya_halt_coresight(struct hl_device *hdev) +void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx) { struct hl_debug_params params = {}; int i, rc; diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c index 59b2624ff81a..76b47749affe 100644 --- 
a/drivers/misc/habanalabs/goya/goya_hwmgr.c +++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2021 HabanaLabs, Ltd. * All Rights Reserved. */ @@ -62,7 +62,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr, goto fail; } - if (hdev->pm_mng_profile == PM_AUTO) { + if (goya->pm_mng_profile == PM_AUTO) { count = -EPERM; goto fail; } @@ -111,7 +111,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr, goto fail; } - if (hdev->pm_mng_profile == PM_AUTO) { + if (goya->pm_mng_profile == PM_AUTO) { count = -EPERM; goto fail; } @@ -160,7 +160,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr, goto fail; } - if (hdev->pm_mng_profile == PM_AUTO) { + if (goya->pm_mng_profile == PM_AUTO) { count = -EPERM; goto fail; } @@ -234,13 +234,14 @@ static ssize_t pm_mng_profile_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; if (!hl_device_operational(hdev, NULL)) return -ENODEV; return sprintf(buf, "%s\n", - (hdev->pm_mng_profile == PM_AUTO) ? "auto" : - (hdev->pm_mng_profile == PM_MANUAL) ? "manual" : + (goya->pm_mng_profile == PM_AUTO) ? "auto" : + (goya->pm_mng_profile == PM_MANUAL) ? "manual" : "unknown"); } @@ -248,6 +249,7 @@ static ssize_t pm_mng_profile_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; @@ -256,7 +258,7 @@ static ssize_t pm_mng_profile_store(struct device *dev, mutex_lock(&hdev->fpriv_list_lock); - if (hdev->compute_ctx) { + if (hdev->is_compute_ctx_active) { dev_err(hdev->dev, "Can't change PM profile while compute context is opened on the device\n"); count = -EPERM; @@ -265,26 +267,27 @@ static ssize_t pm_mng_profile_store(struct device *dev, if (strncmp("auto", buf, strlen("auto")) == 0) { /* Make sure we are in LOW PLL when changing modes */ - if (hdev->pm_mng_profile == PM_MANUAL) { - hdev->curr_pll_profile = PLL_HIGH; - hdev->pm_mng_profile = PM_AUTO; - hl_device_set_frequency(hdev, PLL_LOW); + if (goya->pm_mng_profile == PM_MANUAL) { + goya->curr_pll_profile = PLL_HIGH; + goya->pm_mng_profile = PM_AUTO; + goya_set_frequency(hdev, PLL_LOW); } } else if (strncmp("manual", buf, strlen("manual")) == 0) { - if (hdev->pm_mng_profile == PM_AUTO) { + if (goya->pm_mng_profile == PM_AUTO) { /* Must release the lock because the work thread also * takes this lock. But before we release it, set * the mode to manual so nothing will change if a user * suddenly opens the device */ - hdev->pm_mng_profile = PM_MANUAL; + goya->pm_mng_profile = PM_MANUAL; mutex_unlock(&hdev->fpriv_list_lock); /* Flush the current work so we can return to the user * knowing that he is the only one changing frequencies */ - flush_delayed_work(&hdev->work_freq); + if (goya->goya_work) + flush_delayed_work(&goya->goya_work->work_freq); return count; } diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index ae13231fda94..737c39f33f05 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright 2020 HabanaLabs, Ltd. 
+ * Copyright 2020-2021 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -376,6 +376,19 @@ enum pq_init_status { * and QMANs. The f/w will return a bitmask where each bit represents * a different engine or QMAN according to enum cpucp_idle_mask. * The bit will be 1 if the engine is NOT idle. + * + * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET - + * Fetch all HBM replaced-rows and pending-to-be-replaced rows data. + * + * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS - + * Fetch status of HBM rows that are pending replacement and need a reboot to + * be replaced. + * + * CPUCP_PACKET_POWER_SET - + * Resets power history of device to 0 + * + * CPUCP_PACKET_ENGINE_CORE_ASID_SET - + * Packet to perform engine core ASID configuration */ enum cpucp_packet_id { @@ -421,6 +434,11 @@ enum cpucp_packet_id { CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */ + CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */ + CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */ + CPUCP_PACKET_POWER_SET, /* internal */ + CPUCP_PACKET_RESERVED, /* not used */ + CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */ }; #define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5 @@ -480,7 +498,14 @@ struct cpucp_packet { __u8 i2c_bus; __u8 i2c_addr; __u8 i2c_reg; - __u8 pad; /* unused */ + /* + * In legacy implementations, i2c_len was not present, + * was unused and just added as pad. + * So if i2c_len is 0, it is treated as legacy + * and r/w 1 Byte, else if i2c_len is specified, + * it's treated as new multibyte r/w support. + */ + __u8 i2c_len; }; struct {/* For PLL info fetch */ @@ -688,6 +713,7 @@ struct eq_generic_event { #define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC) #define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64) #define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64) +#define CPUCP_HBM_ROW_REPLACE_MAX 32 struct cpucp_sensor { __le32 type; @@ -740,6 +766,7 @@ struct cpucp_security_info { * @fuse_version: silicon production FUSE information. * @thermal_version: thermald S/W version. * @cpucp_version: CpuCP S/W version. + * @infineon_second_stage_version: Infineon 2nd stage DC-DC version. * @dram_size: available DRAM size. 
* @card_name: card name that will be displayed in HWMON subsystem on the host * @sec_info: security information @@ -749,6 +776,10 @@ struct cpucp_security_info { * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance * (0 = functional 1 = binned) * @memory_repair_flag: eFuse flag indicating memory repair + * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance + * (0 = functional 1 = binned) + * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance + * (0 = functional 1 = binned) */ struct cpucp_info { struct cpucp_sensor sensors[CPUCP_MAX_SENSORS]; @@ -761,7 +792,7 @@ struct cpucp_info { __u8 fuse_version[VERSION_MAX_LEN]; __u8 thermal_version[VERSION_MAX_LEN]; __u8 cpucp_version[VERSION_MAX_LEN]; - __le32 reserved2; + __le32 infineon_second_stage_version; __le64 dram_size; char card_name[CARD_NAME_MAX_LEN]; __le64 reserved3; @@ -769,7 +800,9 @@ struct cpucp_info { __u8 reserved5; __u8 dram_binning_mask; __u8 memory_repair_flag; - __u8 pad[5]; + __u8 edma_binning_mask; + __u8 xbar_binning_mask; + __u8 pad[3]; struct cpucp_security_info sec_info; __le32 reserved6; __u8 pll_map[PLL_MAP_LEN]; @@ -833,4 +866,25 @@ struct cpucp_nic_status { __le32 high_ber_cnt; }; +enum cpucp_hbm_row_replace_cause { + REPLACE_CAUSE_DOUBLE_ECC_ERR, + REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR, +}; + +struct cpucp_hbm_row_info { + __u8 hbm_idx; + __u8 pc; + __u8 sid; + __u8 bank_idx; + __le16 row_addr; + __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */ + __u8 pad; +}; + +struct cpucp_hbm_row_replaced_rows_info { + __le16 num_replaced_rows; + __u8 pad[6]; + struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX]; +}; + #endif /* CPUCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index 2626df6ef3ef..135e21d6edc9 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -32,6 +32,7 @@ enum cpu_boot_err { CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13, CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18, CPU_BOOT_ERR_BINNING_FAIL = 19, + CPU_BOOT_ERR_TPM_FAIL = 20, CPU_BOOT_ERR_ENABLED = 31, CPU_BOOT_ERR_SCND_EN = 63, CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */ @@ -108,6 +109,8 @@ enum cpu_boot_err { * malfunctioning components might still be * in use. * + * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed. + * * CPU_BOOT_ERR0_ENABLED Error registers enabled. 
* This is a main indication that the * running FW populates the error @@ -130,6 +133,7 @@ enum cpu_boot_err { #define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) #define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR) #define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL) +#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL) #define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED) #define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED) diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h index dedf20e8f956..758f246627f8 100644 --- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h @@ -16,27 +16,18 @@ #define PAGE_PRESENT_MASK 0x0000000000001ull #define SWAP_OUT_MASK 0x0000000000004ull #define LAST_MASK 0x0000000000800ull -#define HOP0_MASK 0x3000000000000ull -#define HOP1_MASK 0x0FF8000000000ull -#define HOP2_MASK 0x0007FC0000000ull -#define HOP3_MASK 0x000003FE00000ull -#define HOP4_MASK 0x00000001FF000ull #define FLAGS_MASK 0x0000000000FFFull -#define HOP0_SHIFT 48 -#define HOP1_SHIFT 39 -#define HOP2_SHIFT 30 -#define HOP3_SHIFT 21 -#define HOP4_SHIFT 12 - #define MMU_ARCH_5_HOPS 5 #define HOP_PHYS_ADDR_MASK (~FLAGS_MASK) #define HL_PTE_SIZE sizeof(u64) -#define HOP_TABLE_SIZE PAGE_SIZE_4KB -#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE) -#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID) + +/* definitions for HOP with 512 PTE entries */ +#define HOP_PTE_ENTRIES_512 512 +#define HOP_TABLE_SIZE_512_PTE (HOP_PTE_ENTRIES_512 * HL_PTE_SIZE) +#define HOP0_512_PTE_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE_512_PTE * MAX_ASID) #define MMU_HOP0_PA43_12_SHIFT 12 #define MMU_HOP0_PA49_44_SHIFT (12 + 32) diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h index 8539dd041f2c..86511002e367 100644 --- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h @@ -8,8 +8,20 @@ #ifndef INCLUDE_MMU_V1_0_H_ #define INCLUDE_MMU_V1_0_H_ -#define MMU_HOP0_PA43_12 0x490004 -#define MMU_HOP0_PA49_44 0x490008 -#define MMU_ASID_BUSY 0x490000 +#define MMU_V1_0_HOP0_MASK 0x3000000000000ull +#define MMU_V1_0_HOP1_MASK 0x0FF8000000000ull +#define MMU_V1_0_HOP2_MASK 0x0007FC0000000ull +#define MMU_V1_0_HOP3_MASK 0x000003FE00000ull +#define MMU_V1_0_HOP4_MASK 0x00000001FF000ull + +#define MMU_V1_0_HOP0_SHIFT 48 +#define MMU_V1_0_HOP1_SHIFT 39 +#define MMU_V1_0_HOP2_SHIFT 30 +#define MMU_V1_0_HOP3_SHIFT 21 +#define MMU_V1_0_HOP4_SHIFT 12 + +#define MMU_HOP0_PA43_12 0x490004 +#define MMU_HOP0_PA49_44 0x490008 +#define MMU_ASID_BUSY 0x490000 #endif /* INCLUDE_MMU_V1_0_H_ */ diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h index b2a9570583ac..9c727a5d47b4 100644 --- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h @@ -8,9 +8,21 @@ #ifndef INCLUDE_MMU_V1_1_H_ #define INCLUDE_MMU_V1_1_H_ -#define MMU_ASID 0xC12004 -#define MMU_HOP0_PA43_12 0xC12008 -#define MMU_HOP0_PA49_44 0xC1200C -#define MMU_BUSY 0xC12000 +#define MMU_V1_1_HOP0_MASK 0x3000000000000ull +#define MMU_V1_1_HOP1_MASK 0x0FF8000000000ull +#define MMU_V1_1_HOP2_MASK 0x0007FC0000000ull +#define MMU_V1_1_HOP3_MASK 0x000003FE00000ull +#define MMU_V1_1_HOP4_MASK 0x00000001FF000ull + 
+#define MMU_V1_1_HOP0_SHIFT 48 +#define MMU_V1_1_HOP1_SHIFT 39 +#define MMU_V1_1_HOP2_SHIFT 30 +#define MMU_V1_1_HOP3_SHIFT 21 +#define MMU_V1_1_HOP4_SHIFT 12 + +#define MMU_ASID 0xC12004 +#define MMU_HOP0_PA43_12 0xC12008 +#define MMU_HOP0_PA49_44 0xC1200C +#define MMU_BUSY 0xC12000 #endif /* INCLUDE_MMU_V1_1_H_ */ diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 0f54730c7ed5..98828030b5a4 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -76,12 +76,12 @@ static void firmware_load(const struct firmware *fw, void *context) if (fw == NULL) { dev_err(&spi->dev, "Cannot load firmware, aborting\n"); - return; + goto out; } if (fw->size == 0) { dev_err(&spi->dev, "Error: Firmware size is 0!\n"); - return; + goto out; } /* Fill dummy data (24 stuffing bits for commands) */ @@ -103,7 +103,7 @@ static void firmware_load(const struct firmware *fw, void *context) dev_err(&spi->dev, "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", jedec_id); - return; + goto out; } dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); @@ -116,7 +116,7 @@ static void firmware_load(const struct firmware *fw, void *context) buffer = kzalloc(fw->size + 8, GFP_KERNEL); if (!buffer) { dev_err(&spi->dev, "Error: Can't allocate memory!\n"); - return; + goto out; } /* @@ -155,7 +155,7 @@ static void firmware_load(const struct firmware *fw, void *context) "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", status); kfree(buffer); - return; + goto out; } dev_info(&spi->dev, "Configuring the FPGA...\n"); @@ -181,7 +181,7 @@ static void firmware_load(const struct firmware *fw, void *context) release_firmware(fw); kfree(buffer); - +out: complete(&data->fw_loaded); } diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index aa12097668d3..2e0aa74ac185 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -11,7 +11,7 @@ lkdtm-$(CONFIG_LKDTM) += usercopy.o lkdtm-$(CONFIG_LKDTM) += stackleak.o lkdtm-$(CONFIG_LKDTM) += cfi.o lkdtm-$(CONFIG_LKDTM) += fortify.o -lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o +lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o KASAN_SANITIZE_rodata.o := n KASAN_SANITIZE_stackleak.o := n @@ -20,7 +20,7 @@ CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --rename-section .noinstr.text=.rodata,alloc,readonly,load + --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index f4cb94a9aa9c..f21854ac5cc2 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -41,20 +41,22 @@ static DEFINE_SPINLOCK(lock_me_up); * Make sure compiler does not optimize this function or stack frame away: * - function marked noinline * - stack variables are marked volatile - * - stack variables are written (memset()) and read (pr_info()) - * - function has external effects (pr_info()) - * */ + * - stack variables are written (memset()) and read (buf[..] 
passed as arg) + * - function may have external effects (memzero_explicit()) + * - no tail recursion possible + */ static int noinline recursive_loop(int remaining) { volatile char buf[REC_STACK_SIZE]; + volatile int ret; memset((void *)buf, remaining & 0xFF, sizeof(buf)); - pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)], - recur_count); if (!remaining) - return 0; + ret = 0; else - return recursive_loop(remaining - 1); + ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1); + memzero_explicit((void *)buf, sizeof(buf)); + return ret; } /* If the depth is negative, use the default, otherwise keep parameter. */ diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 609d9ee2acc0..f69b964b9952 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -182,7 +182,7 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(FORTIFIED_SUBOBJECT), CRASHTYPE(FORTIFIED_STRSCPY), CRASHTYPE(DOUBLE_FAULT), -#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_64S_HASH_MMU CRASHTYPE(PPC_SLB_MULTIHIT), #endif }; @@ -212,7 +212,11 @@ module_param(cpoint_count, int, 0644); MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\ "crash point is to be hit to trigger action"); -/* For test debug reporting. */ +/* + * For test debug reporting when CI systems provide terse summaries. + * TODO: Remove this once reasonable reporting exists in most CI systems: + * https://lore.kernel.org/lkml/CAHk-=wiFvfkoFixTapvvyPMN9pq5G-+Dys2eSyBa1vzDGAO5+A@mail.gmail.com + */ char *lkdtm_kernel_info; /* Return the crashtype number or NULL if the name is invalid */ diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 0e90591235a6..06734670a732 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -2330,6 +2330,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, list_move_tail(&cb->list, &dev->ctrl_rd_list); } + cl->status = 0; + mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, cl->dma_mapped || cl->status, @@ -2407,6 +2409,8 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) list_move_tail(&cb->list, &dev->ctrl_rd_list); } + cl->status = 0; + mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, !cl->dma_mapped || cl->status, diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index be41843df75b..cebcca6d6d3e 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -672,10 +672,14 @@ static void mei_hbm_cl_dma_map_res(struct mei_device *dev, if (!cl) return; - dev_dbg(dev->dev, "cl dma map result = %d\n", res->status); - cl->status = res->status; - if (!cl->status) + if (res->status) { + dev_err(dev->dev, "cl dma map failed %d\n", res->status); + cl->status = -EFAULT; + } else { + dev_dbg(dev->dev, "cl dma map succeeded\n"); cl->dma_mapped = 1; + cl->status = 0; + } wake_up(&cl->wait); } @@ -698,10 +702,14 @@ static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev, if (!cl) return; - dev_dbg(dev->dev, "cl dma unmap result = %d\n", res->status); - cl->status = res->status; - if (!cl->status) + if (res->status) { + dev_err(dev->dev, "cl dma unmap failed %d\n", res->status); + cl->status = -EFAULT; + } else { + dev_dbg(dev->dev, "cl dma unmap succeeded\n"); cl->dma_mapped = 0; + cl->status = 0; + } wake_up(&cl->wait); } diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index a4e854b9b9e6..00652c137cc7 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -994,11 +994,7 @@ static bool 
mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) hhisr &= ~IPC_HHIER_SEC; } - generated = generated || - (hisr & HISR_INT_STS_MSK) || - (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING); - - if (generated && do_ack) { + if (do_ack) { /* Save the interrupt causes */ hw->intr_cause |= hisr & HISR_INT_STS_MSK; if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY) diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 5c8cb679b997..f79076c67256 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -24,6 +24,7 @@ const char *mei_dev_state_str(int state) MEI_DEV_STATE(ENABLED); MEI_DEV_STATE(RESETTING); MEI_DEV_STATE(DISABLED); + MEI_DEV_STATE(POWERING_DOWN); MEI_DEV_STATE(POWER_DOWN); MEI_DEV_STATE(POWER_UP); default: diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index e70525eedaae..d881f5e40ad9 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -74,7 +74,6 @@ static long afu_ioctl_attach(struct ocxl_context *ctx, { struct ocxl_ioctl_attach arg; u64 amr = 0; - int rc; pr_debug("%s for context %d\n", __func__, ctx->pasid); @@ -86,8 +85,7 @@ static long afu_ioctl_attach(struct ocxl_context *ctx, return -EINVAL; amr = arg.amr & mfspr(SPRN_UAMOR); - rc = ocxl_context_attach(ctx, amr, current->mm); - return rc; + return ocxl_context_attach(ctx, amr, current->mm); } static long afu_ioctl_get_metadata(struct ocxl_context *ctx, diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 2ed7e3aaff3a..8f786a225dcf 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -865,7 +865,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, goto err_release_irq; } misc_device->parent = &pdev->dev; - misc_device->fops = &pci_endpoint_test_fops, + misc_device->fops = &pci_endpoint_test_fops; err = misc_register(misc_device); if (err) { diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index 4c26b19f5154..f0e7f02605eb 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -371,6 +371,7 @@ static const struct of_device_id sram_dt_ids[] = { { .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config }, { .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config }, { .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config }, + { .compatible = "nvidia,tegra234-sysram", .data = &tegra_sysram_config }, {} }; diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index 488eeb2811ae..281c54003edc 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -289,7 +289,7 @@ static ssize_t api_show(struct device *dev, { struct uacce_device *uacce = to_uacce_device(dev); - return sprintf(buf, "%s\n", uacce->api_ver); + return sysfs_emit(buf, "%s\n", uacce->api_ver); } static ssize_t flags_show(struct device *dev, @@ -297,7 +297,7 @@ static ssize_t flags_show(struct device *dev, { struct uacce_device *uacce = to_uacce_device(dev); - return sprintf(buf, "%u\n", uacce->flags); + return sysfs_emit(buf, "%u\n", uacce->flags); } static ssize_t available_instances_show(struct device *dev, @@ -309,7 +309,7 @@ static ssize_t available_instances_show(struct device *dev, if (!uacce->ops->get_available_instances) return -ENODEV; - return sprintf(buf, "%d\n", + return sysfs_emit(buf, "%d\n", uacce->ops->get_available_instances(uacce)); } @@ -318,7 +318,7 @@ static ssize_t algorithms_show(struct device *dev, { struct uacce_device *uacce = to_uacce_device(dev); - return sprintf(buf, "%s\n", uacce->algs); + return 
sysfs_emit(buf, "%s\n", uacce->algs); } static ssize_t region_mmio_size_show(struct device *dev, @@ -326,7 +326,7 @@ static ssize_t region_mmio_size_show(struct device *dev, { struct uacce_device *uacce = to_uacce_device(dev); - return sprintf(buf, "%lu\n", + return sysfs_emit(buf, "%lu\n", uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT); } @@ -335,7 +335,7 @@ static ssize_t region_dus_size_show(struct device *dev, { struct uacce_device *uacce = to_uacce_device(dev); - return sprintf(buf, "%lu\n", + return sysfs_emit(buf, "%lu\n", uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT); } diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index c0b5e339d5a1..6cf3e21c7604 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -687,10 +687,8 @@ int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid) } spin_unlock(&context->lock); - if (found) { - synchronize_rcu(); - kfree(notifier); - } + if (found) + kvfree_rcu(notifier); vmci_ctx_put(context); diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c index e3436abf39f4..2100297c94ad 100644 --- a/drivers/misc/vmw_vmci/vmci_event.c +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -209,8 +209,7 @@ int vmci_event_unsubscribe(u32 sub_id) if (!s) return VMCI_ERROR_NOT_FOUND; - synchronize_rcu(); - kfree(s); + kvfree_rcu(s); return VMCI_SUCCESS; } diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 8c2361e66277..463b707d9e99 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1293,14 +1293,12 @@ static int bcm2835_add_host(struct bcm2835_host *host) host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - host->dma_cfg_tx.slave_id = 13; /* DREQ channel */ host->dma_cfg_tx.direction = DMA_MEM_TO_DEV; host->dma_cfg_tx.src_addr = 0; host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA; host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - host->dma_cfg_rx.slave_id = 13; /* DREQ channel */ host->dma_cfg_rx.direction = DMA_DEV_TO_MEM; host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA; host->dma_cfg_rx.dst_addr = 0; diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 7693236c946f..7ab1b38a7be5 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1128,8 +1128,8 @@ static int jz4740_mmc_resume(struct device *dev) return pinctrl_select_default_state(dev); } -DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, - jz4740_mmc_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, + jz4740_mmc_resume); static struct platform_driver jz4740_mmc_driver = { .probe = jz4740_mmc_probe, diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 98c218bd6669..40b6878bea6c 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1210,7 +1210,7 @@ static int mxcmci_resume(struct device *dev) return ret; } -DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); static struct platform_driver mxcmci_driver = { .probe = mxcmci_probe, diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c index acabb7715b42..73258b24fea7 100644 --- a/drivers/most/most_usb.c +++ b/drivers/most/most_usb.c @@ -831,7 +831,7 @@ static ssize_t value_show(struct device *dev, 
struct device_attribute *attr, int err; if (sysfs_streq(name, "arb_address")) - return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr); + return sysfs_emit(buf, "%04x\n", dci_obj->reg_addr); if (sysfs_streq(name, "arb_value")) reg_addr = dci_obj->reg_addr; @@ -843,7 +843,7 @@ static ssize_t value_show(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - return snprintf(buf, PAGE_SIZE, "%04x\n", val); + return sysfs_emit(buf, "%04x\n", val); } static ssize_t value_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 4945caa88345..6a099bbcd8be 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -357,12 +357,6 @@ config MTD_INTEL_VR_NOR Map driver for a NOR flash bank located on the Expansion Bus of the Intel Vermilion Range chipset. -config MTD_RBTX4939 - tristate "Map driver for RBTX4939 board" - depends on TOSHIBA_RBTX4939 && MTD_CFI && MTD_COMPLEX_MAPPINGS - help - Map driver for NOR flash chips on RBTX4939 board. - config MTD_PLATRAM tristate "Map driver for platform device RAM (mtd-ram)" select MTD_RAM diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 11fea9c8d561..2240b100f66a 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -42,6 +42,5 @@ obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o -obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o obj-$(CONFIG_MTD_VMU) += vmu-flash.o obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c deleted file mode 100644 index 39c86c0b0ec1..000000000000 --- a/drivers/mtd/maps/rbtx4939-flash.c +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * rbtx4939-flash (based on physmap.c) - * - * This is a simplified physmap driver with map_init callback function. 
- * - * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp> - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/map.h> -#include <linux/mtd/partitions.h> -#include <asm/txx9/rbtx4939.h> - -struct rbtx4939_flash_info { - struct mtd_info *mtd; - struct map_info map; -}; - -static int rbtx4939_flash_remove(struct platform_device *dev) -{ - struct rbtx4939_flash_info *info; - - info = platform_get_drvdata(dev); - if (!info) - return 0; - - if (info->mtd) { - mtd_device_unregister(info->mtd); - map_destroy(info->mtd); - } - return 0; -} - -static const char * const rom_probe_types[] = { - "cfi_probe", "jedec_probe", NULL }; - -static int rbtx4939_flash_probe(struct platform_device *dev) -{ - struct rbtx4939_flash_data *pdata; - struct rbtx4939_flash_info *info; - struct resource *res; - const char * const *probe_type; - int err = 0; - unsigned long size; - - pdata = dev_get_platdata(&dev->dev); - if (!pdata) - return -ENODEV; - - res = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - info = devm_kzalloc(&dev->dev, sizeof(struct rbtx4939_flash_info), - GFP_KERNEL); - if (!info) - return -ENOMEM; - - platform_set_drvdata(dev, info); - - size = resource_size(res); - pr_notice("rbtx4939 platform flash device: %pR\n", res); - - if (!devm_request_mem_region(&dev->dev, res->start, size, - dev_name(&dev->dev))) - return -EBUSY; - - info->map.name = dev_name(&dev->dev); - info->map.phys = res->start; - info->map.size = size; - info->map.bankwidth = pdata->width; - - info->map.virt = devm_ioremap(&dev->dev, info->map.phys, size); - if (!info->map.virt) - return -EBUSY; - - if (pdata->map_init) - (*pdata->map_init)(&info->map); - else - simple_map_init(&info->map); - - probe_type = rom_probe_types; - for (; !info->mtd && *probe_type; probe_type++) - info->mtd = do_map_probe(*probe_type, &info->map); - if (!info->mtd) { - dev_err(&dev->dev, "map_probe failed\n"); - err = -ENXIO; - goto err_out; - } - info->mtd->dev.parent = &dev->dev; - err = mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts); - - if (err) - goto err_out; - return 0; - -err_out: - rbtx4939_flash_remove(dev); - return err; -} - -#ifdef CONFIG_PM -static void rbtx4939_flash_shutdown(struct platform_device *dev) -{ - struct rbtx4939_flash_info *info = platform_get_drvdata(dev); - - if (mtd_suspend(info->mtd) == 0) - mtd_resume(info->mtd); -} -#else -#define rbtx4939_flash_shutdown NULL -#endif - -static struct platform_driver rbtx4939_flash_driver = { - .probe = rbtx4939_flash_probe, - .remove = rbtx4939_flash_remove, - .shutdown = rbtx4939_flash_shutdown, - .driver = { - .name = "rbtx4939-flash", - }, -}; - -module_platform_driver(rbtx4939_flash_driver); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("RBTX4939 MTD map driver"); -MODULE_ALIAS("platform:rbtx4939-flash"); diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 32b8738baa24..20408b7db540 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -309,7 +309,7 @@ config MTD_NAND_DAVINCI config MTD_NAND_TXX9NDFMC tristate "TXx9 NAND controller" - depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST + depends on SOC_TX4938 || COMPILE_TEST depends on HAS_IOMEM help This enables the NAND flash controller on the TXx9 SoCs. 
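Both the bcm2835 MMC hunk above and the qcom_nandc hunk below belong to the same dmaengine migration: the catch-all slave_id field of struct dma_slave_config is being removed, and controller-specific routing information (for Qualcomm's ADM, the CRCI flow-control channel) is passed through the generic peripheral_config pointer instead. What follows is a minimal sketch of the new calling convention, not code from this series; struct my_dev, my_setup_rx_dma and fifo_addr are hypothetical stand-ins for a real driver's state such as nandc->chan and nandc->data_crci:

#include <linux/dma/qcom_adm.h>
#include <linux/dmaengine.h>

/* Hypothetical driver state, for illustration only. */
struct my_dev {
	struct dma_chan *chan;	/* obtained earlier via dma_request_chan() */
	u32 crci;		/* CRCI number parsed from the device tree */
};

static int my_setup_rx_dma(struct my_dev *d, dma_addr_t fifo_addr)
{
	struct qcom_adm_peripheral_config periph_conf = { .crci = d->crci };
	struct dma_slave_config conf = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_maxburst	= 16,
	};

	/* Previously "conf.slave_id = d->crci"; now attach the ADM blob. */
	if (d->crci) {
		conf.peripheral_config = &periph_conf;
		conf.peripheral_size = sizeof(periph_conf);
	}

	/*
	 * As in the qcom_nandc hunk below, the blob may live on the stack:
	 * the provider consumes it during the dmaengine_slave_config() call.
	 */
	return dmaengine_slave_config(d->chan, &conf);
}

The size-tagged, provider-defined blob is the point of the change: a bare u32 slave_id could not portably describe per-peripheral details, so each DMA engine now defines its own peripheral_config layout and client drivers opt in only when the routing information is actually present, exactly as the qcom_nandc change does for data_crci and cmd_crci.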
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 04e6f7b26706..7c6efa3b6255 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -6,6 +6,7 @@ #include <linux/clk.h> #include <linux/slab.h> #include <linux/bitops.h> +#include <linux/dma/qcom_adm.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/module.h> @@ -952,6 +953,7 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, struct dma_async_tx_descriptor *dma_desc; struct scatterlist *sgl; struct dma_slave_config slave_conf; + struct qcom_adm_peripheral_config periph_conf = {}; enum dma_transfer_direction dir_eng; int ret; @@ -983,11 +985,19 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, if (read) { slave_conf.src_maxburst = 16; slave_conf.src_addr = nandc->base_dma + reg_off; - slave_conf.slave_id = nandc->data_crci; + if (nandc->data_crci) { + periph_conf.crci = nandc->data_crci; + slave_conf.peripheral_config = &periph_conf; + slave_conf.peripheral_size = sizeof(periph_conf); + } } else { slave_conf.dst_maxburst = 16; slave_conf.dst_addr = nandc->base_dma + reg_off; - slave_conf.slave_id = nandc->cmd_crci; + if (nandc->cmd_crci) { + periph_conf.crci = nandc->cmd_crci; + slave_conf.peripheral_config = &periph_conf; + slave_conf.peripheral_size = sizeof(periph_conf); + } } ret = dmaengine_slave_config(nandc->chan, &slave_conf); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07fc603c2fa7..ec498ce70f35 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3874,8 +3874,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) skb->l4_hash) return skb->hash; - return __bond_xmit_hash(bond, skb, skb->head, skb->protocol, - skb->mac_header, skb->network_header, + return __bond_xmit_hash(bond, skb, skb->data, skb->protocol, + skb_mac_offset(skb), skb_network_offset(skb), skb_headlen(skb)); } @@ -4884,25 +4884,39 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = NULL; struct list_head *iter; + bool xmit_suc = false; + bool skb_used = false; bond_for_each_slave_rcu(bond, slave, iter) { - if (bond_is_last_slave(bond, slave)) - break; - if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { - struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); + struct sk_buff *skb2; + + if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)) + continue; + if (bond_is_last_slave(bond, slave)) { + skb2 = skb; + skb_used = true; + } else { + skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) { net_err_ratelimited("%s: Error: %s: skb_clone() failed\n", bond_dev->name, __func__); continue; } - bond_dev_queue_xmit(bond, skb2, slave->dev); } + + if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK) + xmit_suc = true; } - if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) - return bond_dev_queue_xmit(bond, skb, slave->dev); - return bond_tx_drop(bond_dev, skb); + if (!skb_used) + dev_kfree_skb_any(skb); + + if (xmit_suc) + return NETDEV_TX_OK; + + atomic_long_inc(&bond_dev->tx_dropped); + return NET_XMIT_DROP; } /*------------------------- Device initialization ---------------------------*/ diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 91230894692d..444ef6a342f6 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -754,7 +754,7 @@ static void 
cfv_remove(struct virtio_device *vdev) debugfs_remove_recursive(cfv->debugfs); vringh_kiov_cleanup(&cfv->ctx.riov); - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->vringh_config->del_vrhs(cfv->vdev); cfv->vr_rx = NULL; vdev->config->del_vqs(cfv->vdev); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 849de4564709..621ce742ad21 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -106,9 +106,9 @@ static void emac_update_speed(struct net_device *dev) /* set EMAC SPEED, depend on PHY */ reg_val = readl(db->membase + EMAC_MAC_SUPP_REG); - reg_val &= ~(0x1 << 8); + reg_val &= ~EMAC_MAC_SUPP_100M; if (db->speed == SPEED_100) - reg_val |= 1 << 8; + reg_val |= EMAC_MAC_SUPP_100M; writel(reg_val, db->membase + EMAC_MAC_SUPP_REG); } @@ -264,7 +264,7 @@ static void emac_dma_done_callback(void *arg) /* re enable interrupt */ reg_val = readl(db->membase + EMAC_INT_CTL_REG); - reg_val |= (0x01 << 8); + reg_val |= EMAC_INT_CTL_RX_EN; writel(reg_val, db->membase + EMAC_INT_CTL_REG); db->emacrx_completed_flag = 1; @@ -429,7 +429,7 @@ static unsigned int emac_powerup(struct net_device *ndev) /* initial EMAC */ /* flush RX FIFO */ reg_val = readl(db->membase + EMAC_RX_CTL_REG); - reg_val |= 0x8; + reg_val |= EMAC_RX_CTL_FLUSH_FIFO; writel(reg_val, db->membase + EMAC_RX_CTL_REG); udelay(1); @@ -441,8 +441,8 @@ static unsigned int emac_powerup(struct net_device *ndev) /* set MII clock */ reg_val = readl(db->membase + EMAC_MAC_MCFG_REG); - reg_val &= (~(0xf << 2)); - reg_val |= (0xD << 2); + reg_val &= ~EMAC_MAC_MCFG_MII_CLKD_MASK; + reg_val |= EMAC_MAC_MCFG_MII_CLKD_72; writel(reg_val, db->membase + EMAC_MAC_MCFG_REG); /* clear RX counter */ @@ -506,7 +506,7 @@ static void emac_init_device(struct net_device *dev) /* enable RX/TX0/RX Hlevel interrup */ reg_val = readl(db->membase + EMAC_INT_CTL_REG); - reg_val |= (0xf << 0) | (0x01 << 8); + reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN); writel(reg_val, db->membase + EMAC_INT_CTL_REG); spin_unlock_irqrestore(&db->lock, flags); @@ -637,7 +637,9 @@ static void emac_rx(struct net_device *dev) if (!rxcount) { db->emacrx_completed_flag = 1; reg_val = readl(db->membase + EMAC_INT_CTL_REG); - reg_val |= (0xf << 0) | (0x01 << 8); + reg_val |= (EMAC_INT_CTL_TX_EN | + EMAC_INT_CTL_TX_ABRT_EN | + EMAC_INT_CTL_RX_EN); writel(reg_val, db->membase + EMAC_INT_CTL_REG); /* had one stuck? 
*/ @@ -669,7 +671,9 @@ static void emac_rx(struct net_device *dev) writel(reg_val | EMAC_CTL_RX_EN, db->membase + EMAC_CTL_REG); reg_val = readl(db->membase + EMAC_INT_CTL_REG); - reg_val |= (0xf << 0) | (0x01 << 8); + reg_val |= (EMAC_INT_CTL_TX_EN | + EMAC_INT_CTL_TX_ABRT_EN | + EMAC_INT_CTL_RX_EN); writel(reg_val, db->membase + EMAC_INT_CTL_REG); db->emacrx_completed_flag = 1; @@ -783,20 +787,20 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id) } /* Transmit Interrupt check */ - if (int_status & (0x01 | 0x02)) + if (int_status & EMAC_INT_STA_TX_COMPLETE) emac_tx_done(dev, db, int_status); - if (int_status & (0x04 | 0x08)) + if (int_status & EMAC_INT_STA_TX_ABRT) netdev_info(dev, " ab : %x\n", int_status); /* Re-enable interrupt mask */ if (db->emacrx_completed_flag == 1) { reg_val = readl(db->membase + EMAC_INT_CTL_REG); - reg_val |= (0xf << 0) | (0x01 << 8); + reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN); writel(reg_val, db->membase + EMAC_INT_CTL_REG); } else { reg_val = readl(db->membase + EMAC_INT_CTL_REG); - reg_val |= (0xf << 0); + reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN); writel(reg_val, db->membase + EMAC_INT_CTL_REG); } @@ -1068,6 +1072,7 @@ out_clk_disable_unprepare: clk_disable_unprepare(db->clk); out_dispose_mapping: irq_dispose_mapping(ndev->irq); + dma_release_channel(db->rx_chan); out_iounmap: iounmap(db->membase); out: diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h index 38c72d9ec600..90bd9ad77607 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.h +++ b/drivers/net/ethernet/allwinner/sun4i-emac.h @@ -38,6 +38,7 @@ #define EMAC_RX_CTL_REG (0x3c) #define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1) #define EMAC_RX_CTL_DMA_EN (1 << 2) +#define EMAC_RX_CTL_FLUSH_FIFO (1 << 3) #define EMAC_RX_CTL_PASS_ALL_EN (1 << 4) #define EMAC_RX_CTL_PASS_CTL_EN (1 << 5) #define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6) @@ -61,7 +62,21 @@ #define EMAC_RX_IO_DATA_STATUS_OK (1 << 7) #define EMAC_RX_FBC_REG (0x50) #define EMAC_INT_CTL_REG (0x54) +#define EMAC_INT_CTL_RX_EN (1 << 8) +#define EMAC_INT_CTL_TX0_EN (1) +#define EMAC_INT_CTL_TX1_EN (1 << 1) +#define EMAC_INT_CTL_TX_EN (EMAC_INT_CTL_TX0_EN | EMAC_INT_CTL_TX1_EN) +#define EMAC_INT_CTL_TX0_ABRT_EN (0x1 << 2) +#define EMAC_INT_CTL_TX1_ABRT_EN (0x1 << 3) +#define EMAC_INT_CTL_TX_ABRT_EN (EMAC_INT_CTL_TX0_ABRT_EN | EMAC_INT_CTL_TX1_ABRT_EN) #define EMAC_INT_STA_REG (0x58) +#define EMAC_INT_STA_TX0_COMPLETE (0x1) +#define EMAC_INT_STA_TX1_COMPLETE (0x1 << 1) +#define EMAC_INT_STA_TX_COMPLETE (EMAC_INT_STA_TX0_COMPLETE | EMAC_INT_STA_TX1_COMPLETE) +#define EMAC_INT_STA_TX0_ABRT (0x1 << 2) +#define EMAC_INT_STA_TX1_ABRT (0x1 << 3) +#define EMAC_INT_STA_TX_ABRT (EMAC_INT_STA_TX0_ABRT | EMAC_INT_STA_TX1_ABRT) +#define EMAC_INT_STA_RX_COMPLETE (0x1 << 8) #define EMAC_MAC_CTL0_REG (0x5c) #define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2) #define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3) @@ -87,8 +102,11 @@ #define EMAC_MAC_CLRT_RM (0x0f) #define EMAC_MAC_MAXF_REG (0x70) #define EMAC_MAC_SUPP_REG (0x74) +#define EMAC_MAC_SUPP_100M (0x1 << 8) #define EMAC_MAC_TEST_REG (0x78) #define EMAC_MAC_MCFG_REG (0x7c) +#define EMAC_MAC_MCFG_MII_CLKD_MASK (0xff << 2) +#define EMAC_MAC_MCFG_MII_CLKD_72 (0x0d << 2) #define EMAC_MAC_A0_REG (0x98) #define EMAC_MAC_A1_REG (0x9c) #define EMAC_MAC_A2_REG (0xa0) diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 9a650d1c1bdd..4d2ba30c2fbd 100644 --- 
a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1237,6 +1237,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) struct bmac_data *bp; const unsigned char *prop_addr; unsigned char addr[6]; + u8 macaddr[6]; struct net_device *dev; int is_bmac_plus = ((int)match->data) != 0; @@ -1284,7 +1285,9 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) rev = addr[0] == 0 && addr[1] == 0xA0; for (j = 0; j < 6; ++j) - dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j]; + macaddr[j] = rev ? bitrev8(addr[j]): addr[j]; + + eth_hw_addr_set(dev, macaddr); /* Enable chip without interrupts for now */ bmac_enable_and_reset_chip(dev); diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 4b80e3a52a19..6f8c91eb1263 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -90,7 +90,7 @@ static void mace_set_timeout(struct net_device *dev); static void mace_tx_timeout(struct timer_list *t); static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma); static inline void mace_clean_rings(struct mace_data *mp); -static void __mace_set_address(struct net_device *dev, void *addr); +static void __mace_set_address(struct net_device *dev, const void *addr); /* * If we can't get a skbuff when we need it, we use this area for DMA. @@ -112,6 +112,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) struct net_device *dev; struct mace_data *mp; const unsigned char *addr; + u8 macaddr[ETH_ALEN]; int j, rev, rc = -EBUSY; if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { @@ -167,8 +168,9 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) rev = addr[0] == 0 && addr[1] == 0xA0; for (j = 0; j < 6; ++j) { - dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j]; + macaddr[j] = rev ? 
bitrev8(addr[j]): addr[j]; } + eth_hw_addr_set(dev, macaddr); mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | in_8(&mp->mace->chipid_lo); @@ -369,11 +371,12 @@ static void mace_reset(struct net_device *dev) out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO); } -static void __mace_set_address(struct net_device *dev, void *addr) +static void __mace_set_address(struct net_device *dev, const void *addr) { struct mace_data *mp = netdev_priv(dev); volatile struct mace __iomem *mb = mp->mace; - unsigned char *p = addr; + const unsigned char *p = addr; + u8 macaddr[ETH_ALEN]; int i; /* load up the hardware address */ @@ -385,7 +388,10 @@ static void __mace_set_address(struct net_device *dev, void *addr) ; } for (i = 0; i < 6; ++i) - out_8(&mb->padr, dev->dev_addr[i] = p[i]); + out_8(&mb->padr, macaddr[i] = p[i]); + + eth_hw_addr_set(dev, macaddr); + if (mp->chipid != BROKEN_ADDRCHG_REV) out_8(&mb->iac, 0); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 226f4403cfed..87f1056e29ff 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -4020,10 +4020,12 @@ static int bcmgenet_probe(struct platform_device *pdev) /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = true; - err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, - dev->name, priv); - if (!err) - device_set_wakeup_capable(&pdev->dev, 1); + if (priv->wol_irq > 0) { + err = devm_request_irq(&pdev->dev, priv->wol_irq, + bcmgenet_wol_isr, 0, dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + } /* Set the needed headroom to account for any possible * features enabling/disabling at runtime diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index fed5f93bf620..26433a62d7f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl { __be32 opt2; __be64 opt0; __be32 iss; - __be32 rsvd; + __be32 rsvd[3]; }; struct cpl_act_open_req { diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c index d04a6c163445..da8d10475a08 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c @@ -32,6 +32,7 @@ #include <linux/tcp.h> #include <linux/ipv6.h> +#include <net/inet_ecn.h> #include <net/route.h> #include <net/ip6_route.h> @@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi, rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, peer_port, local_port, IPPROTO_TCP, - tos, 0); + tos & ~INET_ECN_MASK, 0); if (IS_ERR(rt)) return NULL; n = dst_neigh_lookup(&rt->dst, &peer_ip); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 4db6889b79ba..1c81b161de52 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic) !cpumask_available(enic->msix[i].affinity_mask) || cpumask_empty(enic->msix[i].affinity_mask)) continue; - err = irq_set_affinity_hint(enic->msix_entry[i].vector, - enic->msix[i].affinity_mask); + err = irq_update_affinity_hint(enic->msix_entry[i].vector, + enic->msix[i].affinity_mask); if (err) - netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n", + 
netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n", err); } @@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic) int i; for (i = 0; i < enic->intr_count; i++) - irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); + irq_update_affinity_hint(enic->msix_entry[i].vector, NULL); } static int enic_udp_tunnel_set_port(struct net_device *netdev, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index ad67b4216079..d0c262f2695a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3491,7 +3491,7 @@ static int be_msix_register(struct be_adapter *adapter) if (status) goto err_msix; - irq_set_affinity_hint(vec, eqo->affinity_mask); + irq_update_affinity_hint(vec, eqo->affinity_mask); } return 0; @@ -3552,7 +3552,7 @@ static void be_irq_unregister(struct be_adapter *adapter) /* MSIx */ for_all_evt_queues(adapter, eqo, i) { vec = be_msix_vec_get(adapter, eqo); - irq_set_affinity_hint(vec, NULL); + irq_update_affinity_hint(vec, NULL); free_irq(vec, eqo); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index d21ba70ef4a3..e985ae008a97 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4246,7 +4246,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) } irq = ls_dev->irqs[0]; - err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, + err = devm_request_threaded_irq(&ls_dev->dev, irq->virq, NULL, dpni_irq0_handler_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, dev_name(&ls_dev->dev), &ls_dev->dev); @@ -4273,7 +4273,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) return 0; free_irq: - devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); + devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev); free_mc_irq: fsl_mc_free_irqs(ls_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 32b5faa87bb8..5f5f8c53c4a0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -129,7 +129,6 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv) static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) { struct device *dev = &mc_dev->dev; - struct fsl_mc_device_irq *irq; struct ptp_qoriq *ptp_qoriq; struct device_node *node; void __iomem *base; @@ -177,8 +176,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) goto err_unmap; } - irq = mc_dev->irqs[0]; - ptp_qoriq->irq = irq->msi_desc->irq; + ptp_qoriq->irq = mc_dev->irqs[0]->virq; err = request_threaded_irq(ptp_qoriq->irq, NULL, dpaa2_ptp_irq_handler_thread, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 9b5512b4f15d..9a561072aa4a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1554,8 +1554,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; - err = devm_request_threaded_irq(dev, irq->msi_desc->irq, - NULL, + err = devm_request_threaded_irq(dev, irq->virq, NULL, dpaa2_switch_irq0_handler_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, dev_name(dev), dev); @@ -1581,7 +1580,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) return 0; free_devm_irq: - 
devm_free_irq(dev, irq->msi_desc->irq, dev); + devm_free_irq(dev, irq->virq, dev); free_irq: fsl_mc_free_irqs(sw_dev); return err; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 5b8b9bcf41a2..266e562bd67a 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -51,6 +51,7 @@ struct tgec_mdio_controller { struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; + bool has_a009885; bool has_a011043; }; @@ -186,10 +187,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; struct tgec_mdio_controller __iomem *regs = priv->mdio_base; + unsigned long flags; uint16_t dev_addr; uint32_t mdio_stat; uint32_t mdio_ctl; - uint16_t value; int ret; bool endian = priv->is_little_endian; @@ -221,12 +222,18 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; } + if (priv->has_a009885) + /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we + * must read back the data register within 16 MDC cycles. + */ + local_irq_save(flags); + /* Initiate the read */ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) - return ret; + goto irq_restore; /* Return all Fs if nothing was there */ if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && @@ -234,13 +241,17 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) dev_dbg(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); - return 0xffff; + ret = 0xffff; + } else { + ret = xgmac_read32(®s->mdio_data, endian) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", ret); } - value = xgmac_read32(®s->mdio_data, endian) & 0xffff; - dev_dbg(&bus->dev, "read %04x\n", value); +irq_restore: + if (priv->has_a009885) + local_irq_restore(flags); - return value; + return ret; } static int xgmac_mdio_probe(struct platform_device *pdev) @@ -287,6 +298,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev) priv->is_little_endian = device_property_read_bool(&pdev->dev, "little-endian"); + priv->has_a009885 = device_property_read_bool(&pdev->dev, + "fsl,erratum-a009885"); priv->has_a011043 = device_property_read_bool(&pdev->dev, "fsl,erratum-a011043"); @@ -318,9 +331,10 @@ err_ioremap: static int xgmac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); + struct mdio_fsl_priv *priv = bus->priv; mdiobus_unregister(bus); - iounmap(bus->priv); + iounmap(priv->mdio_base); mdiobus_free(bus); return 0; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index fed3b6bc0d76..b33ed4d92b71 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -548,7 +548,7 @@ static int rx_request_irq(struct hinic_rxq *rxq) goto err_req_irq; cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask); - err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask); + err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask); if (err) goto err_irq_affinity; @@ -565,7 +565,7 @@ static void rx_free_irq(struct hinic_rxq *rxq) { struct hinic_rq *rq = rxq->rq; - irq_set_affinity_hint(rq->irq, NULL); + irq_update_affinity_hint(rq->irq, NULL); free_irq(rq->irq, rxq); rx_del_napi(rxq); } diff --git a/drivers/net/ethernet/i825xx/sni_82596.c 
b/drivers/net/ethernet/i825xx/sni_82596.c index 27937c5d7956..daec9ce04531 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -117,9 +117,10 @@ static int sni_82596_probe(struct platform_device *dev) netdevice->dev_addr[5] = readb(eth_addr + 0x06); iounmap(eth_addr); - if (!netdevice->irq) { + if (netdevice->irq < 0) { printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", __FILE__, netdevice->base_addr); + retval = netdevice->irq; goto probe_failed; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 61afc220fc6c..2a3d8aef7f4e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3915,10 +3915,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) * * get_cpu_mask returns a static constant mask with * a permanent lifetime so it's ok to pass to - * irq_set_affinity_hint without making a copy. + * irq_update_affinity_hint without making a copy. */ cpu = cpumask_local_spread(q_vector->v_idx, -1); - irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + irq_update_affinity_hint(irq_num, get_cpu_mask(cpu)); } vsi->irqs_ready = true; @@ -3929,7 +3929,7 @@ free_queue_irqs: vector--; irq_num = pf->msix_entries[base + vector].vector; irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &vsi->q_vectors[vector]); } return err; @@ -4750,7 +4750,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) /* clear the affinity notifier in the IRQ descriptor */ irq_set_affinity_notifier(irq_num, NULL); /* remove our suggested affinity mask for this IRQ */ - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); free_irq(irq_num, vsi->q_vectors[i]); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 95116ef2f0ae..8125b9120615 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -492,10 +492,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread the IRQ affinity hints across online CPUs. Note that * get_cpu_mask returns a mask with a permanent lifetime so - * it's safe to use as a hint for irq_set_affinity_hint. + * it's safe to use as a hint for irq_update_affinity_hint. 
*/ cpu = cpumask_local_spread(q_vector->v_idx, -1); - irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + irq_update_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; @@ -505,7 +505,7 @@ free_queue_irqs: vector--; irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &adapter->q_vectors[vector]); } return err; @@ -557,7 +557,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) for (vector = 0; vector < q_vectors; vector++) { irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &adapter->q_vectors[vector]); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c6ff656b2476..89b467006291 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3247,8 +3247,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) /* If Flow Director is enabled, set interrupt affinity */ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { /* assign the mask for this irq */ - irq_set_affinity_hint(entry->vector, - &q_vector->affinity_mask); + irq_update_affinity_hint(entry->vector, + &q_vector->affinity_mask); } } @@ -3264,8 +3264,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) free_queue_irqs: while (vector) { vector--; - irq_set_affinity_hint(adapter->msix_entries[vector].vector, - NULL); + irq_update_affinity_hint(adapter->msix_entries[vector].vector, + NULL); free_irq(adapter->msix_entries[vector].vector, adapter->q_vector[vector]); } @@ -3398,7 +3398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) continue; /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(entry->vector, NULL); + irq_update_affinity_hint(entry->vector, NULL); free_irq(entry->vector, q_vector); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h index a0a5a8e6bd8c..2fd9ef2fe5d6 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera.h +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -283,7 +283,6 @@ struct prestera_router { struct list_head rif_entry_list; struct notifier_block inetaddr_nb; struct notifier_block inetaddr_valid_nb; - bool aborted; }; struct prestera_rxtx_params { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index 51fc841b1e7a..e6bfadc874c5 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -1831,8 +1831,8 @@ static int prestera_iface_to_msg(struct prestera_iface *iface, int prestera_hw_rif_create(struct prestera_switch *sw, struct prestera_iface *iif, u8 *mac, u16 *rif_id) { - struct prestera_msg_rif_req req; struct prestera_msg_rif_resp resp; + struct prestera_msg_rif_req req; int err; memcpy(req.mac, mac, ETH_ALEN); @@ -1868,9 +1868,9 @@ int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id) { - int err; struct prestera_msg_vr_resp resp; struct prestera_msg_vr_req req; + int err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); diff --git 
a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 08fdd1e50388..cad93f747d0c 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -982,6 +982,7 @@ static void prestera_switch_fini(struct prestera_switch *sw) prestera_event_handlers_unregister(sw); prestera_rxtx_switch_fini(sw); prestera_switchdev_fini(sw); + prestera_router_fini(sw); prestera_netdev_event_handler_unregister(sw); prestera_hw_switch_fini(sw); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 8a3b7b664358..6ef4d32b8fdd 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -25,10 +25,10 @@ static int __prestera_inetaddr_port_event(struct net_device *port_dev, struct netlink_ext_ack *extack) { struct prestera_port *port = netdev_priv(port_dev); - int err; - struct prestera_rif_entry *re; struct prestera_rif_entry_key re_key = {}; + struct prestera_rif_entry *re; u32 kern_tb_id; + int err; err = prestera_is_valid_mac_addr(port, port_dev->dev_addr); if (err) { @@ -45,21 +45,21 @@ static int __prestera_inetaddr_port_event(struct net_device *port_dev, switch (event) { case NETDEV_UP: if (re) { - NL_SET_ERR_MSG_MOD(extack, "rif_entry already exist"); + NL_SET_ERR_MSG_MOD(extack, "RIF already exist"); return -EEXIST; } re = prestera_rif_entry_create(port->sw, &re_key, prestera_fix_tb_id(kern_tb_id), port_dev->dev_addr); if (!re) { - NL_SET_ERR_MSG_MOD(extack, "Can't create rif_entry"); + NL_SET_ERR_MSG_MOD(extack, "Can't create RIF"); return -EINVAL; } dev_hold(port_dev); break; case NETDEV_DOWN: if (!re) { - NL_SET_ERR_MSG_MOD(extack, "rif_entry not exist"); + NL_SET_ERR_MSG_MOD(extack, "Can't find RIF"); return -EEXIST; } prestera_rif_entry_destroy(port->sw, re); @@ -75,11 +75,11 @@ static int __prestera_inetaddr_event(struct prestera_switch *sw, unsigned long event, struct netlink_ext_ack *extack) { - if (prestera_netdev_check(dev) && !netif_is_bridge_port(dev) && - !netif_is_lag_port(dev) && !netif_is_ovs_port(dev)) - return __prestera_inetaddr_port_event(dev, event, extack); + if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) || + netif_is_lag_port(dev) || netif_is_ovs_port(dev)) + return 0; - return 0; + return __prestera_inetaddr_port_event(dev, event, extack); } static int __prestera_inetaddr_cb(struct notifier_block *nb, @@ -126,6 +126,8 @@ static int __prestera_inetaddr_valid_cb(struct notifier_block *nb, goto out; if (ipv4_is_multicast(ivi->ivi_addr)) { + NL_SET_ERR_MSG_MOD(ivi->extack, + "Multicast addr on RIF is not supported"); err = -EINVAL; goto out; } @@ -166,7 +168,7 @@ int prestera_router_init(struct prestera_switch *sw) err_register_inetaddr_notifier: unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb); err_register_inetaddr_validator_notifier: - /* prestera_router_hw_fini */ + prestera_router_hw_fini(sw); err_router_lib_init: kfree(sw->router); return err; @@ -176,7 +178,7 @@ void prestera_router_fini(struct prestera_switch *sw) { unregister_inetaddr_notifier(&sw->router->inetaddr_nb); unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); - /* router_hw_fini */ + prestera_router_hw_fini(sw); kfree(sw->router); sw->router = NULL; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c 
index 5866a4be50f5..e5592b69ad37 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -29,6 +29,12 @@ int prestera_router_hw_init(struct prestera_switch *sw) return 0; } +void prestera_router_hw_fini(struct prestera_switch *sw) +{ + WARN_ON(!list_empty(&sw->router->vr_list)); + WARN_ON(!list_empty(&sw->router->rif_entry_list)); +} + static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw, u32 tb_id) { @@ -47,13 +53,8 @@ static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw, struct netlink_ext_ack *extack) { struct prestera_vr *vr; - u16 hw_vr_id; int err; - err = prestera_hw_vr_create(sw, &hw_vr_id); - if (err) - return ERR_PTR(-ENOMEM); - vr = kzalloc(sizeof(*vr), GFP_KERNEL); if (!vr) { err = -ENOMEM; @@ -61,23 +62,26 @@ static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw, } vr->tb_id = tb_id; - vr->hw_vr_id = hw_vr_id; + + err = prestera_hw_vr_create(sw, &vr->hw_vr_id); + if (err) + goto err_hw_create; list_add(&vr->router_node, &sw->router->vr_list); return vr; -err_alloc_vr: - prestera_hw_vr_delete(sw, hw_vr_id); +err_hw_create: kfree(vr); +err_alloc_vr: return ERR_PTR(err); } static void __prestera_vr_destroy(struct prestera_switch *sw, struct prestera_vr *vr) { - prestera_hw_vr_delete(sw, vr->hw_vr_id); list_del(&vr->router_node); + prestera_hw_vr_delete(sw, vr->hw_vr_id); kfree(vr); } @@ -87,17 +91,22 @@ static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id struct prestera_vr *vr; vr = __prestera_vr_find(sw, tb_id); - if (!vr) + if (vr) { + refcount_inc(&vr->refcount); + } else { vr = __prestera_vr_create(sw, tb_id, extack); - if (IS_ERR(vr)) - return ERR_CAST(vr); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + refcount_set(&vr->refcount, 1); + } return vr; } static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr) { - if (!vr->ref_cnt) + if (refcount_dec_and_test(&vr->refcount)) __prestera_vr_destroy(sw, vr); } @@ -120,7 +129,7 @@ __prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in, out->iface.vlan_id = in->iface.vlan_id; break; default: - pr_err("Unsupported iface type"); + WARN(1, "Unsupported iface type"); return -EINVAL; } @@ -158,7 +167,6 @@ void prestera_rif_entry_destroy(struct prestera_switch *sw, iface.vr_id = e->vr->hw_vr_id; prestera_hw_rif_delete(sw, e->hw_id, &iface); - e->vr->ref_cnt--; prestera_vr_put(sw, e->vr); kfree(e); } @@ -183,7 +191,6 @@ prestera_rif_entry_create(struct prestera_switch *sw, if (IS_ERR(e->vr)) goto err_vr_get; - e->vr->ref_cnt++; memcpy(&e->addr, addr, sizeof(e->addr)); /* HW */ @@ -198,7 +205,6 @@ prestera_rif_entry_create(struct prestera_switch *sw, return e; err_hw_create: - e->vr->ref_cnt--; prestera_vr_put(sw, e->vr); err_vr_get: err_key_copy: diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h index fed53595f7bb..b6b028551868 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h @@ -6,7 +6,7 @@ struct prestera_vr { struct list_head router_node; - unsigned int ref_cnt; + refcount_t refcount; u32 tb_id; /* key (kernel fib table id) */ u16 hw_vr_id; /* virtual router ID */ u8 __pad[2]; @@ -32,5 +32,6 @@ prestera_rif_entry_create(struct prestera_switch *sw, struct prestera_rif_entry_key *k, u32 tb_id, const unsigned char *addr); int prestera_router_hw_init(struct 
prestera_switch *sw); +void prestera_router_hw_fini(struct prestera_switch *sw); #endif /* _PRESTERA_ROUTER_HW_H_ */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b67b4323cff0..f02d07ec5ccb 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -267,7 +267,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, phylink_config); struct mtk_eth *eth = mac->hw; u32 mcr_cur, mcr_new, sid, i; - int val, ge_mode, err; + int val, ge_mode, err = 0; /* MT76x8 has no hardware settings between for the MAC */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 9e48509ed3b2..414e390e6b48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) cpumask_empty(eq->affinity_mask)) return; - hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); + hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask); if (hint_err) - mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err); + mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err); } #endif @@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) if (eq_table->eq[i].have_irq) { free_cpumask_var(eq_table->eq[i].affinity_mask); -#if defined(CONFIG_SMP) - irq_set_affinity_hint(eq_table->eq[i].irq, NULL); -#endif + irq_update_affinity_hint(eq_table->eq[i].irq, NULL); free_irq(eq_table->eq[i].irq, eq_table->eq + i); eq_table->eq[i].have_irq = 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 33815246fead..378fc8e3bd97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2018 Mellanox Technologies. */ +#include <net/inet_ecn.h> #include <net/vxlan.h> #include <net/gre.h> #include <net/geneve.h> @@ -235,7 +236,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, int err; /* add the IP fields */ - attr.fl.fl4.flowi4_tos = tun_key->tos; + attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK; attr.fl.fl4.daddr = tun_key->u.ipv4.dst; attr.fl.fl4.saddr = tun_key->u.ipv4.src; attr.ttl = tun_key->ttl; @@ -350,7 +351,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, int err; /* add the IP fields */ - attr.fl.fl4.flowi4_tos = tun_key->tos; + attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK; attr.fl.fl4.daddr = tun_key->u.ipv4.dst; attr.fl.fl4.saddr = tun_key->u.ipv4.src; attr.ttl = tun_key->ttl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 90fec0649ef5..41807ef55201 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -129,11 +129,11 @@ static void irq_release(struct mlx5_irq *irq) struct mlx5_irq_pool *pool = irq->pool; xa_erase(&pool->irqs, irq->index); - /* free_irq requires that affinity and rmap will be cleared + /* free_irq requires that affinity_hint and rmap will be cleared * before calling it. 
This is why there is asymmetry with set_rmap * which should be called after alloc_irq but before request_irq. */ - irq_set_affinity_hint(irq->irqn, NULL); + irq_update_affinity_hint(irq->irqn, NULL); free_cpumask_var(irq->mask); free_irq(irq->irqn, &irq->nh); kfree(irq); @@ -238,7 +238,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, } if (affinity) { cpumask_copy(irq->mask, affinity); - irq_set_affinity_hint(irq->irqn, irq->mask); + irq_set_affinity_and_hint(irq->irqn, irq->mask); } irq->pool = pool; irq->refcount = 1; @@ -251,7 +251,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, } return irq; err_xa: - irq_set_affinity_hint(irq->irqn, NULL); + irq_update_affinity_hint(irq->irqn, NULL); free_cpumask_var(irq->mask); err_cpumask: free_irq(irq->irqn, &irq->nh); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index b1311b656e17..455293aa6343 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -771,7 +771,10 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); + /* Don't attempt to send PAUSE frames on the NPI port, it's broken */ + if (port != ocelot->npi) + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, + tx_pause); /* Undo the effects of ocelot_phylink_mac_link_down: * enable MAC module diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index beb9379424c0..949858891973 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -559,13 +559,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return -EOPNOTSUPP; } - if (filter->block_id == VCAP_IS1 && - !is_zero_ether_addr(match.mask->dst)) { - NL_SET_ERR_MSG_MOD(extack, - "Key type S1_NORMAL cannot match on destination MAC"); - return -EOPNOTSUPP; - } - /* The hw support mac matches only for MAC_ETYPE key, * therefore if other matches(port, tcp flags, etc) are added * then just bail out @@ -580,6 +573,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return -EOPNOTSUPP; flow_rule_match_eth_addrs(rule, &match); + + if (filter->block_id == VCAP_IS1 && + !is_zero_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, + "Key type S1_NORMAL cannot match on destination MAC"); + return -EOPNOTSUPP; + } + filter->key_type = OCELOT_VCAP_KEY_ETYPE; ether_addr_copy(filter->key.etype.dmac.value, match.key->dst); @@ -805,13 +806,34 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, struct netlink_ext_ack *extack = f->common.extack; struct ocelot_vcap_filter *filter; int chain = f->common.chain_index; - int ret; + int block_id, ret; if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) { NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain"); return -EOPNOTSUPP; } + block_id = ocelot_chain_to_block(chain, ingress); + if (block_id < 0) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain"); + return -EOPNOTSUPP; + } + + filter = ocelot_vcap_block_find_filter_by_id(&ocelot->block[block_id], + f->cookie, true); + if (filter) { + /* Filter already exists on other ports */ + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, "VCAP ES0 does not support shared filters"); + return -EOPNOTSUPP; + } + + filter->ingress_port_mask |= BIT(port); + + return ocelot_vcap_filter_replace(ocelot, filter); + 
} + + /* Filter didn't exist, create it now */ filter = ocelot_vcap_filter_create(ocelot, port, ingress, f); if (!filter) return -ENOMEM; @@ -874,6 +896,12 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, if (filter->type == OCELOT_VCAP_FILTER_DUMMY) return ocelot_vcap_dummy_filter_del(ocelot, filter); + if (ingress) { + filter->ingress_port_mask &= ~BIT(port); + if (filter->ingress_port_mask) + return ocelot_vcap_filter_replace(ocelot, filter); + } + return ocelot_vcap_filter_del(ocelot, filter); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 8115c3db252e..e271b6225b72 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1187,7 +1187,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, ocelot_port_bridge_join(ocelot, port, bridge); err = switchdev_bridge_port_offload(brport_dev, dev, priv, - &ocelot_netdevice_nb, + &ocelot_switchdev_nb, &ocelot_switchdev_blocking_nb, false, extack); if (err) @@ -1201,7 +1201,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, err_switchdev_sync: switchdev_bridge_port_unoffload(brport_dev, priv, - &ocelot_netdevice_nb, + &ocelot_switchdev_nb, &ocelot_switchdev_blocking_nb); err_switchdev_offload: ocelot_port_bridge_leave(ocelot, port, bridge); @@ -1214,7 +1214,7 @@ static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); switchdev_bridge_port_unoffload(brport_dev, priv, - &ocelot_netdevice_nb, + &ocelot_switchdev_nb, &ocelot_switchdev_blocking_nb); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c index adfeb8d3293d..62a69a91ab22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> @@ -48,46 +49,60 @@ #define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT) #define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT) +struct oxnas_dwmac; + +struct oxnas_dwmac_data { + int (*setup)(struct oxnas_dwmac *dwmac); +}; + struct oxnas_dwmac { struct device *dev; struct clk *clk; struct regmap *regmap; + const struct oxnas_dwmac_data *data; }; -static int oxnas_dwmac_init(struct platform_device *pdev, void *priv) +static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac) { - struct oxnas_dwmac *dwmac = priv; unsigned int value; int ret; - /* Reset HW here before changing the glue configuration */ - ret = device_reset(dwmac->dev); - if (ret) + ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value); + if (ret < 0) return ret; - ret = clk_prepare_enable(dwmac->clk); - if (ret) - return ret; + /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */ + value |= BIT(DWMAC_CKEN_GTX) | + /* Use simple mux for 25/125 Mhz clock switching */ + BIT(DWMAC_SIMPLE_MUX); + + regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value); + + return 0; +} + +static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac) +{ + unsigned int value; + int ret; ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value); - if (ret < 0) { - clk_disable_unprepare(dwmac->clk); + if (ret < 0) return ret; - } /* Enable GMII_GTXCLK to follow 
GMII_REFCLK, required for gigabit PHY */ value |= BIT(DWMAC_CKEN_GTX) | /* Use simple mux for 25/125 Mhz clock switching */ - BIT(DWMAC_SIMPLE_MUX) | - /* set auto switch tx clock source */ - BIT(DWMAC_AUTO_TX_SOURCE) | - /* enable tx & rx vardelay */ - BIT(DWMAC_CKEN_TX_OUT) | - BIT(DWMAC_CKEN_TXN_OUT) | - BIT(DWMAC_CKEN_TX_IN) | - BIT(DWMAC_CKEN_RX_OUT) | - BIT(DWMAC_CKEN_RXN_OUT) | - BIT(DWMAC_CKEN_RX_IN); + BIT(DWMAC_SIMPLE_MUX) | + /* set auto switch tx clock source */ + BIT(DWMAC_AUTO_TX_SOURCE) | + /* enable tx & rx vardelay */ + BIT(DWMAC_CKEN_TX_OUT) | + BIT(DWMAC_CKEN_TXN_OUT) | + BIT(DWMAC_CKEN_TX_IN) | + BIT(DWMAC_CKEN_RX_OUT) | + BIT(DWMAC_CKEN_RXN_OUT) | + BIT(DWMAC_CKEN_RX_IN); regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value); /* set tx & rx vardelay */ @@ -100,6 +115,27 @@ static int oxnas_dwmac_init(struct platform_device *pdev, void *priv) return 0; } +static int oxnas_dwmac_init(struct platform_device *pdev, void *priv) +{ + struct oxnas_dwmac *dwmac = priv; + int ret; + + /* Reset HW here before changing the glue configuration */ + ret = device_reset(dwmac->dev); + if (ret) + return ret; + + ret = clk_prepare_enable(dwmac->clk); + if (ret) + return ret; + + ret = dwmac->data->setup(dwmac); + if (ret) + clk_disable_unprepare(dwmac->clk); + + return ret; +} + static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv) { struct oxnas_dwmac *dwmac = priv; @@ -128,6 +164,12 @@ static int oxnas_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev); + if (!dwmac->data) { + ret = -EINVAL; + goto err_remove_config_dt; + } + dwmac->dev = &pdev->dev; plat_dat->bsp_priv = dwmac; plat_dat->init = oxnas_dwmac_init; @@ -166,8 +208,23 @@ err_remove_config_dt: return ret; } +static const struct oxnas_dwmac_data ox810se_dwmac_data = { + .setup = oxnas_dwmac_setup_ox810se, +}; + +static const struct oxnas_dwmac_data ox820_dwmac_data = { + .setup = oxnas_dwmac_setup_ox820, +}; + static const struct of_device_id oxnas_dwmac_match[] = { - { .compatible = "oxsemi,ox820-dwmac" }, + { + .compatible = "oxsemi,ox810se-dwmac", + .data = &ox810se_dwmac_data, + }, + { + .compatible = "oxsemi,ox820-dwmac", + .data = &ox820_dwmac_data, + }, { } }; MODULE_DEVICE_TABLE(of, oxnas_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 63ff2dad8c85..6708ca2aa4f7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7159,7 +7159,8 @@ int stmmac_dvr_probe(struct device *device, pm_runtime_get_noresume(device); pm_runtime_set_active(device); - pm_runtime_enable(device); + if (!pm_runtime_enabled(device)) + pm_runtime_enable(device); if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 33142d505fc8..03575c017500 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -349,7 +349,7 @@ static void cpsw_rx_handler(void *token, int len, int status) struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev); int pkt_size = cpsw->rx_packet_max; int ret = 0, port, ch = xmeta->ch; - int headroom = CPSW_HEADROOM; + int headroom = CPSW_HEADROOM_NA; struct net_device *ndev = xmeta->ndev; struct cpsw_priv *priv; struct page_pool *pool; @@ -392,7 +392,7 @@ static void cpsw_rx_handler(void *token, int len, int status) } if 
(priv->xdp_prog) { - int headroom = CPSW_HEADROOM, size = len; + int size = len; xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]); if (status & CPDMA_RX_VLAN_ENCAP) { @@ -442,7 +442,7 @@ requeue: xmeta->ndev = ndev; xmeta->ch = ch; - dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; + dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA; ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, pkt_size, 0); if (ret < 0) { diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 279e261e4720..bd4b1528cf99 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -283,7 +283,7 @@ static void cpsw_rx_handler(void *token, int len, int status) { struct page *new_page, *page = token; void *pa = page_address(page); - int headroom = CPSW_HEADROOM; + int headroom = CPSW_HEADROOM_NA; struct cpsw_meta_xdp *xmeta; struct cpsw_common *cpsw; struct net_device *ndev; @@ -336,7 +336,7 @@ static void cpsw_rx_handler(void *token, int len, int status) } if (priv->xdp_prog) { - int headroom = CPSW_HEADROOM, size = len; + int size = len; xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]); if (status & CPDMA_RX_VLAN_ENCAP) { @@ -386,7 +386,7 @@ requeue: xmeta->ndev = ndev; xmeta->ch = ch; - dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; + dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA; ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, pkt_size, 0); if (ret < 0) { diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 3537502e5e8b..ba220593e6db 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1122,7 +1122,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv) xmeta->ndev = priv->ndev; xmeta->ch = ch; - dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM; + dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA; ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch, page, dma, cpsw->rx_packet_max, diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig index 6e2cf062ddba..4184a635fe01 100644 --- a/drivers/net/ethernet/vertexcom/Kconfig +++ b/drivers/net/ethernet/vertexcom/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_VERTEXCOM bool "Vertexcom devices" - default n + default y help If you have a network (Ethernet) card belonging to this class, say Y. diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 23ac353b35fe..377c94ec2486 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -41,8 +41,9 @@ #include "xilinx_axienet.h" /* Descriptors defines for Tx and Rx DMA */ -#define TX_BD_NUM_DEFAULT 64 +#define TX_BD_NUM_DEFAULT 128 #define RX_BD_NUM_DEFAULT 1024 +#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) #define TX_BD_NUM_MAX 4096 #define RX_BD_NUM_MAX 4096 @@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options) static int __axienet_device_reset(struct axienet_local *lp) { - u32 timeout; + u32 value; + int ret; /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending @@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp) * they both reset the entire DMA core, so only one needs to be used. 
*/ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); - timeout = DELAY_OF_ONE_MILLISEC; - while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) & - XAXIDMA_CR_RESET_MASK) { - udelay(1); - if (--timeout == 0) { - netdev_err(lp->ndev, "%s: DMA reset timeout!\n", - __func__); - return -ETIMEDOUT; - } + ret = read_poll_timeout(axienet_dma_in32, value, + !(value & XAXIDMA_CR_RESET_MASK), + DELAY_OF_ONE_MILLISEC, 50000, false, lp, + XAXIDMA_TX_CR_OFFSET); + if (ret) { + dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); + return ret; + } + + /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ + ret = read_poll_timeout(axienet_ior, value, + value & XAE_INT_PHYRSTCMPLT_MASK, + DELAY_OF_ONE_MILLISEC, 50000, false, lp, + XAE_IS_OFFSET); + if (ret) { + dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); + return ret; } return 0; @@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) break; + /* Ensure we see complete descriptor update */ + dma_rmb(); phys = desc_get_phys_addr(lp, cur_p); dma_unmap_single(ndev->dev.parent, phys, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), @@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) dev_consume_skb_irq(cur_p->skb); - cur_p->cntrl = 0; cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app4 = 0; - cur_p->status = 0; cur_p->skb = NULL; + /* ensure our transmit path and device don't prematurely see status cleared */ + wmb(); + cur_p->cntrl = 0; + cur_p->status = 0; if (sizep) *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; @@ -647,6 +661,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, } /** + * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy + * @lp: Pointer to the axienet_local structure + * @num_frag: The number of BDs to check for + * + * Return: 0, on success + * NETDEV_TX_BUSY, if any of the descriptors are not free + * + * This function is invoked before BDs are allocated and transmission starts. + * This function returns 0 if a BD or group of BDs can be allocated for + * transmission. If the BD or any of the BDs are not free the function + * returns a busy status. This is invoked from axienet_start_xmit. + */ +static inline int axienet_check_tx_bd_space(struct axienet_local *lp, + int num_frag) +{ + struct axidma_bd *cur_p; + + /* Ensure we see all descriptor updates from device or TX IRQ path */ + rmb(); + cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; + if (cur_p->cntrl) + return NETDEV_TX_BUSY; + return 0; +} + +/** * axienet_start_xmit_done - Invoked once a transmit is completed by the * Axi DMA Tx channel. * @ndev: Pointer to the net_device structure @@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev) /* Matches barrier in axienet_start_xmit */ smp_mb(); - netif_wake_queue(ndev); -} - -/** - * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy - * @lp: Pointer to the axienet_local structure - * @num_frag: The number of BDs to check for - * - * Return: 0, on success - * NETDEV_TX_BUSY, if any of the descriptors are not free - * - * This function is invoked before BDs are allocated and transmission starts. - * This function returns 0 if a BD or group of BDs can be allocated for - * transmission. 
If the BD or any of the BDs are not free the function - * returns a busy status. This is invoked from axienet_start_xmit. - */ -static inline int axienet_check_tx_bd_space(struct axienet_local *lp, - int num_frag) -{ - struct axidma_bd *cur_p; - cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; - if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) - return NETDEV_TX_BUSY; - return 0; + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) + netif_wake_queue(ndev); } /** @@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) num_frag = skb_shinfo(skb)->nr_frags; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if (axienet_check_tx_bd_space(lp, num_frag)) { - if (netif_queue_stopped(ndev)) - return NETDEV_TX_BUSY; - + if (axienet_check_tx_bd_space(lp, num_frag + 1)) { + /* Should not happen as last start_xmit call should have + * checked for sufficient space and queue should only be + * woken when sufficient space is available. + */ netif_stop_queue(ndev); - - /* Matches barrier in axienet_start_xmit_done */ - smp_mb(); - - /* Space might have just been freed - check again */ - if (axienet_check_tx_bd_space(lp, num_frag)) - return NETDEV_TX_BUSY; - - netif_wake_queue(ndev); + if (net_ratelimit()) + netdev_warn(ndev, "TX ring unexpectedly full\n"); + return NETDEV_TX_BUSY; } if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (++lp->tx_bd_tail >= lp->tx_bd_num) lp->tx_bd_tail = 0; + /* Stop queue if next transmit may not have space */ + if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { + netif_stop_queue(ndev); + + /* Matches barrier in axienet_start_xmit_done */ + smp_mb(); + + /* Space might have just been freed - check again */ + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) + netif_wake_queue(ndev); + } + return NETDEV_TX_OK; } @@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev) tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + /* Ensure we see complete descriptor update */ + dma_rmb(); phys = desc_get_phys_addr(lp, cur_p); dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, DMA_FROM_DEVICE); @@ -1352,7 +1379,8 @@ axienet_ethtools_set_ringparam(struct net_device *ndev, if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending > TX_BD_NUM_MAX) + ering->tx_pending < TX_BD_NUM_MIN || + ering->tx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) @@ -2027,6 +2055,11 @@ static int axienet_probe(struct platform_device *pdev) lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; + /* Reset core now that clocks are enabled, prior to accessing MDIO */ + ret = __axienet_device_reset(lp); + if (ret) + goto cleanup_clk; + lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (lp->phy_node) { ret = axienet_mdio_setup(lp); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 315278a7cf88..cf69da0e296c 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -164,6 +164,7 @@ struct hv_netvsc_packet { u32 total_bytes; u32 send_buf_index; u32 total_data_buflen; + struct hv_dma_range *dma_range; }; #define NETVSC_HASH_KEYLEN 40 @@ -1074,6 +1075,7 @@ struct netvsc_device { /* Receive buffer allocated by us but manages by NetVSP */ void *recv_buf; + void *recv_original_buf; u32 recv_buf_size; /* allocated bytes */ struct vmbus_gpadl 
recv_buf_gpadl_handle; u32 recv_section_cnt; @@ -1082,6 +1084,7 @@ struct netvsc_device { /* Send buffer allocated by us */ void *send_buf; + void *send_original_buf; u32 send_buf_size; struct vmbus_gpadl send_buf_gpadl_handle; u32 send_section_cnt; @@ -1731,4 +1734,6 @@ struct rndis_message { #define RETRY_US_HI 10000 #define RETRY_MAX 2000 /* >10 sec */ +void netvsc_dma_unmap(struct hv_device *hv_dev, + struct hv_netvsc_packet *packet); #endif /* _HYPERV_NET_H */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5086cd07d1ed..afa81a9480cc 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -153,8 +153,21 @@ static void free_netvsc_device(struct rcu_head *head) int i; kfree(nvdev->extension); - vfree(nvdev->recv_buf); - vfree(nvdev->send_buf); + + if (nvdev->recv_original_buf) { + hv_unmap_memory(nvdev->recv_buf); + vfree(nvdev->recv_original_buf); + } else { + vfree(nvdev->recv_buf); + } + + if (nvdev->send_original_buf) { + hv_unmap_memory(nvdev->send_buf); + vfree(nvdev->send_original_buf); + } else { + vfree(nvdev->send_buf); + } + bitmap_free(nvdev->send_section_map); for (i = 0; i < VRSS_CHANNEL_MAX; i++) { @@ -337,6 +350,7 @@ static int netvsc_init_buf(struct hv_device *device, struct nvsp_message *init_packet; unsigned int buf_size; int i, ret = 0; + void *vaddr; /* Get receive buffer area. */ buf_size = device_info->recv_sections * device_info->recv_section_size; @@ -372,6 +386,17 @@ static int netvsc_init_buf(struct hv_device *device, goto cleanup; } + if (hv_isolation_type_snp()) { + vaddr = hv_map_memory(net_device->recv_buf, buf_size); + if (!vaddr) { + ret = -ENOMEM; + goto cleanup; + } + + net_device->recv_original_buf = net_device->recv_buf; + net_device->recv_buf = vaddr; + } + /* Notify the NetVsp of the gpadl handle */ init_packet = &net_device->channel_init_pkt; memset(init_packet, 0, sizeof(struct nvsp_message)); @@ -475,6 +500,17 @@ static int netvsc_init_buf(struct hv_device *device, goto cleanup; } + if (hv_isolation_type_snp()) { + vaddr = hv_map_memory(net_device->send_buf, buf_size); + if (!vaddr) { + ret = -ENOMEM; + goto cleanup; + } + + net_device->send_original_buf = net_device->send_buf; + net_device->send_buf = vaddr; + } + /* Notify the NetVsp of the gpadl handle */ init_packet = &net_device->channel_init_pkt; memset(init_packet, 0, sizeof(struct nvsp_message)); @@ -764,7 +800,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, /* Notify the layer above us */ if (likely(skb)) { - const struct hv_netvsc_packet *packet + struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)skb->cb; u32 send_index = packet->send_buf_index; struct netvsc_stats *tx_stats; @@ -780,6 +816,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, tx_stats->bytes += packet->total_bytes; u64_stats_update_end(&tx_stats->syncp); + netvsc_dma_unmap(ndev_ctx->device_ctx, packet); napi_consume_skb(skb, budget); } @@ -944,6 +981,88 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, memset(dest, 0, padding); } +void netvsc_dma_unmap(struct hv_device *hv_dev, + struct hv_netvsc_packet *packet) +{ + u32 page_count = packet->cp_partial ? 
+ packet->page_buf_cnt - packet->rmsg_pgcnt : + packet->page_buf_cnt; + int i; + + if (!hv_is_isolation_supported()) + return; + + if (!packet->dma_range) + return; + + for (i = 0; i < page_count; i++) + dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma, + packet->dma_range[i].mapping_size, + DMA_TO_DEVICE); + + kfree(packet->dma_range); +} + +/* netvsc_dma_map - Map swiotlb bounce buffer with data page of + * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation + * VM. + * + * In an isolation VM, the netvsc send buffer has been marked visible to + * the host, so data copied to the send buffer doesn't need to use a + * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer() + * may not be copied to the send buffer, and so these pages need to be + * mapped with the swiotlb bounce buffer; netvsc_dma_map() does + * that. The pfns in the struct hv_page_buffer need to be converted + * to the bounce buffer's pfns. The loop here is necessary because the + * entries in the page buffer array are not necessarily full + * pages of data. Each entry in the array has a separate offset and + * len that may be non-zero, even for entries in the middle of the + * array. And the entries are not physically contiguous. So each + * entry must be mapped individually rather than as one contiguous + * unit, which is why dma_map_sg() is not used here. + */ +static int netvsc_dma_map(struct hv_device *hv_dev, + struct hv_netvsc_packet *packet, + struct hv_page_buffer *pb) +{ + u32 page_count = packet->cp_partial ? + packet->page_buf_cnt - packet->rmsg_pgcnt : + packet->page_buf_cnt; + dma_addr_t dma; + int i; + + if (!hv_is_isolation_supported()) + return 0; + + packet->dma_range = kcalloc(page_count, + sizeof(*packet->dma_range), + GFP_KERNEL); + if (!packet->dma_range) + return -ENOMEM; + + for (i = 0; i < page_count; i++) { + char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT) + + pb[i].offset); + u32 len = pb[i].len; + + dma = dma_map_single(&hv_dev->device, src, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&hv_dev->device, dma)) { + kfree(packet->dma_range); + return -ENOMEM; + } + + /* pb[].offset and pb[].len are not changed during dma mapping + * and so are not reassigned. 
+ */ + packet->dma_range[i].dma = dma; + packet->dma_range[i].mapping_size = len; + pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT; + } + + return 0; +} + static inline int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, @@ -984,14 +1103,24 @@ static inline int netvsc_send_pkt( trace_nvsp_send_pkt(ndev, out_channel, rpkt); + packet->dma_range = NULL; if (packet->page_buf_cnt) { if (packet->cp_partial) pb += packet->rmsg_pgcnt; + ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb); + if (ret) { + ret = -EAGAIN; + goto exit; + } + ret = vmbus_sendpacket_pagebuffer(out_channel, pb, packet->page_buf_cnt, &nvmsg, sizeof(nvmsg), req_id); + + if (ret) + netvsc_dma_unmap(ndev_ctx->device_ctx, packet); } else { ret = vmbus_sendpacket(out_channel, &nvmsg, sizeof(nvmsg), @@ -999,6 +1128,7 @@ static inline int netvsc_send_pkt( VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); } +exit: if (ret == 0) { atomic_inc_return(&nvchan->queue_sends); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index efa963b7af54..3646469433b1 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2516,6 +2516,7 @@ static int netvsc_probe(struct hv_device *dev, net->netdev_ops = &device_ops; net->ethtool_ops = ðtool_ops; SET_NETDEV_DEV(net, &dev->device); + dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1); /* We always need headroom for rndis header */ net->needed_headroom = RNDIS_AND_PPI_SIZE; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index f6c9c2a670f9..448fcc325ed7 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -361,6 +361,8 @@ static void rndis_filter_receive_response(struct net_device *ndev, } } + netvsc_dma_unmap(((struct net_device_context *) + netdev_priv(ndev))->device_ctx, &request->pkt); complete(&request->wait_event); } else { netdev_err(ndev, diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 49d9a077d037..68291a3efd04 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1080,27 +1080,38 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one) { struct gsi *gsi; u32 backlog; + int delta; - if (!endpoint->replenish_enabled) { + if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) { if (add_one) atomic_inc(&endpoint->replenish_saved); return; } + /* If already active, just update the backlog */ + if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) { + if (add_one) + atomic_inc(&endpoint->replenish_backlog); + return; + } + while (atomic_dec_not_zero(&endpoint->replenish_backlog)) if (ipa_endpoint_replenish_one(endpoint)) goto try_again_later; + + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); + if (add_one) atomic_inc(&endpoint->replenish_backlog); return; try_again_later: - /* The last one didn't succeed, so fix the backlog */ - backlog = atomic_inc_return(&endpoint->replenish_backlog); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); - if (add_one) - atomic_inc(&endpoint->replenish_backlog); + /* The last one didn't succeed, so fix the backlog */ + delta = add_one ? 2 : 1; + backlog = atomic_add_return(delta, &endpoint->replenish_backlog); /* Whenever a receive buffer transaction completes we'll try to * replenish again. 
It's unlikely, but if we fail to supply even @@ -1120,7 +1131,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) u32 max_backlog; u32 saved; - endpoint->replenish_enabled = true; + set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) atomic_add(saved, &endpoint->replenish_backlog); @@ -1134,7 +1145,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) { u32 backlog; - endpoint->replenish_enabled = false; + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) atomic_add(backlog, &endpoint->replenish_saved); } @@ -1691,7 +1702,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) /* RX transactions require a single TRE, so the maximum * backlog is the same as the maximum outstanding TREs. */ - endpoint->replenish_enabled = false; + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); atomic_set(&endpoint->replenish_saved, gsi_channel_tre_max(gsi, endpoint->channel_id)); atomic_set(&endpoint->replenish_backlog, 0); diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 0a859d10312d..0313cdc607de 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -41,6 +41,19 @@ enum ipa_endpoint_name { #define IPA_ENDPOINT_MAX 32 /* Max supported by driver */ /** + * enum ipa_replenish_flag: RX buffer replenish flags + * + * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled + * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway + * @IPA_REPLENISH_COUNT: Number of defined replenish flags + */ +enum ipa_replenish_flag { + IPA_REPLENISH_ENABLED, + IPA_REPLENISH_ACTIVE, + IPA_REPLENISH_COUNT, /* Number of flags (must be last) */ +}; + +/** * struct ipa_endpoint - IPA endpoint information * @ipa: IPA pointer * @ee_id: Execution environment endpoint is associated with @@ -51,7 +64,7 @@ enum ipa_endpoint_name { * @trans_tre_max: Maximum number of TRE descriptors per transaction * @evt_ring_id: GSI event ring used by the endpoint * @netdev: Network device pointer, if endpoint uses one - * @replenish_enabled: Whether receive buffer replenishing is enabled + * @replenish_flags: Replenishing state flags * @replenish_ready: Number of replenish transactions without doorbell * @replenish_saved: Replenish requests held while disabled * @replenish_backlog: Number of buffers needed to fill hardware queue @@ -72,7 +85,7 @@ struct ipa_endpoint { struct net_device *netdev; /* Receive buffer replenishing for RX endpoints */ - bool replenish_enabled; + DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT); u32 replenish_ready; atomic_t replenish_saved; atomic_t replenish_backlog; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index dae95d9a07e8..5b6c0d120e09 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -421,7 +421,7 @@ static int at803x_set_wol(struct phy_device *phydev, const u8 *mac; int ret, irq_enabled; unsigned int i; - const unsigned int offsets[] = { + static const unsigned int offsets[] = { AT803X_LOC_MAC_ADDR_32_47_OFFSET, AT803X_LOC_MAC_ADDR_16_31_OFFSET, AT803X_LOC_MAC_ADDR_0_15_OFFSET, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 739859c0dfb1..fa71fb7a66b5 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -189,6 +189,8 @@ #define 
MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII 0x4 #define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */ +#define MII_88E1510_MSCR_2 0x15 + #define MII_VCT5_TX_RX_MDI0_COUPLING 0x10 #define MII_VCT5_TX_RX_MDI1_COUPLING 0x11 #define MII_VCT5_TX_RX_MDI2_COUPLING 0x12 @@ -1932,6 +1934,58 @@ static void marvell_get_stats(struct phy_device *phydev, data[i] = marvell_get_stat(phydev, i); } +static int m88e1510_loopback(struct phy_device *phydev, bool enable) +{ + int err; + + if (enable) { + u16 bmcr_ctl = 0, mscr2_ctl = 0; + + if (phydev->speed == SPEED_1000) + bmcr_ctl = BMCR_SPEED1000; + else if (phydev->speed == SPEED_100) + bmcr_ctl = BMCR_SPEED100; + + if (phydev->duplex == DUPLEX_FULL) + bmcr_ctl |= BMCR_FULLDPLX; + + err = phy_write(phydev, MII_BMCR, bmcr_ctl); + if (err < 0) + return err; + + if (phydev->speed == SPEED_1000) + mscr2_ctl = BMCR_SPEED1000; + else if (phydev->speed == SPEED_100) + mscr2_ctl = BMCR_SPEED100; + + err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1510_MSCR_2, BMCR_SPEED1000 | + BMCR_SPEED100, mscr2_ctl); + if (err < 0) + return err; + + /* Need a soft reset for the speed configuration to take effect */ + err = genphy_soft_reset(phydev); + if (err < 0) + return err; + + /* FIXME: Based on trial-and-error testing, it seems 1G needs a + * delay between soft reset and loopback enablement. + */ + if (phydev->speed == SPEED_1000) + msleep(1000); + + return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, + BMCR_LOOPBACK); + } else { + err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0); + if (err < 0) + return err; + + return phy_config_aneg(phydev); + } +} + static int marvell_vct5_wait_complete(struct phy_device *phydev) { int i; @@ -3078,7 +3132,7 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .set_loopback = genphy_loopback, + .set_loopback = m88e1510_loopback, .get_tunable = m88e1011_get_tunable, .set_tunable = m88e1011_set_tunable, .cable_test_start = marvell_vct7_cable_test_start, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 4570cb9535b7..a7ebcdab415b 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1726,8 +1726,8 @@ static struct phy_driver ksphy_driver[] = { .config_init = kszphy_config_init, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8021, .phy_id_mask = 0x00ffffff, @@ -1741,8 +1741,8 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8031, .phy_id_mask = 0x00ffffff, @@ -1756,8 +1756,8 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8041, .phy_id_mask = MICREL_PHY_ID_MASK, @@ -1788,8 +1788,8 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume 
= kszphy_resume, }, { .name = "Micrel KSZ8051", /* PHY_BASIC_FEATURES */ @@ -1802,8 +1802,8 @@ static struct phy_driver ksphy_driver[] = { .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, .match_phy_device = ksz8051_match_phy_device, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8001, .name = "Micrel KSZ8001 or KS8721", @@ -1817,8 +1817,8 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8081, .name = "Micrel KSZ8081 or KSZ8091", @@ -1848,8 +1848,8 @@ static struct phy_driver ksphy_driver[] = { .config_init = ksz8061_config_init, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, @@ -1864,8 +1864,8 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, .read_mmd = genphy_read_mmd_unsupported, .write_mmd = genphy_write_mmd_unsupported, }, { @@ -1883,7 +1883,7 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, + .suspend = kszphy_suspend, .resume = kszphy_resume, }, { .phy_id = PHY_ID_LAN8814, @@ -1928,7 +1928,7 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, + .suspend = kszphy_suspend, .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8873MLL, diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index ab77a9f439ef..4720b24ca51b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1641,17 +1641,20 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp) static int sfp_module_parse_power(struct sfp *sfp) { u32 power_mW = 1000; + bool supports_a2; if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL)) power_mW = 1500; if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL)) power_mW = 2000; + supports_a2 = sfp->id.ext.sff8472_compliance != + SFP_SFF8472_COMPLIANCE_NONE || + sfp->id.ext.diagmon & SFP_DIAGMON_DDM; + if (power_mW > sfp->max_power_mW) { /* Module power specification exceeds the allowed maximum. */ - if (sfp->id.ext.sff8472_compliance == - SFP_SFF8472_COMPLIANCE_NONE && - !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) { + if (!supports_a2) { /* The module appears not to implement bus address * 0xa2, so assume that the module powers up in the * indicated mode. @@ -1668,11 +1671,25 @@ static int sfp_module_parse_power(struct sfp *sfp) } } + if (power_mW <= 1000) { + /* Modules below 1W do not require a power change sequence */ + sfp->module_power_mW = power_mW; + return 0; + } + + if (!supports_a2) { + /* The module power level is below the host maximum and the + * module appears not to implement bus address 0xa2, so assume + * that the module powers up in the indicated mode. 
+ */ + return 0; + } + /* If the module requires a higher power mode, but also requires * an address change sequence, warn the user that the module may * not be functional. */ - if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) { + if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) { dev_warn(sfp->dev, "Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n", power_mW / 1000, (power_mW / 100) % 10); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f510e8219470..37e5f3495362 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1316,6 +1316,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */ + {QMI_FIXED_INTF(0x19d2, 0x1485, 5)}, /* ZTE MF286D */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ @@ -1401,6 +1402,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ + {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index abe0149ed917..bc1e3dd67c04 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1962,7 +1962,8 @@ static const struct driver_info smsc95xx_info = { .bind = smsc95xx_bind, .unbind = smsc95xx_unbind, .link_reset = smsc95xx_link_reset, - .reset = smsc95xx_start_phy, + .reset = smsc95xx_reset, + .check_connect = smsc95xx_start_phy, .stop = smsc95xx_stop, .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 569eecfbc2cd..3d97f158ec59 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3312,7 +3312,7 @@ static int virtnet_probe(struct virtio_device *vdev) return 0; free_unregister_netdev: - vi->vdev->config->reset(vdev); + virtio_reset_device(vdev); unregister_netdev(dev); free_failover: @@ -3328,7 +3328,7 @@ free: static void remove_vq_common(struct virtnet_info *vi) { - vi->vdev->config->reset(vi->vdev); + virtio_reset_device(vi->vdev); /* Free unused buffers in both send and recv, if any. 
*/ free_unused_bufs(vi); diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index c0cfd9b36c0b..720952b92e78 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key( static_identity->static_public, private_key); } +static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen) +{ + struct blake2s_state state; + u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; + u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); + int i; + + if (keylen > BLAKE2S_BLOCK_SIZE) { + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, key, keylen); + blake2s_final(&state, x_key); + } else + memcpy(x_key, key, keylen); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&state, in, inlen); + blake2s_final(&state, i_hash); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x5c ^ 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); + blake2s_final(&state, i_hash); + + memcpy(out, i_hash, BLAKE2S_HASH_SIZE); + memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); + memzero_explicit(i_hash, BLAKE2S_HASH_SIZE); +} + /* This is Hugo Krawczyk's HKDF: * - https://eprint.iacr.org/2010/264.pdf * - https://tools.ietf.org/html/rfc5869 @@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, ((third_len || third_dst) && (!second_len || !second_dst)))); /* Extract entropy from data into secret */ - blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); + hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); if (!first_dst || !first_len) goto out; /* Expand first key: key = secret, data = 0x1 */ output[0] = 1; - blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); + hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); memcpy(first_dst, output, first_len); if (!second_dst || !second_len) @@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, /* Expand second key: key = secret, data = first-key || 0x2 */ output[BLAKE2S_HASH_SIZE] = 2; - blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, - BLAKE2S_HASH_SIZE); + hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); memcpy(second_dst, output, second_len); if (!third_dst || !third_len) @@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, /* Expand third key: key = secret, data = second-key || 0x3 */ output[BLAKE2S_HASH_SIZE] = 3; - blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, - BLAKE2S_HASH_SIZE); + hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); memcpy(third_dst, output, third_len); out: diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index d73b522a0081..de71ad594f34 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -1014,7 +1014,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) } ab_pci->msi_ep_base_data = msi_desc->msg.data; - if (msi_desc->msi_attrib.is_64) + if (msi_desc->pci.msi_attrib.is_64) set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c index 2f3c451148db..2f8908074303 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c @@ -4,6 +4,8 @@ */ #include <asm/unaligned.h> + +#include <linux/math.h> #include <linux/string.h> #include <linux/bug.h> diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0307a6677907..8d54f9face2f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -4498,7 +4498,7 @@ static void remove_vqs(struct virtio_device *vdev) { int i; - vdev->config->reset(vdev); + virtio_reset_device(vdev); for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) { struct virtqueue *vq = hwsim_vqs[i]; diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index a0c5d02ae88c..8a3d86897ea8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common) rsi_coex_sched_tx_pkts(coex_cb); } while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0); - complete_and_exit(&coex_cb->coex_tx_thread.completion, 0); + kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0); } int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg) diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 5d1490fc32db..f9f004446b07 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -264,7 +264,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common) if (common->init_done) rsi_core_qos_processor(common); } while (atomic_read(&common->tx_thread.thread_done) == 0); - complete_and_exit(&common->tx_thread.completion, 0); + kthread_complete_and_exit(&common->tx_thread.completion, 0); } #ifdef CONFIG_RSI_COEX diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 8ace1874e5cb..b2b47a0abcbf 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common) rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__); atomic_inc(&sdev->rx_thread.thread_done); - complete_and_exit(&sdev->rx_thread.completion, 0); + kthread_complete_and_exit(&sdev->rx_thread.completion, 0); } /** diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 4ffcdde1acb1..5130b0e72adc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common) out: rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__); skb_queue_purge(&dev->rx_q); - complete_and_exit(&dev->rx_thread.completion, 0); + kthread_complete_and_exit(&dev->rx_thread.completion, 0); } diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index 71bf9b4f769f..6872782e8dd8 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -385,13 +385,13 @@ static void mhi_net_rx_refill_work(struct work_struct *work) int err; while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { - struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL); + struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL); if (unlikely(!skb)) break; 
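		/* [Editor's note] Descriptive aside on the hunk above/below:
		 * the alloc_skb() above and the mhi_queue_skb() call just
		 * below now both use the per-device mbim->mru rather than
		 * the fixed MHI_DEFAULT_MRU, so every receive buffer is
		 * sized for the MRU the driver is actually operating with.
		 */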
err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, - MHI_DEFAULT_MRU, MHI_EOT); + mbim->mru, MHI_EOT); if (unlikely(err)) { kfree_skb(skb); break; diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 37d26f01986b..62a0f1a010cb 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -188,7 +188,7 @@ do { \ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy) { int polarity, retry, ret; - char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 }; + static const char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 }; int count = sizeof(rset_cmd); nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n"); diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index a43fc4117fa5..c922f10d0d7b 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -316,6 +316,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, return -ENOMEM; transaction->aid_len = skb->data[1]; + + /* Checking if the length of the AID is valid */ + if (transaction->aid_len > sizeof(transaction->aid)) + return -EINVAL; + memcpy(transaction->aid, &skb->data[2], transaction->aid_len); @@ -325,6 +330,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, return -EPROTO; transaction->params_len = skb->data[transaction->aid_len + 3]; + + /* Total size is allocated (skb->len - 2) minus fixed array members */ + if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) + return -EINVAL; + memcpy(transaction->params, skb->data + transaction->aid_len + 4, transaction->params_len); diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 87847c380051..04550b1f984c 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -1321,6 +1321,8 @@ static const struct ntb_dev_data dev_data[] = { static const struct pci_device_id amd_ntb_pci_tbl[] = { { PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] }, { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] }, { 0, } }; diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 4c6eb61a6ac6..88ae18b0efa8 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -297,7 +297,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, * (see CMA_CONFIG_ALIGNMENT) */ dev_err(&sndev->stdev->dev, - "ERROR: Memory window address is not aligned to it's size!\n"); + "ERROR: Memory window address is not aligned to its size!\n"); return -EINVAL; } @@ -419,8 +419,10 @@ static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev, enum ntb_width *width) { struct switchtec_dev *stdev = sndev->stdev; + struct part_cfg_regs __iomem *part_cfg = + &stdev->mmio_part_cfg_all[partition]; - u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id); + u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF; u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]); if (speed) @@ -840,7 +842,6 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) u64 tpart_vec; int self; u64 part_map; - int bit; sndev->ntb.pdev = sndev->stdev->pdev; sndev->ntb.topo = NTB_TOPO_SWITCH; @@ -859,31 +860,31 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) tpart_vec |= 
ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low); part_map = ioread64(&sndev->mmio_ntb->ep_map); + tpart_vec &= part_map; part_map &= ~(1 << sndev->self_partition); - if (!ffs(tpart_vec)) { + if (!tpart_vec) { if (sndev->stdev->partition_count != 2) { dev_err(&sndev->stdev->dev, "ntb target partition not defined\n"); return -ENODEV; } - bit = ffs(part_map); - if (!bit) { + if (!part_map) { dev_err(&sndev->stdev->dev, "peer partition is not NT partition\n"); return -ENODEV; } - sndev->peer_partition = bit - 1; + sndev->peer_partition = __ffs64(part_map); } else { - if (ffs(tpart_vec) != fls(tpart_vec)) { + if (__ffs64(tpart_vec) != (fls64(tpart_vec) - 1)) { dev_err(&sndev->stdev->dev, "ntb driver only supports 1 pair of 1-1 ntb mapping\n"); return -ENODEV; } - sndev->peer_partition = ffs(tpart_vec) - 1; + sndev->peer_partition = __ffs64(tpart_vec); if (!(part_map & (1ULL << sndev->peer_partition))) { dev_err(&sndev->stdev->dev, "ntb target partition is not NT partition\n"); @@ -954,7 +955,7 @@ static int config_req_id_table(struct switchtec_ntb *sndev, u32 error; u32 proxy_id; - if (ioread32(&mmio_ctrl->req_id_table_size) < count) { + if (ioread16(&mmio_ctrl->req_id_table_size) < count) { dev_err(&sndev->stdev->dev, "Not enough requester IDs available.\n"); return -EFAULT; @@ -966,9 +967,6 @@ static int config_req_id_table(struct switchtec_ntb *sndev, if (rc) return rc; - iowrite32(NTB_PART_CTRL_ID_PROT_DIS, - &mmio_ctrl->partition_ctrl); - for (i = 0; i < count; i++) { iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN, &mmio_ctrl->req_id_table[i]); @@ -1090,7 +1088,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev, { struct part_cfg_regs __iomem *part_cfg = &sndev->stdev->mmio_part_cfg_all[sndev->peer_partition]; - u32 pff = ioread32(&part_cfg->vep_pff_inst_id); + u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF; struct pff_csr_regs __iomem *mmio_pff = &sndev->stdev->mmio_pff_csr[pff]; const u64 bar_space = 0x1000000000LL; diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c index 3f05cfbc73af..dd683cb58d09 100644 --- a/drivers/ntb/msi.c +++ b/drivers/ntb/msi.c @@ -108,8 +108,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb) if (!ntb->msi) return -EINVAL; - desc = first_msi_entry(&ntb->pdev->dev); + msi_lock_descs(&ntb->pdev->dev); + desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED); addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32); + msi_unlock_descs(&ntb->pdev->dev); for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) { peer_widx = ntb_peer_highest_mw_idx(ntb, peer); @@ -260,8 +262,9 @@ static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry, * @handler: Function to be called when the IRQ occurs * @thread_fn: Function to be called in a threaded interrupt context. 
NULL * for clients which handle everything in @handler - * @devname: An ascii name for the claiming device, dev_name(dev) if NULL + * @name: An ascii name for the claiming device, dev_name(dev) if NULL * @dev_id: A cookie passed back to the handler function + * @msi_desc: MSI descriptor data which triggers the interrupt * * This function assigns an interrupt handler to an unused * MSI interrupt and returns the descriptor used to trigger @@ -281,13 +284,15 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, const char *name, void *dev_id, struct ntb_msi_desc *msi_desc) { + struct device *dev = &ntb->pdev->dev; struct msi_desc *entry; int ret; if (!ntb->msi) return -EINVAL; - for_each_pci_msi_entry(entry, ntb->pdev) { + msi_lock_descs(dev); + msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) { if (irq_has_action(entry->irq)) continue; @@ -304,14 +309,17 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, ret = ntbm_msi_setup_callback(ntb, entry, msi_desc); if (ret) { devm_free_irq(&ntb->dev, entry->irq, dev_id); - return ret; + goto unlock; } - - return entry->irq; + ret = entry->irq; + goto unlock; } + ret = -ENODEV; - return -ENODEV; +unlock: + msi_unlock_descs(dev); + return ret; } EXPORT_SYMBOL(ntbm_msi_request_threaded_irq); diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c index 726c7354d465..995b6cdc67ed 100644 --- a/drivers/nvdimm/virtio_pmem.c +++ b/drivers/nvdimm/virtio_pmem.c @@ -105,7 +105,7 @@ static void virtio_pmem_remove(struct virtio_device *vdev) nvdimm_bus_unregister(nvdimm_bus); vdev->config->del_vqs(vdev); - vdev->config->reset(vdev); + virtio_reset_device(vdev); } static struct virtio_driver virtio_pmem_driver = { diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index e765d3d0542e..23a38dcf0fc4 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -312,6 +312,8 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct nvmem_device *nvmem = to_nvmem_device(dev); + attr->size = nvmem->size; + return nvmem_bin_attr_get_umode(nvmem); } diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c index 6a537d959f14..e9a375dd84af 100644 --- a/drivers/nvmem/mtk-efuse.c +++ b/drivers/nvmem/mtk-efuse.c @@ -19,11 +19,12 @@ static int mtk_reg_read(void *context, unsigned int reg, void *_val, size_t bytes) { struct mtk_efuse_priv *priv = context; - u32 *val = _val; - int i = 0, words = bytes / 4; + void __iomem *addr = priv->base + reg; + u8 *val = _val; + int i; - while (words--) - *val++ = readl(priv->base + reg + (i++ * 4)); + for (i = 0; i < bytes; i++, val++) + *val = readb(addr + i); return 0; } @@ -45,8 +46,8 @@ static int mtk_efuse_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - econfig.stride = 4; - econfig.word_size = 4; + econfig.stride = 1; + econfig.word_size = 1; econfig.reg_read = mtk_reg_read; econfig.size = resource_size(res); econfig.priv = priv; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index ca2cfb3012a4..ad85ff6474ff 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -26,6 +26,7 @@ #include <linux/serial_core.h> #include <linux/sysfs.h> #include <linux/random.h> +#include <linux/kmemleak.h> #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ #include <asm/page.h> @@ -524,9 +525,12 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, size = dt_mem_next_cell(dt_root_size_cells, &prop); if (size && - early_init_dt_reserve_memory_arch(base, 
size, nomap) == 0) + early_init_dt_reserve_memory_arch(base, size, nomap) == 0) { pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n", uname, &base, (unsigned long)(size / SZ_1M)); + if (!nomap) + kmemleak_alloc_phys(base, size, 0, 0); + } else pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n", uname, &base, (unsigned long)(size / SZ_1M)); diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 43e615aa12ff..d98fafdd0f99 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -184,7 +184,7 @@ config PCI_LABEL config PCI_HYPERV tristate "Hyper-V PCI Frontend" - depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS + depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS select PCI_HYPERV_INTERFACE help The PCI device frontend driver allows the kernel to import arbitrary diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index d62c4ac4ae1b..37be95adf169 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -5,8 +5,9 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ remove.o pci.o pci-driver.o search.o \ pci-sysfs.o rom.o setup-res.o irq.o vpd.o \ - setup-bus.o vc.o mmap.o setup-irq.o msi.o + setup-bus.o vc.o mmap.o setup-irq.o +obj-$(CONFIG_PCI) += msi/ obj-$(CONFIG_PCI) += pcie/ ifdef CONFIG_PCI diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 46935695cfb9..0d9f6b21babb 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -42,7 +42,10 @@ int noinline pci_bus_read_config_##size \ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ pci_lock_config(flags); \ res = bus->ops->read(bus, devfn, pos, len, &data); \ - *value = (type)data; \ + if (res) \ + PCI_SET_ERROR_RESPONSE(value); \ + else \ + *value = (type)data; \ pci_unlock_config(flags); \ return res; \ } @@ -80,10 +83,8 @@ int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, void __iomem *addr; addr = bus->ops->map_bus(bus, devfn, where); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } if (size == 1) *val = readb(addr); @@ -122,10 +123,8 @@ int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, void __iomem *addr; addr = bus->ops->map_bus(bus, devfn, where & ~0x3); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } *val = readl(addr); @@ -228,7 +227,10 @@ int pci_user_read_config_##size \ ret = dev->bus->ops->read(dev->bus, dev->devfn, \ pos, sizeof(type), &data); \ raw_spin_unlock_irq(&pci_lock); \ - *val = (type)data; \ + if (ret) \ + PCI_SET_ERROR_RESPONSE(val); \ + else \ + *val = (type)data; \ return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_read_config_##size); @@ -410,9 +412,9 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); /* - * Reset *val to 0 if pci_read_config_word() fails, it may - * have been written as 0xFFFF if hardware error happens - * during pci_read_config_word(). + * Reset *val to 0 if pci_read_config_word() fails; it may + * have been written as 0xFFFF (PCI_ERROR_RESPONSE) if the + * config read failed on PCI. 
*/ if (ret) *val = 0; @@ -445,9 +447,9 @@ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); /* - * Reset *val to 0 if pci_read_config_dword() fails, it may - * have been written as 0xFFFFFFFF if hardware error happens - * during pci_read_config_dword(). + * Reset *val to 0 if pci_read_config_dword() fails; it may + * have been written as 0xFFFFFFFF (PCI_ERROR_RESPONSE) if + * the config read failed on PCI. */ if (ret) *val = 0; @@ -523,7 +525,7 @@ EXPORT_SYMBOL(pcie_capability_clear_and_set_dword); int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) { if (pci_dev_is_disconnected(dev)) { - *val = ~0; + PCI_SET_ERROR_RESPONSE(val); return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); @@ -533,7 +535,7 @@ EXPORT_SYMBOL(pci_read_config_byte); int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) { if (pci_dev_is_disconnected(dev)) { - *val = ~0; + PCI_SET_ERROR_RESPONSE(val); return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); @@ -544,7 +546,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val) { if (pci_dev_is_disconnected(dev)) { - *val = ~0; + PCI_SET_ERROR_RESPONSE(val); return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 7fc5135ffbbf..601f2531ee91 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -4,7 +4,7 @@ menu "PCI controller drivers" depends on PCI config PCI_MVEBU - bool "Marvell EBU PCIe controller" + tristate "Marvell EBU PCIe controller" depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST depends on MVEBU_MBUS depends on ARM @@ -274,14 +274,14 @@ config PCIE_BRCMSTB BMIPS_GENERIC || COMPILE_TEST depends on OF depends on PCI_MSI_IRQ_DOMAIN - default ARCH_BRCMSTB + default ARCH_BRCMSTB || BMIPS_GENERIC help Say Y here to enable PCIe host controller support for Broadcom STB based SoCs, like the Raspberry Pi 4. config PCI_HYPERV_INTERFACE tristate "Hyper-V PCI Interface" - depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN help The Hyper-V PCI Interface is a helper driver that allows other drivers to have a common interface with the Hyper-V PCI frontend driver. @@ -332,8 +332,8 @@ config PCIE_APPLE If unsure, say Y if you have an Apple Silicon system. 
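[Editor's note] The access.c hunks above change failed config reads to hand back the all-ones error response rather than leaving the caller's buffer untouched. Below is a minimal, hypothetical sketch of how a caller can recognize that convention; the function name demo_read_vendor is invented for illustration, and the open-coded all-ones test stands in for the PCI_ERROR_RESPONSE helpers this series builds on:

#include <linux/pci.h>

static int demo_read_vendor(struct pci_dev *pdev)
{
	u32 id;
	int rc;

	/* On failure, rc is a PCIBIOS_* error and id is set to all ones. */
	rc = pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
	if (rc)
		return pcibios_err_to_errno(rc);

	/* A read that "succeeds" on the bus can still return the error
	 * response (e.g. the device fell off the bus), so check the data
	 * as well.
	 */
	if (id == (u32)~0)
		return -ENODEV;

	pci_info(pdev, "vendor/device: %#010x\n", id);
	return 0;
}
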
config PCIE_MT7621 - bool "MediaTek MT7621 PCIe Controller" - depends on SOC_MT7621 || (MIPS && COMPILE_TEST) + tristate "MediaTek MT7621 PCIe Controller" + depends on SOC_MT7621 || COMPILE_TEST select PHY_MT7621_PCI default SOC_MT7621 help diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 918e11082e6a..489586a4cdc7 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -51,11 +51,10 @@ enum link_status { #define MAX_LANES 2 struct j721e_pcie { - struct device *dev; + struct cdns_pcie *cdns_pcie; struct clk *refclk; u32 mode; u32 num_lanes; - struct cdns_pcie *cdns_pcie; void __iomem *user_cfg_base; void __iomem *intd_cfg_base; u32 linkdown_irq_regfield; @@ -99,7 +98,7 @@ static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset, static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv) { struct j721e_pcie *pcie = priv; - struct device *dev = pcie->dev; + struct device *dev = pcie->cdns_pcie->dev; u32 reg; reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2); @@ -165,7 +164,7 @@ static const struct cdns_pcie_ops j721e_pcie_ops = { static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon, unsigned int offset) { - struct device *dev = pcie->dev; + struct device *dev = pcie->cdns_pcie->dev; u32 mask = J721E_MODE_RC; u32 mode = pcie->mode; u32 val = 0; @@ -184,7 +183,7 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon, static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie, struct regmap *syscon, unsigned int offset) { - struct device *dev = pcie->dev; + struct device *dev = pcie->cdns_pcie->dev; struct device_node *np = dev->of_node; int link_speed; u32 val = 0; @@ -205,7 +204,7 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie, static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, struct regmap *syscon, unsigned int offset) { - struct device *dev = pcie->dev; + struct device *dev = pcie->cdns_pcie->dev; u32 lanes = pcie->num_lanes; u32 val = 0; int ret; @@ -220,7 +219,7 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) { - struct device *dev = pcie->dev; + struct device *dev = pcie->cdns_pcie->dev; struct device_node *node = dev->of_node; struct of_phandle_args args; unsigned int offset = 0; @@ -354,7 +353,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct pci_host_bridge *bridge; - struct j721e_pcie_data *data; + const struct j721e_pcie_data *data; struct cdns_pcie *cdns_pcie; struct j721e_pcie *pcie; struct cdns_pcie_rc *rc; @@ -367,7 +366,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) int ret; int irq; - data = (struct j721e_pcie_data *)of_device_get_match_data(dev); + data = of_device_get_match_data(dev); if (!data) return -EINVAL; @@ -377,7 +376,6 @@ static int j721e_pcie_probe(struct platform_device *pdev) if (!pcie) return -ENOMEM; - pcie->dev = dev; pcie->mode = mode; pcie->linkdown_irq_regfield = data->linkdown_irq_regfield; diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c index a224afadbcc0..bac0541317c1 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-plat.c +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c @@ -45,7 +45,6 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) { const struct 
cdns_plat_pcie_of_data *data; struct cdns_plat_pcie *cdns_plat_pcie; - const struct of_device_id *match; struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct cdns_pcie_ep *ep; @@ -54,11 +53,10 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) bool is_rc; int ret; - match = of_match_device(cdns_plat_pcie_of_match, dev); - if (!match) + data = of_device_get_match_data(dev); + if (!data) return -EINVAL; - data = (struct cdns_plat_pcie_of_data *)match->data; is_rc = data->is_rc; pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index 262421e5d917..c8a27b6290ce 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -310,7 +310,7 @@ struct cdns_pcie { * single function at a time * @vendor_id: PCI vendor ID * @device_id: PCI device ID - * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or + * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or * available * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index a4221f6f3629..12d19183e746 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -697,16 +697,14 @@ static int dra7xx_pcie_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; char name[10]; struct gpio_desc *reset; - const struct of_device_id *match; const struct dra7xx_pcie_of_data *data; enum dw_pcie_device_mode mode; u32 b1co_mode_sel_mask; - match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); - if (!match) + data = of_device_get_match_data(dev); + if (!data) return -EINVAL; - data = (struct dra7xx_pcie_of_data *)match->data; mode = (enum dw_pcie_device_mode)data->mode; b1co_mode_sel_mask = data->b1co_mode_sel_mask; diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index 722dacdd5a17..467c8d1cd7e4 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -217,10 +217,8 @@ static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); - if (PCI_SLOT(devfn)) { - *val = ~0; + if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; - } *val = dw_pcie_read_dbi(pci, where, size); return PCIBIOS_SUCCESSFUL; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 26f49f797b0f..6974bd5aa116 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -29,6 +29,7 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/reset.h> +#include <linux/phy/phy.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> @@ -49,6 +50,7 @@ enum imx6_pcie_variants { IMX6QP, IMX7D, IMX8MQ, + IMX8MM, }; #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) @@ -88,6 +90,7 @@ struct imx6_pcie { struct device *pd_pcie; /* power domain for pcie phy */ struct device *pd_pcie_phy; + struct phy *phy; const struct imx6_pcie_drvdata *drvdata; }; @@ -372,6 +375,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) case IMX7D: case IMX8MQ: reset_control_assert(imx6_pcie->pciephy_reset); + fallthrough; + case IMX8MM: reset_control_assert(imx6_pcie->apps_reset); break; 
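/*
 * Aside from the i.MX8MM additions, a recurring theme in this diff
 * (pcie-cadence-plat, pci-dra7xx, pcie-artpec6, pcie-designware-plat,
 * pci-keystone, pcie-kirin) is replacing of_match_device() plus an
 * explicit cast with of_device_get_match_data(). A minimal sketch of
 * the resulting probe idiom; the "foo" names are placeholders, not
 * taken from any of the patches:
 */
static int foo_probe(struct platform_device *pdev)
{
	/* Returns the matched of_device_id's .data pointer, or NULL. */
	const struct foo_of_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;

	/* ... use data->mode etc.; no cast from const void * needed ... */
	return 0;
}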
case IMX6SX: @@ -407,7 +412,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) { - WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ); + WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ && + imx6_pcie->drvdata->variant != IMX8MM); return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14; } @@ -446,6 +452,11 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) break; case IMX7D: break; + case IMX8MM: + ret = clk_prepare_enable(imx6_pcie->pcie_aux); + if (ret) + dev_err(dev, "unable to enable pcie_aux clock\n"); + break; case IMX8MQ: ret = clk_prepare_enable(imx6_pcie->pcie_aux); if (ret) { @@ -522,6 +533,14 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) goto err_ref_clk; } + switch (imx6_pcie->drvdata->variant) { + case IMX8MM: + if (phy_power_on(imx6_pcie->phy)) + dev_err(dev, "unable to power on PHY\n"); + break; + default: + break; + } /* allow the clocks to stabilize */ usleep_range(200, 500); @@ -538,6 +557,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) case IMX8MQ: reset_control_deassert(imx6_pcie->pciephy_reset); break; + case IMX8MM: + if (phy_init(imx6_pcie->phy)) + dev_err(dev, "waiting for phy ready timeout!\n"); + break; case IMX7D: reset_control_deassert(imx6_pcie->pciephy_reset); @@ -614,6 +637,12 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) { switch (imx6_pcie->drvdata->variant) { + case IMX8MM: + /* + * The PHY initialization had been done in the PHY + * driver, break here directly. + */ + break; case IMX8MQ: /* * TODO: Currently this code assumes external @@ -753,6 +782,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev) break; case IMX7D: case IMX8MQ: + case IMX8MM: reset_control_deassert(imx6_pcie->apps_reset); break; } @@ -871,6 +901,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev) IMX6Q_GPR12_PCIE_CTL_2, 0); break; case IMX7D: + case IMX8MM: reset_control_assert(imx6_pcie->apps_reset); break; default: @@ -930,6 +961,7 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); break; case IMX8MQ: + case IMX8MM: clk_disable_unprepare(imx6_pcie->pcie_aux); break; default: @@ -945,8 +977,16 @@ static int imx6_pcie_suspend_noirq(struct device *dev) return 0; imx6_pcie_pm_turnoff(imx6_pcie); - imx6_pcie_clk_disable(imx6_pcie); imx6_pcie_ltssm_disable(dev); + imx6_pcie_clk_disable(imx6_pcie); + switch (imx6_pcie->drvdata->variant) { + case IMX8MM: + if (phy_power_off(imx6_pcie->phy)) + dev_err(dev, "unable to power off PHY\n"); + break; + default: + break; + } return 0; } @@ -1043,11 +1083,6 @@ static int imx6_pcie_probe(struct platform_device *pdev) } /* Fetch clocks */ - imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); - if (IS_ERR(imx6_pcie->pcie_phy)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy), - "pcie_phy clock source missing or invalid\n"); - imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); if (IS_ERR(imx6_pcie->pcie_bus)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus), @@ -1090,9 +1125,34 @@ static int imx6_pcie_probe(struct platform_device *pdev) return PTR_ERR(imx6_pcie->apps_reset); } break; + case IMX8MM: + imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); + if (IS_ERR(imx6_pcie->pcie_aux)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux), + "pcie_aux clock source missing or invalid\n"); + imx6_pcie->apps_reset = 
devm_reset_control_get_exclusive(dev, + "apps"); + if (IS_ERR(imx6_pcie->apps_reset)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset), + "failed to get pcie apps reset control\n"); + + imx6_pcie->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(imx6_pcie->phy)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy), + "failed to get pcie phy\n"); + + break; + default: break; } + /* Don't fetch the pcie_phy clock if an abstract PHY driver handles it */ + if (imx6_pcie->phy == NULL) { + imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); + if (IS_ERR(imx6_pcie->pcie_phy)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy), + "pcie_phy clock source missing or invalid\n"); + } + /* Grab turnoff reset */ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); @@ -1202,6 +1262,10 @@ static const struct imx6_pcie_drvdata drvdata[] = { [IMX8MQ] = { .variant = IMX8MQ, }, + [IMX8MM] = { + .variant = IMX8MM, + .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, + }, }; static const struct of_device_id imx6_pcie_of_match[] = { @@ -1209,7 +1273,8 @@ static const struct of_device_id imx6_pcie_of_match[] = { { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], }, { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], }, { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], }, - { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } , + { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], }, + { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], }, {}, }; diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 865258d8c53c..1c2ee4e13f1c 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -747,9 +747,9 @@ err: #ifdef CONFIG_ARM /* - * When a PCI device does not exist during config cycles, keystone host gets a - * bus error instead of returning 0xffffffff. This handler always returns 0 - * for this kind of faults. + * When a PCI device does not exist during config cycles, keystone host + * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE). + * This handler always returns 0 for this kind of fault.
*/ static int ks_pcie_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -775,12 +775,19 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; struct device_node *np = dev->of_node; + struct of_phandle_args args; + unsigned int offset = 0; devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id"); if (IS_ERR(devctrl_regs)) return PTR_ERR(devctrl_regs); - ret = regmap_read(devctrl_regs, 0, &id); + /* Do not error out to maintain old DT compatibility */ + ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args); + if (!ret) + offset = args.args[0]; + + ret = regmap_read(devctrl_regs, offset, &id); if (ret) return ret; @@ -989,6 +996,8 @@ err_phy: static int ks_pcie_set_mode(struct device *dev) { struct device_node *np = dev->of_node; + struct of_phandle_args args; + unsigned int offset = 0; struct regmap *syscon; u32 val; u32 mask; @@ -998,10 +1007,15 @@ static int ks_pcie_set_mode(struct device *dev) if (IS_ERR(syscon)) return 0; + /* Do not error out to maintain old DT compatibility */ + ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args); + if (!ret) + offset = args.args[0]; + mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN; val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN; - ret = regmap_update_bits(syscon, 0, mask, val); + ret = regmap_update_bits(syscon, offset, mask, val); if (ret) { dev_err(dev, "failed to set pcie mode\n"); return ret; @@ -1014,6 +1028,8 @@ static int ks_pcie_am654_set_mode(struct device *dev, enum dw_pcie_device_mode mode) { struct device_node *np = dev->of_node; + struct of_phandle_args args; + unsigned int offset = 0; struct regmap *syscon; u32 val; u32 mask; @@ -1023,6 +1039,11 @@ static int ks_pcie_am654_set_mode(struct device *dev, if (IS_ERR(syscon)) return 0; + /* Do not error out to maintain old DT compatibility */ + ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args); + if (!ret) + offset = args.args[0]; + mask = AM654_PCIE_DEV_TYPE_MASK; switch (mode) { @@ -1037,7 +1058,7 @@ static int ks_pcie_am654_set_mode(struct device *dev, return -EINVAL; } - ret = regmap_update_bits(syscon, 0, mask, val); + ret = regmap_update_bits(syscon, offset, mask, val); if (ret) { dev_err(dev, "failed to set pcie mode\n"); return ret; @@ -1087,7 +1108,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; const struct ks_pcie_of_data *data; - const struct of_device_id *match; enum dw_pcie_device_mode mode; struct dw_pcie *pci; struct keystone_pcie *ks_pcie; @@ -1104,8 +1124,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) int irq; int i; - match = of_match_device(of_match_ptr(ks_pcie_of_match), dev); - data = (struct ks_pcie_of_data *)match->data; + data = of_device_get_match_data(dev); if (!data) return -EINVAL; diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 5b9c625df7b8..6a4f0619bb1c 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -3,6 +3,7 @@ * PCIe host controller driver for Freescale Layerscape SoCs * * Copyright (C) 2014 Freescale Semiconductor. 
+ * Copyright 2021 NXP * * Author: Minghuan Lian <Minghuan.Lian@freescale.com> */ @@ -22,12 +23,6 @@ #include "pcie-designware.h" -/* PEX1/2 Misc Ports Status Register */ -#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) -#define LTSSM_STATE_SHIFT 20 -#define LTSSM_STATE_MASK 0x3f -#define LTSSM_PCIE_L0 0x11 /* L0 state */ - /* PEX Internal Configuration Registers */ #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ #define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ @@ -35,20 +30,8 @@ #define PCIE_IATU_NUM 6 -struct ls_pcie_drvdata { - u32 lut_offset; - u32 ltssm_shift; - u32 lut_dbg; - const struct dw_pcie_host_ops *ops; - const struct dw_pcie_ops *dw_pcie_ops; -}; - struct ls_pcie { struct dw_pcie *pci; - void __iomem *lut; - struct regmap *scfg; - const struct ls_pcie_drvdata *drvdata; - int index; }; #define to_ls_pcie(x) dev_get_drvdata((x)->dev) @@ -83,38 +66,6 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) iowrite32(val, pci->dbi_base + PCIE_STRFMR1); } -static int ls1021_pcie_link_up(struct dw_pcie *pci) -{ - u32 state; - struct ls_pcie *pcie = to_ls_pcie(pci); - - if (!pcie->scfg) - return 0; - - regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); - state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; - - if (state < LTSSM_PCIE_L0) - return 0; - - return 1; -} - -static int ls_pcie_link_up(struct dw_pcie *pci) -{ - struct ls_pcie *pcie = to_ls_pcie(pci); - u32 state; - - state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> - pcie->drvdata->ltssm_shift) & - LTSSM_STATE_MASK; - - if (state < LTSSM_PCIE_L0) - return 0; - - return 1; -} - /* Forward error response of outbound non-posted requests */ static void ls_pcie_fix_error_response(struct ls_pcie *pcie) { @@ -139,96 +90,20 @@ static int ls_pcie_host_init(struct pcie_port *pp) return 0; } -static int ls1021_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct ls_pcie *pcie = to_ls_pcie(pci); - struct device *dev = pci->dev; - u32 index[2]; - int ret; - - pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, - "fsl,pcie-scfg"); - if (IS_ERR(pcie->scfg)) { - ret = PTR_ERR(pcie->scfg); - dev_err(dev, "No syscfg phandle specified\n"); - pcie->scfg = NULL; - return ret; - } - - if (of_property_read_u32_array(dev->of_node, - "fsl,pcie-scfg", index, 2)) { - pcie->scfg = NULL; - return -EINVAL; - } - pcie->index = index[1]; - - return ls_pcie_host_init(pp); -} - -static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { - .host_init = ls1021_pcie_host_init, -}; - static const struct dw_pcie_host_ops ls_pcie_host_ops = { .host_init = ls_pcie_host_init, }; -static const struct dw_pcie_ops dw_ls1021_pcie_ops = { - .link_up = ls1021_pcie_link_up, -}; - -static const struct dw_pcie_ops dw_ls_pcie_ops = { - .link_up = ls_pcie_link_up, -}; - -static const struct ls_pcie_drvdata ls1021_drvdata = { - .ops = &ls1021_pcie_host_ops, - .dw_pcie_ops = &dw_ls1021_pcie_ops, -}; - -static const struct ls_pcie_drvdata ls1043_drvdata = { - .lut_offset = 0x10000, - .ltssm_shift = 24, - .lut_dbg = 0x7fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = &dw_ls_pcie_ops, -}; - -static const struct ls_pcie_drvdata ls1046_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 24, - .lut_dbg = 0x407fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = &dw_ls_pcie_ops, -}; - -static const struct ls_pcie_drvdata ls2080_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 0, - .lut_dbg = 0x7fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = 
&dw_ls_pcie_ops, -}; - -static const struct ls_pcie_drvdata ls2088_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 0, - .lut_dbg = 0x407fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = &dw_ls_pcie_ops, -}; - static const struct of_device_id ls_pcie_of_match[] = { - { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata }, - { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, - { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, - { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, - { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, - { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, - { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata }, - { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata }, + { .compatible = "fsl,ls1012a-pcie", }, + { .compatible = "fsl,ls1021a-pcie", }, + { .compatible = "fsl,ls1028a-pcie", }, + { .compatible = "fsl,ls1043a-pcie", }, + { .compatible = "fsl,ls1046a-pcie", }, + { .compatible = "fsl,ls2080a-pcie", }, + { .compatible = "fsl,ls2085a-pcie", }, + { .compatible = "fsl,ls2088a-pcie", }, + { .compatible = "fsl,ls1088a-pcie", }, { }, }; @@ -247,11 +122,8 @@ static int ls_pcie_probe(struct platform_device *pdev) if (!pci) return -ENOMEM; - pcie->drvdata = of_device_get_match_data(dev); - pci->dev = dev; - pci->ops = pcie->drvdata->dw_pcie_ops; - pci->pp.ops = pcie->drvdata->ops; + pci->pp.ops = &ls_pcie_host_ops; pcie->pci = pci; @@ -260,8 +132,6 @@ static int ls_pcie_probe(struct platform_device *pdev) if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset; - if (!ls_pcie_is_bridge(pcie)) return -ENODEV; diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index c91fc1954432..2f15441770e1 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -380,17 +380,15 @@ static int artpec6_pcie_probe(struct platform_device *pdev) struct dw_pcie *pci; struct artpec6_pcie *artpec6_pcie; int ret; - const struct of_device_id *match; const struct artpec_pcie_of_data *data; enum artpec_pcie_variants variant; enum dw_pcie_device_mode mode; u32 val; - match = of_match_device(artpec6_pcie_of_match, dev); - if (!match) + data = of_device_get_match_data(dev); + if (!data) return -EINVAL; - data = (struct artpec_pcie_of_data *)match->data; variant = (enum artpec_pcie_variants)data->variant; mode = (enum dw_pcie_device_mode)data->mode; diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 8851eb161a0e..0c5de87d3cc6 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -122,15 +122,13 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) struct dw_plat_pcie *dw_plat_pcie; struct dw_pcie *pci; int ret; - const struct of_device_id *match; const struct dw_plat_pcie_of_data *data; enum dw_pcie_device_mode mode; - match = of_match_device(dw_plat_pcie_of_match, dev); - if (!match) + data = of_device_get_match_data(dev); + if (!data) return -EINVAL; - data = (struct dw_plat_pcie_of_data *)match->data; mode = (enum dw_pcie_device_mode)data->mode; dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 850b4533f4ef..d92c8a25094f 
100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -672,10 +672,11 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci) if (!pci->atu_base) { struct resource *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); - if (res) + if (res) { pci->atu_size = resource_size(res); - pci->atu_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->atu_base)) + pci->atu_base = devm_ioremap_resource(dev, res); + } + if (!pci->atu_base || IS_ERR(pci->atu_base)) pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; } diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c index 8fc5960faf28..8904b5b85ee5 100644 --- a/drivers/pci/controller/dwc/pcie-hisi.c +++ b/drivers/pci/controller/dwc/pcie-hisi.c @@ -18,6 +18,10 @@ #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) +struct hisi_pcie { + void __iomem *reg_base; +}; + static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { @@ -58,10 +62,10 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct pci_config_window *cfg = bus->sysdata; - void __iomem *reg_base = cfg->priv; + struct hisi_pcie *pcie = cfg->priv; if (bus->number == cfg->busr.start) - return reg_base + where; + return pcie->reg_base + where; else return pci_ecam_map_bus(bus, devfn, where); } @@ -71,12 +75,16 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, static int hisi_pcie_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; + struct hisi_pcie *pcie; struct acpi_device *adev = to_acpi_device(dev); struct acpi_pci_root *root = acpi_driver_data(adev); struct resource *res; - void __iomem *reg_base; int ret; + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + /* * Retrieve RC base and size from a HISI0081 device with _UID * matching our segment. 
@@ -91,11 +99,11 @@ static int hisi_pcie_init(struct pci_config_window *cfg) return -ENOMEM; } - reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); - if (!reg_base) + pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); + if (!pcie->reg_base) return -ENOMEM; - cfg->priv = reg_base; + cfg->priv = pcie; return 0; } @@ -115,9 +123,13 @@ const struct pci_ecam_ops hisi_pcie_ops = { static int hisi_pcie_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; + struct hisi_pcie *pcie; struct platform_device *pdev = to_platform_device(dev); struct resource *res; - void __iomem *reg_base; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { @@ -125,11 +137,11 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg) return -EINVAL; } - reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); - if (!reg_base) + pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); + if (!pcie->reg_base) return -ENOMEM; - cfg->priv = reg_base; + cfg->priv = pcie; return 0; } diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 86f9d16c50d7..410555dccb6d 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -127,10 +127,8 @@ static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); - if (PCI_SLOT(devfn)) { - *val = ~0; + if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; - } *val = dw_pcie_read_dbi(pci, where, size); return PCIBIOS_SUCCESSFUL; diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index d15cf35fa7f2..5ba144924ff8 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -62,7 +62,7 @@ struct intel_pcie_soc { unsigned int pcie_ver; }; -struct intel_pcie_port { +struct intel_pcie { struct dw_pcie pci; void __iomem *app_base; struct gpio_desc *reset_gpio; @@ -83,53 +83,53 @@ static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val) writel(val, base + ofs); } -static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val) +static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val) { - writel(val, lpp->app_base + ofs); + writel(val, pcie->app_base + ofs); } -static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs, +static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs, u32 mask, u32 val) { - pcie_update_bits(lpp->app_base, ofs, mask, val); + pcie_update_bits(pcie->app_base, ofs, mask, val); } -static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs) +static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs) { - return dw_pcie_readl_dbi(&lpp->pci, ofs); + return dw_pcie_readl_dbi(&pcie->pci, ofs); } -static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val) +static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val) { - dw_pcie_writel_dbi(&lpp->pci, ofs, val); + dw_pcie_writel_dbi(&pcie->pci, ofs, val); } -static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs, +static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs, u32 mask, u32 val) { - pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val); + pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val); } -static void 
intel_pcie_ltssm_enable(struct intel_pcie_port *lpp) +static void intel_pcie_ltssm_enable(struct intel_pcie *pcie) { - pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, + pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, PCIE_APP_CCR_LTSSM_ENABLE); } -static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp) +static void intel_pcie_ltssm_disable(struct intel_pcie *pcie) { - pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0); + pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0); } -static void intel_pcie_link_setup(struct intel_pcie_port *lpp) +static void intel_pcie_link_setup(struct intel_pcie *pcie) { u32 val; - u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP); + u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP); - val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL); + val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL); val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC); - pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val); + pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val); } static void intel_pcie_init_n_fts(struct dw_pcie *pci) @@ -148,14 +148,14 @@ static void intel_pcie_init_n_fts(struct dw_pcie *pci) pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT; } -static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp) +static int intel_pcie_ep_rst_init(struct intel_pcie *pcie) { - struct device *dev = lpp->pci.dev; + struct device *dev = pcie->pci.dev; int ret; - lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(lpp->reset_gpio)) { - ret = PTR_ERR(lpp->reset_gpio); + pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(pcie->reset_gpio)) { + ret = PTR_ERR(pcie->reset_gpio); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret); return ret; @@ -167,19 +167,19 @@ static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp) return 0; } -static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp) +static void intel_pcie_core_rst_assert(struct intel_pcie *pcie) { - reset_control_assert(lpp->core_rst); + reset_control_assert(pcie->core_rst); } -static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp) +static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie) { /* * One micro-second delay to make sure the reset pulse * wide enough so that core reset is clean. 
*/ udelay(1); - reset_control_deassert(lpp->core_rst); + reset_control_deassert(pcie->core_rst); /* * Some SoC core reset also reset PHY, more delay needed @@ -188,58 +188,58 @@ static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp) usleep_range(1000, 2000); } -static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp) +static void intel_pcie_device_rst_assert(struct intel_pcie *pcie) { - gpiod_set_value_cansleep(lpp->reset_gpio, 1); + gpiod_set_value_cansleep(pcie->reset_gpio, 1); } -static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp) +static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie) { - msleep(lpp->rst_intrvl); - gpiod_set_value_cansleep(lpp->reset_gpio, 0); + msleep(pcie->rst_intrvl); + gpiod_set_value_cansleep(pcie->reset_gpio, 0); } -static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp) +static void intel_pcie_core_irq_disable(struct intel_pcie *pcie) { - pcie_app_wr(lpp, PCIE_APP_IRNEN, 0); - pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT); + pcie_app_wr(pcie, PCIE_APP_IRNEN, 0); + pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT); } static int intel_pcie_get_resources(struct platform_device *pdev) { - struct intel_pcie_port *lpp = platform_get_drvdata(pdev); - struct dw_pcie *pci = &lpp->pci; + struct intel_pcie *pcie = platform_get_drvdata(pdev); + struct dw_pcie *pci = &pcie->pci; struct device *dev = pci->dev; int ret; - lpp->core_clk = devm_clk_get(dev, NULL); - if (IS_ERR(lpp->core_clk)) { - ret = PTR_ERR(lpp->core_clk); + pcie->core_clk = devm_clk_get(dev, NULL); + if (IS_ERR(pcie->core_clk)) { + ret = PTR_ERR(pcie->core_clk); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get clks: %d\n", ret); return ret; } - lpp->core_rst = devm_reset_control_get(dev, NULL); - if (IS_ERR(lpp->core_rst)) { - ret = PTR_ERR(lpp->core_rst); + pcie->core_rst = devm_reset_control_get(dev, NULL); + if (IS_ERR(pcie->core_rst)) { + ret = PTR_ERR(pcie->core_rst); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get resets: %d\n", ret); return ret; } ret = device_property_read_u32(dev, "reset-assert-ms", - &lpp->rst_intrvl); + &pcie->rst_intrvl); if (ret) - lpp->rst_intrvl = RESET_INTERVAL_MS; + pcie->rst_intrvl = RESET_INTERVAL_MS; - lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); - if (IS_ERR(lpp->app_base)) - return PTR_ERR(lpp->app_base); + pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); + if (IS_ERR(pcie->app_base)) + return PTR_ERR(pcie->app_base); - lpp->phy = devm_phy_get(dev, "pcie"); - if (IS_ERR(lpp->phy)) { - ret = PTR_ERR(lpp->phy); + pcie->phy = devm_phy_get(dev, "pcie"); + if (IS_ERR(pcie->phy)) { + ret = PTR_ERR(pcie->phy); if (ret != -EPROBE_DEFER) dev_err(dev, "Couldn't get pcie-phy: %d\n", ret); return ret; @@ -248,137 +248,137 @@ static int intel_pcie_get_resources(struct platform_device *pdev) return 0; } -static int intel_pcie_wait_l2(struct intel_pcie_port *lpp) +static int intel_pcie_wait_l2(struct intel_pcie *pcie) { u32 value; int ret; - struct dw_pcie *pci = &lpp->pci; + struct dw_pcie *pci = &pcie->pci; if (pci->link_gen < 3) return 0; /* Send PME_TURN_OFF message */ - pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF, + pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF, PCIE_APP_MSG_XMT_PM_TURNOFF); /* Read PMC status and wait for falling into L2 link state */ - ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value, + ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value, value & 
PCIE_APP_PMC_IN_L2, 20, jiffies_to_usecs(5 * HZ)); if (ret) - dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n"); + dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n"); return ret; } -static void intel_pcie_turn_off(struct intel_pcie_port *lpp) +static void intel_pcie_turn_off(struct intel_pcie *pcie) { - if (dw_pcie_link_up(&lpp->pci)) - intel_pcie_wait_l2(lpp); + if (dw_pcie_link_up(&pcie->pci)) + intel_pcie_wait_l2(pcie); /* Put endpoint device in reset state */ - intel_pcie_device_rst_assert(lpp); - pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0); + intel_pcie_device_rst_assert(pcie); + pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0); } -static int intel_pcie_host_setup(struct intel_pcie_port *lpp) +static int intel_pcie_host_setup(struct intel_pcie *pcie) { int ret; - struct dw_pcie *pci = &lpp->pci; + struct dw_pcie *pci = &pcie->pci; - intel_pcie_core_rst_assert(lpp); - intel_pcie_device_rst_assert(lpp); + intel_pcie_core_rst_assert(pcie); + intel_pcie_device_rst_assert(pcie); - ret = phy_init(lpp->phy); + ret = phy_init(pcie->phy); if (ret) return ret; - intel_pcie_core_rst_deassert(lpp); + intel_pcie_core_rst_deassert(pcie); - ret = clk_prepare_enable(lpp->core_clk); + ret = clk_prepare_enable(pcie->core_clk); if (ret) { - dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret); + dev_err(pcie->pci.dev, "Core clock enable failed: %d\n", ret); goto clk_err; } pci->atu_base = pci->dbi_base + 0xC0000; - intel_pcie_ltssm_disable(lpp); - intel_pcie_link_setup(lpp); + intel_pcie_ltssm_disable(pcie); + intel_pcie_link_setup(pcie); intel_pcie_init_n_fts(pci); dw_pcie_setup_rc(&pci->pp); dw_pcie_upconfig_setup(pci); - intel_pcie_device_rst_deassert(lpp); - intel_pcie_ltssm_enable(lpp); + intel_pcie_device_rst_deassert(pcie); + intel_pcie_ltssm_enable(pcie); ret = dw_pcie_wait_for_link(pci); if (ret) goto app_init_err; /* Enable integrated interrupts */ - pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT, + pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT, PCIE_APP_IRN_INT); return 0; app_init_err: - clk_disable_unprepare(lpp->core_clk); + clk_disable_unprepare(pcie->core_clk); clk_err: - intel_pcie_core_rst_assert(lpp); - phy_exit(lpp->phy); + intel_pcie_core_rst_assert(pcie); + phy_exit(pcie->phy); return ret; } -static void __intel_pcie_remove(struct intel_pcie_port *lpp) +static void __intel_pcie_remove(struct intel_pcie *pcie) { - intel_pcie_core_irq_disable(lpp); - intel_pcie_turn_off(lpp); - clk_disable_unprepare(lpp->core_clk); - intel_pcie_core_rst_assert(lpp); - phy_exit(lpp->phy); + intel_pcie_core_irq_disable(pcie); + intel_pcie_turn_off(pcie); + clk_disable_unprepare(pcie->core_clk); + intel_pcie_core_rst_assert(pcie); + phy_exit(pcie->phy); } static int intel_pcie_remove(struct platform_device *pdev) { - struct intel_pcie_port *lpp = platform_get_drvdata(pdev); - struct pcie_port *pp = &lpp->pci.pp; + struct intel_pcie *pcie = platform_get_drvdata(pdev); + struct pcie_port *pp = &pcie->pci.pp; dw_pcie_host_deinit(pp); - __intel_pcie_remove(lpp); + __intel_pcie_remove(pcie); return 0; } static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev) { - struct intel_pcie_port *lpp = dev_get_drvdata(dev); + struct intel_pcie *pcie = dev_get_drvdata(dev); int ret; - intel_pcie_core_irq_disable(lpp); - ret = intel_pcie_wait_l2(lpp); + intel_pcie_core_irq_disable(pcie); + ret = intel_pcie_wait_l2(pcie); if (ret) return ret; - phy_exit(lpp->phy); - clk_disable_unprepare(lpp->core_clk); + phy_exit(pcie->phy); + 
clk_disable_unprepare(pcie->core_clk); return ret; } static int __maybe_unused intel_pcie_resume_noirq(struct device *dev) { - struct intel_pcie_port *lpp = dev_get_drvdata(dev); + struct intel_pcie *pcie = dev_get_drvdata(dev); - return intel_pcie_host_setup(lpp); + return intel_pcie_host_setup(pcie); } static int intel_pcie_rc_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev); + struct intel_pcie *pcie = dev_get_drvdata(pci->dev); - return intel_pcie_host_setup(lpp); + return intel_pcie_host_setup(pcie); } static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr) @@ -402,17 +402,17 @@ static int intel_pcie_probe(struct platform_device *pdev) { const struct intel_pcie_soc *data; struct device *dev = &pdev->dev; - struct intel_pcie_port *lpp; + struct intel_pcie *pcie; struct pcie_port *pp; struct dw_pcie *pci; int ret; - lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL); - if (!lpp) + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) return -ENOMEM; - platform_set_drvdata(pdev, lpp); - pci = &lpp->pci; + platform_set_drvdata(pdev, pcie); + pci = &pcie->pci; pci->dev = dev; pp = &pci->pp; @@ -420,7 +420,7 @@ static int intel_pcie_probe(struct platform_device *pdev) if (ret) return ret; - ret = intel_pcie_ep_rst_init(lpp); + ret = intel_pcie_ep_rst_init(pcie); if (ret) return ret; diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 095afbccf9c1..fa6886d66488 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -530,10 +530,8 @@ static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); - if (PCI_SLOT(devfn)) { - *val = ~0; + if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; - } *val = dw_pcie_read_dbi(pci, where, size); return PCIBIOS_SUCCESSFUL; @@ -773,7 +771,6 @@ static const struct of_device_id kirin_pcie_match[] = { static int kirin_pcie_probe(struct platform_device *pdev) { enum pcie_kirin_phy_type phy_type; - const struct of_device_id *of_id; struct device *dev = &pdev->dev; struct kirin_pcie *kirin_pcie; struct dw_pcie *pci; @@ -784,13 +781,12 @@ static int kirin_pcie_probe(struct platform_device *pdev) return -EINVAL; } - of_id = of_match_device(kirin_pcie_match, dev); - if (!of_id) { + phy_type = (long)of_device_get_match_data(dev); + if (!phy_type) { dev_err(dev, "OF data missing\n"); return -EINVAL; } - phy_type = (long)of_id->data; kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); if (!kirin_pcie) diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index cfe66bf04c1d..6ce8eddf3a37 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -553,10 +553,8 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, int irq, ret; irq = platform_get_irq_byname(pdev, "global"); - if (irq < 0) { - dev_err(&pdev->dev, "Failed to get Global IRQ\n"); + if (irq < 0) return irq; - } ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qcom_pcie_ep_global_irq_thread, @@ -620,7 +618,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep) dw_pcie_ep_reset_bar(pci, bar); } -static struct dw_pcie_ep_ops pci_ep_ops = { +static const struct dw_pcie_ep_ops pci_ep_ops = { .ep_init = qcom_pcie_ep_init, .raise_irq = qcom_pcie_ep_raise_irq, .get_features = qcom_pcie_epc_get_features, diff 
--git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 1c3d1116bb60..c19cd506ed3f 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -1343,7 +1343,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie) /* Look for an available entry to hold the mapping */ for (i = 0; i < nr_map; i++) { - u16 bdf_be = cpu_to_be16(map[i].bdf); + __be16 bdf_be = cpu_to_be16(map[i].bdf); u32 val; u8 hash; @@ -1534,6 +1534,12 @@ static int qcom_pcie_probe(struct platform_device *pdev) const struct qcom_pcie_cfg *pcie_cfg; int ret; + pcie_cfg = of_device_get_match_data(dev); + if (!pcie_cfg || !pcie_cfg->ops) { + dev_err(dev, "Invalid platform data\n"); + return -EINVAL; + } + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; @@ -1553,12 +1559,6 @@ static int qcom_pcie_probe(struct platform_device *pdev) pcie->pci = pci; - pcie_cfg = of_device_get_match_data(dev); - if (!pcie_cfg || !pcie_cfg->ops) { - dev_err(dev, "Invalid platform data\n"); - return -EINVAL; - } - pcie->ops = pcie_cfg->ops; pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing; diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 1a9e353bef55..1569e82b5568 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -69,7 +69,7 @@ struct pcie_app_reg { static int spear13xx_pcie_start_link(struct dw_pcie *pci) { struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; /* enable ltssm */ writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) @@ -83,7 +83,7 @@ static int spear13xx_pcie_start_link(struct dw_pcie *pci) static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) { struct spear13xx_pcie *spear13xx_pcie = arg; - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; struct dw_pcie *pci = spear13xx_pcie->pci; struct pcie_port *pp = &pci->pp; unsigned int status; @@ -102,7 +102,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie) { - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; /* Enable MSI interrupt */ if (IS_ENABLED(CONFIG_PCI_MSI)) @@ -113,7 +113,7 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc static int spear13xx_pcie_link_up(struct dw_pcie *pci) { struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) return 1; diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 904976913081..b1b5f836a806 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -245,7 +245,7 @@ static const unsigned int pcie_gen_freq[] = { GEN4_CORE_CLK_FREQ }; -struct tegra_pcie_dw { +struct tegra194_pcie { struct device *dev; struct resource *appl_res; struct resource *dbi_res; @@ -289,22 +289,22 @@ struct tegra_pcie_dw { int ep_state; }; -struct tegra_pcie_dw_of_data { +struct tegra194_pcie_of_data { enum dw_pcie_device_mode 
mode; }; -static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) +static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci) { - return container_of(pci, struct tegra_pcie_dw, pci); + return container_of(pci, struct tegra194_pcie, pci); } -static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value, +static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value, const u32 reg) { writel_relaxed(value, pcie->appl_base + reg); } -static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg) +static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg) { return readl_relaxed(pcie->appl_base + reg); } @@ -316,7 +316,7 @@ struct tegra_pcie_soc { static void apply_bad_link_workaround(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); u32 current_link_width; u16 val; @@ -349,7 +349,7 @@ static void apply_bad_link_workaround(struct pcie_port *pp) static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) { - struct tegra_pcie_dw *pcie = arg; + struct tegra194_pcie *pcie = arg; struct dw_pcie *pci = &pcie->pci; struct pcie_port *pp = &pci->pp; u32 val, tmp; @@ -420,7 +420,7 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie) +static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie) { u32 val; @@ -448,7 +448,7 @@ static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie) static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) { - struct tegra_pcie_dw *pcie = arg; + struct tegra194_pcie *pcie = arg; struct dw_pcie *pci = &pcie->pci; u32 val, speed; @@ -494,7 +494,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) { - struct tegra_pcie_dw *pcie = arg; + struct tegra194_pcie *pcie = arg; struct dw_pcie_ep *ep = &pcie->pci.ep; int spurious = 1; u32 status_l0, status_l1, link_status; @@ -537,7 +537,7 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) return IRQ_HANDLED; } -static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, +static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { /* @@ -554,7 +554,7 @@ static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, return pci_generic_config_read(bus, devfn, where, size, val); } -static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, +static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { /* @@ -571,8 +571,8 @@ static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, static struct pci_ops tegra_pci_ops = { .map_bus = dw_pcie_own_conf_map_bus, - .read = tegra_pcie_dw_rd_own_conf, - .write = tegra_pcie_dw_wr_own_conf, + .read = tegra194_pcie_rd_own_conf, + .write = tegra194_pcie_wr_own_conf, }; #if defined(CONFIG_PCIEASPM) @@ -594,7 +594,7 @@ static const u32 event_cntr_data_offset[] = { 0x1dc }; -static void disable_aspm_l11(struct tegra_pcie_dw *pcie) +static void disable_aspm_l11(struct tegra194_pcie *pcie) { u32 val; @@ -603,7 +603,7 @@ static void disable_aspm_l11(struct tegra_pcie_dw *pcie) dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); } -static void disable_aspm_l12(struct tegra_pcie_dw *pcie) +static void disable_aspm_l12(struct 
tegra194_pcie *pcie) { u32 val; @@ -612,7 +612,7 @@ static void disable_aspm_l12(struct tegra_pcie_dw *pcie) dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); } -static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) +static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event) { u32 val; @@ -629,7 +629,7 @@ static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) static int aspm_state_cnt(struct seq_file *s, void *data) { - struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *) + struct tegra194_pcie *pcie = (struct tegra194_pcie *) dev_get_drvdata(s->private); u32 val; @@ -660,7 +660,7 @@ static int aspm_state_cnt(struct seq_file *s, void *data) return 0; } -static void init_host_aspm(struct tegra_pcie_dw *pcie) +static void init_host_aspm(struct tegra194_pcie *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val; @@ -688,22 +688,22 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie) dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val); } -static void init_debugfs(struct tegra_pcie_dw *pcie) +static void init_debugfs(struct tegra194_pcie *pcie) { debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs, aspm_state_cnt); } #else -static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } -static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } -static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } -static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; } +static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; } +static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; } +static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; } +static inline void init_debugfs(struct tegra194_pcie *pcie) { return; } #endif static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); u32 val; u16 val_w; @@ -741,7 +741,7 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); u32 val; /* Enable legacy interrupt generation */ @@ -762,7 +762,7 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp) static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); u32 val; /* Enable MSI interrupt generation */ @@ -775,7 +775,7 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp) static void tegra_pcie_enable_interrupts(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); /* Clear interrupt statuses before enabling interrupts */ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); @@ -800,7 +800,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp) tegra_pcie_enable_msi_interrupts(pp); } -static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) +static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val, offset, i; @@ -853,10 +853,10 @@ static void 
config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); } -static int tegra_pcie_dw_host_init(struct pcie_port *pp) +static int tegra194_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); u32 val; pp->bridge->ops = &tegra_pci_ops; @@ -914,10 +914,10 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp) return 0; } -static int tegra_pcie_dw_start_link(struct dw_pcie *pci) +static int tegra194_pcie_start_link(struct dw_pcie *pci) { u32 val, offset, speed, tmp; - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); struct pcie_port *pp = &pci->pp; bool retry = true; @@ -982,7 +982,7 @@ retry_link: val &= ~PCI_DLF_EXCHANGE_ENABLE; dw_pcie_writel_dbi(pci, offset, val); - tegra_pcie_dw_host_init(pp); + tegra194_pcie_host_init(pp); dw_pcie_setup_rc(pp); retry = false; @@ -998,32 +998,32 @@ retry_link: return 0; } -static int tegra_pcie_dw_link_up(struct dw_pcie *pci) +static int tegra194_pcie_link_up(struct dw_pcie *pci) { - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); return !!(val & PCI_EXP_LNKSTA_DLLLA); } -static void tegra_pcie_dw_stop_link(struct dw_pcie *pci) +static void tegra194_pcie_stop_link(struct dw_pcie *pci) { - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); disable_irq(pcie->pex_rst_irq); } static const struct dw_pcie_ops tegra_dw_pcie_ops = { - .link_up = tegra_pcie_dw_link_up, - .start_link = tegra_pcie_dw_start_link, - .stop_link = tegra_pcie_dw_stop_link, + .link_up = tegra194_pcie_link_up, + .start_link = tegra194_pcie_start_link, + .stop_link = tegra194_pcie_stop_link, }; -static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { - .host_init = tegra_pcie_dw_host_init, +static const struct dw_pcie_host_ops tegra194_pcie_host_ops = { + .host_init = tegra194_pcie_host_init, }; -static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) +static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie) { unsigned int phy_count = pcie->phy_count; @@ -1033,7 +1033,7 @@ static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) } } -static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie) +static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie) { unsigned int i; int ret; @@ -1060,7 +1060,7 @@ phy_exit: return ret; } -static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) +static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie) { struct platform_device *pdev = to_platform_device(pcie->dev); struct device_node *np = pcie->dev->of_node; @@ -1156,7 +1156,7 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) return 0; } -static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie, bool enable) { struct mrq_uphy_response resp; @@ -1184,7 +1184,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, return tegra_bpmp_transfer(pcie->bpmp, &msg); } -static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, +static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie, bool enable) { struct mrq_uphy_response resp; @@ -1212,7 +1212,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, return 
tegra_bpmp_transfer(pcie->bpmp, &msg); } -static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) +static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie) { struct pcie_port *pp = &pcie->pci.pp; struct pci_bus *child, *root_bus = NULL; @@ -1250,7 +1250,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) } } -static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) +static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie) { pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); if (IS_ERR(pcie->slot_ctl_3v3)) { @@ -1271,7 +1271,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) return 0; } -static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie) +static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie) { int ret; @@ -1309,7 +1309,7 @@ fail_12v_enable: return ret; } -static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) +static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie) { if (pcie->slot_ctl_12v) regulator_disable(pcie->slot_ctl_12v); @@ -1317,7 +1317,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) regulator_disable(pcie->slot_ctl_3v3); } -static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie, +static int tegra_pcie_config_controller(struct tegra194_pcie *pcie, bool en_hw_hot_rst) { int ret; @@ -1414,7 +1414,7 @@ fail_slot_reg_en: return ret; } -static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie) +static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie) { int ret; @@ -1442,7 +1442,7 @@ static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie) pcie->cid, ret); } -static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) +static int tegra_pcie_init_controller(struct tegra194_pcie *pcie) { struct dw_pcie *pci = &pcie->pci; struct pcie_port *pp = &pci->pp; @@ -1452,7 +1452,7 @@ static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) if (ret < 0) return ret; - pp->ops = &tegra_pcie_dw_host_ops; + pp->ops = &tegra194_pcie_host_ops; ret = dw_pcie_host_init(pp); if (ret < 0) { @@ -1467,11 +1467,11 @@ fail_host_init: return ret; } -static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) +static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie) { u32 val; - if (!tegra_pcie_dw_link_up(&pcie->pci)) + if (!tegra194_pcie_link_up(&pcie->pci)) return 0; val = appl_readl(pcie, APPL_RADM_STATUS); @@ -1483,12 +1483,12 @@ static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) 1, PME_ACK_TIMEOUT); } -static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) +static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie) { u32 data; int err; - if (!tegra_pcie_dw_link_up(&pcie->pci)) { + if (!tegra194_pcie_link_up(&pcie->pci)) { dev_dbg(pcie->dev, "PCIe link is not up...!\n"); return; } @@ -1545,15 +1545,15 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) appl_writel(pcie, data, APPL_PINMUX); } -static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) +static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie) { tegra_pcie_downstream_dev_to_D0(pcie); dw_pcie_host_deinit(&pcie->pci.pp); - tegra_pcie_dw_pme_turnoff(pcie); + tegra194_pcie_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); } -static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) +static int tegra_pcie_config_rp(struct tegra194_pcie *pcie) { struct device *dev = 
pcie->dev; char *name; @@ -1580,7 +1580,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) goto fail_pm_get_sync; } - pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); + pcie->link_state = tegra194_pcie_link_up(&pcie->pci); if (!pcie->link_state) { ret = -ENOMEDIUM; goto fail_host_init; @@ -1605,7 +1605,7 @@ fail_pm_get_sync: return ret; } -static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) +static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie) { u32 val; int ret; @@ -1644,7 +1644,7 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); } -static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) +static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie) { struct dw_pcie *pci = &pcie->pci; struct dw_pcie_ep *ep = &pci->ep; @@ -1809,7 +1809,7 @@ fail_pll_init: static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) { - struct tegra_pcie_dw *pcie = arg; + struct tegra194_pcie *pcie = arg; if (gpiod_get_value(pcie->pex_rst_gpiod)) pex_ep_event_pex_rst_assert(pcie); @@ -1819,7 +1819,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) return IRQ_HANDLED; } -static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) +static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq) { /* Tegra194 supports only INTA */ if (irq > 1) @@ -1831,7 +1831,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) return 0; } -static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) +static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq) { if (unlikely(irq > 31)) return -EINVAL; @@ -1841,7 +1841,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) return 0; } -static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) +static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq) { struct dw_pcie_ep *ep = &pcie->pci.ep; @@ -1855,7 +1855,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct tegra194_pcie *pcie = to_tegra_pcie(pci); switch (type) { case PCI_EPC_IRQ_LEGACY: @@ -1896,7 +1896,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = { .get_features = tegra_pcie_ep_get_features, }; -static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, +static int tegra_pcie_config_ep(struct tegra194_pcie *pcie, struct platform_device *pdev) { struct dw_pcie *pci = &pcie->pci; @@ -1957,12 +1957,12 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, return 0; } -static int tegra_pcie_dw_probe(struct platform_device *pdev) +static int tegra194_pcie_probe(struct platform_device *pdev) { - const struct tegra_pcie_dw_of_data *data; + const struct tegra194_pcie_of_data *data; struct device *dev = &pdev->dev; struct resource *atu_dma_res; - struct tegra_pcie_dw *pcie; + struct tegra194_pcie *pcie; struct pcie_port *pp; struct dw_pcie *pci; struct phy **phys; @@ -1988,7 +1988,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) pcie->dev = &pdev->dev; pcie->mode = (enum dw_pcie_device_mode)data->mode; - ret = tegra_pcie_dw_parse_dt(pcie); + ret = tegra194_pcie_parse_dt(pcie); if (ret < 0) { const char *level = KERN_ERR; @@ -2146,9 +2146,9 @@ fail: return ret; } -static int tegra_pcie_dw_remove(struct 
platform_device *pdev) +static int tegra194_pcie_remove(struct platform_device *pdev) { - struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); + struct tegra194_pcie *pcie = platform_get_drvdata(pdev); if (!pcie->link_state) return 0; @@ -2164,9 +2164,9 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev) return 0; } -static int tegra_pcie_dw_suspend_late(struct device *dev) +static int tegra194_pcie_suspend_late(struct device *dev) { - struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + struct tegra194_pcie *pcie = dev_get_drvdata(dev); u32 val; if (!pcie->link_state) @@ -2182,9 +2182,9 @@ static int tegra_pcie_dw_suspend_late(struct device *dev) return 0; } -static int tegra_pcie_dw_suspend_noirq(struct device *dev) +static int tegra194_pcie_suspend_noirq(struct device *dev) { - struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + struct tegra194_pcie *pcie = dev_get_drvdata(dev); if (!pcie->link_state) return 0; @@ -2193,15 +2193,15 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev) pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN); tegra_pcie_downstream_dev_to_D0(pcie); - tegra_pcie_dw_pme_turnoff(pcie); + tegra194_pcie_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); return 0; } -static int tegra_pcie_dw_resume_noirq(struct device *dev) +static int tegra194_pcie_resume_noirq(struct device *dev) { - struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + struct tegra194_pcie *pcie = dev_get_drvdata(dev); int ret; if (!pcie->link_state) @@ -2211,7 +2211,7 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev) if (ret < 0) return ret; - ret = tegra_pcie_dw_host_init(&pcie->pci.pp); + ret = tegra194_pcie_host_init(&pcie->pci.pp); if (ret < 0) { dev_err(dev, "Failed to init host: %d\n", ret); goto fail_host_init; @@ -2219,7 +2219,7 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev) dw_pcie_setup_rc(&pcie->pci.pp); - ret = tegra_pcie_dw_start_link(&pcie->pci); + ret = tegra194_pcie_start_link(&pcie->pci); if (ret < 0) goto fail_host_init; @@ -2234,9 +2234,9 @@ fail_host_init: return ret; } -static int tegra_pcie_dw_resume_early(struct device *dev) +static int tegra194_pcie_resume_early(struct device *dev) { - struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + struct tegra194_pcie *pcie = dev_get_drvdata(dev); u32 val; if (pcie->mode == DW_PCIE_EP_TYPE) { @@ -2259,9 +2259,9 @@ static int tegra_pcie_dw_resume_early(struct device *dev) return 0; } -static void tegra_pcie_dw_shutdown(struct platform_device *pdev) +static void tegra194_pcie_shutdown(struct platform_device *pdev) { - struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); + struct tegra194_pcie *pcie = platform_get_drvdata(pdev); if (!pcie->link_state) return; @@ -2273,50 +2273,50 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev) if (IS_ENABLED(CONFIG_PCI_MSI)) disable_irq(pcie->pci.pp.msi_irq); - tegra_pcie_dw_pme_turnoff(pcie); + tegra194_pcie_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); } -static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = { +static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = { .mode = DW_PCIE_RC_TYPE, }; -static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = { +static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = { .mode = DW_PCIE_EP_TYPE, }; -static const struct of_device_id tegra_pcie_dw_of_match[] = { +static const struct of_device_id tegra194_pcie_of_match[] = { { .compatible = "nvidia,tegra194-pcie", - .data = 
&tegra_pcie_dw_rc_of_data, + .data = &tegra194_pcie_rc_of_data, }, { .compatible = "nvidia,tegra194-pcie-ep", - .data = &tegra_pcie_dw_ep_of_data, + .data = &tegra194_pcie_ep_of_data, }, {}, }; -static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { - .suspend_late = tegra_pcie_dw_suspend_late, - .suspend_noirq = tegra_pcie_dw_suspend_noirq, - .resume_noirq = tegra_pcie_dw_resume_noirq, - .resume_early = tegra_pcie_dw_resume_early, +static const struct dev_pm_ops tegra194_pcie_pm_ops = { + .suspend_late = tegra194_pcie_suspend_late, + .suspend_noirq = tegra194_pcie_suspend_noirq, + .resume_noirq = tegra194_pcie_resume_noirq, + .resume_early = tegra194_pcie_resume_early, }; -static struct platform_driver tegra_pcie_dw_driver = { - .probe = tegra_pcie_dw_probe, - .remove = tegra_pcie_dw_remove, - .shutdown = tegra_pcie_dw_shutdown, +static struct platform_driver tegra194_pcie_driver = { + .probe = tegra194_pcie_probe, + .remove = tegra194_pcie_remove, + .shutdown = tegra194_pcie_shutdown, .driver = { .name = "tegra194-pcie", - .pm = &tegra_pcie_dw_pm_ops, - .of_match_table = tegra_pcie_dw_of_match, + .pm = &tegra194_pcie_pm_ops, + .of_match_table = tegra194_pcie_of_match, }, }; -module_platform_driver(tegra_pcie_dw_driver); +module_platform_driver(tegra194_pcie_driver); -MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); +MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match); MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>"); MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index d05be942956e..b45ac3754242 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -61,9 +61,9 @@ #define PCL_RDLH_LINK_UP BIT(1) #define PCL_XMLH_LINK_UP BIT(0) -struct uniphier_pcie_priv { - void __iomem *base; +struct uniphier_pcie { struct dw_pcie pci; + void __iomem *base; struct clk *clk; struct reset_control *rst; struct phy *phy; @@ -72,62 +72,62 @@ struct uniphier_pcie_priv { #define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) -static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv, +static void uniphier_pcie_ltssm_enable(struct uniphier_pcie *pcie, bool enable) { u32 val; - val = readl(priv->base + PCL_APP_READY_CTRL); + val = readl(pcie->base + PCL_APP_READY_CTRL); if (enable) val |= PCL_APP_LTSSM_ENABLE; else val &= ~PCL_APP_LTSSM_ENABLE; - writel(val, priv->base + PCL_APP_READY_CTRL); + writel(val, pcie->base + PCL_APP_READY_CTRL); } -static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv) +static void uniphier_pcie_init_rc(struct uniphier_pcie *pcie) { u32 val; /* set RC MODE */ - val = readl(priv->base + PCL_MODE); + val = readl(pcie->base + PCL_MODE); val |= PCL_MODE_REGEN; val &= ~PCL_MODE_REGVAL; - writel(val, priv->base + PCL_MODE); + writel(val, pcie->base + PCL_MODE); /* use auxiliary power detection */ - val = readl(priv->base + PCL_APP_PM0); + val = readl(pcie->base + PCL_APP_PM0); val |= PCL_SYS_AUX_PWR_DET; - writel(val, priv->base + PCL_APP_PM0); + writel(val, pcie->base + PCL_APP_PM0); /* assert PERST# */ - val = readl(priv->base + PCL_PINCTRL0); + val = readl(pcie->base + PCL_PINCTRL0); val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL | PCL_PERST_PLDN_REGVAL); val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN | PCL_PERST_PLDN_REGEN; - writel(val, priv->base + PCL_PINCTRL0); + writel(val, pcie->base + PCL_PINCTRL0); - uniphier_pcie_ltssm_enable(priv, false); + uniphier_pcie_ltssm_enable(pcie, false); 
usleep_range(100000, 200000); /* deassert PERST# */ - val = readl(priv->base + PCL_PINCTRL0); + val = readl(pcie->base + PCL_PINCTRL0); val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN; - writel(val, priv->base + PCL_PINCTRL0); + writel(val, pcie->base + PCL_PINCTRL0); } -static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv) +static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie) { u32 status; int ret; /* wait PIPE clock */ - ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status, + ret = readl_poll_timeout(pcie->base + PCL_PIPEMON, status, status & PCL_PCLK_ALIVE, 100000, 1000000); if (ret) { - dev_err(priv->pci.dev, + dev_err(pcie->pci.dev, "Failed to initialize controller in RC mode\n"); return ret; } @@ -137,10 +137,10 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv) static int uniphier_pcie_link_up(struct dw_pcie *pci) { - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); u32 val, mask; - val = readl(priv->base + PCL_STATUS_LINK); + val = readl(pcie->base + PCL_STATUS_LINK); mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP; return (val & mask) == mask; @@ -148,39 +148,40 @@ static int uniphier_pcie_link_up(struct dw_pcie *pci) static int uniphier_pcie_start_link(struct dw_pcie *pci) { - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); - uniphier_pcie_ltssm_enable(priv, true); + uniphier_pcie_ltssm_enable(pcie, true); return 0; } static void uniphier_pcie_stop_link(struct dw_pcie *pci) { - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); - uniphier_pcie_ltssm_enable(priv, false); + uniphier_pcie_ltssm_enable(pcie, false); } -static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv) +static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie) { - writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT); - writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX); + writel(PCL_RCV_INT_ALL_ENABLE, pcie->base + PCL_RCV_INT); + writel(PCL_RCV_INTX_ALL_ENABLE, pcie->base + PCL_RCV_INTX); } + static void uniphier_pcie_irq_mask(struct irq_data *d) { struct pcie_port *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); unsigned long flags; u32 val; raw_spin_lock_irqsave(&pp->lock, flags); - val = readl(priv->base + PCL_RCV_INTX); + val = readl(pcie->base + PCL_RCV_INTX); val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT); - writel(val, priv->base + PCL_RCV_INTX); + writel(val, pcie->base + PCL_RCV_INTX); raw_spin_unlock_irqrestore(&pp->lock, flags); } @@ -189,15 +190,15 @@ static void uniphier_pcie_irq_unmask(struct irq_data *d) { struct pcie_port *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); unsigned long flags; u32 val; raw_spin_lock_irqsave(&pp->lock, flags); - val = readl(priv->base + PCL_RCV_INTX); + val = readl(pcie->base + PCL_RCV_INTX); val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT); - writel(val, priv->base + PCL_RCV_INTX); + writel(val, pcie->base + PCL_RCV_INTX); raw_spin_unlock_irqrestore(&pp->lock, flags); } @@ -226,13 +227,13 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc) { struct pcie_port *pp = irq_desc_get_handler_data(desc); 
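/*
 * Illustrative sketch (editor's addition, not from the kernel tree): the
 * uniphier INTx mask/unmask callbacks above follow the usual masked-bit
 * pattern: take the host lock, read the mask register, flip the bit
 * derived from the hwirq, and write the value back. Below is a
 * self-contained plain-C model of that pattern; the register, lock and
 * shift are simulated stand-ins, not the driver's.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RCV_INTX_MASK_SHIFT 16	/* mirrors the role of PCL_RCV_INTX_MASK_SHIFT */

static uint32_t intx_reg;	/* stands in for the PCL_RCV_INTX register */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Mask one INTx line: set its bit in the mask field under the lock. */
static void intx_mask(unsigned int hwirq)
{
	pthread_mutex_lock(&lock);
	intx_reg |= 1u << (hwirq + RCV_INTX_MASK_SHIFT);
	pthread_mutex_unlock(&lock);
}

/* Unmask one INTx line: clear the same bit under the lock. */
static void intx_unmask(unsigned int hwirq)
{
	pthread_mutex_lock(&lock);
	intx_reg &= ~(1u << (hwirq + RCV_INTX_MASK_SHIFT));
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	intx_mask(2);
	printf("after mask:   %#x\n", intx_reg);	/* 0x40000 */
	intx_unmask(2);
	printf("after unmask: %#x\n", intx_reg);	/* 0 */
	return 0;
}
/* End of sketch. */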
struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long reg; u32 val, bit; /* INT for debug */ - val = readl(priv->base + PCL_RCV_INT); + val = readl(pcie->base + PCL_RCV_INT); if (val & PCL_CFG_BW_MGT_STATUS) dev_dbg(pci->dev, "Link Bandwidth Management Event\n"); @@ -243,16 +244,16 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc) if (val & PCL_CFG_PME_MSI_STATUS) dev_dbg(pci->dev, "PME Interrupt\n"); - writel(val, priv->base + PCL_RCV_INT); + writel(val, pcie->base + PCL_RCV_INT); /* INTx */ chained_irq_enter(chip, desc); - val = readl(priv->base + PCL_RCV_INTX); + val = readl(pcie->base + PCL_RCV_INTX); reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val); for_each_set_bit(bit, ®, PCI_NUM_INTX) - generic_handle_domain_irq(priv->legacy_irq_domain, bit); + generic_handle_domain_irq(pcie->legacy_irq_domain, bit); chained_irq_exit(chip, desc); } @@ -260,7 +261,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc) static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); struct device_node *np = pci->dev->of_node; struct device_node *np_intc; int ret = 0; @@ -278,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) goto out_put_node; } - priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, + pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, &uniphier_intx_domain_ops, pp); - if (!priv->legacy_irq_domain) { + if (!pcie->legacy_irq_domain) { dev_err(pci->dev, "Failed to get INTx domain\n"); ret = -ENODEV; goto out_put_node; @@ -297,14 +298,14 @@ out_put_node: static int uniphier_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct uniphier_pcie *pcie = to_uniphier_pcie(pci); int ret; ret = uniphier_pcie_config_legacy_irq(pp); if (ret) return ret; - uniphier_pcie_irq_enable(priv); + uniphier_pcie_irq_enable(pcie); return 0; } @@ -313,36 +314,36 @@ static const struct dw_pcie_host_ops uniphier_pcie_host_ops = { .host_init = uniphier_pcie_host_init, }; -static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv) +static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie) { int ret; - ret = clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(pcie->clk); if (ret) return ret; - ret = reset_control_deassert(priv->rst); + ret = reset_control_deassert(pcie->rst); if (ret) goto out_clk_disable; - uniphier_pcie_init_rc(priv); + uniphier_pcie_init_rc(pcie); - ret = phy_init(priv->phy); + ret = phy_init(pcie->phy); if (ret) goto out_rst_assert; - ret = uniphier_pcie_wait_rc(priv); + ret = uniphier_pcie_wait_rc(pcie); if (ret) goto out_phy_exit; return 0; out_phy_exit: - phy_exit(priv->phy); + phy_exit(pcie->phy); out_rst_assert: - reset_control_assert(priv->rst); + reset_control_assert(pcie->rst); out_clk_disable: - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(pcie->clk); return ret; } @@ -356,41 +357,41 @@ static const struct dw_pcie_ops dw_pcie_ops = { static int uniphier_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct uniphier_pcie_priv *priv; + struct uniphier_pcie *pcie; int ret; - priv = devm_kzalloc(dev, sizeof(*priv), 
GFP_KERNEL); - if (!priv) + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) return -ENOMEM; - priv->pci.dev = dev; - priv->pci.ops = &dw_pcie_ops; + pcie->pci.dev = dev; + pcie->pci.ops = &dw_pcie_ops; - priv->base = devm_platform_ioremap_resource_byname(pdev, "link"); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + pcie->base = devm_platform_ioremap_resource_byname(pdev, "link"); + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); - priv->clk = devm_clk_get(dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); + pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pcie->clk)) + return PTR_ERR(pcie->clk); - priv->rst = devm_reset_control_get_shared(dev, NULL); - if (IS_ERR(priv->rst)) - return PTR_ERR(priv->rst); + pcie->rst = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(pcie->rst)) + return PTR_ERR(pcie->rst); - priv->phy = devm_phy_optional_get(dev, "pcie-phy"); - if (IS_ERR(priv->phy)) - return PTR_ERR(priv->phy); + pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); - platform_set_drvdata(pdev, priv); + platform_set_drvdata(pdev, pcie); - ret = uniphier_pcie_host_enable(priv); + ret = uniphier_pcie_host_enable(pcie); if (ret) return ret; - priv->pci.pp.ops = &uniphier_pcie_host_ops; + pcie->pci.pp.ops = &uniphier_pcie_host_ops; - return dw_pcie_host_init(&priv->pci.pp); + return dw_pcie_host_init(&pcie->pci.pp); } static const struct of_device_id uniphier_pcie_match[] = { diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c index 306950272fd6..d7b7350f02dd 100644 --- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c +++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c @@ -34,31 +34,31 @@ #define PF_DBG_WE BIT(31) #define PF_DBG_PABR BIT(27) -#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev) +#define to_ls_g4_pcie(x) platform_get_drvdata((x)->pdev) -struct ls_pcie_g4 { +struct ls_g4_pcie { struct mobiveil_pcie pci; struct delayed_work dwork; int irq; }; -static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off) +static inline u32 ls_g4_pcie_pf_readl(struct ls_g4_pcie *pcie, u32 off) { return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off); } -static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie, +static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie, u32 off, u32 val) { iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off); } -static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci) +static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci) { - struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci); + struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci); u32 state; - state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); state = state & PF_DBG_LTSSM_MASK; if (state == PF_DBG_LTSSM_L0) @@ -67,14 +67,14 @@ static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci) return 0; } -static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie) +static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie) { struct mobiveil_pcie *mv_pci = &pcie->pci; mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB); } -static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie) +static void ls_g4_pcie_enable_interrupt(struct ls_g4_pcie *pcie) { struct mobiveil_pcie *mv_pci = &pcie->pci; u32 val; @@ -87,7 +87,7 @@ static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie) mobiveil_csr_writel(mv_pci, val, 
PAB_INTP_AMBA_MISC_ENB); } -static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie) +static int ls_g4_pcie_reinit_hw(struct ls_g4_pcie *pcie) { struct mobiveil_pcie *mv_pci = &pcie->pci; struct device *dev = &mv_pci->pdev->dev; @@ -97,7 +97,7 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie) /* Poll for pab_csb_reset to set and PAB activity to clear */ do { usleep_range(10, 15); - val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT); + val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_INT_STAT); act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT); } while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--); if (to < 0) { @@ -106,22 +106,22 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie) } /* clear PEX_RESET bit in PEX_PF0_DBG register */ - val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); val |= PF_DBG_WE; - ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val); + ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val); - val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); val |= PF_DBG_PABR; - ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val); + ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val); - val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); val &= ~PF_DBG_WE; - ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val); + ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val); mobiveil_host_init(mv_pci, true); to = 100; - while (!ls_pcie_g4_link_up(mv_pci) && to--) + while (!ls_g4_pcie_link_up(mv_pci) && to--) usleep_range(200, 250); if (to < 0) { dev_err(dev, "PCIe link training timeout\n"); @@ -131,9 +131,9 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie) return 0; } -static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id) +static irqreturn_t ls_g4_pcie_isr(int irq, void *dev_id) { - struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id; + struct ls_g4_pcie *pcie = (struct ls_g4_pcie *)dev_id; struct mobiveil_pcie *mv_pci = &pcie->pci; u32 val; @@ -142,7 +142,7 @@ static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id) return IRQ_NONE; if (val & PAB_INTP_RESET) { - ls_pcie_g4_disable_interrupt(pcie); + ls_g4_pcie_disable_interrupt(pcie); schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1)); } @@ -151,9 +151,9 @@ static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci) +static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci) { - struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci); + struct ls_g4_pcie *pcie = to_ls_g4_pcie(mv_pci); struct platform_device *pdev = mv_pci->pdev; struct device *dev = &pdev->dev; int ret; @@ -162,7 +162,7 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci) if (pcie->irq < 0) return pcie->irq; - ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr, + ret = devm_request_irq(dev, pcie->irq, ls_g4_pcie_isr, IRQF_SHARED, pdev->name, pcie); if (ret) { dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret); @@ -172,11 +172,11 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci) return 0; } -static void ls_pcie_g4_reset(struct work_struct *work) +static void ls_g4_pcie_reset(struct work_struct *work) { struct delayed_work *dwork = container_of(work, struct delayed_work, work); - struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork); + struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork); struct mobiveil_pcie *mv_pci = &pcie->pci; u16 ctrl; @@ -184,26 +184,26 @@ static void 
ls_pcie_g4_reset(struct work_struct *work) ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL); - if (!ls_pcie_g4_reinit_hw(pcie)) + if (!ls_g4_pcie_reinit_hw(pcie)) return; - ls_pcie_g4_enable_interrupt(pcie); + ls_g4_pcie_enable_interrupt(pcie); } -static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = { - .interrupt_init = ls_pcie_g4_interrupt_init, +static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = { + .interrupt_init = ls_g4_pcie_interrupt_init, }; -static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = { - .link_up = ls_pcie_g4_link_up, +static const struct mobiveil_pab_ops ls_g4_pcie_pab_ops = { + .link_up = ls_g4_pcie_link_up, }; -static int __init ls_pcie_g4_probe(struct platform_device *pdev) +static int __init ls_g4_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct mobiveil_pcie *mv_pci; - struct ls_pcie_g4 *pcie; + struct ls_g4_pcie *pcie; struct device_node *np = dev->of_node; int ret; @@ -220,13 +220,13 @@ static int __init ls_pcie_g4_probe(struct platform_device *pdev) mv_pci = &pcie->pci; mv_pci->pdev = pdev; - mv_pci->ops = &ls_pcie_g4_pab_ops; - mv_pci->rp.ops = &ls_pcie_g4_rp_ops; + mv_pci->ops = &ls_g4_pcie_pab_ops; + mv_pci->rp.ops = &ls_g4_pcie_rp_ops; mv_pci->rp.bridge = bridge; platform_set_drvdata(pdev, pcie); - INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset); + INIT_DELAYED_WORK(&pcie->dwork, ls_g4_pcie_reset); ret = mobiveil_pcie_host_probe(mv_pci); if (ret) { @@ -234,22 +234,22 @@ static int __init ls_pcie_g4_probe(struct platform_device *pdev) return ret; } - ls_pcie_g4_enable_interrupt(pcie); + ls_g4_pcie_enable_interrupt(pcie); return 0; } -static const struct of_device_id ls_pcie_g4_of_match[] = { +static const struct of_device_id ls_g4_pcie_of_match[] = { { .compatible = "fsl,lx2160a-pcie", }, { }, }; -static struct platform_driver ls_pcie_g4_driver = { +static struct platform_driver ls_g4_pcie_driver = { .driver = { .name = "layerscape-pcie-gen4", - .of_match_table = ls_pcie_g4_of_match, + .of_match_table = ls_g4_pcie_of_match, .suppress_bind_attrs = true, }, }; -builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe); +builtin_platform_driver_probe(ls_g4_pcie_driver, ls_g4_pcie_probe); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index c3b725afa11f..4f5b44827d21 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -115,6 +115,7 @@ #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) +#define PCIE_MSI_ALL_MASK GENMASK(31, 0) #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) #define PCIE_MSI_DATA_MASK GENMASK(15, 0) @@ -570,6 +571,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); /* Clear all interrupts */ + advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); @@ -582,7 +584,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); /* Unmask all MSIs */ - advk_writel(pcie, 0, PCIE_MSI_MASK_REG); + advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); /* Enable summary interrupt for GIC SPI source */ reg = PCIE_IRQ_ALL_MASK & 
(~PCIE_IRQ_ENABLE_INTS_MASK); @@ -872,11 +874,15 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, return PCI_BRIDGE_EMUL_HANDLED; } - case PCI_CAP_LIST_ID: case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); return PCI_BRIDGE_EMUL_HANDLED; + default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } @@ -890,10 +896,6 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, struct advk_pcie *pcie = bridge->data; switch (reg) { - case PCI_EXP_DEVCTL: - advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); - break; - case PCI_EXP_LNKCTL: advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); if (new & PCI_EXP_LNKCTL_RL) @@ -915,6 +917,12 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, advk_writel(pcie, new, PCIE_ISR0_REG); break; + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCTL2: + advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); + break; + default: break; } @@ -953,6 +961,9 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) /* Support interrupt A for MSI feature */ bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; + /* Aardvark HW provides PCIe Capability structure in version 2 */ + bridge->pcie_conf.cap = cpu_to_le16(2); + /* Indicates supports for Completion Retry Status */ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); @@ -1017,10 +1028,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, u32 reg; int ret; - if (!advk_pcie_valid_device(pcie, bus, devfn)) { - *val = 0xffffffff; + if (!advk_pcie_valid_device(pcie, bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; - } if (pci_is_root_bus(bus)) return pci_bridge_emul_conf_read(&pcie->bridge, where, @@ -1383,7 +1392,7 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie) msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); - msi_status = msi_val & ~msi_mask; + msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK); for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { if (!(BIT(msi_idx) & msi_status)) @@ -1535,8 +1544,7 @@ static int advk_pcie_probe(struct platform_device *pdev) * only PIO for issuing configuration transfers which does * not use PCIe window configuration. */ - if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 && - type != IORESOURCE_IO) + if (type != IORESOURCE_MEM && type != IORESOURCE_IO) continue; /* @@ -1544,8 +1552,7 @@ static int advk_pcie_probe(struct platform_device *pdev) * configuration is set to transparent memory access so it * does not need window configuration. 
*/ - if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) && - entry->offset == 0) + if (type == IORESOURCE_MEM && entry->offset == 0) continue; /* @@ -1677,20 +1684,64 @@ static int advk_pcie_remove(struct platform_device *pdev) { struct advk_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + u32 val; int i; + /* Remove PCI bus with all devices */ pci_lock_rescan_remove(); pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); + /* Disable Root Bridge I/O space, memory space and bus mastering */ + val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); + val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG); + + /* Disable MSI */ + val = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + val &= ~PCIE_CORE_CTRL2_MSI_ENABLE; + advk_writel(pcie, val, PCIE_CORE_CTRL2_REG); + + /* Clear MSI address */ + advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG); + advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG); + + /* Mask all interrupts */ + advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG); + + /* Clear all interrupts */ + advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + + /* Remove IRQ domains */ advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); + /* Free config space for emulated root bridge */ + pci_bridge_emul_cleanup(&pcie->bridge); + + /* Assert PERST# signal which prepares PCIe card for power down */ + if (pcie->reset_gpio) + gpiod_set_value_cansleep(pcie->reset_gpio, 1); + + /* Disable link training */ + val = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + val &= ~LINK_TRAINING_EN; + advk_writel(pcie, val, PCIE_CORE_CTRL0_REG); + /* Disable outbound address windows mapping */ for (i = 0; i < OB_WIN_COUNT; i++) advk_pcie_disable_ob_win(pcie, i); + /* Disable phy */ + advk_pcie_disable_phy(pcie); + return 0; } diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 6733cb14e775..20ea2ee330b8 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -43,13 +43,12 @@ #include <linux/pci-ecam.h> #include <linux/delay.h> #include <linux/semaphore.h> -#include <linux/irqdomain.h> -#include <asm/irqdomain.h> -#include <asm/apic.h> #include <linux/irq.h> #include <linux/msi.h> #include <linux/hyperv.h> #include <linux/refcount.h> +#include <linux/irqdomain.h> +#include <linux/acpi.h> #include <asm/mshyperv.h> /* @@ -583,6 +582,265 @@ struct hv_pci_compl { static void hv_pci_onchannelcallback(void *context); +#ifdef CONFIG_X86 +#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED +#define FLOW_HANDLER handle_edge_irq +#define FLOW_NAME "edge" + +static int hv_pci_irqchip_init(void) +{ + return 0; +} + +static struct irq_domain *hv_pci_get_root_domain(void) +{ + return x86_vector_domain; +} + +static unsigned int hv_msi_get_int_vector(struct irq_data *data) +{ + struct irq_cfg *cfg = irqd_cfg(data); + + return cfg->vector; +} + +static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, + struct msi_desc *msi_desc) +{ + msi_entry->address.as_uint32 = msi_desc->msg.address_lo; + 
msi_entry->data.as_uint32 = msi_desc->msg.data; +} + +static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + return pci_msi_prepare(domain, dev, nvec, info); +} +#elif defined(CONFIG_ARM64) +/* + * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit + * of room at the start to allow for SPIs to be specified through ACPI and + * starting with a power of two to satisfy power of 2 multi-MSI requirement. + */ +#define HV_PCI_MSI_SPI_START 64 +#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START) +#define DELIVERY_MODE 0 +#define FLOW_HANDLER NULL +#define FLOW_NAME NULL +#define hv_msi_prepare NULL + +struct hv_pci_chip_data { + DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR); + struct mutex map_lock; +}; + +/* Hyper-V vPCI MSI GIC IRQ domain */ +static struct irq_domain *hv_msi_gic_irq_domain; + +/* Hyper-V PCI MSI IRQ chip */ +static struct irq_chip hv_arm64_msi_irq_chip = { + .name = "MSI", + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent +}; + +static unsigned int hv_msi_get_int_vector(struct irq_data *irqd) +{ + return irqd->parent_data->hwirq; +} + +static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, + struct msi_desc *msi_desc) +{ + msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) | + msi_desc->msg.address_lo; + msi_entry->data = msi_desc->msg.data; +} + +/* + * @nr_bm_irqs: Indicates the number of IRQs that were allocated from + * the bitmap. + * @nr_dom_irqs: Indicates the number of IRQs that were allocated from + * the parent domain. + */ +static void hv_pci_vec_irq_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_bm_irqs, + unsigned int nr_dom_irqs) +{ + struct hv_pci_chip_data *chip_data = domain->host_data; + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + int first = d->hwirq - HV_PCI_MSI_SPI_START; + int i; + + mutex_lock(&chip_data->map_lock); + bitmap_release_region(chip_data->spi_map, + first, + get_count_order(nr_bm_irqs)); + mutex_unlock(&chip_data->map_lock); + for (i = 0; i < nr_dom_irqs; i++) { + if (i) + d = irq_domain_get_irq_data(domain, virq + i); + irq_domain_reset_irq_data(d); + } + + irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs); +} + +static void hv_pci_vec_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs); +} + +static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain, + unsigned int nr_irqs, + irq_hw_number_t *hwirq) +{ + struct hv_pci_chip_data *chip_data = domain->host_data; + int index; + + /* Find and allocate region from the SPI bitmap */ + mutex_lock(&chip_data->map_lock); + index = bitmap_find_free_region(chip_data->spi_map, + HV_PCI_MSI_SPI_NR, + get_count_order(nr_irqs)); + mutex_unlock(&chip_data->map_lock); + if (index < 0) + return -ENOSPC; + + *hwirq = index + HV_PCI_MSI_SPI_START; + + return 0; +} + +static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + struct irq_data *d; + int ret; + + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + + ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); + if (ret) + return ret; + + /* + * Since the interrupt specifier is not coming from ACPI or DT, the + * 
trigger type will need to be set explicitly. Otherwise, it will be + * set to whatever is in the GIC configuration. + */ + d = irq_domain_get_irq_data(domain->parent, virq); + + return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); +} + +static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + irq_hw_number_t hwirq; + unsigned int i; + int ret; + + ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i, + hwirq + i); + if (ret) { + hv_pci_vec_irq_free(domain, virq, nr_irqs, i); + return ret; + } + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq + i, + &hv_arm64_msi_irq_chip, + domain->host_data); + pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i); + } + + return 0; +} + +/* + * Pick the first cpu as the irq affinity that can be temporarily used for + * composing MSI from the hypervisor. GIC will eventually set the right + * affinity for the irq and the 'unmask' will retarget the interrupt to that + * cpu. + */ +static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain, + struct irq_data *irqd, bool reserve) +{ + int cpu = cpumask_first(cpu_present_mask); + + irq_data_update_effective_affinity(irqd, cpumask_of(cpu)); + + return 0; +} + +static const struct irq_domain_ops hv_pci_domain_ops = { + .alloc = hv_pci_vec_irq_domain_alloc, + .free = hv_pci_vec_irq_domain_free, + .activate = hv_pci_vec_irq_domain_activate, +}; + +static int hv_pci_irqchip_init(void) +{ + static struct hv_pci_chip_data *chip_data; + struct fwnode_handle *fn = NULL; + int ret = -ENOMEM; + + chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); + if (!chip_data) + return ret; + + mutex_init(&chip_data->map_lock); + fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64"); + if (!fn) + goto free_chip; + + /* + * IRQ domain once enabled, should not be removed since there is no + * way to ensure that all the corresponding devices are also gone and + * no interrupts will be generated. + */ + hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR, + fn, &hv_pci_domain_ops, + chip_data); + + if (!hv_msi_gic_irq_domain) { + pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n"); + goto free_chip; + } + + return 0; + +free_chip: + kfree(chip_data); + if (fn) + irq_domain_free_fwnode(fn); + + return ret; +} + +static struct irq_domain *hv_pci_get_root_domain(void) +{ + return hv_msi_gic_irq_domain; +} +#endif /* CONFIG_ARM64 */ + /** * hv_pci_generic_compl() - Invoked for a completion packet * @context: Set up by the sender of the packet. 
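/*
 * Illustrative sketch (editor's addition, not from the kernel tree):
 * hv_pci_vec_alloc_device_irq() above relies on get_count_order() and
 * bitmap_find_free_region() to hand out naturally aligned power-of-two
 * runs of SPIs, which is what PCI multi-MSI requires. Below is a
 * self-contained plain-C model of that allocation rule; the helpers are
 * simplified stand-ins for the kernel's bitmap API.
 */
#include <stdint.h>
#include <stdio.h>

#define SPI_NR 64		/* model pool; the driver uses HV_PCI_MSI_SPI_NR */

static uint64_t spi_map;	/* one bit per SPI, 0 = free */

/* Smallest order such that (1 << order) >= n, like get_count_order(). */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

/* Claim a naturally aligned free run of 1 << order bits, or return -1. */
static int alloc_region(int order)
{
	unsigned int step = 1u << order;
	uint64_t mask = (step == 64) ? ~0ull : ((1ull << step) - 1);
	unsigned int i;

	for (i = 0; i + step <= SPI_NR; i += step) {
		if (!(spi_map & (mask << i))) {
			spi_map |= mask << i;
			return i;	/* the driver adds HV_PCI_MSI_SPI_START */
		}
	}
	return -1;			/* -ENOSPC in the driver */
}

int main(void)
{
	int a = alloc_region(count_order(3));	/* 3 MSIs -> order 2 -> 4 bits */
	int b = alloc_region(count_order(1));

	printf("regions at bits %d and %d\n", a, b);	/* 0 and 4 */
	return 0;
}
/* End of sketch. */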
@@ -1191,17 +1449,11 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, put_pcichild(hpdev); } -static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest, - bool force) -{ - struct irq_data *parent = data->parent_data; - - return parent->chip->irq_set_affinity(parent, dest, force); -} - static void hv_irq_mask(struct irq_data *data) { pci_msi_mask_irq(data); + if (data->parent_data->chip->irq_mask) + irq_chip_mask_parent(data); } /** @@ -1217,7 +1469,6 @@ static void hv_irq_mask(struct irq_data *data) static void hv_irq_unmask(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); - struct irq_cfg *cfg = irqd_cfg(data); struct hv_retarget_device_interrupt *params; struct hv_pcibus_device *hbus; struct cpumask *dest; @@ -1246,7 +1497,7 @@ static void hv_irq_unmask(struct irq_data *data) (hbus->hdev->dev_instance.b[7] << 8) | (hbus->hdev->dev_instance.b[6] & 0xf8) | PCI_FUNC(pdev->devfn); - params->int_target.vector = cfg->vector; + params->int_target.vector = hv_msi_get_int_vector(data); /* * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by @@ -1319,6 +1570,8 @@ exit_unlock: dev_err(&hbus->hdev->device, "%s() failed: %#llx", __func__, res); + if (data->parent_data->chip->irq_unmask) + irq_chip_unmask_parent(data); pci_msi_unmask_irq(data); } @@ -1347,7 +1600,7 @@ static u32 hv_compose_msi_req_v1( int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED; + int_pkt->int_desc.delivery_mode = DELIVERY_MODE; /* * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in @@ -1377,7 +1630,7 @@ static u32 hv_compose_msi_req_v2( int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED; + int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = hv_cpu_number_to_vp_number(cpu); @@ -1397,7 +1650,7 @@ static u32 hv_compose_msi_req_v3( int_pkt->int_desc.vector = vector; int_pkt->int_desc.reserved = 0; int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED; + int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = hv_cpu_number_to_vp_number(cpu); @@ -1419,7 +1672,6 @@ static u32 hv_compose_msi_req_v3( */ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { - struct irq_cfg *cfg = irqd_cfg(data); struct hv_pcibus_device *hbus; struct vmbus_channel *channel; struct hv_pci_dev *hpdev; @@ -1470,7 +1722,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, dest, hpdev->desc.win_slot.slot, - cfg->vector); + hv_msi_get_int_vector(data)); break; case PCI_PROTOCOL_VERSION_1_2: @@ -1478,14 +1730,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, dest, hpdev->desc.win_slot.slot, - cfg->vector); + hv_msi_get_int_vector(data)); break; case PCI_PROTOCOL_VERSION_1_4: size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3, dest, hpdev->desc.win_slot.slot, - cfg->vector); + hv_msi_get_int_vector(data)); break; default: @@ -1594,14 +1846,18 @@ return_null_message: static struct irq_chip hv_msi_irq_chip = { .name = "Hyper-V PCIe MSI", .irq_compose_msi_msg = 
hv_compose_msi_msg, - .irq_set_affinity = hv_set_affinity, + .irq_set_affinity = irq_chip_set_affinity_parent, +#ifdef CONFIG_X86 .irq_ack = irq_chip_ack_parent, +#elif defined(CONFIG_ARM64) + .irq_eoi = irq_chip_eoi_parent, +#endif .irq_mask = hv_irq_mask, .irq_unmask = hv_irq_unmask, }; static struct msi_domain_ops hv_msi_ops = { - .msi_prepare = pci_msi_prepare, + .msi_prepare = hv_msi_prepare, .msi_free = hv_msi_free, }; @@ -1625,12 +1881,12 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX); - hbus->msi_info.handler = handle_edge_irq; - hbus->msi_info.handler_name = "edge"; + hbus->msi_info.handler = FLOW_HANDLER; + hbus->msi_info.handler_name = FLOW_NAME; hbus->msi_info.data = hbus; hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode, &hbus->msi_info, - x86_vector_domain); + hv_pci_get_root_domain()); if (!hbus->irq_domain) { dev_err(&hbus->hdev->device, "Failed to build an MSI IRQ domain\n"); @@ -1774,7 +2030,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) * If the memory enable bit is already set, Hyper-V silently ignores * the below BAR updates, and the related PCI device driver can not * work, because reading from the device register(s) always returns - * 0xFFFFFFFF. + * 0xFFFFFFFF (PCI_ERROR_RESPONSE). */ list_for_each_entry(hpdev, &hbus->children, list_entry) { _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command); @@ -3445,18 +3701,23 @@ static int hv_pci_suspend(struct hv_device *hdev) static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg) { - struct msi_desc *entry; struct irq_data *irq_data; + struct msi_desc *entry; + int ret = 0; - for_each_pci_msi_entry(entry, pdev) { + msi_lock_descs(&pdev->dev); + msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) { irq_data = irq_get_irq_data(entry->irq); - if (WARN_ON_ONCE(!irq_data)) - return -EINVAL; + if (WARN_ON_ONCE(!irq_data)) { + ret = -EINVAL; + break; + } hv_compose_msi_msg(irq_data, &entry->msg); } + msi_unlock_descs(&pdev->dev); - return 0; + return ret; } /* @@ -3542,9 +3803,15 @@ static void __exit exit_hv_pci_drv(void) static int __init init_hv_pci_drv(void) { + int ret; + if (!hv_is_hyperv_initialized()) return -ENODEV; + ret = hv_pci_irqchip_init(); + if (ret) + return ret; + /* Set the invalid domain number's bit, so it will not be used */ set_bit(HVPCI_DOM_INVALID, hvpci_dom_map); diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index ed13e81cd691..71258ea3d35f 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -6,6 +6,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/pci.h> #include <linux/clk.h> #include <linux/delay.h> @@ -51,10 +52,14 @@ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \ PCIE_CONF_ADDR_EN) #define PCIE_CONF_DATA_OFF 0x18fc +#define PCIE_INT_CAUSE_OFF 0x1900 +#define PCIE_INT_PM_PME BIT(28) #define PCIE_MASK_OFF 0x1910 #define PCIE_MASK_ENABLE_INTS 0x0f000000 #define PCIE_CTRL_OFF 0x1a00 #define PCIE_CTRL_X1_MODE 0x0001 +#define PCIE_CTRL_RC_MODE BIT(1) +#define PCIE_CTRL_MASTER_HOT_RESET BIT(24) #define PCIE_STAT_OFF 0x1a04 #define PCIE_STAT_BUS 0xff00 #define PCIE_STAT_DEV 0x1f0000 @@ -125,6 +130,11 @@ static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); } +static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port) +{ + 
return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8; +} + static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) { u32 stat; @@ -145,22 +155,13 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) mvebu_writel(port, stat, PCIE_STAT_OFF); } -/* - * Setup PCIE BARs and Address Decode Wins: - * BAR[0] -> internal registers (needed for MSI) - * BAR[1] -> covers all DRAM banks - * BAR[2] -> Disabled - * WIN[0-3] -> DRAM bank[0-3] - */ -static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) +static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port) { - const struct mbus_dram_target_info *dram; - u32 size; int i; - dram = mv_mbus_dram_info(); + mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0)); + mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0)); - /* First, disable and clear BARs and windows. */ for (i = 1; i < 3; i++) { mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i)); mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i)); @@ -176,6 +177,25 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF); mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF); mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF); +} + +/* + * Setup PCIE BARs and Address Decode Wins: + * BAR[0] -> internal registers (needed for MSI) + * BAR[1] -> covers all DRAM banks + * BAR[2] -> Disabled + * WIN[0-3] -> DRAM bank[0-3] + */ +static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) +{ + const struct mbus_dram_target_info *dram; + u32 size; + int i; + + dram = mv_mbus_dram_info(); + + /* First, disable and clear BARs and windows. */ + mvebu_pcie_disable_wins(port); /* Setup windows for DDR banks. Count total DDR size on the fly. */ size = 0; @@ -213,18 +233,47 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) { - u32 cmd, mask; + u32 ctrl, cmd, dev_rev, mask; - /* Point PCIe unit MBUS decode windows to DRAM space. */ - mvebu_pcie_setup_wins(port); + /* Setup PCIe controller to Root Complex mode. */ + ctrl = mvebu_readl(port, PCIE_CTRL_OFF); + ctrl |= PCIE_CTRL_RC_MODE; + mvebu_writel(port, ctrl, PCIE_CTRL_OFF); - /* Master + slave enable. */ + /* Disable Root Bridge I/O space, memory space and bus mastering. */ cmd = mvebu_readl(port, PCIE_CMD_OFF); - cmd |= PCI_COMMAND_IO; - cmd |= PCI_COMMAND_MEMORY; - cmd |= PCI_COMMAND_MASTER; + cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); mvebu_writel(port, cmd, PCIE_CMD_OFF); + /* + * Change Class Code of PCI Bridge device to PCI Bridge (0x6004) + * because default value is Memory controller (0x5080). + * + * Note that this mvebu PCI Bridge does not have compliant Type 1 + * Configuration Space. Header Type is reported as Type 0 and it + * has the format of a Type 0 config space. + * + * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34) + * have the same format in Marvell's specification as in the PCIe + * specification, but their meaning is totally different and they do + * different things: they are aliased into internal mvebu registers + * (e.g. PCIE_BAR_LO_OFF) and these should not be changed or + * reconfigured by pci device drivers. + * + * Therefore the driver uses an emulated PCI Bridge which emulates + * access to configuration space via internal mvebu registers or an + * emulated configuration buffer. The driver accesses this PCI Bridge + * directly for simplicity, but these registers can also be accessed + * via the standard mvebu way of accessing PCI config space.
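/*
 * Illustrative sketch (editor's addition, not from the kernel tree): the
 * class-code fixup right after this comment rewrites only bits [31:8] of
 * the Device/Revision register, preserving the revision ID in bits [7:0]
 * and clearing the programming interface. A plain-C model of that
 * composition; the initial register value is made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_BRIDGE_PCI 0x0604	/* value from include/linux/pci_ids.h */

int main(void)
{
	/* Simulated Device/Revision register: class code in bits [31:8],
	 * revision ID in bits [7:0]. Default class is 0x5080 (memory
	 * controller); revision is 1 here. */
	uint32_t dev_rev = (0x5080u << 16) | 0x01;

	dev_rev &= ~0xffffff00;				/* keep only the revision ID */
	dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;	/* class 0x0604, prog-if 0 */

	printf("dev_rev = %#010x\n", dev_rev);		/* 0x06040001 */
	return 0;
}
/* End of sketch. */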
+ */ + dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); + dev_rev &= ~0xffffff00; + dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8; + mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF); + + /* Point PCIe unit MBUS decode windows to DRAM space. */ + mvebu_pcie_setup_wins(port); + /* Enable interrupt lines A-D. */ mask = mvebu_readl(port, PCIE_MASK_OFF); mask |= PCIE_MASK_ENABLE_INTS; @@ -250,6 +299,9 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, case 4: *val = readl_relaxed(conf_data); break; + default: + *val = 0xffffffff; + return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; @@ -303,7 +355,7 @@ static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, * areas each having a power of two size. We start from the largest * one (i.e highest order bit set in the size). */ -static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, +static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port, unsigned int target, unsigned int attribute, phys_addr_t base, size_t size, phys_addr_t remap) @@ -324,7 +376,7 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, &base, &end, ret); mvebu_pcie_del_windows(port, base - size_mapped, size_mapped); - return; + return ret; } size -= sz; @@ -333,16 +385,20 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, if (remap != MVEBU_MBUS_NO_REMAP) remap += sz; } + + return 0; } -static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, +static int mvebu_pcie_set_window(struct mvebu_pcie_port *port, unsigned int target, unsigned int attribute, const struct mvebu_pcie_window *desired, struct mvebu_pcie_window *cur) { + int ret; + if (desired->base == cur->base && desired->remap == cur->remap && desired->size == cur->size) - return; + return 0; if (cur->size != 0) { mvebu_pcie_del_windows(port, cur->base, cur->size); @@ -357,31 +413,35 @@ static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, } if (desired->size == 0) - return; + return 0; + + ret = mvebu_pcie_add_windows(port, target, attribute, desired->base, + desired->size, desired->remap); + if (ret) { + cur->size = 0; + cur->base = 0; + return ret; + } - mvebu_pcie_add_windows(port, target, attribute, desired->base, - desired->size, desired->remap); *cur = *desired; + return 0; } -static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) +static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { struct mvebu_pcie_window desired = {}; struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new iobase/iolimit values invalid? 
*/ if (conf->iolimit < conf->iobase || - conf->iolimitupper < conf->iobaseupper || - !(conf->command & PCI_COMMAND_IO)) { - mvebu_pcie_set_window(port, port->io_target, port->io_attr, - &desired, &port->iowin); - return; - } + conf->iolimitupper < conf->iobaseupper) + return mvebu_pcie_set_window(port, port->io_target, port->io_attr, + &desired, &port->iowin); if (!mvebu_has_ioport(port)) { dev_WARN(&port->pcie->pdev->dev, "Attempt to set IO when IO is disabled\n"); - return; + return -EOPNOTSUPP; } /* @@ -399,22 +459,19 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) desired.remap) + 1; - mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, - &port->iowin); + return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, + &port->iowin); } -static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) +static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) { struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new membase/memlimit values invalid? */ - if (conf->memlimit < conf->membase || - !(conf->command & PCI_COMMAND_MEMORY)) { - mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, - &desired, &port->memwin); - return; - } + if (conf->memlimit < conf->membase) + return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, + &desired, &port->memwin); /* * We read the PCI-to-PCI bridge emulated registers, and @@ -426,8 +483,56 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) - desired.base + 1; - mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, - &port->memwin); + return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, + &port->memwin); +} + +static pci_bridge_emul_read_status_t +mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, + int reg, u32 *value) +{ + struct mvebu_pcie_port *port = bridge->data; + + switch (reg) { + case PCI_COMMAND: + *value = mvebu_readl(port, PCIE_CMD_OFF); + break; + + case PCI_PRIMARY_BUS: { + /* + * From the whole 32bit register we support reading from HW only + * secondary bus number which is mvebu local bus number. + * Other bits are retrieved only from emulated config buffer. + */ + __le32 *cfgspace = (__le32 *)&bridge->conf; + u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]); + val &= ~0xff00; + val |= mvebu_pcie_get_local_bus_nr(port) << 8; + *value = val; + break; + } + + case PCI_INTERRUPT_LINE: { + /* + * From the whole 32bit register we support reading from HW only + * one bit: PCI_BRIDGE_CTL_BUS_RESET. + * Other bits are retrieved only from emulated config buffer. 
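/*
 * Illustrative sketch (editor's addition, not from the kernel tree): the
 * PCI_INTERRUPT_LINE case completed just below merges one live hardware
 * bit (PCI_BRIDGE_CTL_BUS_RESET, mirrored from PCIE_CTRL_MASTER_HOT_RESET)
 * into a dword otherwise served from the emulated config buffer. A
 * plain-C model of that overlay follows.
 */
#include <stdint.h>
#include <stdio.h>

#define BRIDGE_CTL_BUS_RESET 0x40	/* PCI_BRIDGE_CTL_BUS_RESET */

/* Bridge Control occupies the upper 16 bits of the PCI_INTERRUPT_LINE
 * dword, so the hardware bit is shifted up by 16 before merging. */
static uint32_t overlay_bus_reset(uint32_t emulated, int hw_reset_active)
{
	if (hw_reset_active)
		emulated |= (uint32_t)BRIDGE_CTL_BUS_RESET << 16;
	else
		emulated &= ~((uint32_t)BRIDGE_CTL_BUS_RESET << 16);
	return emulated;
}

int main(void)
{
	printf("%#x\n", overlay_bus_reset(0x0000ff00, 1));	/* 0x40ff00 */
	printf("%#x\n", overlay_bus_reset(0x0040ff00, 0));	/* 0xff00 */
	return 0;
}
/* End of sketch. */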
+ */ + __le32 *cfgspace = (__le32 *)&bridge->conf; + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); + if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET) + val |= PCI_BRIDGE_CTL_BUS_RESET << 16; + else + val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16); + *value = val; + break; + } + + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } + + return PCI_BRIDGE_EMUL_HANDLED; } static pci_bridge_emul_read_status_t @@ -442,9 +547,7 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, break; case PCI_EXP_DEVCTL: - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) & - ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); break; case PCI_EXP_LNKCAP: @@ -468,6 +571,18 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, *value = mvebu_readl(port, PCIE_RC_RTSTA); break; + case PCI_EXP_DEVCAP2: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2); + break; + + case PCI_EXP_DEVCTL2: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2); + break; + + case PCI_EXP_LNKCTL2: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2); + break; + default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } @@ -484,39 +599,62 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, switch (reg) { case PCI_COMMAND: - { - if (!mvebu_has_ioport(port)) - conf->command &= ~PCI_COMMAND_IO; - - if ((old ^ new) & PCI_COMMAND_IO) - mvebu_pcie_handle_iobase_change(port); - if ((old ^ new) & PCI_COMMAND_MEMORY) - mvebu_pcie_handle_membase_change(port); + if (!mvebu_has_ioport(port)) { + conf->command = cpu_to_le16( + le16_to_cpu(conf->command) & ~PCI_COMMAND_IO); + new &= ~PCI_COMMAND_IO; + } + mvebu_writel(port, new, PCIE_CMD_OFF); break; - } case PCI_IO_BASE: - /* - * We keep bit 1 set, it is a read-only bit that - * indicates we support 32 bits addressing for the - * I/O - */ - conf->iobase |= PCI_IO_RANGE_TYPE_32; - conf->iolimit |= PCI_IO_RANGE_TYPE_32; - mvebu_pcie_handle_iobase_change(port); + if ((mask & 0xffff) && mvebu_pcie_handle_iobase_change(port)) { + /* On error disable IO range */ + conf->iobase &= ~0xf0; + conf->iolimit &= ~0xf0; + conf->iobaseupper = cpu_to_le16(0x0000); + conf->iolimitupper = cpu_to_le16(0x0000); + if (mvebu_has_ioport(port)) + conf->iobase |= 0xf0; + } break; case PCI_MEMORY_BASE: - mvebu_pcie_handle_membase_change(port); + if (mvebu_pcie_handle_membase_change(port)) { + /* On error disable mem range */ + conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0); + conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0); + conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0); + } break; case PCI_IO_BASE_UPPER16: - mvebu_pcie_handle_iobase_change(port); + if (mvebu_pcie_handle_iobase_change(port)) { + /* On error disable IO range */ + conf->iobase &= ~0xf0; + conf->iolimit &= ~0xf0; + conf->iobaseupper = cpu_to_le16(0x0000); + conf->iolimitupper = cpu_to_le16(0x0000); + if (mvebu_has_ioport(port)) + conf->iobase |= 0xf0; + } break; case PCI_PRIMARY_BUS: - mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus); + if (mask & 0xff00) + mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus); + break; + + case PCI_INTERRUPT_LINE: + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { + u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF); + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) + ctrl |= PCIE_CTRL_MASTER_HOT_RESET; + else + ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET; + 
mvebu_writel(port, ctrl, PCIE_CTRL_OFF); + } break; default: @@ -532,13 +670,6 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, switch (reg) { case PCI_EXP_DEVCTL: - /* - * Armada370 data says these bits must always - * be zero when in root complex mode. - */ - new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); - mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); break; @@ -555,12 +686,31 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, break; case PCI_EXP_RTSTA: - mvebu_writel(port, new, PCIE_RC_RTSTA); + /* + * PME Status bit in Root Status Register (PCIE_RC_RTSTA) + * is read-only and can be cleared only by writing 0b to the + * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So + * clear PME via Interrupt Cause. + */ + if (new & PCI_EXP_RTSTA_PME) + mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF); + break; + + case PCI_EXP_DEVCTL2: + mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2); + break; + + case PCI_EXP_LNKCTL2: + mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2); + break; + + default: break; } } static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { + .read_base = mvebu_pci_bridge_emul_base_conf_read, .write_base = mvebu_pci_bridge_emul_base_conf_write, .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read, .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write, @@ -570,9 +720,11 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { * Initialize the configuration space of the PCI-to-PCI bridge * associated with the given PCIe interface. */ -static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) +static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) { struct pci_bridge_emul *bridge = &port->bridge; + u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP); + u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS); bridge->conf.vendor = PCI_VENDOR_ID_MARVELL; bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; @@ -585,11 +737,17 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; } + /* + * Older mvebu hardware provides PCIe Capability structure only in + * version 1. New hardware provides it in version 2. 
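/*
 * Illustrative sketch (editor's addition, not from the kernel tree): the
 * statement just below this comment derives the emulated capability
 * version from the hardware's PCIe Capability header. In the first dword
 * of that capability, the PCI Express Capabilities Register sits in bits
 * [31:16] and its version field is the low nibble (PCI_EXP_FLAGS_VERS).
 * A plain-C model with a made-up register value follows.
 */
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_FLAGS_VERS 0x000f	/* capability version field */

int main(void)
{
	/* Simulated first dword of the PCIe Capability: cap ID 0x10 in
	 * bits [7:0], next pointer in [15:8], flags in [31:16]. Flags of
	 * 0x0002 encode capability version 2. */
	uint32_t pcie_cap = 0x00020010;
	uint8_t pcie_cap_ver = (pcie_cap >> 16) & PCI_EXP_FLAGS_VERS;

	printf("PCIe capability version: %u\n", pcie_cap_ver);	/* 2 */
	return 0;
}
/* End of sketch. */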
+ */ + bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver); + bridge->has_pcie = true; bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; - pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR); + return pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR); } static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) @@ -606,6 +764,9 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; + if (!port->base) + continue; + if (bus->number == 0 && port->devfn == devfn) return port; if (bus->number != 0 && @@ -653,20 +814,16 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int ret; port = mvebu_pcie_find_port(pcie, bus, devfn); - if (!port) { - *val = 0xffffffff; + if (!port) return PCIBIOS_DEVICE_NOT_FOUND; - } /* Access the emulated PCI-to-PCI bridge */ if (bus->number == 0) return pci_bridge_emul_conf_read(&port->bridge, where, size, val); - if (!mvebu_pcie_link_up(port)) { - *val = 0xffffffff; + if (!mvebu_pcie_link_up(port)) return PCIBIOS_DEVICE_NOT_FOUND; - } /* Access the real PCIe interface */ ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, @@ -680,6 +837,15 @@ static struct pci_ops mvebu_pcie_ops = { .write = mvebu_pcie_wr_conf, }; +static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + /* Interrupt support on mvebu emulated bridges is not implemented yet */ + if (dev->bus->number == 0) + return 0; /* Proper return code 0 == NO_IRQ */ + + return of_irq_parse_and_map_pci(dev, slot, pin); +} + static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, const struct resource *res, resource_size_t start, @@ -781,6 +947,8 @@ static int mvebu_pcie_suspend(struct device *dev) pcie = dev_get_drvdata(dev); for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = pcie->ports + i; + if (!port->base) + continue; port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF); } @@ -795,6 +963,8 @@ static int mvebu_pcie_resume(struct device *dev) pcie = dev_get_drvdata(dev); for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = pcie->ports + i; + if (!port->base) + continue; mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF); mvebu_pcie_setup_hw(port); } @@ -838,6 +1008,11 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, port->devfn = of_pci_get_devfn(child); if (port->devfn < 0) goto skip; + if (PCI_FUNC(port->devfn) != 0) { + dev_err(dev, "%s: invalid function number, must be zero\n", + port->name); + goto skip; + } ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM, &port->mem_target, &port->mem_attr); @@ -992,6 +1167,10 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) resource_size(&pcie->io) - 1); pcie->realio.name = "PCI I/O"; + ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start); + if (ret) + return ret; + pci_add_resource(&bridge->windows, &pcie->realio); ret = devm_request_resource(dev, &ioport_resource, &pcie->realio); if (ret) @@ -1001,54 +1180,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) return 0; } -/* - * This is a copy of pci_host_probe(), except that it does the I/O - * remap as the last step, once we are sure we won't fail. - * - * It should be removed once the I/O remap error handling issue has - * been sorted out. 
- */ -static int mvebu_pci_host_probe(struct pci_host_bridge *bridge) -{ - struct mvebu_pcie *pcie; - struct pci_bus *bus, *child; - int ret; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) { - dev_err(bridge->dev.parent, "Scanning root bridge failed"); - return ret; - } - - pcie = pci_host_bridge_priv(bridge); - if (resource_size(&pcie->io) != 0) { - unsigned int i; - - for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K) - pci_ioremap_io(i, pcie->io.start + i); - } - - bus = bridge->bus; - - /* - * We insert PCI resources into the iomem_resource and - * ioport_resource trees in either pci_bus_claim_resources() - * or pci_bus_assign_resources(). - */ - if (pci_has_flag(PCI_PROBE_ONLY)) { - pci_bus_claim_resources(bus); - } else { - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - } - - pci_bus_add_devices(bus); - return 0; -} - static int mvebu_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1112,9 +1243,93 @@ static int mvebu_pcie_probe(struct platform_device *pdev) continue; } + ret = mvebu_pci_bridge_emul_init(port); + if (ret < 0) { + dev_err(dev, "%s: cannot init emulated bridge\n", + port->name); + devm_iounmap(dev, port->base); + port->base = NULL; + mvebu_pcie_powerdown(port); + continue; + } + + /* + * The PCIe topology exported by mvebu hw is quite complicated. In + * reality it is something like N fully independent host bridges, + * where each host bridge has one PCIe Root Port (which acts as a + * PCI Bridge device). Each host bridge has its own independent + * internal registers, independent access to PCI config space, + * independent interrupt lines, independent window and memory + * access configuration. But additionally there is some kind of + * peer-to-peer support between PCIe devices behind different + * host bridges, limited just to forwarding of memory and I/O + * transactions (forwarding of error messages and config cycles + * is not supported). So we could say there are N independent + * PCIe Root Complexes. + * + * For this kind of setup the DT should have been structured into + * N independent PCIe controllers / host bridges. But instead the + * structure was defined in the past to put the PCIe Root Ports of + * all host bridges onto one bus zero, like in a classic multi-port + * Root Complex setup with just one host bridge. + * + * This means that the pci-mvebu.c driver provides a "virtual" bus 0 + * on which it registers all PCIe Root Ports (PCI Bridge devices) + * specified in the DT by their BDF addresses, and virtually routes + * PCI config accesses of each PCI Bridge device to the specific PCIe + * host bridge. + * + * Normally a PCI Bridge should choose between Type 0 and Type 1 + * config requests based on the primary and secondary bus numbers + * configured on the bridge itself. But because the mvebu PCI Bridge + * does not have registers for the primary and secondary bus numbers + * in its config space, it determines the type of config request + * in its own custom way. + * + * There are two ways mvebu determines the type of a config + * request. + * + * 1. If the Secondary Bus Number Enable bit is not set or is not + * available (applies to pre-XP PCIe controllers) then Type 0 + * is used if the target bus number equals Local Bus Number (bits + * [15:8] in register 0x1a04) and the target device number differs + * from Local Device Number (bits [20:16] in register 0x1a04). + * Type 1 is used if the target bus number differs from Local Bus + * Number.
And when the target bus number equals Local Bus Number + * and the target device equals Local Device Number, the request is + * routed to the Local PCI Bridge (PCIe Root Port). + * + * 2. If the Secondary Bus Number Enable bit is set (bit 7 in + * register 0x1a2c) then mvebu hw determines the type of config + * request like a compliant PCI Bridge, based on the primary bus + * number which is configured via Local Bus Number (bits [15:8] in + * register 0x1a04) and the secondary bus number which is configured + * via Secondary Bus Number (bits [7:0] in register 0x1a2c). + * The Local PCI Bridge (PCIe Root Port) is available on the primary + * bus as the device with Local Device Number (bits [20:16] in register + * 0x1a04). + * + * The Secondary Bus Number Enable bit is disabled by default and + * option 2 is not available on pre-XP PCIe controllers. Hence + * this driver always uses option 1. + * + * Basically it means that the primary and secondary buses share + * one virtual number configured via the Local Bus Number bits, and + * the Local Device Number bits determine whether the primary + * or the secondary bus is being accessed. So set Local Device Number + * to 1 and redirect all writes of the PCI Bridge Secondary Bus Number + * register to Local Bus Number (bits [15:8] in register 0x1a04). + * + * So accesses to devices on buses behind the secondary bus + * number work correctly, and config space accesses to + * device 0 on the secondary bus are also + * correctly routed to the secondary bus. Due to the issues described + * in mvebu_pcie_setup_hw(), PCI Bridges on the primary bus (zero) + * are not accessed directly via PCI config space but rather + * indirectly via the kernel's emulated PCI bridge driver. + */ mvebu_pcie_setup_hw(port); - mvebu_pcie_set_local_dev_nr(port, 1); - mvebu_pci_bridge_emul_init(port); + mvebu_pcie_set_local_dev_nr(port, 0); } pcie->nports = i; @@ -1122,8 +1337,55 @@ static int mvebu_pcie_probe(struct platform_device *pdev) bridge->sysdata = pcie; bridge->ops = &mvebu_pcie_ops; bridge->align_resource = mvebu_pcie_align_resource; + bridge->map_irq = mvebu_pcie_map_irq; + + return pci_host_probe(bridge); +} + +static int mvebu_pcie_remove(struct platform_device *pdev) +{ + struct mvebu_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + u32 cmd; + int i; + + /* Remove PCI bus with all devices. */ + pci_lock_rescan_remove(); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); + pci_unlock_rescan_remove(); + + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + + if (!port->base) + continue; + + /* Disable Root Bridge I/O space, memory space and bus mastering. */ + cmd = mvebu_readl(port, PCIE_CMD_OFF); + cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + mvebu_writel(port, cmd, PCIE_CMD_OFF); + + /* Mask all interrupt sources. */ + mvebu_writel(port, 0, PCIE_MASK_OFF); + + /* Free config space for emulated root bridge. */ + pci_bridge_emul_cleanup(&port->bridge); - return mvebu_pci_host_probe(bridge); + /* Disable and clear BARs and windows. */ + mvebu_pcie_disable_wins(port); + + /* Delete PCIe IO and MEM windows. */ + if (port->iowin.size) + mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size); + if (port->memwin.size) + mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size); + + /* Power down card and disable clocks. Must be the last step.
*/ + mvebu_pcie_powerdown(port); + } + + return 0; } static const struct of_device_id mvebu_pcie_of_match_table[] = { @@ -1142,10 +1404,14 @@ static struct platform_driver mvebu_pcie_driver = { .driver = { .name = "mvebu-pcie", .of_match_table = mvebu_pcie_of_match_table, - /* driver unloading/unbinding currently not supported */ - .suppress_bind_attrs = true, .pm = &mvebu_pcie_pm_ops, }, .probe = mvebu_pcie_probe, + .remove = mvebu_pcie_remove, }; -builtin_platform_driver(mvebu_pcie_driver); +module_platform_driver(mvebu_pcie_driver); + +MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>"); +MODULE_AUTHOR("Pali Rohár <pali@kernel.org>"); +MODULE_DESCRIPTION("Marvell EBU PCIe controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c index afde4aa8f6dc..35804ea394fd 100644 --- a/drivers/pci/controller/pci-rcar-gen2.c +++ b/drivers/pci/controller/pci-rcar-gen2.c @@ -93,7 +93,7 @@ #define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48) -struct rcar_pci_priv { +struct rcar_pci { struct device *dev; void __iomem *reg; struct resource mem_res; @@ -105,7 +105,7 @@ struct rcar_pci_priv { static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, int where) { - struct rcar_pci_priv *priv = bus->sysdata; + struct rcar_pci *priv = bus->sysdata; int slot, val; if (!pci_is_root_bus(bus) || PCI_FUNC(devfn)) @@ -132,7 +132,7 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, static irqreturn_t rcar_pci_err_irq(int irq, void *pw) { - struct rcar_pci_priv *priv = pw; + struct rcar_pci *priv = pw; struct device *dev = priv->dev; u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG); @@ -148,7 +148,7 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw) return IRQ_NONE; } -static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) +static void rcar_pci_setup_errirq(struct rcar_pci *priv) { struct device *dev = priv->dev; int ret; @@ -166,11 +166,11 @@ static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG); } #else -static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { } +static inline void rcar_pci_setup_errirq(struct rcar_pci *priv) { } #endif /* PCI host controller setup */ -static void rcar_pci_setup(struct rcar_pci_priv *priv) +static void rcar_pci_setup(struct rcar_pci *priv) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(priv); struct device *dev = priv->dev; @@ -279,7 +279,7 @@ static int rcar_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *cfg_res, *mem_res; - struct rcar_pci_priv *priv; + struct rcar_pci *priv; struct pci_host_bridge *bridge; void __iomem *reg; diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c index e9d5ca245f5e..b5bd10a62adb 100644 --- a/drivers/pci/controller/pci-thunder-ecam.c +++ b/drivers/pci/controller/pci-thunder-ecam.c @@ -41,10 +41,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, } if (where_a == 0x4) { addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } + v = readl(addr); v &= ~0xf; v |= 2; /* EA entry-1. 
Base-L */ @@ -56,10 +55,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, u32 barl_rb; addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } + barl_orig = readl(addr + 0); writel(0xffffffff, addr + 0); barl_rb = readl(addr + 0); @@ -72,10 +70,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, } if (where_a == 0xc) { addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */ - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } + v = readl(addr); /* EA entry-3. Base-H */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; @@ -104,10 +101,8 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, } addr = bus->ops->map_bus(bus, devfn, where_a); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } v = readl(addr); @@ -135,10 +130,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, int where_a = where & ~3; addr = bus->ops->map_bus(bus, devfn, 0xc); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } v = readl(addr); @@ -146,10 +139,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, cfg_type = (v >> 16) & 0x7f; addr = bus->ops->map_bus(bus, devfn, 8); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } class_rev = readl(addr); if (class_rev == 0xffffffff) @@ -176,10 +167,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, } addr = bus->ops->map_bus(bus, devfn, 0); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } vendor_device = readl(addr); if (vendor_device == 0xffffffff) @@ -196,10 +185,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, bool is_tns = (vendor_device == 0xa01f177d); addr = bus->ops->map_bus(bus, devfn, 0x70); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } + /* E_CAP */ v = readl(addr); has_msix = (v & 0xff00) != 0; @@ -211,10 +199,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, } if (where_a == 0xb0) { addr = bus->ops->map_bus(bus, devfn, where_a); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } + v = readl(addr); if (v & 0xff00) pr_err("Bad MSIX cap header: %08x\n", v); @@ -268,10 +255,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, if (where_a == 0x70) { addr = bus->ops->map_bus(bus, devfn, where_a); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } + v = readl(addr); if (v & 0xff00) pr_err("Bad PCIe cap header: %08x\n", v); diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c index 0660b9da204f..06a9855cb431 100644 --- a/drivers/pci/controller/pci-thunder-pem.c +++ b/drivers/pci/controller/pci-thunder-pem.c @@ -41,10 +41,8 @@ static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn, struct pci_config_window *cfg = bus->sysdata; struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; - if (devfn != 0 || where >= 2048) { - *val = ~0; + if (devfn != 0 || where >= 2048) return PCIBIOS_DEVICE_NOT_FOUND; - } /* * 32-bit accesses only. 
Write the address to the low order diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index c50ff279903c..bfa259781b69 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -269,9 +269,7 @@ static void xgene_free_domains(struct xgene_msi *msi) static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) { - int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); - - xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); + xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL); if (!xgene_msi->bitmap) return -ENOMEM; @@ -360,7 +358,7 @@ static int xgene_msi_remove(struct platform_device *pdev) kfree(msi->msi_groups); - kfree(msi->bitmap); + bitmap_free(msi->bitmap); msi->bitmap = NULL; xgene_free_domains(msi); diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 56d0d50338c8..0d5acbfc7143 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -60,7 +60,7 @@ #define XGENE_PCIE_IP_VER_2 2 #if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) -struct xgene_pcie_port { +struct xgene_pcie { struct device_node *node; struct device *dev; struct clk *clk; @@ -71,12 +71,12 @@ struct xgene_pcie_port { u32 version; }; -static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg) +static u32 xgene_pcie_readl(struct xgene_pcie *port, u32 reg) { return readl(port->csr_base + reg); } -static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val) +static void xgene_pcie_writel(struct xgene_pcie *port, u32 reg, u32 val) { writel(val, port->csr_base + reg); } @@ -86,15 +86,15 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags) return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; } -static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus) +static inline struct xgene_pcie *pcie_bus_to_port(struct pci_bus *bus) { struct pci_config_window *cfg; if (acpi_disabled) - return (struct xgene_pcie_port *)(bus->sysdata); + return (struct xgene_pcie *)(bus->sysdata); cfg = bus->sysdata; - return (struct xgene_pcie_port *)(cfg->priv); + return (struct xgene_pcie *)(cfg->priv); } /* @@ -103,7 +103,7 @@ static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus) */ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) { - struct xgene_pcie_port *port = pcie_bus_to_port(bus); + struct xgene_pcie *port = pcie_bus_to_port(bus); if (bus->number >= (bus->primary + 1)) return port->cfg_base + AXI_EP_CFG_ACCESS; @@ -117,7 +117,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) */ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) { - struct xgene_pcie_port *port = pcie_bus_to_port(bus); + struct xgene_pcie *port = pcie_bus_to_port(bus); unsigned int b, d, f; u32 rtdid_val = 0; @@ -164,18 +164,18 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - struct xgene_pcie_port *port = pcie_bus_to_port(bus); + struct xgene_pcie *port = pcie_bus_to_port(bus); if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != PCIBIOS_SUCCESSFUL) return PCIBIOS_DEVICE_NOT_FOUND; /* - * The v1 controller has a bug in its Configuration Request - * Retry Status (CRS) logic: when CRS Software Visibility is - * enabled and we read the Vendor and Device ID of a non-existent - * device, the controller 
fabricates return data of 0xFFFF0001 - * ("device exists but is not ready") instead of 0xFFFFFFFF + * The v1 controller has a bug in its Configuration Request Retry + * Status (CRS) logic: when CRS Software Visibility is enabled and + * we read the Vendor and Device ID of a non-existent device, the + * controller fabricates return data of 0xFFFF0001 ("device exists + * but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE) * ("device does not exist"). This causes the PCI core to retry * the read until it times out. Avoid this by not claiming to * support CRS SV. @@ -227,7 +227,7 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) { struct device *dev = cfg->parent; struct acpi_device *adev = to_acpi_device(dev); - struct xgene_pcie_port *port; + struct xgene_pcie *port; struct resource csr; int ret; @@ -281,7 +281,7 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { #endif #if defined(CONFIG_PCI_XGENE) -static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, +static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr, u32 flags, u64 size) { u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; @@ -307,7 +307,7 @@ static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, return mask; } -static void xgene_pcie_linkup(struct xgene_pcie_port *port, +static void xgene_pcie_linkup(struct xgene_pcie *port, u32 *lanes, u32 *speed) { u32 val32; @@ -322,7 +322,7 @@ static void xgene_pcie_linkup(struct xgene_pcie_port *port, } } -static int xgene_pcie_init_port(struct xgene_pcie_port *port) +static int xgene_pcie_init_port(struct xgene_pcie *port) { struct device *dev = port->dev; int rc; @@ -342,7 +342,7 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port) return 0; } -static int xgene_pcie_map_reg(struct xgene_pcie_port *port, +static int xgene_pcie_map_reg(struct xgene_pcie *port, struct platform_device *pdev) { struct device *dev = port->dev; @@ -362,7 +362,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port, return 0; } -static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, +static void xgene_pcie_setup_ob_reg(struct xgene_pcie *port, struct resource *res, u32 offset, u64 cpu_addr, u64 pci_addr) { @@ -394,7 +394,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr)); } -static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port) +static void xgene_pcie_setup_cfg_reg(struct xgene_pcie *port) { u64 addr = port->cfg_addr; @@ -403,7 +403,7 @@ static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port) xgene_pcie_writel(port, CFGCTL, EN_REG); } -static int xgene_pcie_map_ranges(struct xgene_pcie_port *port) +static int xgene_pcie_map_ranges(struct xgene_pcie *port) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); struct resource_entry *window; @@ -444,7 +444,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port) return 0; } -static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg, +static void xgene_pcie_setup_pims(struct xgene_pcie *port, u32 pim_reg, u64 pim, u64 size) { xgene_pcie_writel(port, pim_reg, lower_32_bits(pim)); @@ -465,7 +465,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) return 1; } - if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { + if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) { *ib_reg_mask |= (1 << 0); return 0; } @@ -478,7 +478,7 @@ static int 
xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) return -EINVAL; } -static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, +static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port, struct resource_entry *entry, u8 *ib_reg_mask) { @@ -529,7 +529,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1)); } -static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) +static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); struct resource_entry *entry; @@ -542,7 +542,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) } /* clear BAR configuration which was done by firmware */ -static void xgene_pcie_clear_config(struct xgene_pcie_port *port) +static void xgene_pcie_clear_config(struct xgene_pcie *port) { int i; @@ -550,7 +550,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port) xgene_pcie_writel(port, i, 0); } -static int xgene_pcie_setup(struct xgene_pcie_port *port) +static int xgene_pcie_setup(struct xgene_pcie *port) { struct device *dev = port->dev; u32 val, lanes = 0, speed = 0; @@ -588,7 +588,7 @@ static int xgene_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *dn = dev->of_node; - struct xgene_pcie_port *port; + struct xgene_pcie *port; struct pci_host_bridge *bridge; int ret; diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 2513e9363236..18b2361d6462 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -510,10 +510,8 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, if (altera_pcie_hide_rc_bar(bus, devfn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; - if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) { - *value = 0xffffffff; + if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; - } return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size, value); @@ -767,7 +765,7 @@ static int altera_pcie_probe(struct platform_device *pdev) struct altera_pcie *pcie; struct pci_host_bridge *bridge; int ret; - const struct of_device_id *match; + const struct altera_pcie_data *data; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) @@ -777,11 +775,11 @@ static int altera_pcie_probe(struct platform_device *pdev) pcie->pdev = pdev; platform_set_drvdata(pdev, pcie); - match = of_match_device(altera_pcie_of_match, &pdev->dev); - if (!match) + data = of_device_get_match_data(&pdev->dev); + if (!data) return -ENODEV; - pcie->pcie_data = match->data; + pcie->pcie_data = data; ret = altera_pcie_parse_dt(pcie); if (ret) { diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index b090924b41fe..854d95163112 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -42,8 +42,9 @@ #define CORE_FABRIC_STAT_MASK 0x001F001F #define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port)) #define CORE_LANE_CFG_REFCLK0REQ BIT(0) -#define CORE_LANE_CFG_REFCLK1 BIT(1) +#define CORE_LANE_CFG_REFCLK1REQ BIT(1) #define CORE_LANE_CFG_REFCLK0ACK BIT(2) +#define CORE_LANE_CFG_REFCLK1ACK BIT(3) #define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10)) #define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port)) #define CORE_LANE_CTL_CFGACC BIT(15) @@ -482,9 +483,9 @@ static int apple_pcie_setup_refclk(struct apple_pcie 
*pcie, if (res < 0) return res; - rmw_set(CORE_LANE_CFG_REFCLK1, pcie->base + CORE_LANE_CFG(port->idx)); + rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx)); res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx), - stat, stat & CORE_LANE_CFG_REFCLK1, + stat, stat & CORE_LANE_CFG_REFCLK1ACK, 100, 50000); if (res < 0) @@ -563,6 +564,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, return ret; } + rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK); + rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK); + ret = apple_pcie_port_setup_irq(port); if (ret) return ret; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 1fc7bd49a7ad..375c0c40bbf8 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -24,6 +24,7 @@ #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/printk.h> +#include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/sizes.h> #include <linux/slab.h> @@ -118,6 +119,7 @@ #define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000 +#define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000 #define PCIE_INTR2_CPU_BASE 0x4300 @@ -144,6 +146,9 @@ #define BRCM_INT_PCI_MSI_NR 32 #define BRCM_INT_PCI_MSI_LEGACY_NR 8 #define BRCM_INT_PCI_MSI_SHIFT 0 +#define BRCM_INT_PCI_MSI_MASK GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0) +#define BRCM_INT_PCI_MSI_LEGACY_MASK GENMASK(31, \ + 32 - BRCM_INT_PCI_MSI_LEGACY_NR) /* MSI target addresses */ #define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL @@ -191,6 +196,8 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val); static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val); static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val); +static int brcm_pcie_linkup(struct brcm_pcie *pcie); +static int brcm_pcie_add_bus(struct pci_bus *bus); enum { RGR1_SW_INIT_1, @@ -205,6 +212,8 @@ enum { enum pcie_type { GENERIC, + BCM7425, + BCM7435, BCM4908, BCM7278, BCM2711, @@ -223,6 +232,12 @@ static const int pcie_offsets[] = { [EXT_CFG_DATA] = 0x9004, }; +static const int pcie_offsets_bmips_7425[] = { + [RGR1_SW_INIT_1] = 0x8010, + [EXT_CFG_INDEX] = 0x8300, + [EXT_CFG_DATA] = 0x8304, +}; + static const struct pcie_cfg_data generic_cfg = { .offsets = pcie_offsets, .type = GENERIC, @@ -230,6 +245,20 @@ static const struct pcie_cfg_data generic_cfg = { .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, }; +static const struct pcie_cfg_data bcm7425_cfg = { + .offsets = pcie_offsets_bmips_7425, + .type = BCM7425, + .perst_set = brcm_pcie_perst_set_generic, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const struct pcie_cfg_data bcm7435_cfg = { + .offsets = pcie_offsets, + .type = BCM7435, + .perst_set = brcm_pcie_perst_set_generic, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + static const struct pcie_cfg_data bcm4908_cfg = { .offsets = pcie_offsets, .type = BCM4908, @@ -257,6 +286,14 @@ static const struct pcie_cfg_data bcm2711_cfg = { .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, }; +struct subdev_regulators { + unsigned int num_supplies; + struct regulator_bulk_data supplies[]; +}; + +static int pci_subdev_regulators_add_bus(struct pci_bus 
*bus); +static void pci_subdev_regulators_remove_bus(struct pci_bus *bus); + struct brcm_msi { struct device *dev; void __iomem *base; @@ -266,8 +303,7 @@ struct brcm_msi { struct mutex lock; /* guards the alloc/free operations */ u64 target_addr; int irq; - /* used indicates which MSI interrupts have been alloc'd */ - unsigned long used; + DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR); bool legacy; /* Some chips have MSIs in bits [31..24] of a shared register. */ int legacy_shift; @@ -295,8 +331,16 @@ struct brcm_pcie { u32 hw_rev; void (*perst_set)(struct brcm_pcie *pcie, u32 val); void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); + bool refusal_mode; + struct subdev_regulators *sr; + bool ep_wakeup_capable; }; +static inline bool is_bmips(const struct brcm_pcie *pcie) +{ + return pcie->type == BCM7435 || pcie->type == BCM7425; +} + /* * This is to convert the size of the inbound "BAR" region to the * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE @@ -406,6 +450,99 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) return ssc && pll ? 0 : -EIO; } +static void *alloc_subdev_regulators(struct device *dev) +{ + static const char * const supplies[] = { + "vpcie3v3", + "vpcie3v3aux", + "vpcie12v", + }; + const size_t size = sizeof(struct subdev_regulators) + + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies); + struct subdev_regulators *sr; + int i; + + sr = devm_kzalloc(dev, size, GFP_KERNEL); + if (sr) { + sr->num_supplies = ARRAY_SIZE(supplies); + for (i = 0; i < ARRAY_SIZE(supplies); i++) + sr->supplies[i].supply = supplies[i]; + } + + return sr; +} + +static int pci_subdev_regulators_add_bus(struct pci_bus *bus) +{ + struct device *dev = &bus->dev; + struct subdev_regulators *sr; + int ret; + + if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent)) + return 0; + + if (dev->driver_data) + dev_err(dev, "dev.driver_data unexpectedly non-NULL\n"); + + sr = alloc_subdev_regulators(dev); + if (!sr) + return -ENOMEM; + + dev->driver_data = sr; + ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); + if (ret) + return ret; + + ret = regulator_bulk_enable(sr->num_supplies, sr->supplies); + if (ret) { + dev_err(dev, "failed to enable regulators for downstream device\n"); + return ret; + } + + return 0; +} + +static int brcm_pcie_add_bus(struct pci_bus *bus) +{ + struct device *dev = &bus->dev; + struct brcm_pcie *pcie = (struct brcm_pcie *) bus->sysdata; + int ret; + + if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent)) + return 0; + + ret = pci_subdev_regulators_add_bus(bus); + if (ret) + return ret; + + /* Grab the regulators for suspend/resume */ + pcie->sr = bus->dev.driver_data; + + /* + * If we have failed linkup, there is no point in returning an error, + * as currently it will cause a WARNING() from pci_alloc_child_bus().
+ * We return 0 and turn on the "refusal_mode" so that any further + * accesses to the pci_dev just get 0xffffffff. + */ + if (brcm_pcie_linkup(pcie) != 0) + pcie->refusal_mode = true; + + return 0; +} + +static void pci_subdev_regulators_remove_bus(struct pci_bus *bus) +{ + struct device *dev = &bus->dev; + struct subdev_regulators *sr = dev->driver_data; + + if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) + return; + + if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) + dev_err(dev, "failed to disable regulators for downstream device\n"); + dev->driver_data = NULL; +} + /* Limits operation to a specific generation (1, 2, or 3) */ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { @@ -443,6 +580,9 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK); writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win)); + if (is_bmips(pcie)) + return; + /* Write the cpu & limit addr upper bits */ high_addr_shift = HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK); @@ -534,7 +674,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi) int hwirq; mutex_lock(&msi->lock); - hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0); + hwirq = bitmap_find_free_region(msi->used, msi->nr, 0); mutex_unlock(&msi->lock); return hwirq; @@ -543,7 +683,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi) static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq) { mutex_lock(&msi->lock); - bitmap_release_region(&msi->used, hwirq, 0); + bitmap_release_region(msi->used, hwirq, 0); mutex_unlock(&msi->lock); } @@ -619,7 +759,8 @@ static void brcm_msi_remove(struct brcm_pcie *pcie) static void brcm_msi_set_regs(struct brcm_msi *msi) { - u32 val = __GENMASK(31, msi->legacy_shift); + u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK : + BRCM_INT_PCI_MSI_MASK; writel(val, msi->intr_base + MSI_INT_MASK_CLR); writel(val, msi->intr_base + MSI_INT_CLR); @@ -661,6 +802,12 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie) msi->irq = irq; msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33; + /* + * Sanity check to make sure that the 'used' bitmap in struct brcm_msi + * is large enough. + */ + BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR); + if (msi->legacy) { msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE; msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR; @@ -711,6 +858,18 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn, /* Accesses to the RC go right to the RC registers if slot==0 */ if (pci_is_root_bus(bus)) return PCI_SLOT(devfn) ? NULL : base + where; + if (pcie->refusal_mode) { + /* + * At this point we do not have link. There will be a CPU + * abort -- a quirk with this controller -- if Linux tries + * to read any config-space registers besides those + * targeting the host bridge. To prevent this we hijack + * the address to point to a safe access that will return + * 0xffffffff.
+ */ + writel(0xffffffff, base + PCIE_MISC_RC_BAR2_CONFIG_HI); + return base + PCIE_MISC_RC_BAR2_CONFIG_HI + (where & 0x3); + } /* For devices, write to the config space index register */ idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0); @@ -718,10 +877,35 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn, return base + PCIE_EXT_CFG_DATA + where; } +static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct brcm_pcie *pcie = bus->sysdata; + void __iomem *base = pcie->base; + int idx; + + /* Accesses to the RC go right to the RC registers if slot==0 */ + if (pci_is_root_bus(bus)) + return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3); + + /* For devices, write to the config space index register */ + idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3)); + writel(idx, base + IDX_ADDR(pcie)); + return base + DATA_ADDR(pcie); +} + static struct pci_ops brcm_pcie_ops = { .map_bus = brcm_pcie_map_conf, .read = pci_generic_config_read, .write = pci_generic_config_write, + .add_bus = brcm_pcie_add_bus, + .remove_bus = pci_subdev_regulators_remove_bus, +}; + +static struct pci_ops brcm_pcie_ops32 = { + .map_bus = brcm_pcie_map_conf32, + .read = pci_generic_config_read32, + .write = pci_generic_config_write32, }; static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val) @@ -863,16 +1047,9 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, static int brcm_pcie_setup(struct brcm_pcie *pcie) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); u64 rc_bar2_offset, rc_bar2_size; void __iomem *base = pcie->base; - struct device *dev = pcie->dev; - struct resource_entry *entry; - bool ssc_good = false; - struct resource *res; - int num_out_wins = 0; - u16 nlw, cls, lnksta; - int i, ret, memc; + int ret, memc; u32 tmp, burst, aspm_support; /* Reset the bridge */ @@ -883,7 +1060,10 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) pcie->bridge_sw_init_set(pcie, 0); tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); - tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; + if (is_bmips(pcie)) + tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; + else + tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); /* Wait for SerDes to be stable */ usleep_range(100, 200); @@ -893,8 +1073,10 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it * is encoded as 0=Rsvd, 1=128, 2=256, 3=512. */ - if (pcie->type == BCM2711) - burst = 0x0; /* 128B */ + if (is_bmips(pcie)) + burst = 0x1; /* 256 bytes */ + else if (pcie->type == BCM2711) + burst = 0x0; /* 128 bytes */ else if (pcie->type == BCM7278) burst = 0x3; /* 512 bytes */ else @@ -957,6 +1139,40 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) if (pcie->gen) brcm_pcie_set_gen(pcie, pcie->gen); + /* Don't advertise L0s capability if 'aspm-no-l0s' */ + aspm_support = PCIE_LINK_STATE_L1; + if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) + aspm_support |= PCIE_LINK_STATE_L0S; + tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); + u32p_replace_bits(&tmp, aspm_support, + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); + writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); + + /* + * For config space accesses on the RC, show the right class for + * a PCIe-PCIe bridge (the default setting is to be EP mode). 
+ */ + tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3); + u32p_replace_bits(&tmp, 0x060400, + PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK); + writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3); + + return 0; +} + +static int brcm_pcie_linkup(struct brcm_pcie *pcie) +{ + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + struct device *dev = pcie->dev; + void __iomem *base = pcie->base; + struct resource_entry *entry; + struct resource *res; + int num_out_wins = 0; + u16 nlw, cls, lnksta; + bool ssc_good = false; + u32 tmp; + int ret, i; + /* Unassert the fundamental reset */ pcie->perst_set(pcie, 0); @@ -988,30 +1204,25 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) return -EINVAL; } + if (is_bmips(pcie)) { + u64 start = res->start; + unsigned int j, nwins = resource_size(res) / SZ_128M; + + /* bmips PCIe outbound windows have a 128MB max size */ + if (nwins > BRCM_NUM_PCIE_OUT_WINS) + nwins = BRCM_NUM_PCIE_OUT_WINS; + for (j = 0; j < nwins; j++, start += SZ_128M) + brcm_pcie_set_outbound_win(pcie, j, start, + start - entry->offset, + SZ_128M); + break; + } brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start, res->start - entry->offset, resource_size(res)); num_out_wins++; } - /* Don't advertise L0s capability if 'aspm-no-l0s' */ - aspm_support = PCIE_LINK_STATE_L1; - if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) - aspm_support |= PCIE_LINK_STATE_L0S; - tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); - u32p_replace_bits(&tmp, aspm_support, - PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); - writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); - - /* - * For config space accesses on the RC, show the right class for - * a PCIe-PCIe bridge (the default setting is to be EP mode). - */ - tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3); - u32p_replace_bits(&tmp, 0x060400, - PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK); - writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3); - if (pcie->ssc) { ret = brcm_pcie_set_ssc(pcie); if (ret == 0) @@ -1140,17 +1351,60 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie) pcie->bridge_sw_init_set(pcie, 1); } +static int pci_dev_may_wakeup(struct pci_dev *dev, void *data) +{ + bool *ret = data; + + if (device_may_wakeup(&dev->dev)) { + *ret = true; + dev_info(&dev->dev, "disable cancelled for wake-up device\n"); + } + return (int) *ret; +} + static int brcm_pcie_suspend(struct device *dev) { struct brcm_pcie *pcie = dev_get_drvdata(dev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); int ret; brcm_pcie_turn_off(pcie); - ret = brcm_phy_stop(pcie); - reset_control_rearm(pcie->rescal); + /* + * If brcm_phy_stop() returns an error, just dev_err(). If we + * return the error it will cause the suspend to fail and this is a + * forgivable offense that will probably be erased on resume. + */ + if (brcm_phy_stop(pcie)) + dev_err(dev, "Could not stop phy for suspend\n"); + + ret = reset_control_rearm(pcie->rescal); + if (ret) { + dev_err(dev, "Could not rearm rescal reset\n"); + return ret; + } + + if (pcie->sr) { + /* + * Now turn off the regulators, but if at least one + * downstream device is enabled as a wake-up source, do not + * turn off regulators. 
+ */ + pcie->ep_wakeup_capable = false; + pci_walk_bus(bridge->bus, pci_dev_may_wakeup, + &pcie->ep_wakeup_capable); + if (!pcie->ep_wakeup_capable) { + ret = regulator_bulk_disable(pcie->sr->num_supplies, + pcie->sr->supplies); + if (ret) { + dev_err(dev, "Could not turn off regulators\n"); + reset_control_reset(pcie->rescal); + return ret; + } + } + } clk_disable_unprepare(pcie->clk); - return ret; + return 0; } static int brcm_pcie_resume(struct device *dev) @@ -1161,11 +1415,32 @@ static int brcm_pcie_resume(struct device *dev) int ret; base = pcie->base; - clk_prepare_enable(pcie->clk); + ret = clk_prepare_enable(pcie->clk); + if (ret) + return ret; + + if (pcie->sr) { + if (pcie->ep_wakeup_capable) { + /* + * We are resuming from a suspend. In the suspend we + * did not disable the power supplies, so there is + * no need to enable them (and falsely increase their + * usage count). + */ + pcie->ep_wakeup_capable = false; + } else { + ret = regulator_bulk_enable(pcie->sr->num_supplies, + pcie->sr->supplies); + if (ret) { + dev_err(dev, "Could not turn on regulators\n"); + goto err_disable_clk; + } + } + } ret = reset_control_reset(pcie->rescal); if (ret) - goto err_disable_clk; + goto err_regulator; ret = brcm_phy_start(pcie); if (ret) @@ -1186,6 +1461,10 @@ static int brcm_pcie_resume(struct device *dev) if (ret) goto err_reset; + ret = brcm_pcie_linkup(pcie); + if (ret) + goto err_reset; + if (pcie->msi) brcm_msi_set_regs(pcie->msi); @@ -1193,6 +1472,9 @@ static int brcm_pcie_resume(struct device *dev) err_reset: reset_control_rearm(pcie->rescal); +err_regulator: + if (pcie->sr) + regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies); err_disable_clk: clk_disable_unprepare(pcie->clk); return ret; @@ -1202,8 +1484,10 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie) { brcm_msi_remove(pcie); brcm_pcie_turn_off(pcie); - brcm_phy_stop(pcie); - reset_control_rearm(pcie->rescal); + if (brcm_phy_stop(pcie)) + dev_err(pcie->dev, "Could not stop phy\n"); + if (reset_control_rearm(pcie->rescal)) + dev_err(pcie->dev, "Could not rearm rescal reset\n"); clk_disable_unprepare(pcie->clk); } @@ -1226,6 +1510,8 @@ static const struct of_device_id brcm_pcie_match[] = { { .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg }, { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg }, { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg }, + { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg }, + { .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg }, {}, }; @@ -1315,12 +1601,22 @@ static int brcm_pcie_probe(struct platform_device *pdev) } } - bridge->ops = &brcm_pcie_ops; + bridge->ops = pcie->type == BCM7425 ? 
&brcm_pcie_ops32 : &brcm_pcie_ops; bridge->sysdata = pcie; platform_set_drvdata(pdev, pcie); - return pci_host_probe(bridge); + ret = pci_host_probe(bridge); + if (!ret && !brcm_pcie_link_up(pcie)) + ret = -ENODEV; + + if (ret) { + brcm_pcie_remove(pdev); + return ret; + } + + return 0; + fail: __brcm_pcie_remove(pcie); return ret; @@ -1329,8 +1625,8 @@ fail: MODULE_DEVICE_TABLE(of, brcm_pcie_match); static const struct dev_pm_ops brcm_pcie_pm_ops = { - .suspend = brcm_pcie_suspend, - .resume = brcm_pcie_resume, + .suspend_noirq = brcm_pcie_suspend, + .resume_noirq = brcm_pcie_resume, }; static struct platform_driver brcm_pcie_driver = { diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c index f918c713afb0..54b6e6d5bc64 100644 --- a/drivers/pci/controller/pcie-iproc-bcma.c +++ b/drivers/pci/controller/pcie-iproc-bcma.c @@ -23,7 +23,7 @@ static void bcma_pcie2_fixup_class(struct pci_dev *dev) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); -static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int iproc_bcma_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct iproc_pcie *pcie = dev->sysdata; struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); @@ -31,7 +31,7 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) return bcma_core_irq(bdev, 5); } -static int iproc_pcie_bcma_probe(struct bcma_device *bdev) +static int iproc_bcma_pcie_probe(struct bcma_device *bdev) { struct device *dev = &bdev->dev; struct iproc_pcie *pcie; @@ -64,33 +64,33 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev) if (ret) return ret; - pcie->map_irq = iproc_pcie_bcma_map_irq; + pcie->map_irq = iproc_bcma_pcie_map_irq; bcma_set_drvdata(bdev, pcie); return iproc_pcie_setup(pcie, &bridge->windows); } -static void iproc_pcie_bcma_remove(struct bcma_device *bdev) +static void iproc_bcma_pcie_remove(struct bcma_device *bdev) { struct iproc_pcie *pcie = bcma_get_drvdata(bdev); iproc_pcie_remove(pcie); } -static const struct bcma_device_id iproc_pcie_bcma_table[] = { +static const struct bcma_device_id iproc_bcma_pcie_table[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), {}, }; -MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); +MODULE_DEVICE_TABLE(bcma, iproc_bcma_pcie_table); -static struct bcma_driver iproc_pcie_bcma_driver = { +static struct bcma_driver iproc_bcma_pcie_driver = { .name = KBUILD_MODNAME, - .id_table = iproc_pcie_bcma_table, - .probe = iproc_pcie_bcma_probe, - .remove = iproc_pcie_bcma_remove, + .id_table = iproc_bcma_pcie_table, + .probe = iproc_bcma_pcie_probe, + .remove = iproc_bcma_pcie_remove, }; -module_bcma_driver(iproc_pcie_bcma_driver); +module_bcma_driver(iproc_bcma_pcie_driver); MODULE_AUTHOR("Hauke Mehrtens"); MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index b93e7bda101b..538115246c79 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -37,7 +37,7 @@ static const struct of_device_id iproc_pcie_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); -static int iproc_pcie_pltfm_probe(struct platform_device *pdev) +static int iproc_pltfm_pcie_probe(struct platform_device *pdev) { struct device *dev = 
&pdev->dev; struct iproc_pcie *pcie; @@ -115,30 +115,30 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return 0; } -static int iproc_pcie_pltfm_remove(struct platform_device *pdev) +static int iproc_pltfm_pcie_remove(struct platform_device *pdev) { struct iproc_pcie *pcie = platform_get_drvdata(pdev); return iproc_pcie_remove(pcie); } -static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev) +static void iproc_pltfm_pcie_shutdown(struct platform_device *pdev) { struct iproc_pcie *pcie = platform_get_drvdata(pdev); iproc_pcie_shutdown(pcie); } -static struct platform_driver iproc_pcie_pltfm_driver = { +static struct platform_driver iproc_pltfm_pcie_driver = { .driver = { .name = "iproc-pcie", .of_match_table = of_match_ptr(iproc_pcie_of_match_table), }, - .probe = iproc_pcie_pltfm_probe, - .remove = iproc_pcie_pltfm_remove, - .shutdown = iproc_pcie_pltfm_shutdown, + .probe = iproc_pltfm_pcie_probe, + .remove = iproc_pltfm_pcie_remove, + .shutdown = iproc_pltfm_pcie_shutdown, }; -module_platform_driver(iproc_pcie_pltfm_driver); +module_platform_driver(iproc_pltfm_pcie_driver); MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>"); MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver"); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 36b9d2c46cfa..b3e75bc61ff1 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -659,10 +659,8 @@ static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, void __iomem *addr; addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); - if (!addr) { - *val = ~0; + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - } *val = readl(addr); diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 17c59b0d6978..7705d61fba4c 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -79,6 +79,9 @@ #define PCIE_ICMD_PM_REG 0x198 #define PCIE_TURN_OFF_LINK BIT(4) +#define PCIE_MISC_CTRL_REG 0x348 +#define PCIE_DISABLE_DVFSRC_VLT_REQ BIT(1) + #define PCIE_TRANS_TABLE_BASE_REG 0x800 #define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4 #define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8 @@ -110,7 +113,7 @@ struct mtk_msi_set { }; /** - * struct mtk_pcie_port - PCIe port information + * struct mtk_gen3_pcie - PCIe port information * @dev: pointer to PCIe device * @base: IO mapped register base * @reg_base: physical register base @@ -129,7 +132,7 @@ struct mtk_msi_set { * @lock: lock protecting IRQ bit map * @msi_irq_in_use: bit map for assigned MSI IRQ */ -struct mtk_pcie_port { +struct mtk_gen3_pcie { struct device *dev; void __iomem *base; phys_addr_t reg_base; @@ -162,7 +165,7 @@ struct mtk_pcie_port { static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn, int where, int size) { - struct mtk_pcie_port *port = bus->sysdata; + struct mtk_gen3_pcie *pcie = bus->sysdata; int bytes; u32 val; @@ -171,15 +174,15 @@ static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn, val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) | PCIE_CFG_HEADER(bus->number, devfn); - writel_relaxed(val, port->base + PCIE_CFGNUM_REG); + writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG); } static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { - struct mtk_pcie_port *port = bus->sysdata; + struct mtk_gen3_pcie *pcie = bus->sysdata; - return port->base + PCIE_CFG_OFFSET_ADDR + where; + return pcie->base + PCIE_CFG_OFFSET_ADDR 
+ where; } static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, @@ -207,7 +210,7 @@ static struct pci_ops mtk_pcie_ops = { .write = mtk_pcie_config_write, }; -static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port, +static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie, resource_size_t cpu_addr, resource_size_t pci_addr, resource_size_t size, @@ -217,12 +220,12 @@ static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port, u32 val; if (num >= PCIE_MAX_TRANS_TABLES) { - dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", + dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); return -ENODEV; } - table = port->base + PCIE_TRANS_TABLE_BASE_REG + + table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + num * PCIE_ATR_TLB_SET_OFFSET; writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), @@ -244,66 +247,71 @@ static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port, return 0; } -static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) +static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie) { int i; u32 val; for (i = 0; i < PCIE_MSI_SET_NUM; i++) { - struct mtk_msi_set *msi_set = &port->msi_sets[i]; + struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; - msi_set->base = port->base + PCIE_MSI_SET_BASE_REG + + msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG + i * PCIE_MSI_SET_OFFSET; - msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG + + msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG + i * PCIE_MSI_SET_OFFSET; /* Configure the MSI capture address */ writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base); writel_relaxed(upper_32_bits(msi_set->msg_addr), - port->base + PCIE_MSI_SET_ADDR_HI_BASE + + pcie->base + PCIE_MSI_SET_ADDR_HI_BASE + i * PCIE_MSI_SET_ADDR_HI_OFFSET); } - val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG); + val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG); val |= PCIE_MSI_SET_ENABLE; - writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG); + writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG); - val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val |= PCIE_MSI_ENABLE; - writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); + writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); } -static int mtk_pcie_startup_port(struct mtk_pcie_port *port) +static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) { struct resource_entry *entry; - struct pci_host_bridge *host = pci_host_bridge_from_priv(port); + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); unsigned int table_index = 0; int err; u32 val; /* Set as RC mode */ - val = readl_relaxed(port->base + PCIE_SETTING_REG); + val = readl_relaxed(pcie->base + PCIE_SETTING_REG); val |= PCIE_RC_MODE; - writel_relaxed(val, port->base + PCIE_SETTING_REG); + writel_relaxed(val, pcie->base + PCIE_SETTING_REG); /* Set class code */ - val = readl_relaxed(port->base + PCIE_PCI_IDS_1); + val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); val &= ~GENMASK(31, 8); val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8); - writel_relaxed(val, port->base + PCIE_PCI_IDS_1); + writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1); /* Mask all INTx interrupts */ - val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val &= ~PCIE_INTX_ENABLE; - writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); + 
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); + + /* Disable DVFSRC voltage request */ + val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG); + val |= PCIE_DISABLE_DVFSRC_VLT_REQ; + writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG); /* Assert all reset signals */ - val = readl_relaxed(port->base + PCIE_RST_CTRL_REG); + val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB; - writel_relaxed(val, port->base + PCIE_RST_CTRL_REG); + writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); /* - * Described in PCIe CEM specification setctions 2.2 (PERST# Signal) + * Described in PCIe CEM specification sections 2.2 (PERST# Signal) * and 2.2.1 (Initial Power-Up (G3 to S0)). * The deassertion of PERST# should be delayed 100ms (TPVPERL) * for the power and clock to become stable. @@ -312,19 +320,19 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port) /* De-assert reset signals */ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB); - writel_relaxed(val, port->base + PCIE_RST_CTRL_REG); + writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); /* Check if the link is up or not */ - err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val, + err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val, !!(val & PCIE_PORT_LINKUP), 20, PCI_PM_D3COLD_WAIT * USEC_PER_MSEC); if (err) { - val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG); - dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val); + val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG); + dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val); return err; } - mtk_pcie_enable_msi(port); + mtk_pcie_enable_msi(pcie); /* Set PCIe translation windows */ resource_list_for_each_entry(entry, &host->windows) { @@ -347,12 +355,12 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port) pci_addr = res->start - entry->offset; size = resource_size(res); - err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size, + err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size, type, table_index); if (err) return err; - dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", + dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", range_type, table_index, (unsigned long long)cpu_addr, (unsigned long long)pci_addr, (unsigned long long)size); @@ -396,7 +404,7 @@ static struct msi_domain_info mtk_msi_domain_info = { static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); - struct mtk_pcie_port *port = data->domain->host_data; + struct mtk_gen3_pcie *pcie = data->domain->host_data; unsigned long hwirq; hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; @@ -404,7 +412,7 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->address_hi = upper_32_bits(msi_set->msg_addr); msg->address_lo = lower_32_bits(msi_set->msg_addr); msg->data = hwirq; - dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n", + dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n", hwirq, msg->address_hi, msg->address_lo, msg->data); } @@ -421,33 +429,33 @@ static void mtk_msi_bottom_irq_ack(struct irq_data *data) static void mtk_msi_bottom_irq_mask(struct irq_data *data) { struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); - struct mtk_pcie_port *port = data->domain->host_data; + struct mtk_gen3_pcie *pcie = 
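/*
 * Illustrative sketch, not part of this patch: the reset ordering the hunk
 * above implements. All demo_* offsets and bits are hypothetical stand-ins;
 * only the 100 ms figure (TPVPERL, PCIe CEM sections 2.2 and 2.2.1) comes
 * from the code being patched.
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bits.h>

#define DEMO_RST_CTRL	0x148		/* hypothetical reset control offset */
#define DEMO_PE_RSTB	BIT(3)		/* hypothetical PERST# control bit */

static void demo_perst_cycle(void __iomem *base)
{
	u32 val;

	/* Keep PERST# asserted while power and clocks settle. */
	val = readl_relaxed(base + DEMO_RST_CTRL);
	val |= DEMO_PE_RSTB;
	writel_relaxed(val, base + DEMO_RST_CTRL);

	/* TPVPERL: PERST# must stay asserted at least 100 ms. */
	msleep(100);

	/* Release PERST#; the caller then polls for link-up as the hunk above does. */
	val &= ~DEMO_PE_RSTB;
	writel_relaxed(val, base + DEMO_RST_CTRL);
}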
data->domain->host_data; unsigned long hwirq, flags; u32 val; hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; - raw_spin_lock_irqsave(&port->irq_lock, flags); + raw_spin_lock_irqsave(&pcie->irq_lock, flags); val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); val &= ~BIT(hwirq); writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); - raw_spin_unlock_irqrestore(&port->irq_lock, flags); + raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static void mtk_msi_bottom_irq_unmask(struct irq_data *data) { struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); - struct mtk_pcie_port *port = data->domain->host_data; + struct mtk_gen3_pcie *pcie = data->domain->host_data; unsigned long hwirq, flags; u32 val; hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; - raw_spin_lock_irqsave(&port->irq_lock, flags); + raw_spin_lock_irqsave(&pcie->irq_lock, flags); val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); val |= BIT(hwirq); writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); - raw_spin_unlock_irqrestore(&port->irq_lock, flags); + raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static struct irq_chip mtk_msi_bottom_irq_chip = { @@ -463,22 +471,22 @@ static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { - struct mtk_pcie_port *port = domain->host_data; + struct mtk_gen3_pcie *pcie = domain->host_data; struct mtk_msi_set *msi_set; int i, hwirq, set_idx; - mutex_lock(&port->lock); + mutex_lock(&pcie->lock); - hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM, + hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM, order_base_2(nr_irqs)); - mutex_unlock(&port->lock); + mutex_unlock(&pcie->lock); if (hwirq < 0) return -ENOSPC; set_idx = hwirq / PCIE_MSI_IRQS_PER_SET; - msi_set = &port->msi_sets[set_idx]; + msi_set = &pcie->msi_sets[set_idx]; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, @@ -491,15 +499,15 @@ static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain, static void mtk_msi_bottom_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { - struct mtk_pcie_port *port = domain->host_data; + struct mtk_gen3_pcie *pcie = domain->host_data; struct irq_data *data = irq_domain_get_irq_data(domain, virq); - mutex_lock(&port->lock); + mutex_lock(&pcie->lock); - bitmap_release_region(port->msi_irq_in_use, data->hwirq, + bitmap_release_region(pcie->msi_irq_in_use, data->hwirq, order_base_2(nr_irqs)); - mutex_unlock(&port->lock); + mutex_unlock(&pcie->lock); irq_domain_free_irqs_common(domain, virq, nr_irqs); } @@ -511,28 +519,28 @@ static const struct irq_domain_ops mtk_msi_bottom_domain_ops = { static void mtk_intx_mask(struct irq_data *data) { - struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); + struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; u32 val; - raw_spin_lock_irqsave(&port->irq_lock, flags); - val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + raw_spin_lock_irqsave(&pcie->irq_lock, flags); + val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT); - writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); - raw_spin_unlock_irqrestore(&port->irq_lock, flags); + writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); + raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static void mtk_intx_unmask(struct irq_data *data) { - struct mtk_pcie_port *port = 
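/*
 * Illustrative sketch, not part of this patch: the mask/unmask pair above
 * is a read-modify-write of a shared enable register; the raw spinlock
 * with irqsave makes the RMW atomic against other CPUs and against local
 * interrupts. The demo_* names are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/bits.h>

struct demo_irq_ctrl {
	raw_spinlock_t lock;
	void __iomem *enable_reg;
};

static void demo_irq_set_enabled(struct demo_irq_ctrl *c, unsigned int bit,
				 bool enable)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&c->lock, flags);
	val = readl_relaxed(c->enable_reg);
	if (enable)
		val |= BIT(bit);
	else
		val &= ~BIT(bit);
	writel_relaxed(val, c->enable_reg);
	raw_spin_unlock_irqrestore(&c->lock, flags);
}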
irq_data_get_irq_chip_data(data); + struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; u32 val; - raw_spin_lock_irqsave(&port->irq_lock, flags); - val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + raw_spin_lock_irqsave(&pcie->irq_lock, flags); + val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val |= BIT(data->hwirq + PCIE_INTX_SHIFT); - writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); - raw_spin_unlock_irqrestore(&port->irq_lock, flags); + writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); + raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } /** @@ -545,11 +553,11 @@ static void mtk_intx_unmask(struct irq_data *data) */ static void mtk_intx_eoi(struct irq_data *data) { - struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); + struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long hwirq; hwirq = data->hwirq + PCIE_INTX_SHIFT; - writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG); + writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG); } static struct irq_chip mtk_intx_irq_chip = { @@ -573,13 +581,13 @@ static const struct irq_domain_ops intx_domain_ops = { .map = mtk_pcie_intx_map, }; -static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port) +static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) { - struct device *dev = port->dev; + struct device *dev = pcie->dev; struct device_node *intc_node, *node = dev->of_node; int ret; - raw_spin_lock_init(&port->irq_lock); + raw_spin_lock_init(&pcie->irq_lock); /* Setup INTx */ intc_node = of_get_child_by_name(node, "interrupt-controller"); @@ -588,28 +596,28 @@ static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port) return -ENODEV; } - port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); - if (!port->intx_domain) { + pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, + &intx_domain_ops, pcie); + if (!pcie->intx_domain) { dev_err(dev, "failed to create INTx IRQ domain\n"); return -ENODEV; } /* Setup MSI */ - mutex_init(&port->lock); + mutex_init(&pcie->lock); - port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, - &mtk_msi_bottom_domain_ops, port); - if (!port->msi_bottom_domain) { + pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, + &mtk_msi_bottom_domain_ops, pcie); + if (!pcie->msi_bottom_domain) { dev_err(dev, "failed to create MSI bottom domain\n"); ret = -ENODEV; goto err_msi_bottom_domain; } - port->msi_domain = pci_msi_create_irq_domain(dev->fwnode, + pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode, &mtk_msi_domain_info, - port->msi_bottom_domain); - if (!port->msi_domain) { + pcie->msi_bottom_domain); + if (!pcie->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); ret = -ENODEV; goto err_msi_domain; @@ -618,32 +626,32 @@ static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port) return 0; err_msi_domain: - irq_domain_remove(port->msi_bottom_domain); + irq_domain_remove(pcie->msi_bottom_domain); err_msi_bottom_domain: - irq_domain_remove(port->intx_domain); + irq_domain_remove(pcie->intx_domain); return ret; } -static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port) +static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie) { - irq_set_chained_handler_and_data(port->irq, NULL, NULL); + irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); - if (port->intx_domain) - irq_domain_remove(port->intx_domain); + if (pcie->intx_domain) + irq_domain_remove(pcie->intx_domain); - if 
(port->msi_domain) - irq_domain_remove(port->msi_domain); + if (pcie->msi_domain) + irq_domain_remove(pcie->msi_domain); - if (port->msi_bottom_domain) - irq_domain_remove(port->msi_bottom_domain); + if (pcie->msi_bottom_domain) + irq_domain_remove(pcie->msi_bottom_domain); - irq_dispose_mapping(port->irq); + irq_dispose_mapping(pcie->irq); } -static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx) +static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx) { - struct mtk_msi_set *msi_set = &port->msi_sets[set_idx]; + struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx]; unsigned long msi_enable, msi_status; irq_hw_number_t bit, hwirq; @@ -658,59 +666,59 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx) for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) { hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET; - generic_handle_domain_irq(port->msi_bottom_domain, hwirq); + generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq); } } while (true); } static void mtk_pcie_irq_handler(struct irq_desc *desc) { - struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); + struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); unsigned long status; irq_hw_number_t irq_bit = PCIE_INTX_SHIFT; chained_irq_enter(irqchip, desc); - status = readl_relaxed(port->base + PCIE_INT_STATUS_REG); + status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG); for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX + PCIE_INTX_SHIFT) - generic_handle_domain_irq(port->intx_domain, + generic_handle_domain_irq(pcie->intx_domain, irq_bit - PCIE_INTX_SHIFT); irq_bit = PCIE_MSI_SHIFT; for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM + PCIE_MSI_SHIFT) { - mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT); + mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT); - writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG); + writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG); } chained_irq_exit(irqchip, desc); } -static int mtk_pcie_setup_irq(struct mtk_pcie_port *port) +static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie) { - struct device *dev = port->dev; + struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); int err; - err = mtk_pcie_init_irq_domains(port); + err = mtk_pcie_init_irq_domains(pcie); if (err) return err; - port->irq = platform_get_irq(pdev, 0); - if (port->irq < 0) - return port->irq; + pcie->irq = platform_get_irq(pdev, 0); + if (pcie->irq < 0) + return pcie->irq; - irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port); + irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie); return 0; } -static int mtk_pcie_parse_port(struct mtk_pcie_port *port) +static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) { - struct device *dev = port->dev; + struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct resource *regs; int ret; @@ -718,77 +726,77 @@ static int mtk_pcie_parse_port(struct mtk_pcie_port *port) regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); if (!regs) return -EINVAL; - port->base = devm_ioremap_resource(dev, regs); - if (IS_ERR(port->base)) { + pcie->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(pcie->base)) { dev_err(dev, "failed to map register base\n"); - return PTR_ERR(port->base); + return PTR_ERR(pcie->base); } - port->reg_base = regs->start; + pcie->reg_base = regs->start; - port->phy_reset = 
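/*
 * Illustrative sketch, not part of this patch: the chained-handler shape
 * used by mtk_pcie_irq_handler() above. chained_irq_enter()/exit() bracket
 * the demux so the parent irqchip is acked and eoi'd correctly, and each
 * set status bit is forwarded into a child domain. demo_read_status() is
 * a hypothetical stand-in for the MMIO status read.
 */
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>

static unsigned long demo_read_status(void)
{
	return 0;	/* stand-in for readl_relaxed(base + STATUS_REG) */
}

static void demo_chained_handler(struct irq_desc *desc)
{
	/* handler data set via irq_set_chained_handler_and_data() */
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t bit;

	chained_irq_enter(chip, desc);

	status = demo_read_status();
	for_each_set_bit(bit, &status, 32)
		generic_handle_domain_irq(domain, bit);

	chained_irq_exit(chip, desc);
}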
devm_reset_control_get_optional_exclusive(dev, "phy"); - if (IS_ERR(port->phy_reset)) { - ret = PTR_ERR(port->phy_reset); + pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy"); + if (IS_ERR(pcie->phy_reset)) { + ret = PTR_ERR(pcie->phy_reset); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to get PHY reset\n"); return ret; } - port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); - if (IS_ERR(port->mac_reset)) { - ret = PTR_ERR(port->mac_reset); + pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); + if (IS_ERR(pcie->mac_reset)) { + ret = PTR_ERR(pcie->mac_reset); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to get MAC reset\n"); return ret; } - port->phy = devm_phy_optional_get(dev, "pcie-phy"); - if (IS_ERR(port->phy)) { - ret = PTR_ERR(port->phy); + pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); + if (IS_ERR(pcie->phy)) { + ret = PTR_ERR(pcie->phy); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to get PHY\n"); return ret; } - port->num_clks = devm_clk_bulk_get_all(dev, &port->clks); - if (port->num_clks < 0) { + pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks); + if (pcie->num_clks < 0) { dev_err(dev, "failed to get clocks\n"); - return port->num_clks; + return pcie->num_clks; } return 0; } -static int mtk_pcie_power_up(struct mtk_pcie_port *port) +static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie) { - struct device *dev = port->dev; + struct device *dev = pcie->dev; int err; /* PHY power on and enable pipe clock */ - reset_control_deassert(port->phy_reset); + reset_control_deassert(pcie->phy_reset); - err = phy_init(port->phy); + err = phy_init(pcie->phy); if (err) { dev_err(dev, "failed to initialize PHY\n"); goto err_phy_init; } - err = phy_power_on(port->phy); + err = phy_power_on(pcie->phy); if (err) { dev_err(dev, "failed to power on PHY\n"); goto err_phy_on; } /* MAC power on and enable transaction layer clocks */ - reset_control_deassert(port->mac_reset); + reset_control_deassert(pcie->mac_reset); pm_runtime_enable(dev); pm_runtime_get_sync(dev); - err = clk_bulk_prepare_enable(port->num_clks, port->clks); + err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); if (err) { dev_err(dev, "failed to enable clocks\n"); goto err_clk_init; @@ -799,55 +807,55 @@ static int mtk_pcie_power_up(struct mtk_pcie_port *port) err_clk_init: pm_runtime_put_sync(dev); pm_runtime_disable(dev); - reset_control_assert(port->mac_reset); - phy_power_off(port->phy); + reset_control_assert(pcie->mac_reset); + phy_power_off(pcie->phy); err_phy_on: - phy_exit(port->phy); + phy_exit(pcie->phy); err_phy_init: - reset_control_assert(port->phy_reset); + reset_control_assert(pcie->phy_reset); return err; } -static void mtk_pcie_power_down(struct mtk_pcie_port *port) +static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie) { - clk_bulk_disable_unprepare(port->num_clks, port->clks); + clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); - pm_runtime_put_sync(port->dev); - pm_runtime_disable(port->dev); - reset_control_assert(port->mac_reset); + pm_runtime_put_sync(pcie->dev); + pm_runtime_disable(pcie->dev); + reset_control_assert(pcie->mac_reset); - phy_power_off(port->phy); - phy_exit(port->phy); - reset_control_assert(port->phy_reset); + phy_power_off(pcie->phy); + phy_exit(pcie->phy); + reset_control_assert(pcie->phy_reset); } -static int mtk_pcie_setup(struct mtk_pcie_port *port) +static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) { int err; - err = mtk_pcie_parse_port(port); + err = mtk_pcie_parse_port(pcie); if 
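/*
 * Illustrative aside, not part of this patch: the "if (ret != -EPROBE_DEFER)
 * dev_err(...)" pattern in the getters above is what dev_err_probe()
 * encapsulates - it logs only when the error is not a deferral and returns
 * the error code. A hypothetical rewrite of one getter:
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int demo_get_phy_reset(struct device *dev, struct reset_control **out)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(rst))
		return dev_err_probe(dev, PTR_ERR(rst),
				     "failed to get PHY reset\n");

	*out = rst;
	return 0;
}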
(err) return err; /* Don't touch the hardware registers before power up */ - err = mtk_pcie_power_up(port); + err = mtk_pcie_power_up(pcie); if (err) return err; /* Try link up */ - err = mtk_pcie_startup_port(port); + err = mtk_pcie_startup_port(pcie); if (err) goto err_setup; - err = mtk_pcie_setup_irq(port); + err = mtk_pcie_setup_irq(pcie); if (err) goto err_setup; return 0; err_setup: - mtk_pcie_power_down(port); + mtk_pcie_power_down(pcie); return err; } @@ -855,30 +863,30 @@ err_setup: static int mtk_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct mtk_pcie_port *port; + struct mtk_gen3_pcie *pcie; struct pci_host_bridge *host; int err; - host = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!host) return -ENOMEM; - port = pci_host_bridge_priv(host); + pcie = pci_host_bridge_priv(host); - port->dev = dev; - platform_set_drvdata(pdev, port); + pcie->dev = dev; + platform_set_drvdata(pdev, pcie); - err = mtk_pcie_setup(port); + err = mtk_pcie_setup(pcie); if (err) return err; host->ops = &mtk_pcie_ops; - host->sysdata = port; + host->sysdata = pcie; err = pci_host_probe(host); if (err) { - mtk_pcie_irq_teardown(port); - mtk_pcie_power_down(port); + mtk_pcie_irq_teardown(pcie); + mtk_pcie_power_down(pcie); return err; } @@ -887,66 +895,66 @@ static int mtk_pcie_probe(struct platform_device *pdev) static int mtk_pcie_remove(struct platform_device *pdev) { - struct mtk_pcie_port *port = platform_get_drvdata(pdev); - struct pci_host_bridge *host = pci_host_bridge_from_priv(port); + struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); pci_lock_rescan_remove(); pci_stop_root_bus(host->bus); pci_remove_root_bus(host->bus); pci_unlock_rescan_remove(); - mtk_pcie_irq_teardown(port); - mtk_pcie_power_down(port); + mtk_pcie_irq_teardown(pcie); + mtk_pcie_power_down(pcie); return 0; } -static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port) +static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) { int i; - raw_spin_lock(&port->irq_lock); + raw_spin_lock(&pcie->irq_lock); - port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); for (i = 0; i < PCIE_MSI_SET_NUM; i++) { - struct mtk_msi_set *msi_set = &port->msi_sets[i]; + struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; msi_set->saved_irq_state = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); } - raw_spin_unlock(&port->irq_lock); + raw_spin_unlock(&pcie->irq_lock); } -static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port) +static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) { int i; - raw_spin_lock(&port->irq_lock); + raw_spin_lock(&pcie->irq_lock); - writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG); + writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG); for (i = 0; i < PCIE_MSI_SET_NUM; i++) { - struct mtk_msi_set *msi_set = &port->msi_sets[i]; + struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; writel_relaxed(msi_set->saved_irq_state, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); } - raw_spin_unlock(&port->irq_lock); + raw_spin_unlock(&pcie->irq_lock); } -static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port) +static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) { u32 val; - val = readl_relaxed(port->base + 
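/*
 * Illustrative sketch, not part of this patch: the L2-entry wait that
 * mtk_pcie_turn_off_link() (begun just above, continuing below) performs -
 * poll an LTSSM state field until it reads "L2 idle". The demo_* offsets,
 * mask and state encoding are hypothetical; the 20 us / 50 ms poll budget
 * matches the driver.
 */
#include <linux/iopoll.h>
#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_LTSSM_STATUS	0x150		/* hypothetical offset */
#define DEMO_LTSSM_STATE	GENMASK(28, 24)	/* hypothetical state field */
#define DEMO_LTSSM_L2_IDLE	0x14		/* hypothetical encoding */

static int demo_wait_for_l2_idle(void __iomem *base)
{
	u32 val;

	return readl_poll_timeout(base + DEMO_LTSSM_STATUS, val,
				  FIELD_GET(DEMO_LTSSM_STATE, val) ==
				  DEMO_LTSSM_L2_IDLE,
				  20, 50 * 1000);
}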
PCIE_ICMD_PM_REG); + val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG); val |= PCIE_TURN_OFF_LINK; - writel_relaxed(val, port->base + PCIE_ICMD_PM_REG); + writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG); /* Check the link is L2 */ - return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val, + return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val, (PCIE_LTSSM_STATE(val) == PCIE_LTSSM_STATE_L2_IDLE), 20, 50 * USEC_PER_MSEC); @@ -954,46 +962,46 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port) static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) { - struct mtk_pcie_port *port = dev_get_drvdata(dev); + struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); int err; u32 val; /* Trigger link to L2 state */ - err = mtk_pcie_turn_off_link(port); + err = mtk_pcie_turn_off_link(pcie); if (err) { - dev_err(port->dev, "cannot enter L2 state\n"); + dev_err(pcie->dev, "cannot enter L2 state\n"); return err; } /* Pull down the PERST# pin */ - val = readl_relaxed(port->base + PCIE_RST_CTRL_REG); + val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); val |= PCIE_PE_RSTB; - writel_relaxed(val, port->base + PCIE_RST_CTRL_REG); + writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); - dev_dbg(port->dev, "entered L2 states successfully"); + dev_dbg(pcie->dev, "entered L2 states successfully"); - mtk_pcie_irq_save(port); - mtk_pcie_power_down(port); + mtk_pcie_irq_save(pcie); + mtk_pcie_power_down(pcie); return 0; } static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) { - struct mtk_pcie_port *port = dev_get_drvdata(dev); + struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); int err; - err = mtk_pcie_power_up(port); + err = mtk_pcie_power_up(pcie); if (err) return err; - err = mtk_pcie_startup_port(port); + err = mtk_pcie_startup_port(pcie); if (err) { - mtk_pcie_power_down(port); + mtk_pcie_power_down(pcie); return err; } - mtk_pcie_irq_restore(port); + mtk_pcie_irq_restore(pcie); return 0; } diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 2f3f974977a3..ddfbd4aebdec 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -365,19 +365,12 @@ static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, { struct mtk_pcie_port *port; u32 bn = bus->number; - int ret; port = mtk_pcie_find_port(bus, devfn); - if (!port) { - *val = ~0; + if (!port) return PCIBIOS_DEVICE_NOT_FOUND; - } - - ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); - if (ret) - *val = ~0; - return ret; + return mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); } static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, @@ -702,6 +695,13 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) */ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); + /* + * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and + * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should + * be delayed 100ms (TPVPERL) for the power and clock to become stable. 
+ */ + msleep(100); + /* De-assert PHY, PE, PIPE, MAC and configuration reset */ val = readl(port->base + PCIE_RST_CTRL); val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c index 329f930d17aa..29d8e81e4181 100644 --- a/drivers/pci/controller/pcie-microchip-host.c +++ b/drivers/pci/controller/pcie-microchip-host.c @@ -262,7 +262,7 @@ struct mc_msi { DECLARE_BITMAP(used, MC_NUM_MSI_IRQS); }; -struct mc_port { +struct mc_pcie { void __iomem *axi_base_addr; struct device *dev; struct irq_domain *intx_domain; @@ -382,7 +382,7 @@ static struct { static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" }; -static void mc_pcie_enable_msi(struct mc_port *port, void __iomem *base) +static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base) { struct mc_msi *msi = &port->msi; u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET; @@ -405,7 +405,7 @@ static void mc_pcie_enable_msi(struct mc_port *port, void __iomem *base) static void mc_handle_msi(struct irq_desc *desc) { - struct mc_port *port = irq_desc_get_handler_data(desc); + struct mc_pcie *port = irq_desc_get_handler_data(desc); struct device *dev = port->dev; struct mc_msi *msi = &port->msi; void __iomem *bridge_base_addr = @@ -428,7 +428,7 @@ static void mc_handle_msi(struct irq_desc *desc) static void mc_msi_bottom_irq_ack(struct irq_data *data) { - struct mc_port *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; u32 bitpos = data->hwirq; @@ -443,7 +443,7 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data) static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { - struct mc_port *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); phys_addr_t addr = port->msi.vector_phy; msg->address_lo = lower_32_bits(addr); @@ -470,7 +470,7 @@ static struct irq_chip mc_msi_bottom_irq_chip = { static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { - struct mc_port *port = domain->host_data; + struct mc_pcie *port = domain->host_data; struct mc_msi *msi = &port->msi; void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; @@ -503,7 +503,7 @@ static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct mc_port *port = irq_data_get_irq_chip_data(d); + struct mc_pcie *port = irq_data_get_irq_chip_data(d); struct mc_msi *msi = &port->msi; mutex_lock(&msi->lock); @@ -534,7 +534,7 @@ static struct msi_domain_info mc_msi_domain_info = { .chip = &mc_msi_irq_chip, }; -static int mc_allocate_msi_domains(struct mc_port *port) +static int mc_allocate_msi_domains(struct mc_pcie *port) { struct device *dev = port->dev; struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); @@ -562,7 +562,7 @@ static int mc_allocate_msi_domains(struct mc_port *port) static void mc_handle_intx(struct irq_desc *desc) { - struct mc_port *port = irq_desc_get_handler_data(desc); + struct mc_pcie *port = irq_desc_get_handler_data(desc); struct device *dev = port->dev; void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; @@ -585,7 +585,7 @@ static void mc_handle_intx(struct irq_desc *desc) static void mc_ack_intx_irq(struct irq_data *data) { - struct mc_port *port 
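/*
 * Illustrative aside, not part of this patch: several hunks in this series
 * (mtk_pcie_config_read() above, the rcar and rockchip reads below) drop
 * the manual "*val = ~0" on failure because the PCI core now fabricates
 * the all-ones response itself, and PCI_POSSIBLE_ERROR() tests for it.
 * A hypothetical read op after that cleanup, with demo_map_bus() as a
 * stand-in for the driver's real mapping:
 */
#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *demo_map_bus(struct pci_bus *bus, unsigned int devfn,
				  int where)
{
	return NULL;	/* stand-in; a real driver returns config space here */
}

static int demo_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr = demo_map_bus(bus, devfn, where);

	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND; /* core sets *val to ~0 */

	*val = readl(addr);
	return PCIBIOS_SUCCESSFUL;
}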
= irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); @@ -595,7 +595,7 @@ static void mc_ack_intx_irq(struct irq_data *data) static void mc_mask_intx_irq(struct irq_data *data) { - struct mc_port *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; unsigned long flags; @@ -611,7 +611,7 @@ static void mc_mask_intx_irq(struct irq_data *data) static void mc_unmask_intx_irq(struct irq_data *data) { - struct mc_port *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; unsigned long flags; @@ -698,7 +698,7 @@ static u32 local_events(void __iomem *addr) return val; } -static u32 get_events(struct mc_port *port) +static u32 get_events(struct mc_pcie *port) { void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; @@ -715,7 +715,7 @@ static u32 get_events(struct mc_port *port) static irqreturn_t mc_event_handler(int irq, void *dev_id) { - struct mc_port *port = dev_id; + struct mc_pcie *port = dev_id; struct device *dev = port->dev; struct irq_data *data; @@ -731,7 +731,7 @@ static irqreturn_t mc_event_handler(int irq, void *dev_id) static void mc_handle_event(struct irq_desc *desc) { - struct mc_port *port = irq_desc_get_handler_data(desc); + struct mc_pcie *port = irq_desc_get_handler_data(desc); unsigned long events; u32 bit; struct irq_chip *chip = irq_desc_get_chip(desc); @@ -748,7 +748,7 @@ static void mc_handle_event(struct irq_desc *desc) static void mc_ack_event_irq(struct irq_data *data) { - struct mc_port *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); u32 event = data->hwirq; void __iomem *addr; u32 mask; @@ -763,7 +763,7 @@ static void mc_ack_event_irq(struct irq_data *data) static void mc_mask_event_irq(struct irq_data *data) { - struct mc_port *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); u32 event = data->hwirq; void __iomem *addr; u32 mask; @@ -793,7 +793,7 @@ static void mc_mask_event_irq(struct irq_data *data) static void mc_unmask_event_irq(struct irq_data *data) { - struct mc_port *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *port = irq_data_get_irq_chip_data(data); u32 event = data->hwirq; void __iomem *addr; u32 mask; @@ -881,7 +881,7 @@ static int mc_pcie_init_clks(struct device *dev) return 0; } -static int mc_pcie_init_irq_domains(struct mc_port *port) +static int mc_pcie_init_irq_domains(struct mc_pcie *port) { struct device *dev = port->dev; struct device_node *node = dev->of_node; @@ -957,7 +957,7 @@ static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index, } static int mc_pcie_setup_windows(struct platform_device *pdev, - struct mc_port *port) + struct mc_pcie *port) { void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; @@ -983,7 +983,7 @@ static int mc_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct platform_device *pdev = to_platform_device(dev); - struct mc_port *port; + struct mc_pcie *port; void __iomem *bridge_base_addr; void __iomem *ctrl_base_addr; int ret; diff --git a/drivers/pci/controller/pcie-mt7621.c 
b/drivers/pci/controller/pcie-mt7621.c index b60dfb45ef7b..3824862ea144 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -93,8 +93,8 @@ struct mt7621_pcie_port { * reset lines are inverted. */ struct mt7621_pcie { - void __iomem *base; struct device *dev; + void __iomem *base; struct list_head ports; bool resets_inverted; }; @@ -129,7 +129,7 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port, writel_relaxed(val, port->base + reg); } -static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot, +static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot, unsigned int func, unsigned int where) { return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) | @@ -140,7 +140,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mt7621_pcie *pcie = bus->sysdata; - u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR); @@ -148,7 +148,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3); } -struct pci_ops mt7621_pci_ops = { +static struct pci_ops mt7621_pcie_ops = { .map_bus = mt7621_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, @@ -156,7 +156,7 @@ struct pci_ops mt7621_pci_ops = { static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) { - u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg); + u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); return pcie_read(pcie, RALINK_PCI_CONFIG_DATA); @@ -165,7 +165,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) static void write_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg, u32 val) { - u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg); + u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA); @@ -208,37 +208,6 @@ static inline void mt7621_control_deassert(struct mt7621_pcie_port *port) reset_control_assert(port->pcie_rst); } -static int setup_cm_memory_region(struct pci_host_bridge *host) -{ - struct mt7621_pcie *pcie = pci_host_bridge_priv(host); - struct device *dev = pcie->dev; - struct resource_entry *entry; - resource_size_t mask; - - entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); - if (!entry) { - dev_err(dev, "cannot get memory resource\n"); - return -EINVAL; - } - - if (mips_cps_numiocu(0)) { - /* - * FIXME: hardware doesn't accept mask values with 1s after - * 0s (e.g. 
0xffef), so it would be great to warn if that's - * about to happen - */ - mask = ~(entry->res->end - entry->res->start); - - write_gcr_reg1_base(entry->res->start); - write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0); - dev_info(dev, "PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n", - (unsigned long long)read_gcr_reg1_base(), - (unsigned long long)read_gcr_reg1_mask()); - } - - return 0; -} - static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie, struct device_node *node, int slot) @@ -505,16 +474,16 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host) { struct mt7621_pcie *pcie = pci_host_bridge_priv(host); - host->ops = &mt7621_pci_ops; + host->ops = &mt7621_pcie_ops; host->sysdata = pcie; return pci_host_probe(host); } -static const struct soc_device_attribute mt7621_pci_quirks_match[] = { +static const struct soc_device_attribute mt7621_pcie_quirks_match[] = { { .soc_id = "mt7621", .revision = "E2" } }; -static int mt7621_pci_probe(struct platform_device *pdev) +static int mt7621_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct soc_device_attribute *attr; @@ -535,7 +504,7 @@ static int mt7621_pci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcie); INIT_LIST_HEAD(&pcie->ports); - attr = soc_device_match(mt7621_pci_quirks_match); + attr = soc_device_match(mt7621_pcie_quirks_match); if (attr) pcie->resets_inverted = true; @@ -557,12 +526,6 @@ static int mt7621_pci_probe(struct platform_device *pdev) goto remove_resets; } - err = setup_cm_memory_region(bridge); - if (err) { - dev_err(dev, "error setting up iocu mem regions\n"); - goto remove_resets; - } - return mt7621_pcie_register_host(bridge); remove_resets: @@ -572,7 +535,7 @@ remove_resets: return err; } -static int mt7621_pci_remove(struct platform_device *pdev) +static int mt7621_pcie_remove(struct platform_device *pdev) { struct mt7621_pcie *pcie = platform_get_drvdata(pdev); struct mt7621_pcie_port *port; @@ -583,18 +546,20 @@ static int mt7621_pci_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mt7621_pci_ids[] = { +static const struct of_device_id mt7621_pcie_ids[] = { { .compatible = "mediatek,mt7621-pci" }, {}, }; -MODULE_DEVICE_TABLE(of, mt7621_pci_ids); +MODULE_DEVICE_TABLE(of, mt7621_pcie_ids); -static struct platform_driver mt7621_pci_driver = { - .probe = mt7621_pci_probe, - .remove = mt7621_pci_remove, +static struct platform_driver mt7621_pcie_driver = { + .probe = mt7621_pcie_probe, + .remove = mt7621_pcie_remove, .driver = { .name = "mt7621-pci", - .of_match_table = of_match_ptr(mt7621_pci_ids), + .of_match_table = of_match_ptr(mt7621_pcie_ids), }, }; -builtin_platform_driver(mt7621_pci_driver); +builtin_platform_driver(mt7621_pcie_driver); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index e12c2d8be05a..38b6e02edfa9 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -50,10 +50,10 @@ struct rcar_msi { */ static void __iomem *pcie_base; /* - * Static copy of bus clock pointer, so we can check whether the clock - * is enabled or not. + * Static copy of PCIe device pointer, so we can check whether the + * device is runtime suspended or not. 
*/ -static struct clk *pcie_bus_clk; +static struct device *pcie_dev; #endif /* Structure representing the PCIe interface */ @@ -159,10 +159,8 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ, bus, devfn, where, val); - if (ret != PCIBIOS_SUCCESSFUL) { - *val = 0xffffffff; + if (ret != PCIBIOS_SUCCESSFUL) return ret; - } if (size == 1) *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff; @@ -792,7 +790,7 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host) #ifdef CONFIG_ARM /* Cache static copy for L1 link state fixup hook on aarch32 */ pcie_base = pcie->base; - pcie_bus_clk = host->bus_clk; + pcie_dev = pcie->dev; #endif return 0; @@ -1062,7 +1060,7 @@ static int rcar_pcie_aarch32_abort_handler(unsigned long addr, spin_lock_irqsave(&pmsr_lock, flags); - if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) { + if (!pcie_base || pm_runtime_suspended(pcie_dev)) { ret = 1; goto unlock_exit; } diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index c52316d0bfd2..45a28880f322 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -221,10 +221,8 @@ static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, { struct rockchip_pcie *rockchip = bus->sysdata; - if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) { - *val = 0xffffffff; + if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; - } if (pci_is_root_bus(bus)) return rockchip_pcie_rd_own_conf(rockchip, where, size, val); diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index 95426df03200..c7cd44ed4dfc 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -99,10 +99,10 @@ #define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11) /** - * struct xilinx_cpm_pcie_port - PCIe port information + * struct xilinx_cpm_pcie - PCIe port information + * @dev: Device pointer * @reg_base: Bridge Register Base * @cpm_base: CPM System Level Control and Status Register(SLCR) Base - * @dev: Device pointer * @intx_domain: Legacy IRQ domain pointer * @cpm_domain: CPM IRQ domain pointer * @cfg: Holds mappings of config space window @@ -110,10 +110,10 @@ * @irq: Error interrupt number * @lock: lock protecting shared register access */ -struct xilinx_cpm_pcie_port { +struct xilinx_cpm_pcie { + struct device *dev; void __iomem *reg_base; void __iomem *cpm_base; - struct device *dev; struct irq_domain *intx_domain; struct irq_domain *cpm_domain; struct pci_config_window *cfg; @@ -122,24 +122,24 @@ struct xilinx_cpm_pcie_port { raw_spinlock_t lock; }; -static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg) +static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg) { return readl_relaxed(port->reg_base + reg); } -static void pcie_write(struct xilinx_cpm_pcie_port *port, +static void pcie_write(struct xilinx_cpm_pcie *port, u32 val, u32 reg) { writel_relaxed(val, port->reg_base + reg); } -static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port) +static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port) { return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) & XILINX_CPM_PCIE_REG_PSCR_LNKUP); } -static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port) +static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port) { unsigned long val = pcie_read(port, 
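/*
 * Illustrative sketch, not part of this patch: the rcar hunk above swaps a
 * clock-enabled check for pm_runtime_suspended() before the abort handler
 * touches registers - if the device is runtime suspended, its clocks are
 * off and the MMIO access would fault again. The demo_* names are
 * hypothetical.
 */
#include <linux/pm_runtime.h>
#include <linux/errno.h>
#include <linux/io.h>

static int demo_fixup_read(struct device *dev, void __iomem *base,
			   u32 reg, u32 *val)
{
	if (!base || pm_runtime_suspended(dev))
		return -ENODEV;

	*val = readl(base + reg);
	return 0;
}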
XILINX_CPM_PCIE_REG_RPEFR); @@ -153,7 +153,7 @@ static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port) static void xilinx_cpm_mask_leg_irq(struct irq_data *data) { - struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data); + struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data); unsigned long flags; u32 mask; u32 val; @@ -167,7 +167,7 @@ static void xilinx_cpm_mask_leg_irq(struct irq_data *data) static void xilinx_cpm_unmask_leg_irq(struct irq_data *data) { - struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data); + struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data); unsigned long flags; u32 mask; u32 val; @@ -211,7 +211,7 @@ static const struct irq_domain_ops intx_domain_ops = { static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc) { - struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc); + struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long val; int i; @@ -229,7 +229,7 @@ static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc) static void xilinx_cpm_mask_event_irq(struct irq_data *d) { - struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d); + struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d); u32 val; raw_spin_lock(&port->lock); @@ -241,7 +241,7 @@ static void xilinx_cpm_mask_event_irq(struct irq_data *d) static void xilinx_cpm_unmask_event_irq(struct irq_data *d) { - struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d); + struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d); u32 val; raw_spin_lock(&port->lock); @@ -273,7 +273,7 @@ static const struct irq_domain_ops event_domain_ops = { static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc) { - struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc); + struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long val; int i; @@ -327,7 +327,7 @@ static const struct { static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id) { - struct xilinx_cpm_pcie_port *port = dev_id; + struct xilinx_cpm_pcie *port = dev_id; struct device *dev = port->dev; struct irq_data *d; @@ -350,7 +350,7 @@ static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port) +static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port) { if (port->intx_domain) { irq_domain_remove(port->intx_domain); @@ -369,7 +369,7 @@ static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port) * * Return: '0' on success and error value on failure */ -static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port) +static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port) { struct device *dev = port->dev; struct device_node *node = dev->of_node; @@ -410,7 +410,7 @@ out: return -ENOMEM; } -static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port) +static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port) { struct device *dev = port->dev; struct platform_device *pdev = to_platform_device(dev); @@ -462,7 +462,7 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port) * xilinx_cpm_pcie_init_port - Initialize hardware * @port: PCIe port information */ -static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port) +static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port) { if 
(cpm_pcie_link_up(port)) dev_info(port->dev, "PCIe Link is UP\n"); @@ -497,7 +497,7 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port) * * Return: '0' on success and error value on failure */ -static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port, +static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port, struct resource *bus_range) { struct device *dev = port->dev; @@ -523,7 +523,7 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port, return 0; } -static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port) +static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port) { irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL); irq_set_chained_handler_and_data(port->irq, NULL, NULL); @@ -537,7 +537,7 @@ static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port) */ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) { - struct xilinx_cpm_pcie_port *port; + struct xilinx_cpm_pcie *port; struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct resource_entry *bus; diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index a72b4f9a2b00..40d070e54ad2 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -146,7 +146,7 @@ struct nwl_msi { /* MSI information */ struct irq_domain *msi_domain; - unsigned long *bitmap; + DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR); struct irq_domain *dev_domain; struct mutex lock; /* protect bitmap variable */ int irq_msi0; @@ -335,12 +335,10 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc) static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg) { - struct nwl_msi *msi; + struct nwl_msi *msi = &pcie->msi; unsigned long status; u32 bit; - msi = &pcie->msi; - while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) { for_each_set_bit(bit, &status, 32) { nwl_bridge_writel(pcie, 1 << bit, status_reg); @@ -560,30 +558,21 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) struct nwl_msi *msi = &pcie->msi; unsigned long base; int ret; - int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long); mutex_init(&msi->lock); - msi->bitmap = kzalloc(size, GFP_KERNEL); - if (!msi->bitmap) - return -ENOMEM; - /* Get msi_1 IRQ number */ msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); - if (msi->irq_msi1 < 0) { - ret = -EINVAL; - goto err; - } + if (msi->irq_msi1 < 0) + return -EINVAL; irq_set_chained_handler_and_data(msi->irq_msi1, nwl_pcie_msi_handler_high, pcie); /* Get msi_0 IRQ number */ msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); - if (msi->irq_msi0 < 0) { - ret = -EINVAL; - goto err; - } + if (msi->irq_msi0 < 0) + return -EINVAL; irq_set_chained_handler_and_data(msi->irq_msi0, nwl_pcie_msi_handler_low, pcie); @@ -592,8 +581,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT; if (!ret) { dev_err(dev, "MSI not present\n"); - ret = -EIO; - goto err; + return -EIO; } /* Enable MSII */ @@ -632,10 +620,6 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO); return 0; -err: - kfree(msi->bitmap); - msi->bitmap = NULL; - return ret; } static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index aa9bdcebc838..cb6e9f7b0152 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ 
b/drivers/pci/controller/pcie-xilinx.c @@ -91,18 +91,18 @@ #define XILINX_NUM_MSI_IRQS 128 /** - * struct xilinx_pcie_port - PCIe port information - * @reg_base: IO Mapped Register Base + * struct xilinx_pcie - PCIe port information * @dev: Device pointer + * @reg_base: IO Mapped Register Base * @msi_map: Bitmap of allocated MSIs * @map_lock: Mutex protecting the MSI allocation * @msi_domain: MSI IRQ domain pointer * @leg_domain: Legacy IRQ domain pointer * @resources: Bus Resources */ -struct xilinx_pcie_port { - void __iomem *reg_base; +struct xilinx_pcie { struct device *dev; + void __iomem *reg_base; unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)]; struct mutex map_lock; struct irq_domain *msi_domain; @@ -110,35 +110,35 @@ struct xilinx_pcie_port { struct list_head resources; }; -static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg) +static inline u32 pcie_read(struct xilinx_pcie *pcie, u32 reg) { - return readl(port->reg_base + reg); + return readl(pcie->reg_base + reg); } -static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg) +static inline void pcie_write(struct xilinx_pcie *pcie, u32 val, u32 reg) { - writel(val, port->reg_base + reg); + writel(val, pcie->reg_base + reg); } -static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port) +static inline bool xilinx_pcie_link_up(struct xilinx_pcie *pcie) { - return (pcie_read(port, XILINX_PCIE_REG_PSCR) & + return (pcie_read(pcie, XILINX_PCIE_REG_PSCR) & XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0; } /** * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts - * @port: PCIe port information + * @pcie: PCIe port information */ -static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) +static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie *pcie) { - struct device *dev = port->dev; - unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR); + struct device *dev = pcie->dev; + unsigned long val = pcie_read(pcie, XILINX_PCIE_REG_RPEFR); if (val & XILINX_PCIE_RPEFR_ERR_VALID) { dev_dbg(dev, "Requester ID %lu\n", val & XILINX_PCIE_RPEFR_REQ_ID); - pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, + pcie_write(pcie, XILINX_PCIE_RPEFR_ALL_MASK, XILINX_PCIE_REG_RPEFR); } } @@ -152,11 +152,11 @@ static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) */ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) { - struct xilinx_pcie_port *port = bus->sysdata; + struct xilinx_pcie *pcie = bus->sysdata; - /* Check if link is up when trying to access downstream ports */ + /* Check if link is up when trying to access downstream pcie ports */ if (!pci_is_root_bus(bus)) { - if (!xilinx_pcie_link_up(port)) + if (!xilinx_pcie_link_up(pcie)) return false; } else if (devfn > 0) { /* Only one device down on each root port */ @@ -177,12 +177,12 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { - struct xilinx_pcie_port *port = bus->sysdata; + struct xilinx_pcie *pcie = bus->sysdata; if (!xilinx_pcie_valid_device(bus, devfn)) return NULL; - return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); + return pcie->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); } /* PCIe operations */ @@ -215,7 +215,7 @@ static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mas static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { - struct xilinx_pcie_port *pcie = 
irq_data_get_irq_chip_data(data); + struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data); phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K); msg->address_lo = lower_32_bits(pa); @@ -232,14 +232,14 @@ static struct irq_chip xilinx_msi_bottom_chip = { static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { - struct xilinx_pcie_port *port = domain->host_data; + struct xilinx_pcie *pcie = domain->host_data; int hwirq, i; - mutex_lock(&port->map_lock); + mutex_lock(&pcie->map_lock); - hwirq = bitmap_find_free_region(port->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs)); + hwirq = bitmap_find_free_region(pcie->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs)); - mutex_unlock(&port->map_lock); + mutex_unlock(&pcie->map_lock); if (hwirq < 0) return -ENOSPC; @@ -256,13 +256,13 @@ static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct xilinx_pcie_port *port = domain->host_data; + struct xilinx_pcie *pcie = domain->host_data; - mutex_lock(&port->map_lock); + mutex_lock(&pcie->map_lock); - bitmap_release_region(port->msi_map, d->hwirq, order_base_2(nr_irqs)); + bitmap_release_region(pcie->msi_map, d->hwirq, order_base_2(nr_irqs)); - mutex_unlock(&port->map_lock); + mutex_unlock(&pcie->map_lock); } static const struct irq_domain_ops xilinx_msi_domain_ops = { @@ -275,7 +275,7 @@ static struct msi_domain_info xilinx_msi_info = { .chip = &xilinx_msi_top_chip, }; -static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie) +static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie) { struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); struct irq_domain *parent; @@ -298,7 +298,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie) return 0; } -static void xilinx_free_msi_domains(struct xilinx_pcie_port *pcie) +static void xilinx_free_msi_domains(struct xilinx_pcie *pcie) { struct irq_domain *parent = pcie->msi_domain->parent; @@ -342,13 +342,13 @@ static const struct irq_domain_ops intx_domain_ops = { */ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) { - struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data; - struct device *dev = port->dev; + struct xilinx_pcie *pcie = (struct xilinx_pcie *)data; + struct device *dev = pcie->dev; u32 val, mask, status; /* Read interrupt decode and mask registers */ - val = pcie_read(port, XILINX_PCIE_REG_IDR); - mask = pcie_read(port, XILINX_PCIE_REG_IMR); + val = pcie_read(pcie, XILINX_PCIE_REG_IDR); + mask = pcie_read(pcie, XILINX_PCIE_REG_IMR); status = val & mask; if (!status) @@ -371,23 +371,23 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) if (status & XILINX_PCIE_INTR_CORRECTABLE) { dev_warn(dev, "Correctable error message\n"); - xilinx_pcie_clear_err_interrupts(port); + xilinx_pcie_clear_err_interrupts(pcie); } if (status & XILINX_PCIE_INTR_NONFATAL) { dev_warn(dev, "Non fatal error message\n"); - xilinx_pcie_clear_err_interrupts(port); + xilinx_pcie_clear_err_interrupts(pcie); } if (status & XILINX_PCIE_INTR_FATAL) { dev_warn(dev, "Fatal error message\n"); - xilinx_pcie_clear_err_interrupts(port); + xilinx_pcie_clear_err_interrupts(pcie); } if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { struct irq_domain *domain; - val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); + val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR1); /* Check whether interrupt valid */ if (!(val & 
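/*
 * Illustrative sketch, not part of this patch: both the pcie-xilinx-nwl
 * hunk above (DECLARE_BITMAP() replacing a kzalloc'd bitmap) and the
 * xilinx msi_map keep MSI bookkeeping inside the device structure, so
 * allocation cannot fail and the error unwinding that existed only to
 * free the bitmap disappears. The demo_* names are hypothetical.
 */
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/mutex.h>

#define DEMO_NUM_MSI	64

struct demo_msi {
	DECLARE_BITMAP(used, DEMO_NUM_MSI);	/* storage lives in the struct */
	struct mutex lock;
};

static int demo_alloc_hwirqs(struct demo_msi *msi, unsigned int nr_irqs)
{
	int hwirq;

	mutex_lock(&msi->lock);
	hwirq = bitmap_find_free_region(msi->used, DEMO_NUM_MSI,
					order_base_2(nr_irqs));
	mutex_unlock(&msi->lock);

	return hwirq;	/* negative when the space is exhausted */
}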
XILINX_PCIE_RPIFR1_INTR_VALID)) { @@ -397,17 +397,17 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) /* Decode the IRQ number */ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { - val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & + val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) & XILINX_PCIE_RPIFR2_MSG_DATA; - domain = port->msi_domain->parent; + domain = pcie->msi_domain->parent; } else { val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> XILINX_PCIE_RPIFR1_INTR_SHIFT; - domain = port->leg_domain; + domain = pcie->leg_domain; } /* Clear interrupt FIFO register 1 */ - pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, + pcie_write(pcie, XILINX_PCIE_RPIFR1_ALL_MASK, XILINX_PCIE_REG_RPIFR1); generic_handle_domain_irq(domain, val); @@ -442,20 +442,20 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) error: /* Clear the Interrupt Decode register */ - pcie_write(port, status, XILINX_PCIE_REG_IDR); + pcie_write(pcie, status, XILINX_PCIE_REG_IDR); return IRQ_HANDLED; } /** * xilinx_pcie_init_irq_domain - Initialize IRQ domain - * @port: PCIe port information + * @pcie: PCIe port information * * Return: '0' on success and error value on failure */ -static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) +static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie) { - struct device *dev = port->dev; + struct device *dev = pcie->dev; struct device_node *pcie_intc_node; int ret; @@ -466,25 +466,25 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) return -ENODEV; } - port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, - port); + pcie); of_node_put(pcie_intc_node); - if (!port->leg_domain) { + if (!pcie->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; } /* Setup MSI */ if (IS_ENABLED(CONFIG_PCI_MSI)) { - phys_addr_t pa = ALIGN_DOWN(virt_to_phys(port), SZ_4K); + phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K); - ret = xilinx_allocate_msi_domains(port); + ret = xilinx_allocate_msi_domains(pcie); if (ret) return ret; - pcie_write(port, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1); - pcie_write(port, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2); + pcie_write(pcie, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1); + pcie_write(pcie, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2); } return 0; @@ -492,44 +492,44 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) /** * xilinx_pcie_init_port - Initialize hardware - * @port: PCIe port information + * @pcie: PCIe port information */ -static void xilinx_pcie_init_port(struct xilinx_pcie_port *port) +static void xilinx_pcie_init_port(struct xilinx_pcie *pcie) { - struct device *dev = port->dev; + struct device *dev = pcie->dev; - if (xilinx_pcie_link_up(port)) + if (xilinx_pcie_link_up(pcie)) dev_info(dev, "PCIe Link is UP\n"); else dev_info(dev, "PCIe Link is DOWN\n"); /* Disable all interrupts */ - pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK, + pcie_write(pcie, ~XILINX_PCIE_IDR_ALL_MASK, XILINX_PCIE_REG_IMR); /* Clear pending interrupts */ - pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) & + pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_IDR) & XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IDR); /* Enable all interrupts we handle */ - pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR); + pcie_write(pcie, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR); /* Enable the Bridge enable bit */ - pcie_write(port, pcie_read(port, 
XILINX_PCIE_REG_RPSC) | + pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_RPSC) | XILINX_PCIE_REG_RPSC_BEN, XILINX_PCIE_REG_RPSC); } /** * xilinx_pcie_parse_dt - Parse Device tree - * @port: PCIe port information + * @pcie: PCIe port information * * Return: '0' on success and error value on failure */ -static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) +static int xilinx_pcie_parse_dt(struct xilinx_pcie *pcie) { - struct device *dev = port->dev; + struct device *dev = pcie->dev; struct device_node *node = dev->of_node; struct resource regs; unsigned int irq; @@ -541,14 +541,14 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) return err; } - port->reg_base = devm_pci_remap_cfg_resource(dev, &regs); - if (IS_ERR(port->reg_base)) - return PTR_ERR(port->reg_base); + pcie->reg_base = devm_pci_remap_cfg_resource(dev, &regs); + if (IS_ERR(pcie->reg_base)) + return PTR_ERR(pcie->reg_base); irq = irq_of_parse_and_map(node, 0); err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler, IRQF_SHARED | IRQF_NO_THREAD, - "xilinx-pcie", port); + "xilinx-pcie", pcie); if (err) { dev_err(dev, "unable to request irq %d\n", irq); return err; } @@ -566,41 +566,41 @@ static int xilinx_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct xilinx_pcie_port *port; + struct xilinx_pcie *pcie; struct pci_host_bridge *bridge; int err; if (!dev->of_node) return -ENODEV; - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENODEV; - port = pci_host_bridge_priv(bridge); - mutex_init(&port->map_lock); - port->dev = dev; + pcie = pci_host_bridge_priv(bridge); + mutex_init(&pcie->map_lock); + pcie->dev = dev; - err = xilinx_pcie_parse_dt(port); + err = xilinx_pcie_parse_dt(pcie); if (err) { dev_err(dev, "Parsing DT failed\n"); return err; } - xilinx_pcie_init_port(port); + xilinx_pcie_init_port(pcie); - err = xilinx_pcie_init_irq_domain(port); + err = xilinx_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); return err; } - bridge->sysdata = port; + bridge->sysdata = pcie; bridge->ops = &xilinx_pcie_ops; err = pci_host_probe(bridge); if (err) - xilinx_free_msi_domains(port); + xilinx_free_msi_domains(pcie); return err; } diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index a45e8e59d3d4..cc166c683638 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -501,6 +501,40 @@ static inline void vmd_acpi_begin(void) { } static inline void vmd_acpi_end(void) { } #endif /* CONFIG_ACPI */ +static void vmd_domain_reset(struct vmd_dev *vmd) +{ + u16 bus, max_buses = resource_size(&vmd->resources[0]); + u8 dev, functions, fn, hdr_type; + char __iomem *base; + + for (bus = 0; bus < max_buses; bus++) { + for (dev = 0; dev < 32; dev++) { + base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus, + PCI_DEVFN(dev, 0), 0); + + hdr_type = readb(base + PCI_HEADER_TYPE); + + functions = (hdr_type & 0x80) ?
8 : 1; + for (fn = 0; fn < functions; fn++) { + base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus, + PCI_DEVFN(dev, fn), 0); + + hdr_type = readb(base + PCI_HEADER_TYPE) & + PCI_HEADER_TYPE_MASK; + + if (hdr_type != PCI_HEADER_TYPE_BRIDGE || + (readw(base + PCI_CLASS_DEVICE) != + PCI_CLASS_BRIDGE_PCI)) + continue; + + memset_io(base + PCI_IO_BASE, 0, + PCI_ROM_ADDRESS1 - PCI_IO_BASE); + } + } + } +} + static void vmd_attach_resources(struct vmd_dev *vmd) { vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; @@ -541,7 +575,7 @@ static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint, int ret; ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock); - if (ret || vmlock == ~0) + if (ret || PCI_POSSIBLE_ERROR(vmlock)) return -ENODEV; if (MB2_SHADOW_EN(vmlock)) { @@ -661,6 +695,21 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd) return 0; } +/* + * Since VMD is an aperture to regular PCIe root ports, only allow it to + * control features that the OS is allowed to control on the physical PCI bus. + */ +static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge, + struct pci_host_bridge *vmd_bridge) +{ + vmd_bridge->native_pcie_hotplug = root_bridge->native_pcie_hotplug; + vmd_bridge->native_shpc_hotplug = root_bridge->native_shpc_hotplug; + vmd_bridge->native_aer = root_bridge->native_aer; + vmd_bridge->native_pme = root_bridge->native_pme; + vmd_bridge->native_ltr = root_bridge->native_ltr; + vmd_bridge->native_dpc = root_bridge->native_dpc; +} + static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) { struct pci_sysdata *sd = &vmd->sysdata; @@ -798,6 +847,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) return -ENODEV; } + vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus), + to_pci_host_bridge(vmd->bus->bridge)); + vmd_attach_resources(vmd); if (vmd->irq_domain) dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); @@ -805,6 +857,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) vmd_acpi_begin(); pci_scan_child_bus(vmd->bus); + vmd_domain_reset(vmd); + list_for_each_entry(child, &vmd->bus->children, node) + pci_reset_bus(child->self); pci_assign_unassigned_bus_resources(vmd->bus); /* @@ -953,6 +1008,10 @@ static const struct pci_device_id vmd_ids[] = { .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | VMD_FEAT_HAS_BUS_RESTRICTIONS | VMD_FEAT_OFFSET_FIRST_VECTOR,}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa77f), + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | + VMD_FEAT_HAS_BUS_RESTRICTIONS | + VMD_FEAT_OFFSET_FIRST_VECTOR,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | VMD_FEAT_HAS_BUS_RESTRICTIONS | diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c index 5a03401f4571..9a00448c7e61 100644 --- a/drivers/pci/endpoint/functions/pci-epf-ntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c @@ -1262,7 +1262,7 @@ static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb, } /** - * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capaiblity + * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability * @ntb: NTB device that facilitates communication between HOST1 and HOST2 * @type: PRIMARY interface or SECONDARY interface * diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 38621558d397..3bc9273d0a08 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -334,7 
+334,7 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts) u8 encode_int; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - interrupts > 32) + interrupts < 1 || interrupts > 32) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO index cc6194aa24c1..88f217c82b4f 100644 --- a/drivers/pci/hotplug/TODO +++ b/drivers/pci/hotplug/TODO @@ -30,11 +30,6 @@ ibmphp: or ibmphp should store a pointer to its bus in struct slot. Probably the former. -* The functions get_max_adapter_speed() and get_bus_name() are commented out. - Can they be deleted? There are also forward declarations at the top of - ibmphp_core.c as well as pointers in ibmphp_hotplug_slot_ops, likewise - commented out. - * ibmphp_init_devno() takes a struct slot **, it could instead take a struct slot *. diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index ed7b58eb64d2..93fd2a621822 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -2273,7 +2273,7 @@ static u32 configure_new_device(struct controller *ctrl, struct pci_func *func while ((function < max_functions) && (!stop_it)) { pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID); - if (ID == 0xFFFFFFFF) { + if (PCI_POSSIBLE_ERROR(ID)) { function++; } else { /* Setup slot structure. */ @@ -2517,7 +2517,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func pci_bus_read_config_dword(pci_bus, PCI_DEVFN(device, 0), 0x00, &ID); pci_bus->number = func->bus; - if (ID != 0xFFFFFFFF) { /* device present */ + if (!PCI_POSSIBLE_ERROR(ID)) { /* device present */ /* Setup slot structure. 
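A minimal sketch of the PCI_POSSIBLE_ERROR() idiom that the vmd and cpqphp hunks here migrate to (an illustration, not patch text; the helper name is hypothetical). Config reads from a surprise-removed or powered-down device return all ones, and the macro compares against ~0 at the width of the value actually read, so it covers u8/u16/u32 reads alike where a hard-coded 0xFFFFFFFF only fits dword reads:

	/* #define PCI_POSSIBLE_ERROR(val)  ((val) == ((typeof(val))~0)) */
	static bool hypothetical_device_present(struct pci_bus *bus,
						unsigned int devfn)
	{
		u32 id;

		pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id);
		return !PCI_POSSIBLE_ERROR(id);
	}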
*/ new_slot = cpqhp_slot_create(hold_bus_node->base); diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 17124254d897..197997e264a2 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -50,14 +50,6 @@ static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS static int init_flag; -/* -static int get_max_adapter_speed_1 (struct hotplug_slot *, u8 *, u8); - -static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value) -{ - return get_max_adapter_speed_1 (hs, value, 1); -} -*/ static inline int get_cur_bus_info(struct slot **sl) { int rc = 1; @@ -401,69 +393,6 @@ static int get_max_bus_speed(struct slot *slot) return rc; } -/* -static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 *value, u8 flag) -{ - int rc = -ENODEV; - struct slot *pslot; - struct slot myslot; - - debug("get_max_adapter_speed_1 - Entry hotplug_slot[%lx] pvalue[%lx]\n", - (ulong)hotplug_slot, (ulong) value); - - if (flag) - ibmphp_lock_operations(); - - if (hotplug_slot && value) { - pslot = hotplug_slot->private; - if (pslot) { - memcpy(&myslot, pslot, sizeof(struct slot)); - rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, - &(myslot.status)); - - if (!(SLOT_LATCH (myslot.status)) && - (SLOT_PRESENT (myslot.status))) { - rc = ibmphp_hpc_readslot(pslot, - READ_EXTSLOTSTATUS, - &(myslot.ext_status)); - if (!rc) - *value = SLOT_SPEED(myslot.ext_status); - } else - *value = MAX_ADAPTER_NONE; - } - } - - if (flag) - ibmphp_unlock_operations(); - - debug("get_max_adapter_speed_1 - Exit rc[%d] value[%x]\n", rc, *value); - return rc; -} - -static int get_bus_name(struct hotplug_slot *hotplug_slot, char *value) -{ - int rc = -ENODEV; - struct slot *pslot = NULL; - - debug("get_bus_name - Entry hotplug_slot[%lx]\n", (ulong)hotplug_slot); - - ibmphp_lock_operations(); - - if (hotplug_slot) { - pslot = hotplug_slot->private; - if (pslot) { - rc = 0; - snprintf(value, 100, "Bus %x", pslot->bus); - } - } else - rc = -ENODEV; - - ibmphp_unlock_operations(); - debug("get_bus_name - Exit rc[%d] value[%x]\n", rc, *value); - return rc; -} -*/ - /**************************************************************************** * This routine will initialize the ops data structure used in the validate * function. 
It will also power off empty slots that are powered on since BIOS @@ -1231,9 +1160,6 @@ const struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_present, -/* .get_max_adapter_speed = get_max_adapter_speed, - .get_bus_name_status = get_bus_name, -*/ }; static void ibmphp_unload(void) diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 918dccbc74b6..e0a614acee05 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -75,6 +75,8 @@ extern int pciehp_poll_time; * @reset_lock: prevents access to the Data Link Layer Link Active bit in the * Link Status register and to the Presence Detect State bit in the Slot * Status register during a slot reset which may cause them to flap + * @depth: Number of additional hotplug ports in the path to the root bus, + * used as lock subclass for @reset_lock * @ist_running: flag to keep user request waiting while IRQ thread is running * @request_result: result of last user request submitted to the IRQ thread * @requester: wait queue to wake up on completion of user request, @@ -106,6 +108,7 @@ struct controller { struct hotplug_slot hotplug_slot; /* hotplug core interface */ struct rw_semaphore reset_lock; + unsigned int depth; unsigned int ist_running; int request_result; wait_queue_head_t requester; diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index f34114d45259..4042d87d539d 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -166,7 +166,7 @@ static void pciehp_check_presence(struct controller *ctrl) { int occupied; - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); mutex_lock(&ctrl->state_lock); occupied = pciehp_card_present_or_link_active(ctrl); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 83a0fa119cae..1c1ebf3dad43 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -89,7 +89,7 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout) do { pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); - if (slot_status == (u16) ~0) { + if (PCI_POSSIBLE_ERROR(slot_status)) { ctrl_info(ctrl, "%s: no response from device\n", __func__); return 0; @@ -165,7 +165,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd, pcie_wait_cmd(ctrl); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); - if (slot_ctrl == (u16) ~0) { + if (PCI_POSSIBLE_ERROR(slot_ctrl)) { ctrl_info(ctrl, "%s: no response from device\n", __func__); goto out; } @@ -236,7 +236,7 @@ int pciehp_check_link_active(struct controller *ctrl) int ret; ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); - if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0) + if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status)) return -ENODEV; ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); @@ -443,7 +443,7 @@ int pciehp_card_present(struct controller *ctrl) int ret; ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); - if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0) + if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status)) return -ENODEV; return !!(slot_status & PCI_EXP_SLTSTA_PDS); @@ -583,7 +583,7 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl, * the corresponding link change may have been ignored above. 
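The depth-as-subclass annotation deserves a concrete picture. All pciehp reset_lock semaphores share one lockdep class, so when a hotplug port nested under another hotplug port (a Thunderbolt daisy chain, for instance) takes its own reset_lock while an ancestor holds its lock, lockdep would report a false recursive-locking deadlock unless the two acquisitions carry distinct subclasses; the topological depth computed at controller init is stable for the life of the port and serves that purpose. A contrived single-task sketch of the annotation (the real nesting arises across the IRQ thread and user requests, not in one function):

	static void illustrate_nested_reset(struct controller *parent,
					    struct controller *child)
	{
		down_write_nested(&parent->reset_lock, parent->depth);	/* 0 */
		down_read_nested(&child->reset_lock, child->depth);	/* 1 */
		/* ... nested teardown would run here ... */
		up_read(&child->reset_lock);
		up_write(&parent->reset_lock);
	}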
* Synthesize it to ensure that it is acted on. */ - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); if (!pciehp_check_link_active(ctrl)) pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC); up_read(&ctrl->reset_lock); @@ -621,7 +621,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) read_status: pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); - if (status == (u16) ~0) { + if (PCI_POSSIBLE_ERROR(status)) { ctrl_info(ctrl, "%s: no response from device\n", __func__); if (parent) pm_runtime_put(parent); @@ -642,6 +642,8 @@ read_status: */ if (ctrl->power_fault_detected) status &= ~PCI_EXP_SLTSTA_PFD; + else if (status & PCI_EXP_SLTSTA_PFD) + ctrl->power_fault_detected = true; events |= status; if (!events) { @@ -651,7 +653,7 @@ read_status: } if (status) { - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status); /* * In MSI mode, all event bits must be zero before the port @@ -725,8 +727,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) } /* Check Power Fault Detected */ - if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { - ctrl->power_fault_detected = 1; + if (events & PCI_EXP_SLTSTA_PFD) { ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_ATTN_IND_ON); @@ -746,7 +747,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) * Disable requests have higher priority than Presence Detect Changed * or Data Link Layer State Changed events. */ - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); if (events & DISABLE_SLOT) pciehp_handle_disable_request(ctrl); else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) @@ -906,7 +907,7 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe) if (probe) return 0; - down_write(&ctrl->reset_lock); + down_write_nested(&ctrl->reset_lock, ctrl->depth); if (!ATTN_BUTTN(ctrl)) { ctrl_mask |= PCI_EXP_SLTCTL_PDCE; @@ -962,6 +963,20 @@ static inline void dbg_ctrl(struct controller *ctrl) #define FLAG(x, y) (((x) & (y)) ? 
'+' : '-') +static inline int pcie_hotplug_depth(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + int depth = 0; + + while (bus->parent) { + bus = bus->parent; + if (bus->self && bus->self->is_hotplug_bridge) + depth++; + } + + return depth; +} + struct controller *pcie_init(struct pcie_device *dev) { struct controller *ctrl; @@ -975,6 +990,7 @@ struct controller *pcie_init(struct pcie_device *dev) return NULL; ctrl->pcie = dev; + ctrl->depth = pcie_hotplug_depth(dev->port); pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); if (pdev->hotplug_user_indicators) diff --git a/drivers/pci/msi/Makefile b/drivers/pci/msi/Makefile new file mode 100644 index 000000000000..93ef7b9e404d --- /dev/null +++ b/drivers/pci/msi/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the PCI/MSI +obj-$(CONFIG_PCI) += pcidev_msi.o +obj-$(CONFIG_PCI_MSI) += msi.o +obj-$(CONFIG_PCI_MSI_IRQ_DOMAIN) += irqdomain.o +obj-$(CONFIG_PCI_MSI_ARCH_FALLBACKS) += legacy.o diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c new file mode 100644 index 000000000000..0d63541c4052 --- /dev/null +++ b/drivers/pci/msi/irqdomain.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Message Signaled Interrupt (MSI) - irqdomain support + */ +#include <linux/acpi_iort.h> +#include <linux/irqdomain.h> +#include <linux/of_irq.h> + +#include "msi.h" + +int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct irq_domain *domain; + + domain = dev_get_msi_domain(&dev->dev); + if (domain && irq_domain_is_hierarchy(domain)) + return msi_domain_alloc_irqs_descs_locked(domain, &dev->dev, nvec); + + return pci_msi_legacy_setup_msi_irqs(dev, nvec, type); +} + +void pci_msi_teardown_msi_irqs(struct pci_dev *dev) +{ + struct irq_domain *domain; + + domain = dev_get_msi_domain(&dev->dev); + if (domain && irq_domain_is_hierarchy(domain)) + msi_domain_free_irqs_descs_locked(domain, &dev->dev); + else + pci_msi_legacy_teardown_msi_irqs(dev); +} + +/** + * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space + * @irq_data: Pointer to interrupt data of the MSI interrupt + * @msg: Pointer to the message + */ +static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) +{ + struct msi_desc *desc = irq_data_get_msi_desc(irq_data); + + /* + * For MSI-X desc->irq is always equal to irq_data->irq. For + * MSI only the first interrupt of MULTI MSI passes the test. + */ + if (desc->irq == irq_data->irq) + __pci_write_msi_msg(desc, msg); +} + +/** + * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source + * @desc: Pointer to the MSI descriptor + * + * The ID number is only used within the irqdomain. 
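A worked expansion of the shifts in pci_msi_domain_calc_hwirq() just below, with hypothetical values (PCI domain 0, bus 0x3a, devfn 00.0, MSI-X index 5):

	/*
	 * hwirq layout: | domain (bits 27+) | RID (bits 11-26) | index (bits 0-10) |
	 *
	 * pci_dev_id() yields the 16-bit RID (bus << 8 | devfn) = 0x3a00, so
	 *
	 *	hwirq = 5 | (0x3a00 << 11) | (0 << 27) = 0x1d000005
	 *
	 * and, per the comment above, the result only needs to be unique
	 * within this one irqdomain.
	 */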
+ */ +static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc) +{ + struct pci_dev *dev = msi_desc_to_pci_dev(desc); + + return (irq_hw_number_t)desc->msi_index | + pci_dev_id(dev) << 11 | + (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; +} + +static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) +{ + return !desc->pci.msi_attrib.is_msix && desc->nvec_used > 1; +} + +/** + * pci_msi_domain_check_cap - Verify that @domain supports the capabilities + * for @dev + * @domain: The interrupt domain to check + * @info: The domain info for verification + * @dev: The device to check + * + * Returns: + * 0 if the functionality is supported + * 1 if Multi MSI is requested, but the domain does not support it + * -ENOTSUPP otherwise + */ +static int pci_msi_domain_check_cap(struct irq_domain *domain, + struct msi_domain_info *info, + struct device *dev) +{ + struct msi_desc *desc = msi_first_desc(dev, MSI_DESC_ALL); + + /* Special handling to support __pci_enable_msi_range() */ + if (pci_msi_desc_is_multi_msi(desc) && + !(info->flags & MSI_FLAG_MULTI_PCI_MSI)) + return 1; + + if (desc->pci.msi_attrib.is_msix) { + if (!(info->flags & MSI_FLAG_PCI_MSIX)) + return -ENOTSUPP; + + if (info->flags & MSI_FLAG_MSIX_CONTIGUOUS) { + unsigned int idx = 0; + + /* Check for gaps in the entry indices */ + msi_for_each_desc(desc, dev, MSI_DESC_ALL) { + if (desc->msi_index != idx++) + return -ENOTSUPP; + } + } + } + return 0; +} + +static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, + struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = pci_msi_domain_calc_hwirq(desc); +} + +static struct msi_domain_ops pci_msi_domain_ops_default = { + .set_desc = pci_msi_domain_set_desc, + .msi_check = pci_msi_domain_check_cap, +}; + +static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info) +{ + struct msi_domain_ops *ops = info->ops; + + if (ops == NULL) { + info->ops = &pci_msi_domain_ops_default; + } else { + if (ops->set_desc == NULL) + ops->set_desc = pci_msi_domain_set_desc; + if (ops->msi_check == NULL) + ops->msi_check = pci_msi_domain_check_cap; + } +} + +static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + BUG_ON(!chip); + if (!chip->irq_write_msi_msg) + chip->irq_write_msi_msg = pci_msi_domain_write_msg; + if (!chip->irq_mask) + chip->irq_mask = pci_msi_mask_irq; + if (!chip->irq_unmask) + chip->irq_unmask = pci_msi_unmask_irq; +} + +/** + * pci_msi_create_irq_domain - Create a MSI interrupt domain + * @fwnode: Optional fwnode of the interrupt controller + * @info: MSI domain info + * @parent: Parent irq domain + * + * Updates the domain and chip ops and creates a MSI interrupt domain. + * + * Returns: + * A domain pointer or NULL in case of failure. 
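Ahead of the function below, a sketch of how an interrupt controller driver typically instantiates a PCI/MSI domain; every "foo" name is hypothetical and the flag set is one plausible combination, not a requirement. MSI_FLAG_USE_DEF_DOM_OPS and MSI_FLAG_USE_DEF_CHIP_OPS let the update helpers above fill in set_desc()/msi_check() and the mask/unmask/write_msg callbacks:

	static struct irq_chip foo_msi_irq_chip = {
		.name		= "FOO-MSI",
		.irq_ack	= irq_chip_ack_parent,
	};

	static struct msi_domain_info foo_msi_domain_info = {
		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI,
		.chip	= &foo_msi_irq_chip,
	};

	static struct irq_domain *
	foo_create_msi_domain(struct fwnode_handle *fwnode,
			      struct irq_domain *parent)
	{
		return pci_msi_create_irq_domain(fwnode, &foo_msi_domain_info,
						 parent);
	}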
+ */ +struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent) +{ + struct irq_domain *domain; + + if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE)) + info->flags &= ~MSI_FLAG_LEVEL_CAPABLE; + + if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) + pci_msi_domain_update_dom_ops(info); + if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) + pci_msi_domain_update_chip_ops(info); + + info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS | + MSI_FLAG_FREE_MSI_DESCS; + if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) + info->flags |= MSI_FLAG_MUST_REACTIVATE; + + /* PCI-MSI is oneshot-safe */ + info->chip->flags |= IRQCHIP_ONESHOT_SAFE; + + domain = msi_create_irq_domain(fwnode, info, parent); + if (!domain) + return NULL; + + irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI); + return domain; +} +EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); + +/* + * Users of the generic MSI infrastructure expect a device to have a single ID, + * so with DMA aliases we have to pick the least-worst compromise. Devices with + * DMA phantom functions tend to still emit MSIs from the real function number, + * so we ignore those and only consider topological aliases where either the + * alias device or RID appears on a different bus number. We also make the + * reasonable assumption that bridges are walked in an upstream direction (so + * the last one seen wins), and the much braver assumption that the most likely + * case is that of PCI->PCIe so we should always use the alias RID. This echoes + * the logic from intel_irq_remapping's set_msi_sid(), which presumably works + * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions + * for taking ownership all we can really do is close our eyes and hope... + */ +static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) +{ + u32 *pa = data; + u8 bus = PCI_BUS_NUM(*pa); + + if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) + *pa = alias; + + return 0; +} + +/** + * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID) + * @domain: The interrupt domain + * @pdev: The PCI device. + * + * The RID for a device is formed from the alias, with a firmware + * supplied mapping applied + * + * Returns: The RID. + */ +u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) +{ + struct device_node *of_node; + u32 rid = pci_dev_id(pdev); + + pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); + + of_node = irq_domain_get_of_node(domain); + rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) : + iort_msi_map_id(&pdev->dev, rid); + + return rid; +} + +/** + * pci_msi_get_device_domain - Get the MSI domain for a given PCI device + * @pdev: The PCI device + * + * Use the firmware data to find a device-specific MSI domain + * (i.e. not one that is set as a default). + * + * Returns: The corresponding MSI domain or NULL if none has been found. + */ +struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) +{ + struct irq_domain *dom; + u32 rid = pci_dev_id(pdev); + + pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); + dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI); + if (!dom) + dom = iort_get_device_domain(&pdev->dev, rid, + DOMAIN_BUS_PCI_MSI); + return dom; +} + +/** + * pci_dev_has_special_msi_domain - Check whether the device is handled by + * a non-standard PCI-MSI domain + * @pdev: The PCI device to check. 
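A worked example of the alias selection described above, using a hypothetical topology:

	/*
	 *   00:1c.0 Root Port -- 01:00.0 PCIe-to-PCI bridge -- 02:00.0 endpoint
	 *
	 * Starting RID: 0x0200 (bus 0x02, devfn 00.0). The conventional PCI
	 * bridge takes ownership of upstream transactions, so
	 * pci_for_each_dma_alias() offers the bridge alias 0x0100; that alias
	 * sits on a different bus number than bus 0x02, so get_msi_id_cb()
	 * adopts it. pci_msi_domain_get_msi_rid() then feeds 0x0100, not
	 * 0x0200, through the of_msi_map_id()/iort_msi_map_id() firmware
	 * mapping.
	 */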
+ * + * Returns: True if the device irqdomain or the bus irqdomain is + * non-standard PCI/MSI. + */ +bool pci_dev_has_special_msi_domain(struct pci_dev *pdev) +{ + struct irq_domain *dom = dev_get_msi_domain(&pdev->dev); + + if (!dom) + dom = dev_get_msi_domain(&pdev->bus->dev); + + if (!dom) + return true; + + return dom->bus_token != DOMAIN_BUS_PCI_MSI; +} diff --git a/drivers/pci/msi/legacy.c b/drivers/pci/msi/legacy.c new file mode 100644 index 000000000000..cdbb4689db78 --- /dev/null +++ b/drivers/pci/msi/legacy.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Message Signaled Interrupt (MSI). + * + * Legacy architecture specific setup and teardown mechanism. + */ +#include "msi.h" + +/* Arch hooks */ +int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) +{ + return -EINVAL; +} + +void __weak arch_teardown_msi_irq(unsigned int irq) +{ +} + +int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *desc; + int ret; + + /* + * If an architecture wants to support multiple MSI, it needs to + * override arch_setup_msi_irqs() + */ + if (type == PCI_CAP_ID_MSI && nvec > 1) + return 1; + + msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) { + ret = arch_setup_msi_irq(dev, desc); + if (ret) + return ret < 0 ? ret : -ENOSPC; + } + + return 0; +} + +void __weak arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) { + for (i = 0; i < desc->nvec_used; i++) + arch_teardown_msi_irq(desc->irq + i); + } +} + +static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret) +{ + struct msi_desc *desc; + int avail = 0; + + if (type != PCI_CAP_ID_MSIX || ret >= 0) + return ret; + + /* Scan the MSI descriptors for successfully allocated ones. */ + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) + avail++; + + return avail ? avail : ret; +} + +int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int ret = arch_setup_msi_irqs(dev, nvec, type); + + ret = pci_msi_setup_check_result(dev, type, ret); + if (!ret) + ret = msi_device_populate_sysfs(&dev->dev); + return ret; +} + +void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev) +{ + msi_device_destroy_sysfs(&dev->dev); + arch_teardown_msi_irqs(dev); + msi_free_msi_descs(&dev->dev); +} diff --git a/drivers/pci/msi.c b/drivers/pci/msi/msi.c index d84cf30bb279..c19c7ca58186 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi/msi.c @@ -6,156 +6,29 @@ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) * Copyright (C) 2016 Christoph Hellwig. 
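For architectures still on CONFIG_PCI_MSI_ARCH_FALLBACKS, the weak hooks in legacy.c above are the override points. A minimal sketch of such an override, assuming single-MSI only (the weak arch_setup_msi_irqs() returns 1 for multi-MSI so the core falls back to one vector); the foo_arch_*() allocator names are hypothetical:

	int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
	{
		int virq = foo_arch_alloc_irq(desc);	/* hypothetical */

		if (virq < 0)
			return virq;

		/*
		 * Compose and write the MSI message here, then bind the
		 * descriptor so irq_get_msi_desc() and teardown can find it.
		 */
		irq_set_msi_desc(virq, desc);
		return 0;
	}

	void arch_teardown_msi_irq(unsigned int irq)
	{
		foo_arch_free_irq(irq);			/* hypothetical */
	}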
*/ - #include <linux/err.h> -#include <linux/mm.h> -#include <linux/irq.h> -#include <linux/interrupt.h> #include <linux/export.h> -#include <linux/ioport.h> -#include <linux/pci.h> -#include <linux/proc_fs.h> -#include <linux/msi.h> -#include <linux/smp.h> -#include <linux/errno.h> -#include <linux/io.h> -#include <linux/acpi_iort.h> -#include <linux/slab.h> -#include <linux/irqdomain.h> -#include <linux/of_irq.h> - -#include "pci.h" - -#ifdef CONFIG_PCI_MSI +#include <linux/irq.h> + +#include "../pci.h" +#include "msi.h" static int pci_msi_enable = 1; int pci_msi_ignore_mask; -#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) - -#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - struct irq_domain *domain; - - domain = dev_get_msi_domain(&dev->dev); - if (domain && irq_domain_is_hierarchy(domain)) - return msi_domain_alloc_irqs(domain, &dev->dev, nvec); - - return arch_setup_msi_irqs(dev, nvec, type); -} - -static void pci_msi_teardown_msi_irqs(struct pci_dev *dev) -{ - struct irq_domain *domain; - - domain = dev_get_msi_domain(&dev->dev); - if (domain && irq_domain_is_hierarchy(domain)) - msi_domain_free_irqs(domain, &dev->dev); - else - arch_teardown_msi_irqs(dev); -} -#else -#define pci_msi_setup_msi_irqs arch_setup_msi_irqs -#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs -#endif - -#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS -/* Arch hooks */ -int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) -{ - return -EINVAL; -} - -void __weak arch_teardown_msi_irq(unsigned int irq) -{ -} - -int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - struct msi_desc *entry; - int ret; - - /* - * If an architecture wants to support multiple MSI, it needs to - * override arch_setup_msi_irqs() - */ - if (type == PCI_CAP_ID_MSI && nvec > 1) - return 1; - - for_each_pci_msi_entry(entry, dev) { - ret = arch_setup_msi_irq(dev, entry); - if (ret < 0) - return ret; - if (ret > 0) - return -ENOSPC; - } - - return 0; -} - -void __weak arch_teardown_msi_irqs(struct pci_dev *dev) -{ - int i; - struct msi_desc *entry; - - for_each_pci_msi_entry(entry, dev) - if (entry->irq) - for (i = 0; i < entry->nvec_used; i++) - arch_teardown_msi_irq(entry->irq + i); -} -#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ - -static void default_restore_msi_irq(struct pci_dev *dev, int irq) -{ - struct msi_desc *entry; - - entry = NULL; - if (dev->msix_enabled) { - for_each_pci_msi_entry(entry, dev) { - if (irq == entry->irq) - break; - } - } else if (dev->msi_enabled) { - entry = irq_get_msi_desc(irq); - } - - if (entry) - __pci_write_msi_msg(entry, &entry->msg); -} - -void __weak arch_restore_msi_irqs(struct pci_dev *dev) -{ - return default_restore_msi_irqs(dev); -} - -/* - * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to - * mask all MSI interrupts by clearing the MSI enable bit does not work - * reliably as devices without an INTx disable bit will then generate a - * level IRQ which will never be cleared. 
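The comment above documents msi_multi_mask(), whose removal hunk follows (the same comment reappears in the new msi.h at the end of this section). A worked expansion, since multi_cap is the log2-encoded vector count from the MSI Message Control register:

	/*
	 *	multi_cap = 0  ->  1 vector   ->  mask (1 << 1) - 1 = 0x00000001
	 *	multi_cap = 3  ->  8 vectors  ->  mask (1 << 8) - 1 = 0x000000ff
	 *	multi_cap = 5  -> 32 vectors  ->  mask clamped to    0xffffffff,
	 *	because 1 << 32 would shift by the full width of the type,
	 *	which is undefined behaviour in C.
	 */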
- */ -static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc) -{ - /* Don't shift by >= width of type */ - if (desc->msi_attrib.multi_cap >= 5) - return 0xffffffff; - return (1 << (1 << desc->msi_attrib.multi_cap)) - 1; -} - static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set) { - raw_spinlock_t *lock = &desc->dev->msi_lock; + raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock; unsigned long flags; - if (!desc->msi_attrib.can_mask) + if (!desc->pci.msi_attrib.can_mask) return; raw_spin_lock_irqsave(lock, flags); - desc->msi_mask &= ~clear; - desc->msi_mask |= set; - pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, - desc->msi_mask); + desc->pci.msi_mask &= ~clear; + desc->pci.msi_mask |= set; + pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos, + desc->pci.msi_mask); raw_spin_unlock_irqrestore(lock, flags); } @@ -171,7 +44,7 @@ static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask) static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc) { - return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; + return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE; } /* @@ -184,27 +57,27 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl) { void __iomem *desc_addr = pci_msix_desc_addr(desc); - if (desc->msi_attrib.can_mask) + if (desc->pci.msi_attrib.can_mask) writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL); } static inline void pci_msix_mask(struct msi_desc *desc) { - desc->msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT; - pci_msix_write_vector_ctrl(desc, desc->msix_ctrl); + desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT; + pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl); /* Flush write to device */ - readl(desc->mask_base); + readl(desc->pci.mask_base); } static inline void pci_msix_unmask(struct msi_desc *desc) { - desc->msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; - pci_msix_write_vector_ctrl(desc, desc->msix_ctrl); + desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; + pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl); } static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask) { - if (desc->msi_attrib.is_msix) + if (desc->pci.msi_attrib.is_msix) pci_msix_mask(desc); else pci_msi_mask(desc, mask); @@ -212,7 +85,7 @@ static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask) static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask) { - if (desc->msi_attrib.is_msix) + if (desc->pci.msi_attrib.is_msix) pci_msix_unmask(desc); else pci_msi_unmask(desc, mask); @@ -242,24 +115,16 @@ void pci_msi_unmask_irq(struct irq_data *data) } EXPORT_SYMBOL_GPL(pci_msi_unmask_irq); -void default_restore_msi_irqs(struct pci_dev *dev) -{ - struct msi_desc *entry; - - for_each_pci_msi_entry(entry, dev) - default_restore_msi_irq(dev, entry->irq); -} - void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { struct pci_dev *dev = msi_desc_to_pci_dev(entry); BUG_ON(dev->current_state != PCI_D0); - if (entry->msi_attrib.is_msix) { + if (entry->pci.msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); - if (WARN_ON_ONCE(entry->msi_attrib.is_virtual)) + if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual)) return; msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); @@ -271,7 +136,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &msg->address_lo); - if (entry->msi_attrib.is_64) { + if (entry->pci.msi_attrib.is_64) 
{ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &msg->address_hi); pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data); @@ -289,12 +154,12 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { /* Don't touch the hardware now */ - } else if (entry->msi_attrib.is_msix) { + } else if (entry->pci.msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); - u32 ctrl = entry->msix_ctrl; + u32 ctrl = entry->pci.msix_ctrl; bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT); - if (entry->msi_attrib.is_virtual) + if (entry->pci.msi_attrib.is_virtual) goto skip; /* @@ -323,12 +188,12 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); msgctl &= ~PCI_MSI_FLAGS_QSIZE; - msgctl |= entry->msi_attrib.multiple << 4; + msgctl |= entry->pci.msi_attrib.multiple << 4; pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl); pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo); - if (entry->msi_attrib.is_64) { + if (entry->pci.msi_attrib.is_64) { pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi); pci_write_config_word(dev, pos + PCI_MSI_DATA_64, @@ -359,30 +224,11 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg); static void free_msi_irqs(struct pci_dev *dev) { - struct list_head *msi_list = dev_to_msi_list(&dev->dev); - struct msi_desc *entry, *tmp; - int i; - - for_each_pci_msi_entry(entry, dev) - if (entry->irq) - for (i = 0; i < entry->nvec_used; i++) - BUG_ON(irq_has_action(entry->irq + i)); - - if (dev->msi_irq_groups) { - msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups); - dev->msi_irq_groups = NULL; - } - pci_msi_teardown_msi_irqs(dev); - list_for_each_entry_safe(entry, tmp, msi_list, list) { - if (entry->msi_attrib.is_msix) { - if (list_is_last(&entry->list, msi_list)) - iounmap(entry->mask_base); - } - - list_del(&entry->list); - free_msi_entry(entry); + if (dev->msix_base) { + iounmap(dev->msix_base); + dev->msix_base = NULL; } } @@ -403,10 +249,19 @@ static void pci_msi_set_enable(struct pci_dev *dev, int enable) pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); } +/* + * Architecture override returns true when the PCI MSI message should be + * written by the generic restore function. 
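A sketch of an override of the bool contract introduced just below (hypothetical names; the pattern mirrors paravirtualized setups such as Xen, where the hypervisor owns the MSI messages). Returning false tells __pci_restore_msi_state()/__pci_restore_msix_state() that the messages were already restored, so the generic config-space writes are skipped:

	bool arch_restore_msi_irqs(struct pci_dev *dev)
	{
		if (foo_platform_owns_msi(dev)) {	/* hypothetical */
			foo_platform_restore_msi(dev);	/* hypothetical */
			return false;	/* messages already written */
		}
		return true;		/* let the PCI core write them */
	}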
+ */ +bool __weak arch_restore_msi_irqs(struct pci_dev *dev) +{ + return true; +} + static void __pci_restore_msi_state(struct pci_dev *dev) { - u16 control; struct msi_desc *entry; + u16 control; if (!dev->msi_enabled) return; @@ -415,12 +270,13 @@ static void __pci_restore_msi_state(struct pci_dev *dev) pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 0); - arch_restore_msi_irqs(dev); + if (arch_restore_msi_irqs(dev)) + __pci_write_msi_msg(entry, &entry->msg); pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); pci_msi_update_mask(entry, 0, 0); control &= ~PCI_MSI_FLAGS_QSIZE; - control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; + control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); } @@ -437,19 +293,25 @@ static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) static void __pci_restore_msix_state(struct pci_dev *dev) { struct msi_desc *entry; + bool write_msg; if (!dev->msix_enabled) return; - BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); /* route the table */ pci_intx_for_msi(dev, 0); pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); - arch_restore_msi_irqs(dev); - for_each_pci_msi_entry(entry, dev) - pci_msix_write_vector_ctrl(entry, entry->msix_ctrl); + write_msg = arch_restore_msi_irqs(dev); + + msi_lock_descs(&dev->dev); + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + if (write_msg) + __pci_write_msi_msg(entry, &entry->msg); + pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + } + msi_unlock_descs(&dev->dev); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); } @@ -461,48 +323,79 @@ void pci_restore_msi_state(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_restore_msi_state); -static struct msi_desc * -msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd) +static void pcim_msi_release(void *pcidev) { - struct irq_affinity_desc *masks = NULL; - struct msi_desc *entry; - u16 control; + struct pci_dev *dev = pcidev; - if (affd) - masks = irq_create_affinity_masks(nvec, affd); + dev->is_msi_managed = false; + pci_free_irq_vectors(dev); +} + +/* + * Needs to be separate from pcim_release to prevent an ordering problem + * vs. msi_device_data_release() in the MSI core code. + */ +static int pcim_setup_msi_release(struct pci_dev *dev) +{ + int ret; + + if (!pci_is_managed(dev) || dev->is_msi_managed) + return 0; + + ret = devm_add_action(&dev->dev, pcim_msi_release, dev); + if (!ret) + dev->is_msi_managed = true; + return ret; +} + +/* + * Ordering vs. devres: msi device data has to be installed first so that + * pcim_msi_release() is invoked before it on device release. 
+ */ +static int pci_setup_msi_context(struct pci_dev *dev) +{ + int ret = msi_setup_device_data(&dev->dev); + + if (!ret) + ret = pcim_setup_msi_release(dev); + return ret; +} + +static int msi_setup_msi_desc(struct pci_dev *dev, int nvec, + struct irq_affinity_desc *masks) +{ + struct msi_desc desc; + u16 control; /* MSI Entry Initialization */ - entry = alloc_msi_entry(&dev->dev, nvec, masks); - if (!entry) - goto out; + memset(&desc, 0, sizeof(desc)); pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); /* Lies, damned lies, and MSIs */ if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING) control |= PCI_MSI_FLAGS_MASKBIT; + /* Respect XEN's mask disabling */ + if (pci_msi_ignore_mask) + control &= ~PCI_MSI_FLAGS_MASKBIT; - entry->msi_attrib.is_msix = 0; - entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); - entry->msi_attrib.is_virtual = 0; - entry->msi_attrib.entry_nr = 0; - entry->msi_attrib.can_mask = !pci_msi_ignore_mask && - !!(control & PCI_MSI_FLAGS_MASKBIT); - entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ - entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; - entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); + desc.nvec_used = nvec; + desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); + desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT); + desc.pci.msi_attrib.default_irq = dev->irq; + desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; + desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); + desc.affinity = masks; if (control & PCI_MSI_FLAGS_64BIT) - entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; + desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64; else - entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; + desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32; /* Save the initial mask status */ - if (entry->msi_attrib.can_mask) - pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask); + if (desc.pci.msi_attrib.can_mask) + pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask); -out: - kfree(masks); - return entry; + return msi_add_msi_desc(&dev->dev, &desc); } static int msi_verify_entries(struct pci_dev *dev) @@ -512,14 +405,14 @@ static int msi_verify_entries(struct pci_dev *dev) if (!dev->no_64bit_msi) return 0; - for_each_pci_msi_entry(entry, dev) { + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { if (entry->msg.address_hi) { pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n", entry->msg.address_hi, entry->msg.address_lo); - return -EIO; + break; } } - return 0; + return !entry ? 0 : -EIO; } /** @@ -537,21 +430,29 @@ static int msi_verify_entries(struct pci_dev *dev) static int msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity *affd) { - const struct attribute_group **groups; + struct irq_affinity_desc *masks = NULL; struct msi_desc *entry; int ret; - pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ + /* + * Disable MSI during setup in the hardware, but mark it enabled + * so that setup code can evaluate it. 
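Spelling out the ordering above, since devres actions unwind last-in first-out (a descriptive sketch, not patch text):

	/*
	 * Registered:  1) msi_setup_device_data()  2) pcim_msi_release action
	 * Released:    1) pcim_msi_release()       2) msi_device_data_release()
	 *
	 * devres unwinds in reverse order of registration, so the vectors are
	 * freed (pci_free_irq_vectors()) while the MSI descriptor storage
	 * still exists, never the other way around.
	 */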
+ */ + pci_msi_set_enable(dev, 0); + dev->msi_enabled = 1; - entry = msi_setup_entry(dev, nvec, affd); - if (!entry) - return -ENOMEM; + if (affd) + masks = irq_create_affinity_masks(nvec, affd); + + msi_lock_descs(&dev->dev); + ret = msi_setup_msi_desc(dev, nvec, masks); + if (ret) + goto fail; /* All MSIs are unmasked by default; mask them all */ + entry = msi_first_desc(&dev->dev, MSI_DESC_ALL); pci_msi_mask(entry, msi_multi_mask(entry)); - list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); - /* Configure MSI capability structure */ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); if (ret) @@ -561,26 +462,22 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, if (ret) goto err; - groups = msi_populate_sysfs(&dev->dev); - if (IS_ERR(groups)) { - ret = PTR_ERR(groups); - goto err; - } - - dev->msi_irq_groups = groups; - /* Set MSI enabled bits */ pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 1); - dev->msi_enabled = 1; pcibios_free_irq(dev); dev->irq = entry->irq; - return 0; + goto unlock; err: pci_msi_unmask(entry, msi_multi_mask(entry)); free_msi_irqs(dev); +fail: + dev->msi_enabled = 0; +unlock: + msi_unlock_descs(&dev->dev); + kfree(masks); return ret; } @@ -605,70 +502,49 @@ static void __iomem *msix_map_region(struct pci_dev *dev, return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); } -static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, - struct msix_entry *entries, int nvec, - struct irq_affinity *affd) +static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base, + struct msix_entry *entries, int nvec, + struct irq_affinity_desc *masks) { - struct irq_affinity_desc *curmsk, *masks = NULL; - struct msi_desc *entry; + int ret = 0, i, vec_count = pci_msix_vec_count(dev); + struct irq_affinity_desc *curmsk; + struct msi_desc desc; void __iomem *addr; - int ret, i; - int vec_count = pci_msix_vec_count(dev); - - if (affd) - masks = irq_create_affinity_masks(nvec, affd); - for (i = 0, curmsk = masks; i < nvec; i++) { - entry = alloc_msi_entry(&dev->dev, 1, curmsk); - if (!entry) { - if (!i) - iounmap(base); - else - free_msi_irqs(dev); - /* No enough memory. Don't try again */ - ret = -ENOMEM; - goto out; - } + memset(&desc, 0, sizeof(desc)); - entry->msi_attrib.is_msix = 1; - entry->msi_attrib.is_64 = 1; + desc.nvec_used = 1; + desc.pci.msi_attrib.is_msix = 1; + desc.pci.msi_attrib.is_64 = 1; + desc.pci.msi_attrib.default_irq = dev->irq; + desc.pci.mask_base = base; - if (entries) - entry->msi_attrib.entry_nr = entries[i].entry; - else - entry->msi_attrib.entry_nr = i; + for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) { + desc.msi_index = entries ? entries[i].entry : i; + desc.affinity = masks ? 
curmsk : NULL; + desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count; + desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask && + !desc.pci.msi_attrib.is_virtual; - entry->msi_attrib.is_virtual = - entry->msi_attrib.entry_nr >= vec_count; - - entry->msi_attrib.can_mask = !pci_msi_ignore_mask && - !entry->msi_attrib.is_virtual; - - entry->msi_attrib.default_irq = dev->irq; - entry->mask_base = base; - - if (entry->msi_attrib.can_mask) { - addr = pci_msix_desc_addr(entry); - entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); + if (!desc.pci.msi_attrib.can_mask) { + addr = pci_msix_desc_addr(&desc); + desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); } - list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); - if (masks) - curmsk++; + ret = msi_add_msi_desc(&dev->dev, &desc); + if (ret) + break; } - ret = 0; -out: - kfree(masks); return ret; } static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) { - struct msi_desc *entry; + struct msi_desc *desc; - for_each_pci_msi_entry(entry, dev) { - if (entries) { - entries->vector = entry->irq; + if (entries) { + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { + entries->vector = desc->irq; entries++; } } @@ -686,6 +562,41 @@ static void msix_mask_all(void __iomem *base, int tsize) writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); } +static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base, + struct msix_entry *entries, int nvec, + struct irq_affinity *affd) +{ + struct irq_affinity_desc *masks = NULL; + int ret; + + if (affd) + masks = irq_create_affinity_masks(nvec, affd); + + msi_lock_descs(&dev->dev); + ret = msix_setup_msi_descs(dev, base, entries, nvec, masks); + if (ret) + goto out_free; + + ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); + if (ret) + goto out_free; + + /* Check if all MSI entries honor device restrictions */ + ret = msi_verify_entries(dev); + if (ret) + goto out_free; + + msix_update_entries(dev, entries); + goto out_unlock; + +out_free: + free_msi_irqs(dev); +out_unlock: + msi_unlock_descs(&dev->dev); + kfree(masks); + return ret; +} + /** * msix_capability_init - configure device's MSI-X capability * @dev: pointer to the pci_dev data structure of MSI-X device function @@ -700,7 +611,6 @@ static void msix_mask_all(void __iomem *base, int tsize) static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity *affd) { - const struct attribute_group **groups; void __iomem *base; int ret, tsize; u16 control; @@ -713,6 +623,9 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); + /* Mark it enabled so setup functions can query it */ + dev->msix_enabled = 1; + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); /* Request & Map MSI-X table region */ tsize = msix_table_size(control); @@ -722,32 +635,14 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, goto out_disable; } - ret = msix_setup_entries(dev, base, entries, nvec, affd); - if (ret) - goto out_disable; - - ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); - if (ret) - goto out_avail; + dev->msix_base = base; - /* Check if all MSI entries honor device restrictions */ - ret = msi_verify_entries(dev); + ret = msix_setup_interrupts(dev, base, entries, nvec, affd); if (ret) - goto out_free; - - msix_update_entries(dev, entries); - - groups = msi_populate_sysfs(&dev->dev); - if 
(IS_ERR(groups)) { - ret = PTR_ERR(groups); - goto out_free; - } - - dev->msi_irq_groups = groups; + goto out_disable; - /* Set MSI-X enabled bits and unmask the function */ + /* Disable INTX */ pci_intx_for_msi(dev, 0); - dev->msix_enabled = 1; /* * Ensure that all table entries are masked to prevent @@ -763,27 +658,8 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, pcibios_free_irq(dev); return 0; -out_avail: - if (ret < 0) { - /* - * If we had some success, report the number of IRQs - * we succeeded in setting up. - */ - struct msi_desc *entry; - int avail = 0; - - for_each_pci_msi_entry(entry, dev) { - if (entry->irq != 0) - avail++; - } - if (avail != 0) - ret = avail; - } - -out_free: - free_msi_irqs(dev); - out_disable: + dev->msix_enabled = 0; pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0); return ret; @@ -870,18 +746,17 @@ static void pci_msi_shutdown(struct pci_dev *dev) if (!pci_msi_enable || !dev || !dev->msi_enabled) return; - BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); - desc = first_pci_msi_entry(dev); - pci_msi_set_enable(dev, 0); pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; /* Return the device with MSI unmasked as initial states */ - pci_msi_unmask(desc, msi_multi_mask(desc)); + desc = msi_first_desc(&dev->dev, MSI_DESC_ALL); + if (!WARN_ON_ONCE(!desc)) + pci_msi_unmask(desc, msi_multi_mask(desc)); /* Restore dev->irq to its default pin-assertion IRQ */ - dev->irq = desc->msi_attrib.default_irq; + dev->irq = desc->pci.msi_attrib.default_irq; pcibios_alloc_irq(dev); } @@ -890,8 +765,10 @@ void pci_disable_msi(struct pci_dev *dev) if (!pci_msi_enable || !dev || !dev->msi_enabled) return; + msi_lock_descs(&dev->dev); pci_msi_shutdown(dev); free_msi_irqs(dev); + msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msi); @@ -952,7 +829,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, static void pci_msix_shutdown(struct pci_dev *dev) { - struct msi_desc *entry; + struct msi_desc *desc; if (!pci_msi_enable || !dev || !dev->msix_enabled) return; @@ -963,8 +840,8 @@ static void pci_msix_shutdown(struct pci_dev *dev) } /* Return the device with MSI-X masked as initial states */ - for_each_pci_msi_entry(entry, dev) - pci_msix_mask(entry); + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) + pci_msix_mask(desc); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_intx_for_msi(dev, 1); @@ -977,28 +854,13 @@ void pci_disable_msix(struct pci_dev *dev) if (!pci_msi_enable || !dev || !dev->msix_enabled) return; + msi_lock_descs(&dev->dev); pci_msix_shutdown(dev); free_msi_irqs(dev); + msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msix); -void pci_no_msi(void) -{ - pci_msi_enable = 0; -} - -/** - * pci_msi_enabled - is MSI enabled? - * - * Returns true if MSI has not been disabled by the command-line option - * pci=nomsi. 
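Both disable paths now share one locked shape, condensed here as a descriptive sketch (not patch text):

	/*
	 *	msi_lock_descs(&dev->dev);   // descriptors stable from here
	 *	pci_msi_shutdown(dev);       // or pci_msix_shutdown(): mask
	 *	                             // vectors, clear the enable bit
	 *	free_msi_irqs(dev);          // free irqs, unmap the MSI-X table
	 *	msi_unlock_descs(&dev->dev);
	 */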
- **/ -int pci_msi_enabled(void) -{ - return pci_msi_enable; -} -EXPORT_SYMBOL(pci_msi_enabled); - static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { @@ -1029,6 +891,10 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (nvec > maxvec) nvec = maxvec; + rc = pci_setup_msi_context(dev); + if (rc) + return rc; + for (;;) { if (affd) { nvec = irq_calc_affinity_vectors(minvec, nvec, affd); @@ -1072,6 +938,10 @@ static int __pci_enable_msix_range(struct pci_dev *dev, if (WARN_ON_ONCE(dev->msix_enabled)) return -EINVAL; + rc = pci_setup_msi_context(dev); + if (rc) + return rc; + for (;;) { if (affd) { nvec = irq_calc_affinity_vectors(minvec, nvec, affd); @@ -1194,35 +1064,25 @@ EXPORT_SYMBOL(pci_free_irq_vectors); /** * pci_irq_vector - return Linux IRQ number of a device vector - * @dev: PCI device to operate on - * @nr: device-relative interrupt vector index (0-based). + * @dev: PCI device to operate on + * @nr: Interrupt vector index (0-based) + * + * @nr has the following meanings depending on the interrupt mode: + * MSI-X: The index in the MSI-X vector table + * MSI: The index of the enabled MSI vectors + * INTx: Must be 0 + * + * Return: The Linux interrupt number or -EINVAL if @nr is out of range. */ int pci_irq_vector(struct pci_dev *dev, unsigned int nr) { - if (dev->msix_enabled) { - struct msi_desc *entry; - int i = 0; - - for_each_pci_msi_entry(entry, dev) { - if (i == nr) - return entry->irq; - i++; - } - WARN_ON_ONCE(1); - return -EINVAL; - } + unsigned int irq; - if (dev->msi_enabled) { - struct msi_desc *entry = first_pci_msi_entry(dev); - - if (WARN_ON_ONCE(nr >= entry->nvec_used)) - return -EINVAL; - } else { - if (WARN_ON_ONCE(nr > 0)) - return -EINVAL; - } + if (!dev->msi_enabled && !dev->msix_enabled) + return !nr ? dev->irq : -EINVAL; - return dev->irq + nr; + irq = msi_get_virq(&dev->dev, nr); + return irq ? irq : -EINVAL; } EXPORT_SYMBOL(pci_irq_vector); @@ -1230,332 +1090,58 @@ EXPORT_SYMBOL(pci_irq_vector); * pci_irq_get_affinity - return the affinity of a particular MSI vector * @dev: PCI device to operate on * @nr: device-relative interrupt vector index (0-based).
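A consumer-side sketch of the reworked pci_irq_vector(), which now resolves vectors through msi_get_virq() instead of walking descriptor lists; all foo names are hypothetical:

	static int foo_request_queue_irqs(struct pci_dev *pdev, int nr_queues)
	{
		int i, irq, nvecs;

		nvecs = pci_alloc_irq_vectors(pdev, 1, nr_queues,
					      PCI_IRQ_MSIX | PCI_IRQ_MSI);
		if (nvecs < 0)
			return nvecs;

		for (i = 0; i < nvecs; i++) {
			irq = pci_irq_vector(pdev, i);	/* Linux IRQ of vector i */
			if (irq < 0)
				return irq;
			/* request_irq(irq, foo_handler, 0, "foo", foo_ctx); */
		}
		return 0;
	}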
+ * + * @nr has the following meanings depending on the interrupt mode: + * MSI-X: The index in the MSI-X vector table + * MSI: The index of the enabled MSI vectors + * INTx: Must be 0 + * + * Return: A cpumask pointer or NULL if @nr is out of range */ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) { - if (dev->msix_enabled) { - struct msi_desc *entry; - int i = 0; + int idx, irq = pci_irq_vector(dev, nr); + struct msi_desc *desc; - for_each_pci_msi_entry(entry, dev) { - if (i == nr) - return &entry->affinity->mask; - i++; - } - WARN_ON_ONCE(1); + if (WARN_ON_ONCE(irq <= 0)) return NULL; - } else if (dev->msi_enabled) { - struct msi_desc *entry = first_pci_msi_entry(dev); - if (WARN_ON_ONCE(!entry || !entry->affinity || - nr >= entry->nvec_used)) - return NULL; - - return &entry->affinity[nr].mask; - } else { + desc = irq_get_msi_desc(irq); + /* Non-MSI does not have the information handy */ + if (!desc) return cpu_possible_mask; - } -} -EXPORT_SYMBOL(pci_irq_get_affinity); -struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) -{ - return to_pci_dev(desc->dev); -} -EXPORT_SYMBOL(msi_desc_to_pci_dev); - -void *msi_desc_to_pci_sysdata(struct msi_desc *desc) -{ - struct pci_dev *dev = msi_desc_to_pci_dev(desc); - - return dev->bus->sysdata; -} -EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata); - -#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -/** - * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space - * @irq_data: Pointer to interrupt data of the MSI interrupt - * @msg: Pointer to the message - */ -void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) -{ - struct msi_desc *desc = irq_data_get_msi_desc(irq_data); + if (WARN_ON_ONCE(!desc->affinity)) + return NULL; /* - * For MSI-X desc->irq is always equal to irq_data->irq. For - * MSI only the first interrupt of MULTI MSI passes the test. + * MSI has a mask array in the descriptor. + * MSI-X has a single mask. */ - if (desc->irq == irq_data->irq) - __pci_write_msi_msg(desc, msg); -} - -/** - * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source - * @desc: Pointer to the MSI descriptor - * - * The ID number is only used within the irqdomain. 
- */ -static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc) -{ - struct pci_dev *dev = msi_desc_to_pci_dev(desc); - - return (irq_hw_number_t)desc->msi_attrib.entry_nr | - pci_dev_id(dev) << 11 | - (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; -} - -static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) -{ - return !desc->msi_attrib.is_msix && desc->nvec_used > 1; -} - -/** - * pci_msi_domain_check_cap - Verify that @domain supports the capabilities - * for @dev - * @domain: The interrupt domain to check - * @info: The domain info for verification - * @dev: The device to check - * - * Returns: - * 0 if the functionality is supported - * 1 if Multi MSI is requested, but the domain does not support it - * -ENOTSUPP otherwise - */ -int pci_msi_domain_check_cap(struct irq_domain *domain, - struct msi_domain_info *info, struct device *dev) -{ - struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev)); - - /* Special handling to support __pci_enable_msi_range() */ - if (pci_msi_desc_is_multi_msi(desc) && - !(info->flags & MSI_FLAG_MULTI_PCI_MSI)) - return 1; - else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX)) - return -ENOTSUPP; - - return 0; -} - -static int pci_msi_domain_handle_error(struct irq_domain *domain, - struct msi_desc *desc, int error) -{ - /* Special handling to support __pci_enable_msi_range() */ - if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC) - return 1; - - return error; -} - -static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, - struct msi_desc *desc) -{ - arg->desc = desc; - arg->hwirq = pci_msi_domain_calc_hwirq(desc); -} - -static struct msi_domain_ops pci_msi_domain_ops_default = { - .set_desc = pci_msi_domain_set_desc, - .msi_check = pci_msi_domain_check_cap, - .handle_error = pci_msi_domain_handle_error, -}; - -static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info) -{ - struct msi_domain_ops *ops = info->ops; - - if (ops == NULL) { - info->ops = &pci_msi_domain_ops_default; - } else { - if (ops->set_desc == NULL) - ops->set_desc = pci_msi_domain_set_desc; - if (ops->msi_check == NULL) - ops->msi_check = pci_msi_domain_check_cap; - if (ops->handle_error == NULL) - ops->handle_error = pci_msi_domain_handle_error; - } -} - -static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) -{ - struct irq_chip *chip = info->chip; - - BUG_ON(!chip); - if (!chip->irq_write_msi_msg) - chip->irq_write_msi_msg = pci_msi_domain_write_msg; - if (!chip->irq_mask) - chip->irq_mask = pci_msi_mask_irq; - if (!chip->irq_unmask) - chip->irq_unmask = pci_msi_unmask_irq; -} - -/** - * pci_msi_create_irq_domain - Create a MSI interrupt domain - * @fwnode: Optional fwnode of the interrupt controller - * @info: MSI domain info - * @parent: Parent irq domain - * - * Updates the domain and chip ops and creates a MSI interrupt domain. - * - * Returns: - * A domain pointer or NULL in case of failure. 
- */ -struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, - struct msi_domain_info *info, - struct irq_domain *parent) -{ - struct irq_domain *domain; - - if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE)) - info->flags &= ~MSI_FLAG_LEVEL_CAPABLE; - - if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) - pci_msi_domain_update_dom_ops(info); - if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) - pci_msi_domain_update_chip_ops(info); - - info->flags |= MSI_FLAG_ACTIVATE_EARLY; - if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) - info->flags |= MSI_FLAG_MUST_REACTIVATE; - - /* PCI-MSI is oneshot-safe */ - info->chip->flags |= IRQCHIP_ONESHOT_SAFE; - - domain = msi_create_irq_domain(fwnode, info, parent); - if (!domain) - return NULL; - - irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI); - return domain; -} -EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); - -/* - * Users of the generic MSI infrastructure expect a device to have a single ID, - * so with DMA aliases we have to pick the least-worst compromise. Devices with - * DMA phantom functions tend to still emit MSIs from the real function number, - * so we ignore those and only consider topological aliases where either the - * alias device or RID appears on a different bus number. We also make the - * reasonable assumption that bridges are walked in an upstream direction (so - * the last one seen wins), and the much braver assumption that the most likely - * case is that of PCI->PCIe so we should always use the alias RID. This echoes - * the logic from intel_irq_remapping's set_msi_sid(), which presumably works - * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions - * for taking ownership all we can really do is close our eyes and hope... - */ -static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) -{ - u32 *pa = data; - u8 bus = PCI_BUS_NUM(*pa); - - if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) - *pa = alias; - - return 0; + idx = dev->msi_enabled ? nr : 0; + return &desc->affinity[idx].mask; } +EXPORT_SYMBOL(pci_irq_get_affinity); -/** - * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID) - * @domain: The interrupt domain - * @pdev: The PCI device. - * - * The RID for a device is formed from the alias, with a firmware - * supplied mapping applied - * - * Returns: The RID. - */ -u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) +struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) { - struct device_node *of_node; - u32 rid = pci_dev_id(pdev); - - pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); - - of_node = irq_domain_get_of_node(domain); - rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) : - iort_msi_map_id(&pdev->dev, rid); - - return rid; + return to_pci_dev(desc->dev); } +EXPORT_SYMBOL(msi_desc_to_pci_dev); -/** - * pci_msi_get_device_domain - Get the MSI domain for a given PCI device - * @pdev: The PCI device - * - * Use the firmware data to find a device-specific MSI domain - * (i.e. not one that is set as a default). - * - * Returns: The corresponding MSI domain or NULL if none has been found. 
- */ -struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) +void pci_no_msi(void) { - struct irq_domain *dom; - u32 rid = pci_dev_id(pdev); - - pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); - dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI); - if (!dom) - dom = iort_get_device_domain(&pdev->dev, rid, - DOMAIN_BUS_PCI_MSI); - return dom; + pci_msi_enable = 0; } /** - * pci_dev_has_special_msi_domain - Check whether the device is handled by - * a non-standard PCI-MSI domain - * @pdev: The PCI device to check. + * pci_msi_enabled - is MSI enabled? * - * Returns: True if the device irqdomain or the bus irqdomain is - * non-standard PCI/MSI. - */ -bool pci_dev_has_special_msi_domain(struct pci_dev *pdev) -{ - struct irq_domain *dom = dev_get_msi_domain(&pdev->dev); - - if (!dom) - dom = dev_get_msi_domain(&pdev->bus->dev); - - if (!dom) - return true; - - return dom->bus_token != DOMAIN_BUS_PCI_MSI; -} - -#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ -#endif /* CONFIG_PCI_MSI */ - -void pci_msi_init(struct pci_dev *dev) -{ - u16 ctrl; - - /* - * Disable the MSI hardware to avoid screaming interrupts - * during boot. This is the power on reset default so - * usually this should be a noop. - */ - dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); - if (!dev->msi_cap) - return; - - pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl); - if (ctrl & PCI_MSI_FLAGS_ENABLE) - pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, - ctrl & ~PCI_MSI_FLAGS_ENABLE); - - if (!(ctrl & PCI_MSI_FLAGS_64BIT)) - dev->no_64bit_msi = 1; -} - -void pci_msix_init(struct pci_dev *dev) + * Returns true if MSI has not been disabled by the command-line option + * pci=nomsi. + **/ +int pci_msi_enabled(void) { - u16 ctrl; - - dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); - if (!dev->msix_cap) - return; - - pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); - if (ctrl & PCI_MSIX_FLAGS_ENABLE) - pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, - ctrl & ~PCI_MSIX_FLAGS_ENABLE); + return pci_msi_enable; } +EXPORT_SYMBOL(pci_msi_enabled); diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h new file mode 100644 index 000000000000..dbeff066bedd --- /dev/null +++ b/drivers/pci/msi/msi.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/pci.h> +#include <linux/msi.h> + +#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) + +extern int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern void pci_msi_teardown_msi_irqs(struct pci_dev *dev); + +#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS +extern int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev); +#else +static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + WARN_ON_ONCE(1); + return -ENODEV; +} + +static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev) +{ + WARN_ON_ONCE(1); +} +#endif + +/* + * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to + * mask all MSI interrupts by clearing the MSI enable bit does not work + * reliably as devices without an INTx disable bit will then generate a + * level IRQ which will never be cleared. 
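pci_msi_enabled() above still only reports whether MSI was disabled system-wide (e.g. with pci=nomsi). A hedged sketch of how a caller might use it to pick allocation flags; the names are illustrative, and pci_alloc_irq_vectors() already performs this gating internally:

/* Sketch: restrict vector types when MSI is disabled system-wide. */
static int example_alloc_vectors(struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;

	if (pci_msi_enabled())
		flags |= PCI_IRQ_MSI | PCI_IRQ_MSIX;

	return pci_alloc_irq_vectors(pdev, 1, 8, flags);
}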
+ */ +static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc) +{ + /* Don't shift by >= width of type */ + if (desc->pci.msi_attrib.multi_cap >= 5) + return 0xffffffff; + return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1; +} diff --git a/drivers/pci/msi/pcidev_msi.c b/drivers/pci/msi/pcidev_msi.c new file mode 100644 index 000000000000..5520aff53b56 --- /dev/null +++ b/drivers/pci/msi/pcidev_msi.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MSI[X] related functions which are available unconditionally. + */ +#include "../pci.h" + +/* + * Disable the MSI[X] hardware to avoid screaming interrupts during boot. + * This is the power on reset default so usually this should be a noop. + */ + +void pci_msi_init(struct pci_dev *dev) +{ + u16 ctrl; + + dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); + if (!dev->msi_cap) + return; + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl); + if (ctrl & PCI_MSI_FLAGS_ENABLE) { + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, + ctrl & ~PCI_MSI_FLAGS_ENABLE); + } + + if (!(ctrl & PCI_MSI_FLAGS_64BIT)) + dev->no_64bit_msi = 1; +} + +void pci_msix_init(struct pci_dev *dev) +{ + u16 ctrl; + + dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); + if (!dev->msix_cap) + return; + + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + if (ctrl & PCI_MSIX_FLAGS_ENABLE) { + pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, + ctrl & ~PCI_MSIX_FLAGS_ENABLE); + } +} diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 0b1237cff239..cb2e8351c2cc 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -247,7 +247,7 @@ void of_pci_check_probe_only(void) else pci_clear_flags(PCI_PROBE_ONLY); - pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis"); + pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled"); } EXPORT_SYMBOL_GPL(of_pci_check_probe_only); diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 454d5f6f16ff..1015274bd2fe 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -710,7 +710,7 @@ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size) if (!ret) goto out; - if (unlikely(!percpu_ref_tryget_live(ref))) { + if (unlikely(!percpu_ref_tryget_live_rcu(ref))) { gen_pool_free(p2pdma->pool, (unsigned long) ret, size); ret = NULL; goto out; diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index db97cddfc85e..c994ebec2360 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -139,8 +139,13 @@ struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = { .ro = GENMASK(7, 0), }, + /* + * If expansion ROM is unsupported then the ROM Base Address register must + * be implemented as a read-only register that returns 0 when read, same + * as for unused Base Address registers. + */ [PCI_ROM_ADDRESS1 / 4] = { - .rw = GENMASK(31, 11) | BIT(0), + .ro = ~0, }, /* @@ -171,41 +176,55 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = [PCI_CAP_LIST_ID / 4] = { /* * Capability ID, Next Capability Pointer and - * Capabilities register are all read-only. + * bits [14:0] of Capabilities register are all read-only. + * Bit 15 of Capabilities register is reserved. */ - .ro = ~0, + .ro = GENMASK(30, 0), }, [PCI_EXP_DEVCAP / 4] = { - .ro = ~0, + /* + * Bits [31:29] and [17:16] are reserved. + * Bits [27:18] are reserved for non-upstream ports. + * Bits 28 and [14:6] are reserved for non-endpoint devices. + * Other bits are read-only. 
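msi_multi_mask() above sizes the mask to the 1 << multi_cap vectors a multi-MSI device can advertise. A quick worked check for a few assumed multi_cap values:

/*
 * multi_cap = 0 ->  1 vector  -> (1 << (1 << 0)) - 1 = 0x00000001
 * multi_cap = 3 ->  8 vectors -> (1 << (1 << 3)) - 1 = 0x000000ff
 * multi_cap = 5 -> 32 vectors -> 1 << 32 would overflow a u32,
 *                  hence the early return of 0xffffffff.
 */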
+ */ + .ro = BIT(15) | GENMASK(5, 0), }, [PCI_EXP_DEVCTL / 4] = { - /* Device control register is RW */ - .rw = GENMASK(15, 0), + /* + * Device control register is RW, except bit 15 which is + * reserved for non-endpoints or non-PCIe-to-PCI/X bridges. + */ + .rw = GENMASK(14, 0), /* * Device status register has bits 6 and [3:0] W1C, [5:4] RO, - * the rest is reserved + * the rest is reserved. Also bit 6 is reserved for non-upstream + * ports. */ - .w1c = (BIT(6) | GENMASK(3, 0)) << 16, + .w1c = GENMASK(3, 0) << 16, .ro = GENMASK(5, 4) << 16, }, [PCI_EXP_LNKCAP / 4] = { - /* All bits are RO, except bit 23 which is reserved */ - .ro = lower_32_bits(~BIT(23)), + /* + * All bits are RO, except bit 23 which is reserved and + * bit 18 which is reserved for non-upstream ports. + */ + .ro = lower_32_bits(~(BIT(23) | PCI_EXP_LNKCAP_CLKPM)), }, [PCI_EXP_LNKCTL / 4] = { /* * Link control has bits [15:14], [11:3] and [1:0] RW, the - * rest is reserved. + * rest is reserved. Bit 8 is reserved for non-upstream ports. * * Link status has bits [13:0] RO, and bits [15:14] * W1C. */ - .rw = GENMASK(15, 14) | GENMASK(11, 3) | GENMASK(1, 0), + .rw = GENMASK(15, 14) | GENMASK(11, 9) | GENMASK(7, 3) | GENMASK(1, 0), .ro = GENMASK(13, 0) << 16, .w1c = GENMASK(15, 14) << 16, }, @@ -251,6 +270,49 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING, .w1c = PCI_EXP_RTSTA_PME, }, + + [PCI_EXP_DEVCAP2 / 4] = { + /* + * Device capabilities 2 register has reserved bits [30:27]. + * Also bits [26:24] are reserved for non-upstream ports. + */ + .ro = BIT(31) | GENMASK(23, 0), + }, + + [PCI_EXP_DEVCTL2 / 4] = { + /* + * Device control 2 register is RW. Bit 11 is reserved for + * non-upstream ports. + * + * Device status 2 register is reserved. + */ + .rw = GENMASK(15, 12) | GENMASK(10, 0), + }, + + [PCI_EXP_LNKCAP2 / 4] = { + /* Link capabilities 2 register has reserved bits [30:25] and 0. */ + .ro = BIT(31) | GENMASK(24, 1), + }, + + [PCI_EXP_LNKCTL2 / 4] = { + /* + * Link control 2 register is RW. + * + * Link status 2 register has bits 5, 15 W1C; + * bits 10, 11 reserved and others are RO. + */ + .rw = GENMASK(15, 0), + .w1c = (BIT(15) | BIT(5)) << 16, + .ro = (GENMASK(14, 12) | GENMASK(9, 6) | GENMASK(4, 0)) << 16, + }, + + [PCI_EXP_SLTCAP2 / 4] = { + /* Slot capabilities 2 register is reserved. */ + }, + + [PCI_EXP_SLTCTL2 / 4] = { + /* Both Slot control 2 and Slot status 2 registers are reserved. 
*/ + }, }; /* @@ -265,7 +327,11 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, { BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END); - bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16); + /* + * class_revision: Class is the high 24 bits and revision is the low 8 bits of this member, + * while the class for PCI Bridge Normal Decode has the 24-bit value: PCI_CLASS_BRIDGE_PCI << 8 + */ + bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8); bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE; bridge->conf.cache_line_size = 0x10; bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST); @@ -277,11 +343,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, if (bridge->has_pcie) { bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START; + bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST); bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP; - /* Set PCIe v2, root port, slot support */ - bridge->pcie_conf.cap = - cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | - PCI_EXP_FLAGS_SLOT); + bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4); bridge->pcie_cap_regs_behavior = kmemdup(pcie_cap_regs_behavior, sizeof(pcie_cap_regs_behavior), @@ -290,6 +354,27 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, kfree(bridge->pci_regs_behavior); return -ENOMEM; } + /* These bits are applicable only for PCI and reserved on PCIe */ + bridge->pci_regs_behavior[PCI_CACHE_LINE_SIZE / 4].ro &= + ~GENMASK(15, 8); + bridge->pci_regs_behavior[PCI_COMMAND / 4].ro &= + ~((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE | + PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT | + PCI_COMMAND_FAST_BACK) | + (PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MASK) << 16); + bridge->pci_regs_behavior[PCI_PRIMARY_BUS / 4].ro &= + ~GENMASK(31, 24); + bridge->pci_regs_behavior[PCI_IO_BASE / 4].ro &= + ~((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MASK) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].rw &= + ~((PCI_BRIDGE_CTL_MASTER_ABORT | + BIT(8) | BIT(9) | BIT(11)) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].ro &= + ~((PCI_BRIDGE_CTL_FAST_BACK) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].w1c &= + ~(BIT(10) << 16); } if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) { diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index cfe2f85af09e..602f0fb0b007 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -62,11 +62,8 @@ static ssize_t irq_show(struct device *dev, * For MSI, show the first MSI IRQ; for all other cases including * MSI-X, show the legacy INTx IRQ. 
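The pci-bridge-emul hunks above refine the .ro/.rw/.w1c behavior masks; the emulation itself boils down to one merge on every config write. A minimal sketch of that idea, not the driver's exact code:

/* Sketch: merge a 32-bit config write through one behavior entry. */
static u32 example_emul_write(u32 old, u32 new, u32 mask,
			      const struct pci_bridge_reg_behavior *b)
{
	u32 val = old & ~(b->rw & mask);	/* keep non-writable bits */

	val |= new & b->rw & mask;		/* take RW bits from the write */
	val &= ~(new & b->w1c & mask);		/* writing 1 clears W1C bits */
	return val;
}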
*/ - if (pdev->msi_enabled) { - struct msi_desc *desc = first_pci_msi_entry(pdev); - - return sysfs_emit(buf, "%u\n", desc->irq); - } + if (pdev->msi_enabled) + return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0)); #endif return sysfs_emit(buf, "%u\n", pdev->irq); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 3d2fb394986a..9ecce435fb3f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1115,7 +1115,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) return -EIO; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - if (pmcsr == (u16) ~0) { + if (PCI_POSSIBLE_ERROR(pmcsr)) { pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n", pci_power_name(dev->current_state), pci_power_name(state)); @@ -1271,16 +1271,16 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) * After reset, the device should not silently discard config * requests, but it may still indicate that it needs more time by * responding to them with CRS completions. The Root Port will - * generally synthesize ~0 data to complete the read (except when - * CRS SV is enabled and the read was for the Vendor ID; in that - * case it synthesizes 0x0001 data). + * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete + * the read (except when CRS SV is enabled and the read was for the + * Vendor ID; in that case it synthesizes 0x0001 data). * * Wait for the device to return a non-CRS completion. Read the * Command register instead of Vendor ID so we don't have to * contend with the CRS SV value. */ pci_read_config_dword(dev, PCI_COMMAND, &id); - while (id == ~0) { + while (PCI_POSSIBLE_ERROR(id)) { if (delay > timeout) { pci_warn(dev, "not ready %dms after %s; giving up\n", delay - 1, reset_type); @@ -1556,7 +1556,7 @@ static void pci_save_ltr_state(struct pci_dev *dev) { int ltr; struct pci_cap_saved_state *save_state; - u16 *cap; + u32 *cap; if (!pci_is_pcie(dev)) return; @@ -1571,25 +1571,25 @@ static void pci_save_ltr_state(struct pci_dev *dev) return; } - cap = (u16 *)&save_state->cap.data[0]; - pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++); - pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++); + /* Some broken devices only support dword access to LTR */ + cap = &save_state->cap.data[0]; + pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap); } static void pci_restore_ltr_state(struct pci_dev *dev) { struct pci_cap_saved_state *save_state; int ltr; - u16 *cap; + u32 *cap; save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); if (!save_state || !ltr) return; - cap = (u16 *)&save_state->cap.data[0]; - pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++); - pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++); + /* Some broken devices only support dword access to LTR */ + cap = &save_state->cap.data[0]; + pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap); } /** @@ -2024,11 +2024,6 @@ static void pcim_release(struct device *gendev, void *res) struct pci_devres *this = res; int i; - if (dev->msi_enabled) - pci_disable_msi(dev); - if (dev->msix_enabled) - pci_disable_msix(dev); - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) if (this->region_mask & (1 << i)) pci_release_region(dev, i); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 52c74682601a..a96b7424c9bc 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -41,11 +41,6 @@ #define 
ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \ ASPM_STATE_L1SS) -struct aspm_latency { - u32 l0s; /* L0s latency (nsec) */ - u32 l1; /* L1 latency (nsec) */ -}; - struct pcie_link_state { struct pci_dev *pdev; /* Upstream component of the Link */ struct pci_dev *downstream; /* Downstream component, function 0 */ @@ -65,15 +60,6 @@ struct pcie_link_state { u32 clkpm_enabled:1; /* Current Clock PM state */ u32 clkpm_default:1; /* Default Clock PM state by BIOS */ u32 clkpm_disable:1; /* Clock PM disabled */ - - /* Exit latencies */ - struct aspm_latency latency_up; /* Upstream direction exit latency */ - struct aspm_latency latency_dw; /* Downstream direction exit latency */ - /* - * Endpoint acceptable latencies. A pcie downstream port only - * has one slot under it, so at most there are 8 functions. - */ - struct aspm_latency acceptable[8]; }; static int aspm_disabled, aspm_force; @@ -105,6 +91,20 @@ static const char *policy_str[] = { #define LINK_RETRAIN_TIMEOUT HZ +/* + * The L1 PM substate capability is only implemented in function 0 in a + * multi function device. + */ +static struct pci_dev *pci_function_0(struct pci_bus *linkbus) +{ + struct pci_dev *child; + + list_for_each_entry(child, &linkbus->devices, bus_list) + if (PCI_FUNC(child->devfn) == 0) + return child; + return NULL; +} + static int policy_to_aspm_state(struct pcie_link_state *link) { switch (aspm_policy) { @@ -378,8 +378,10 @@ static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value) static void pcie_aspm_check_latency(struct pci_dev *endpoint) { - u32 latency, l1_switch_latency = 0; - struct aspm_latency *acceptable; + u32 latency, encoding, lnkcap_up, lnkcap_dw; + u32 l1_switch_latency = 0, latency_up_l0s; + u32 latency_up_l1, latency_dw_l0s, latency_dw_l1; + u32 acceptable_l0s, acceptable_l1; struct pcie_link_state *link; /* Device not in D0 doesn't need latency check */ @@ -388,17 +390,36 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) return; link = endpoint->bus->self->link_state; - acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)]; + + /* Calculate endpoint L0s acceptable latency */ + encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6; + acceptable_l0s = calc_l0s_acceptable(encoding); + + /* Calculate endpoint L1 acceptable latency */ + encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9; + acceptable_l1 = calc_l1_acceptable(encoding); while (link) { + struct pci_dev *dev = pci_function_0(link->pdev->subordinate); + + /* Read direction exit latencies */ + pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP, + &lnkcap_up); + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, + &lnkcap_dw); + latency_up_l0s = calc_l0s_latency(lnkcap_up); + latency_up_l1 = calc_l1_latency(lnkcap_up); + latency_dw_l0s = calc_l0s_latency(lnkcap_dw); + latency_dw_l1 = calc_l1_latency(lnkcap_dw); + /* Check upstream direction L0s latency */ if ((link->aspm_capable & ASPM_STATE_L0S_UP) && - (link->latency_up.l0s > acceptable->l0s)) + (latency_up_l0s > acceptable_l0s)) link->aspm_capable &= ~ASPM_STATE_L0S_UP; /* Check downstream direction L0s latency */ if ((link->aspm_capable & ASPM_STATE_L0S_DW) && - (link->latency_dw.l0s > acceptable->l0s)) + (latency_dw_l0s > acceptable_l0s)) link->aspm_capable &= ~ASPM_STATE_L0S_DW; /* * Check L1 latency. @@ -413,9 +434,9 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) * L1 exit latencies advertised by a device include L1 * substate latencies (and hence do not do any check). 
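With the cached aspm_latency values removed above, the endpoint's acceptable latencies are re-derived from PCI_EXP_DEVCAP on every check. A worked decode under an assumed DEVCAP value, using the 64 ns / 1000 ns granularities of the calc_l0s_acceptable()/calc_l1_acceptable() helpers (an encoding of 0x7 means no limit):

/*
 * devcap = 0x00000a42 (assumed):
 *   L0s: (devcap & PCI_EXP_DEVCAP_L0S) >> 6 = 0x1 -> 64 << 1   = 128 ns
 *   L1:  (devcap & PCI_EXP_DEVCAP_L1)  >> 9 = 0x5 -> 1000 << 5 = 32 us
 */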
*/ - latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1); + latency = max_t(u32, latency_up_l1, latency_dw_l1); if ((link->aspm_capable & ASPM_STATE_L1) && - (latency + l1_switch_latency > acceptable->l1)) + (latency + l1_switch_latency > acceptable_l1)) link->aspm_capable &= ~ASPM_STATE_L1; l1_switch_latency += 1000; @@ -423,20 +444,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -/* - * The L1 PM substate capability is only implemented in function 0 in a - * multi function device. - */ -static struct pci_dev *pci_function_0(struct pci_bus *linkbus) -{ - struct pci_dev *child; - - list_for_each_entry(child, &linkbus->devices, bus_list) - if (PCI_FUNC(child->devfn) == 0) - return child; - return NULL; -} - static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, u32 clear, u32 set) { @@ -496,6 +503,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, encode_l12_threshold(l1_2_threshold, &scale, &value); ctl1 |= t_common_mode << 8 | scale << 29 | value << 16; + /* Some broken devices only support dword access to L1 SS */ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1); pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2); pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1); @@ -593,8 +601,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) link->aspm_enabled |= ASPM_STATE_L0S_UP; if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S) link->aspm_enabled |= ASPM_STATE_L0S_DW; - link->latency_up.l0s = calc_l0s_latency(parent_lnkcap); - link->latency_dw.l0s = calc_l0s_latency(child_lnkcap); /* Setup L1 state */ if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1) @@ -602,8 +608,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1) link->aspm_enabled |= ASPM_STATE_L1; - link->latency_up.l1 = calc_l1_latency(parent_lnkcap); - link->latency_dw.l1 = calc_l1_latency(child_lnkcap); /* Setup L1 substate */ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP, @@ -660,22 +664,10 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Get and check endpoint acceptable latencies */ list_for_each_entry(child, &linkbus->devices, bus_list) { - u32 reg32, encoding; - struct aspm_latency *acceptable = - &link->acceptable[PCI_FUNC(child->devfn)]; - if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT && pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END) continue; - pcie_capability_read_dword(child, PCI_EXP_DEVCAP, ®32); - /* Calculate endpoint L0s acceptable latency */ - encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; - acceptable->l0s = calc_l0s_acceptable(encoding); - /* Calculate endpoint L1 acceptable latency */ - encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; - acceptable->l1 = calc_l1_acceptable(encoding); - pcie_aspm_check_latency(child); } } diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index c556e7beafe3..3e9afee02e8d 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -79,7 +79,7 @@ static bool dpc_completed(struct pci_dev *pdev) u16 status; pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status); - if ((status != 0xffff) && (status & PCI_EXP_DPC_STATUS_TRIGGER)) + if ((!PCI_POSSIBLE_ERROR(status)) && (status & PCI_EXP_DPC_STATUS_TRIGGER)) return false; if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags)) @@ -312,7 +312,7 @@ static irqreturn_t dpc_irq(int irq, void *context) pci_read_config_word(pdev, cap + 
PCI_EXP_DPC_STATUS, &status); - if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0)) + if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || PCI_POSSIBLE_ERROR(status)) return IRQ_NONE; pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS, diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 1d0dd77fed3a..ef8ce436ead9 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -224,7 +224,7 @@ static void pcie_pme_work_fn(struct work_struct *work) break; pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); - if (rtsta == (u32) ~0) + if (PCI_POSSIBLE_ERROR(rtsta)) break; if (rtsta & PCI_EXP_RTSTA_PME) { @@ -274,7 +274,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) spin_lock_irqsave(&data->lock, flags); pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); - if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) { + if (PCI_POSSIBLE_ERROR(rtsta) || !(rtsta & PCI_EXP_RTSTA_PME)) { spin_unlock_irqrestore(&data->lock, flags); return IRQ_NONE; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 087d3658f75c..17a969942d37 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -206,14 +206,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit * 1 must be clear. */ - if (sz == 0xffffffff) + if (PCI_POSSIBLE_ERROR(sz)) sz = 0; /* * I don't know how l can have all bits set. Copied from old code. * Maybe it fixes a bug on some ancient platform. */ - if (l == 0xffffffff) + if (PCI_POSSIBLE_ERROR(l)) l = 0; if (type == pci_bar_unknown) { @@ -898,8 +898,6 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) bridge->bus = bus; - /* Temporarily move resources off the list */ - list_splice_init(&bridge->windows, &resources); bus->sysdata = bridge->sysdata; bus->ops = bridge->ops; bus->number = bus->busn_res.start = bridge->busnr; @@ -925,6 +923,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) if (err) goto free; + /* Temporarily move resources off the list */ + list_splice_init(&bridge->windows, &resources); err = device_add(&bridge->dev); if (err) { put_device(&bridge->dev); @@ -1579,20 +1579,12 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) static void set_pcie_thunderbolt(struct pci_dev *dev) { - int vsec = 0; - u32 header; - - while ((vsec = pci_find_next_ext_capability(dev, vsec, - PCI_EXT_CAP_ID_VNDR))) { - pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header); + u16 vsec; - /* Is the device part of a Thunderbolt controller? */ - if (dev->vendor == PCI_VENDOR_ID_INTEL && - PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) { - dev->is_thunderbolt = 1; - return; - } - } + /* Is the device part of a Thunderbolt controller? 
*/ + vsec = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, PCI_VSEC_ID_INTEL_TBT); + if (vsec) + dev->is_thunderbolt = 1; } static void set_pcie_untrusted(struct pci_dev *dev) @@ -1683,7 +1675,7 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev) if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) return PCI_CFG_SPACE_SIZE; - if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev)) + if (PCI_POSSIBLE_ERROR(status) || pci_ext_cfg_is_aliased(dev)) return PCI_CFG_SPACE_SIZE; return PCI_CFG_SPACE_EXP_SIZE; @@ -2311,7 +2303,9 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus) INIT_LIST_HEAD(&dev->bus_list); dev->dev.type = &pci_dev_type; dev->bus = pci_bus_get(bus); - +#ifdef CONFIG_PCI_MSI + raw_spin_lock_init(&dev->msi_lock); +#endif return dev; } EXPORT_SYMBOL(pci_alloc_dev); @@ -2371,8 +2365,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; - /* Some broken boards return 0 or ~0 if a slot is empty: */ - if (*l == 0xffffffff || *l == 0x00000000 || + /* Some broken boards return 0 or ~0 (PCI_ERROR_RESPONSE) if a slot is empty: */ + if (PCI_POSSIBLE_ERROR(*l) || *l == 0x00000000 || *l == 0x0000ffff || *l == 0xffff0000) return false; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b4cb658cce2b..d2dd6a6cda60 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -980,8 +980,8 @@ static void quirk_via_ioapic(struct pci_dev *dev) else tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ - pci_info(dev, "%sbling VIA external APIC routing\n", - tmp == 0 ? "Disa" : "Ena"); + pci_info(dev, "%s VIA external APIC routing\n", + tmp ? "Enabling" : "Disabling"); /* Offset 0x58: External APIC IRQ output control */ pci_write_config_byte(dev, 0x58, tmp); @@ -4103,6 +4103,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125, + quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ @@ -5683,6 +5686,15 @@ SWITCHTEC_QUIRK(0x4268); /* PAX 68XG4 */ SWITCHTEC_QUIRK(0x4252); /* PAX 52XG4 */ SWITCHTEC_QUIRK(0x4236); /* PAX 36XG4 */ SWITCHTEC_QUIRK(0x4228); /* PAX 28XG4 */ +SWITCHTEC_QUIRK(0x4352); /* PFXA 52XG4 */ +SWITCHTEC_QUIRK(0x4336); /* PFXA 36XG4 */ +SWITCHTEC_QUIRK(0x4328); /* PFXA 28XG4 */ +SWITCHTEC_QUIRK(0x4452); /* PSXA 52XG4 */ +SWITCHTEC_QUIRK(0x4436); /* PSXA 36XG4 */ +SWITCHTEC_QUIRK(0x4428); /* PSXA 28XG4 */ +SWITCHTEC_QUIRK(0x4552); /* PAXA 52XG4 */ +SWITCHTEC_QUIRK(0x4536); /* PAXA 36XG4 */ +SWITCHTEC_QUIRK(0x4528); /* PAXA 28XG4 */ /* * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints. 
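Several hunks above convert open-coded comparisons against ~0 into PCI_POSSIBLE_ERROR(), which checks a config read against the all-ones error response at the value's own width. The resulting pattern, sketched here for an arbitrary register (the helper name is illustrative):

/* Sketch: detect a (possibly) failed config space read. */
static int example_check_device(struct pci_dev *pdev)
{
	u16 status;

	pci_read_config_word(pdev, PCI_STATUS, &status);
	if (PCI_POSSIBLE_ERROR(status))
		return -ENODEV;	/* device gone or not responding */
	return 0;
}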
@@ -5857,3 +5869,13 @@ static void nvidia_ion_ahci_fixup(struct pci_dev *pdev) pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup); + +static void rom_bar_overlap_defect(struct pci_dev *dev) +{ + pci_info(dev, "working around ROM BAR overlap defect\n"); + dev->rom_bar_overlap = 1; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1533, rom_bar_overlap_defect); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1536, rom_bar_overlap_defect); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1537, rom_bar_overlap_defect); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1538, rom_bar_overlap_defect); diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7f1acb3918d0..439ac5f5907a 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -75,12 +75,16 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) * as zero when disabled, so don't update ROM BARs unless * they're enabled. See * https://lore.kernel.org/r/43147B3D.1030309@vc.cvut.cz/ + * But we must update ROM BAR for buggy devices where even a + * disabled ROM can conflict with other BARs. */ - if (!(res->flags & IORESOURCE_ROM_ENABLE)) + if (!(res->flags & IORESOURCE_ROM_ENABLE) && + !dev->rom_bar_overlap) return; reg = dev->rom_base_reg; - new |= PCI_ROM_ADDRESS_ENABLE; + if (res->flags & IORESOURCE_ROM_ENABLE) + new |= PCI_ROM_ADDRESS_ENABLE; } else return; diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 751a26668e3a..a0c67191a8b9 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -96,11 +96,12 @@ static struct attribute *pci_slot_default_attrs[] = { &pci_slot_attr_cur_speed.attr, NULL, }; +ATTRIBUTE_GROUPS(pci_slot_default); static struct kobj_type pci_slot_ktype = { .sysfs_ops = &pci_slot_sysfs_ops, .release = &pci_slot_release, - .default_attrs = pci_slot_default_attrs, + .default_groups = pci_slot_default_groups, }; static char *make_slot_name(const char *name) diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 38c2b036fb8e..c36c1238c604 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -122,7 +122,7 @@ static void stuser_set_state(struct switchtec_user *stuser, { /* requires the mrpc_mutex to already be held when called */ - const char * const state_names[] = { + static const char * const state_names[] = { [MRPC_IDLE] = "IDLE", [MRPC_QUEUED] = "QUEUED", [MRPC_RUNNING] = "RUNNING", @@ -1779,6 +1779,15 @@ static const struct pci_device_id switchtec_pci_tbl[] = { SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4 SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4 SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4 + SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), //PFXA 52XG4 + SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), //PFXA 36XG4 + SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), //PFXA 28XG4 + SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), //PSXA 52XG4 + SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), //PSXA 36XG4 + SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), //PSXA 28XG4 + SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), //PAXA 52XG4 + SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), //PAXA 36XG4 + SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), //PAXA 28XG4 {0} }; MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index d858d25b6cab..d2a7b9fd678b 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -262,8 +262,8 @@ static int 
pci_frontend_enable_msix(struct pci_dev *dev, } i = 0; - for_each_pci_msi_entry(entry, dev) { - op.msix_entries[i].entry = entry->msi_attrib.entry_nr; + msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) { + op.msix_entries[i].entry = entry->msi_index; /* Vector is useless at this point. */ op.msix_entries[i].vector = -1; i++; diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c index 6b1edfc890a3..92df2c2c5d07 100644 --- a/drivers/pcmcia/at91_cf.c +++ b/drivers/pcmcia/at91_cf.c @@ -20,6 +20,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> +#include <linux/pci.h> #include <linux/regmap.h> #include <pcmcia/ss.h> @@ -230,6 +231,7 @@ static int at91_cf_probe(struct platform_device *pdev) struct at91_cf_socket *cf; struct at91_cf_data *board; struct resource *io; + struct resource realio; int status; board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL); @@ -307,7 +309,9 @@ static int at91_cf_probe(struct platform_device *pdev) * io_offset is set to 0x10000 to avoid the check in static_find_io(). * */ cf->socket.io_offset = 0x10000; - status = pci_ioremap_io(0x10000, cf->phys_baseaddr + CF_IO_PHYS); + realio.start = cf->socket.io_offset; + realio.end = realio.start + SZ_64K - 1; + status = pci_remap_iospace(&realio, cf->phys_baseaddr + CF_IO_PHYS); if (status) goto fail0a; diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 1ae19f7301b2..c49108a72865 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -703,7 +703,6 @@ static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) static void smmu_pmu_setup_msi(struct smmu_pmu *pmu) { - struct msi_desc *desc; struct device *dev = pmu->dev; int ret; @@ -720,9 +719,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu) return; } - desc = first_msi_entry(dev); - if (desc) - pmu->irq = desc->irq; + pmu->irq = msi_get_virq(dev, 0); /* Add callback to free MSIs on teardown */ devm_add_action(dev, smmu_pmu_free_msis, dev); diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig index db5d0cd757e3..486ca23aba32 100644 --- a/drivers/phy/amlogic/Kconfig +++ b/drivers/phy/amlogic/Kconfig @@ -2,6 +2,16 @@ # # Phy drivers for Amlogic platforms # +config PHY_MESON8_HDMI_TX + tristate "Meson8, Meson8b and Meson8m2 HDMI TX PHY driver" + depends on (ARCH_MESON && ARM) || COMPILE_TEST + depends on OF + select MFD_SYSCON + help + Enable this to support the HDMI TX PHYs found in Meson8, + Meson8b and Meson8m2 SoCs. + If unsure, say N. + config PHY_MESON8B_USB2 tristate "Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY driver" default ARCH_MESON diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile index 8fa07fbd0d92..c0886c850bb0 100644 --- a/drivers/phy/amlogic/Makefile +++ b/drivers/phy/amlogic/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PHY_MESON8_HDMI_TX) += phy-meson8-hdmi-tx.o obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o diff --git a/drivers/phy/amlogic/phy-meson8-hdmi-tx.c b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c new file mode 100644 index 000000000000..f9a6572c27d8 --- /dev/null +++ b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Meson8, Meson8b and Meson8m2 HDMI TX PHY. 
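The arm_smmuv3_pmu hunk above swaps first_msi_entry() for msi_get_virq(), which returns the Linux IRQ number for a given MSI index directly. A minimal sketch of the call, using index 0 as the PMU driver does (the wrapper and its error handling are illustrative):

/* Sketch: fetch the virq of the first allocated MSI vector. */
static int example_first_msi_irq(struct device *dev)
{
	unsigned int virq = msi_get_virq(dev, 0);

	if (!virq)
		return -ENXIO;	/* no vector at that index */
	return virq;
}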
+ * + * Copyright (C) 2021 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> + +/* + * Unfortunately there is no detailed documentation available for the + * HHI_HDMI_PHY_CNTL0 register. CTL0 and CTL1 is all we know about. + * Magic register values in the driver below are taken from the vendor + * BSP / kernel. + */ +#define HHI_HDMI_PHY_CNTL0 0x3a0 + #define HHI_HDMI_PHY_CNTL0_HDMI_CTL1 GENMASK(31, 16) + #define HHI_HDMI_PHY_CNTL0_HDMI_CTL0 GENMASK(15, 0) + +#define HHI_HDMI_PHY_CNTL1 0x3a4 + #define HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE BIT(1) + #define HHI_HDMI_PHY_CNTL1_SOFT_RESET BIT(0) + +#define HHI_HDMI_PHY_CNTL2 0x3a8 + +struct phy_meson8_hdmi_tx_priv { + struct regmap *hhi; + struct clk *tmds_clk; +}; + +static int phy_meson8_hdmi_tx_init(struct phy *phy) +{ + struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy); + + return clk_prepare_enable(priv->tmds_clk); +} + +static int phy_meson8_hdmi_tx_exit(struct phy *phy) +{ + struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy); + + clk_disable_unprepare(priv->tmds_clk); + + return 0; +} + +static int phy_meson8_hdmi_tx_power_on(struct phy *phy) +{ + struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy); + unsigned int i; + u16 hdmi_ctl0; + + if (clk_get_rate(priv->tmds_clk) >= 2970UL * 1000 * 1000) + hdmi_ctl0 = 0x1e8b; + else + hdmi_ctl0 = 0x4d0b; + + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, + FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL1, 0x08c3) | + FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL0, hdmi_ctl0)); + + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, 0x0); + + /* Reset three times, just like the vendor driver does */ + for (i = 0; i < 3; i++) { + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, + HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE | + HHI_HDMI_PHY_CNTL1_SOFT_RESET); + usleep_range(1000, 2000); + + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, + HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE); + usleep_range(1000, 2000); + } + + return 0; +} + +static int phy_meson8_hdmi_tx_power_off(struct phy *phy) +{ + struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy); + + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, + FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL1, 0x0841) | + FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL0, 0x8d00)); + + return 0; +} + +static const struct phy_ops phy_meson8_hdmi_tx_ops = { + .init = phy_meson8_hdmi_tx_init, + .exit = phy_meson8_hdmi_tx_exit, + .power_on = phy_meson8_hdmi_tx_power_on, + .power_off = phy_meson8_hdmi_tx_power_off, + .owner = THIS_MODULE, +}; + +static int phy_meson8_hdmi_tx_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct phy_meson8_hdmi_tx_priv *priv; + struct phy_provider *phy_provider; + struct resource *res; + struct phy *phy; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->hhi = syscon_node_to_regmap(np->parent); + if (IS_ERR(priv->hhi)) + return PTR_ERR(priv->hhi); + + priv->tmds_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->tmds_clk)) + return PTR_ERR(priv->tmds_clk); + + phy = devm_phy_create(&pdev->dev, np, &phy_meson8_hdmi_tx_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + phy_set_drvdata(phy, priv); + + 
phy_provider = devm_of_phy_provider_register(&pdev->dev, + of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id phy_meson8_hdmi_tx_of_match[] = { + { .compatible = "amlogic,meson8-hdmi-tx-phy" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, phy_meson8_hdmi_tx_of_match); + +static struct platform_driver phy_meson8_hdmi_tx_driver = { + .probe = phy_meson8_hdmi_tx_probe, + .driver = { + .name = "phy-meson8-hdmi-tx", + .of_match_table = phy_meson8_hdmi_tx_of_match, + }, +}; +module_platform_driver(phy_meson8_hdmi_tx_driver); + +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_DESCRIPTION("Meson8, Meson8b and Meson8m2 HDMI TX PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c index 4b015b8a71c3..6a36e187d100 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c @@ -9,17 +9,23 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/slab.h> struct bcm_ns_usb2 { struct device *dev; struct clk *ref_clk; struct phy *phy; + struct regmap *clkset; + void __iomem *base; + + /* Deprecated binding */ void __iomem *dmu; }; @@ -27,7 +33,6 @@ static int bcm_ns_usb2_phy_init(struct phy *phy) { struct bcm_ns_usb2 *usb2 = phy_get_drvdata(phy); struct device *dev = usb2->dev; - void __iomem *dmu = usb2->dmu; u32 ref_clk_rate, usb2ctl, usb_pll_ndiv, usb_pll_pdiv; int err = 0; @@ -44,7 +49,10 @@ static int bcm_ns_usb2_phy_init(struct phy *phy) goto err_clk_off; } - usb2ctl = readl(dmu + BCMA_DMU_CRU_USB2_CONTROL); + if (usb2->base) + usb2ctl = readl(usb2->base); + else + usb2ctl = readl(usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL); if (usb2ctl & BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK) { usb_pll_pdiv = usb2ctl; @@ -58,15 +66,24 @@ static int bcm_ns_usb2_phy_init(struct phy *phy) usb_pll_ndiv = (1920000000 * usb_pll_pdiv) / ref_clk_rate; /* Unlock DMU PLL settings with some magic value */ - writel(0x0000ea68, dmu + BCMA_DMU_CRU_CLKSET_KEY); + if (usb2->clkset) + regmap_write(usb2->clkset, 0, 0x0000ea68); + else + writel(0x0000ea68, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY); /* Write USB 2.0 PLL control setting */ usb2ctl &= ~BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK; usb2ctl |= usb_pll_ndiv << BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT; - writel(usb2ctl, dmu + BCMA_DMU_CRU_USB2_CONTROL); + if (usb2->base) + writel(usb2ctl, usb2->base); + else + writel(usb2ctl, usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL); /* Lock DMU PLL settings */ - writel(0x00000000, dmu + BCMA_DMU_CRU_CLKSET_KEY); + if (usb2->clkset) + regmap_write(usb2->clkset, 0, 0x00000000); + else + writel(0x00000000, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY); err_clk_off: clk_disable_unprepare(usb2->ref_clk); @@ -90,15 +107,32 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev) return -ENOMEM; usb2->dev = dev; - usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu"); - if (IS_ERR(usb2->dmu)) { - dev_err(dev, "Failed to map DMU regs\n"); - return PTR_ERR(usb2->dmu); + if (of_find_property(dev->of_node, "brcm,syscon-clkset", NULL)) { + usb2->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(usb2->base)) { + dev_err(dev, "Failed to map control reg\n"); + return PTR_ERR(usb2->base); + } + + 
usb2->clkset = syscon_regmap_lookup_by_phandle(dev->of_node, + "brcm,syscon-clkset"); + if (IS_ERR(usb2->clkset)) { + dev_err(dev, "Failed to lookup clkset regmap\n"); + return PTR_ERR(usb2->clkset); + } + } else { + usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu"); + if (IS_ERR(usb2->dmu)) { + dev_err(dev, "Failed to map DMU regs\n"); + return PTR_ERR(usb2->dmu); + } + + dev_warn(dev, "using deprecated DT binding\n"); } usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk"); if (IS_ERR(usb2->ref_clk)) { - dev_err(dev, "Clock not defined\n"); + dev_err_probe(dev, PTR_ERR(usb2->ref_clk), "failed to get ref clk\n"); return PTR_ERR(usb2->ref_clk); } diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index e93818e3991f..da24acd26666 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -23,6 +23,9 @@ #include <dt-bindings/phy/phy.h> #include <dt-bindings/phy/phy-cadence.h> +#define NUM_SSC_MODE 3 +#define NUM_PHY_TYPE 4 + /* PHY register offsets */ #define SIERRA_COMMON_CDB_OFFSET 0x0 #define SIERRA_MACRO_ID_REG 0x0 @@ -31,12 +34,21 @@ #define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49 #define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A #define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B +#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D #define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F #define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50 +#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51 +#define SIERRA_CMN_PLLLC_SS_PREG 0x52 +#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53 +#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54 #define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62 +#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63 #define SIERRA_CMN_REFRCV_PREG 0x98 #define SIERRA_CMN_REFRCV1_PREG 0xB8 #define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2 +#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA +#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0 +#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2 #define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ ((0x4000 << (block_offset)) + \ @@ -49,7 +61,11 @@ #define SIERRA_DET_STANDEC_E_PREG 0x004 #define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008 #define SIERRA_PSM_A0IN_TMR_PREG 0x009 +#define SIERRA_PSM_A3IN_TMR_PREG 0x00C #define SIERRA_PSM_DIAG_PREG 0x015 +#define SIERRA_PSC_LN_A3_PREG 0x023 +#define SIERRA_PSC_LN_A4_PREG 0x024 +#define SIERRA_PSC_LN_IDLE_PREG 0x026 #define SIERRA_PSC_TX_A0_PREG 0x028 #define SIERRA_PSC_TX_A1_PREG 0x029 #define SIERRA_PSC_TX_A2_PREG 0x02A @@ -59,18 +75,22 @@ #define SIERRA_PSC_RX_A2_PREG 0x032 #define SIERRA_PSC_RX_A3_PREG 0x033 #define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A +#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B #define SIERRA_PLLCTRL_GEN_D_PREG 0x03E #define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F #define SIERRA_PLLCTRL_STATUS_PREG 0x044 #define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B #define SIERRA_DFE_BIASTRIM_PREG 0x04C #define SIERRA_DRVCTRL_ATTEN_PREG 0x06A +#define SIERRA_DRVCTRL_BOOST_PREG 0x06F #define SIERRA_CLKPATHCTRL_TMR_PREG 0x081 #define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085 #define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086 #define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087 #define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088 +#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C #define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E +#define SIERRA_RX_CTLE_CAL_PREG 0x08F #define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091 #define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092 #define SIERRA_CREQ_EQ_CTRL_PREG 0x093 @@ -120,15 +140,28 @@ #define SIERRA_DEQ_ALUT12 0x114 
#define SIERRA_DEQ_ALUT13 0x115 #define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128 +#define SIERRA_DEQ_DFETAP0 0x129 +#define SIERRA_DEQ_DFETAP1 0x12B +#define SIERRA_DEQ_DFETAP2 0x12D +#define SIERRA_DEQ_DFETAP3 0x12F +#define SIERRA_DEQ_DFETAP4 0x131 #define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134 +#define SIERRA_DEQ_PRECUR_PREG 0x138 +#define SIERRA_DEQ_POSTCUR_PREG 0x140 +#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142 #define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150 #define SIERRA_DEQ_TAU_CTRL2_PREG 0x151 +#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152 +#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158 #define SIERRA_DEQ_PICTRL_PREG 0x161 #define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170 #define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171 #define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174 #define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C +#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E +#define SIERRA_CPI_TRIM_PREG 0x17F #define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183 +#define SIERRA_EPI_CTRL_PREG 0x187 #define SIERRA_LFPSDET_SUPPORT_PREG 0x188 #define SIERRA_LFPSFILT_NS_PREG 0x18A #define SIERRA_LFPSFILT_RD_PREG 0x18B @@ -142,15 +175,36 @@ #define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F #define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150 -#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset) \ - (0xc000 << (block_offset)) +/* PHY PCS common registers */ +#define SIERRA_PHY_PCS_COMMON_OFFSET(block_offset) \ + (0xc000 << (block_offset)) +#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0 #define SIERRA_PHY_PLL_CFG 0xe +/* PHY PCS lane registers */ +#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0xD000 << (block_offset)) + \ + (((ln) << 8) << (reg_offset))) + +#define SIERRA_PHY_ISO_LINK_CTRL 0xB + +/* PHY PMA common registers */ +#define SIERRA_PHY_PMA_COMMON_OFFSET(block_offset) \ + (0xE000 << (block_offset)) +#define SIERRA_PHY_PMA_CMN_CTRL 0x000 + +/* PHY PMA lane registers */ +#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0xF000 << (block_offset)) + \ + (((ln) << 8) << (reg_offset))) + +#define SIERRA_PHY_PMA_XCVR_CTRL 0x000 + #define SIERRA_MACRO_ID 0x00007364 #define SIERRA_MAX_LANES 16 #define PLL_LOCK_TIME 100000 -#define CDNS_SIERRA_OUTPUT_CLOCKS 2 +#define CDNS_SIERRA_OUTPUT_CLOCKS 3 #define CDNS_SIERRA_INPUT_CLOCKS 5 enum cdns_sierra_clock_input { PHY_CLK, @@ -167,12 +221,21 @@ static const struct reg_field macro_id_type = REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15); static const struct reg_field phy_pll_cfg_1 = REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1); +static const struct reg_field pma_cmn_ready = + REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0); static const struct reg_field pllctrl_lock = REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0); +static const struct reg_field phy_iso_link_ctrl_1 = + REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1); +static const struct reg_field cmn_plllc_clk1outdiv_preg = + REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 0, 6); +static const struct reg_field cmn_plllc_clk1_en_preg = + REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 12, 12); static const char * const clk_names[] = { [CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc", [CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1", + [CDNS_SIERRA_DERIVED_REFCLK] = "refclk_der", }; enum cdns_sierra_cmn_plllc { @@ -215,14 +278,41 @@ static const int pll_mux_parent_index[][SIERRA_NUM_CMN_PLLC_PARENTS] = { [CMN_PLLLC1] = { PLL1_REFCLK, PLL0_REFCLK }, }; -static u32 cdns_sierra_pll_mux_table[] = { 0, 1 }; +static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = { + [CMN_PLLLC] = { 0, 1 }, + [CMN_PLLLC1] = { 1, 0 }, +}; + +struct cdns_sierra_derived_refclk { + 
struct clk_hw hw; + struct regmap_field *cmn_plllc_clk1outdiv_preg; + struct regmap_field *cmn_plllc_clk1_en_preg; + struct clk_init_data clk_data; +}; + +#define to_cdns_sierra_derived_refclk(_hw) \ + container_of(_hw, struct cdns_sierra_derived_refclk, hw) + +enum cdns_sierra_phy_type { + TYPE_NONE, + TYPE_PCIE, + TYPE_USB, + TYPE_QSGMII +}; + +enum cdns_sierra_ssc_mode { + NO_SSC, + EXTERNAL_SSC, + INTERNAL_SSC +}; struct cdns_sierra_inst { struct phy *phy; - u32 phy_type; + enum cdns_sierra_phy_type phy_type; u32 num_lanes; u32 mlane; struct reset_control *lnk_rst; + enum cdns_sierra_ssc_mode ssc_mode; }; struct cdns_reg_pairs { @@ -230,18 +320,23 @@ struct cdns_reg_pairs { u32 off; }; +struct cdns_sierra_vals { + const struct cdns_reg_pairs *reg_pairs; + u32 num_regs; +}; + struct cdns_sierra_data { - u32 id_value; - u8 block_offset_shift; - u8 reg_offset_shift; - u32 pcie_cmn_regs; - u32 pcie_ln_regs; - u32 usb_cmn_regs; - u32 usb_ln_regs; - const struct cdns_reg_pairs *pcie_cmn_vals; - const struct cdns_reg_pairs *pcie_ln_vals; - const struct cdns_reg_pairs *usb_cmn_vals; - const struct cdns_reg_pairs *usb_ln_vals; + u32 id_value; + u8 block_offset_shift; + u8 reg_offset_shift; + struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; }; struct cdns_regmap_cdb_context { @@ -253,16 +348,21 @@ struct cdns_regmap_cdb_context { struct cdns_sierra_phy { struct device *dev; struct regmap *regmap; - struct cdns_sierra_data *init_data; + const struct cdns_sierra_data *init_data; struct cdns_sierra_inst phys[SIERRA_MAX_LANES]; struct reset_control *phy_rst; struct reset_control *apb_rst; struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES]; - struct regmap *regmap_phy_config_ctrl; + struct regmap *regmap_phy_pcs_common_cdb; + struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES]; + struct regmap *regmap_phy_pma_common_cdb; + struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES]; struct regmap *regmap_common_cdb; struct regmap_field *macro_id_type; struct regmap_field *phy_pll_cfg_1; + struct regmap_field *pma_cmn_ready; struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES]; + struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES]; struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC]; struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC]; struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC]; @@ -329,51 +429,141 @@ static const struct regmap_config cdns_sierra_common_cdb_config = { .reg_read = cdns_regmap_read, }; -static const struct regmap_config cdns_sierra_phy_config_ctrl_config = { - .name = "sierra_phy_config_ctrl", +static const struct regmap_config cdns_sierra_phy_pcs_cmn_cdb_config = { + .name = "sierra_phy_pcs_cmn_cdb", .reg_stride = 1, .fast_io = true, .reg_write = cdns_regmap_write, .reg_read = cdns_regmap_read, }; +#define SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \ +{ \ + .name = "sierra_phy_pcs_lane" n "_cdb", \ + .reg_stride = 1, \ + .fast_io = true, \ + .reg_write = cdns_regmap_write, \ + .reg_read = cdns_regmap_read, \ +} + +static const struct regmap_config cdns_sierra_phy_pcs_lane_cdb_config[] = { + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("0"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("1"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("2"), + 
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("3"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("4"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("5"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("6"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("7"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("8"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("9"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("10"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("11"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("12"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("13"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("14"), + SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("15"), +}; + +static const struct regmap_config cdns_sierra_phy_pma_cmn_cdb_config = { + .name = "sierra_phy_pma_cmn_cdb", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_write, + .reg_read = cdns_regmap_read, +}; + +#define SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF(n) \ +{ \ + .name = "sierra_phy_pma_lane" n "_cdb", \ + .reg_stride = 1, \ + .fast_io = true, \ + .reg_write = cdns_regmap_write, \ + .reg_read = cdns_regmap_read, \ +} + +static const struct regmap_config cdns_sierra_phy_pma_lane_cdb_config[] = { + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("0"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("1"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("2"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("3"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("4"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("5"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("6"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("7"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("8"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("9"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("10"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("11"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("12"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("13"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("14"), + SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("15"), +}; + static int cdns_sierra_phy_init(struct phy *gphy) { struct cdns_sierra_inst *ins = phy_get_drvdata(gphy); struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent); + const struct cdns_sierra_data *init_data = phy->init_data; + struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; + enum cdns_sierra_phy_type phy_type = ins->phy_type; + enum cdns_sierra_ssc_mode ssc = ins->ssc_mode; + struct cdns_sierra_vals *phy_pma_ln_vals; + const struct cdns_reg_pairs *reg_pairs; + struct cdns_sierra_vals *pcs_cmn_vals; struct regmap *regmap; + u32 num_regs; int i, j; - const struct cdns_reg_pairs *cmn_vals, *ln_vals; - u32 num_cmn_regs, num_ln_regs; /* Initialise the PHY registers, unless auto configured */ - if (phy->autoconf) + if (phy->autoconf || phy->nsubnodes > 1) return 0; clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000); clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000); - if (ins->phy_type == PHY_TYPE_PCIE) { - num_cmn_regs = phy->init_data->pcie_cmn_regs; - num_ln_regs = phy->init_data->pcie_ln_regs; - cmn_vals = phy->init_data->pcie_cmn_vals; - ln_vals = phy->init_data->pcie_ln_vals; - } else if (ins->phy_type == PHY_TYPE_USB3) { - num_cmn_regs = phy->init_data->usb_cmn_regs; - num_ln_regs = phy->init_data->usb_ln_regs; - cmn_vals = phy->init_data->usb_cmn_vals; - ln_vals = phy->init_data->usb_ln_vals; - } else { - return -EINVAL; + + /* PHY PCS common registers configurations */ + pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc]; + if (pcs_cmn_vals) { + reg_pairs = pcs_cmn_vals->reg_pairs; + num_regs = pcs_cmn_vals->num_regs; + regmap = phy->regmap_phy_pcs_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val); + } + + /* 
PHY PMA lane registers configurations */ + phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc]; + if (phy_pma_ln_vals) { + reg_pairs = phy_pma_ln_vals->reg_pairs; + num_regs = phy_pma_ln_vals->num_regs; + for (i = 0; i < ins->num_lanes; i++) { + regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val); + } } - regmap = phy->regmap_common_cdb; - for (j = 0; j < num_cmn_regs ; j++) - regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val); + /* PMA common registers configurations */ + pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc]; + if (pma_cmn_vals) { + reg_pairs = pma_cmn_vals->reg_pairs; + num_regs = pma_cmn_vals->num_regs; + regmap = phy->regmap_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val); + } - for (i = 0; i < ins->num_lanes; i++) { - for (j = 0; j < num_ln_regs ; j++) { + /* PMA lane registers configurations */ + pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc]; + if (pma_ln_vals) { + reg_pairs = pma_ln_vals->reg_pairs; + num_regs = pma_ln_vals->num_regs; + for (i = 0; i < ins->num_lanes; i++) { regmap = phy->regmap_lane_cdb[i + ins->mlane]; - regmap_write(regmap, ln_vals[j].off, ln_vals[j].val); + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val); } } @@ -388,10 +578,13 @@ static int cdns_sierra_phy_on(struct phy *gphy) u32 val; int ret; - ret = reset_control_deassert(sp->phy_rst); - if (ret) { - dev_err(dev, "Failed to take the PHY out of reset\n"); - return ret; + if (sp->nsubnodes == 1) { + /* Take the PHY out of reset */ + ret = reset_control_deassert(sp->phy_rst); + if (ret) { + dev_err(dev, "Failed to take the PHY out of reset\n"); + return ret; + } } /* Take the PHY lane group out of reset */ @@ -401,6 +594,26 @@ static int cdns_sierra_phy_on(struct phy *gphy) return ret; } + if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) { + ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane], + val, !val, 1000, PLL_LOCK_TIME); + if (ret) { + dev_err(dev, "Timeout waiting for PHY status ready\n"); + return ret; + } + } + + /* + * Wait for cmn_ready assertion + * PHY_PMA_CMN_CTRL[0] == 1 + */ + ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val, + 1000, PLL_LOCK_TIME); + if (ret) { + dev_err(dev, "Timeout waiting for CMN ready\n"); + return ret; + } + ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane], val, val, 1000, PLL_LOCK_TIME); if (ret < 0) @@ -436,11 +649,25 @@ static const struct phy_ops ops = { static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw) { struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw); + struct regmap_field *plllc1en_field = mux->plllc1en_field; + struct regmap_field *termen_field = mux->termen_field; struct regmap_field *field = mux->pfdclk_sel_preg; unsigned int val; + int index; regmap_field_read(field, &val); - return clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table, 0, val); + + if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1])) { + index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC1], 0, val); + if (index == 1) { + regmap_field_write(plllc1en_field, 1); + regmap_field_write(termen_field, 1); + } + } else { + index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC], 0, val); + } + + return index; } static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index) @@ -458,7 +685,11 @@ static int 
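
cdns_sierra_phy_init() above walks up to four optional vals tables with an identical regmap write loop. A hypothetical helper (illustrative, not part of the patch) captures the shared pattern:

	static void cdns_sierra_write_vals(struct regmap *regmap,
					   const struct cdns_sierra_vals *vals)
	{
		u32 i;

		if (!vals)	/* combination not supported on this platform */
			return;
		for (i = 0; i < vals->num_regs; i++)
			regmap_write(regmap, vals->reg_pairs[i].off,
				     vals->reg_pairs[i].val);
	}
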
cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
 	ret |= regmap_field_write(termen_field, 1);
 	}
 
-	val = cdns_sierra_pll_mux_table[index];
+	if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1]))
+		val = cdns_sierra_pll_mux_table[CMN_PLLLC1][index];
+	else
+		val = cdns_sierra_pll_mux_table[CMN_PLLLC][index];
+
 	ret |= regmap_field_write(field, val);
 
 	return ret;
@@ -496,8 +727,8 @@ static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
 	for (i = 0; i < num_parents; i++) {
 		clk = sp->input_clks[pll_mux_parent_index[clk_index][i]];
 		if (IS_ERR_OR_NULL(clk)) {
-			dev_err(dev, "No parent clock for derived_refclk\n");
-			return PTR_ERR(clk);
+			dev_err(dev, "No parent clock for PLL mux clocks\n");
+			return IS_ERR(clk) ? PTR_ERR(clk) : -ENOENT;
 		}
 		parent_names[i] = __clk_get_name(clk);
 	}
@@ -551,6 +782,91 @@ static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
 	return 0;
 }
 
+static int cdns_sierra_derived_refclk_enable(struct clk_hw *hw)
+{
+	struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+	regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0x1);
+
+	/* Program the divider to get a 100MHz output on ref_der_clk_out: 5GHz VCO / 50 = 100MHz */
+	regmap_field_write(derived_refclk->cmn_plllc_clk1outdiv_preg, 0x2E);
+
+	return 0;
+}
+
+static void cdns_sierra_derived_refclk_disable(struct clk_hw *hw)
+{
+	struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+	regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0);
+}
+
+static int cdns_sierra_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+	struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+	int val;
+
+	regmap_field_read(derived_refclk->cmn_plllc_clk1_en_preg, &val);
+
+	return !!val;
+}
+
+static const struct clk_ops cdns_sierra_derived_refclk_ops = {
+	.enable = cdns_sierra_derived_refclk_enable,
+	.disable = cdns_sierra_derived_refclk_disable,
+	.is_enabled = cdns_sierra_derived_refclk_is_enabled,
+};
+
+static int cdns_sierra_derived_refclk_register(struct cdns_sierra_phy *sp)
+{
+	struct cdns_sierra_derived_refclk *derived_refclk;
+	struct device *dev = sp->dev;
+	struct regmap_field *field;
+	struct clk_init_data *init;
+	struct regmap *regmap;
+	char clk_name[100];
+	struct clk *clk;
+
+	derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+	if (!derived_refclk)
+		return -ENOMEM;
+
+	snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+		 clk_names[CDNS_SIERRA_DERIVED_REFCLK]);
+
+	init = &derived_refclk->clk_data;
+
+	init->ops = &cdns_sierra_derived_refclk_ops;
+	init->flags = 0;
+	init->name = clk_name;
+
+	regmap = sp->regmap_common_cdb;
+
+	field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1outdiv_preg);
+	if (IS_ERR(field)) {
+		dev_err(dev, "cmn_plllc_clk1outdiv_preg reg field init failed\n");
+		return PTR_ERR(field);
+	}
+	derived_refclk->cmn_plllc_clk1outdiv_preg = field;
+
+	field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1_en_preg);
+	if (IS_ERR(field)) {
+		dev_err(dev, "cmn_plllc_clk1_en_preg reg field init failed\n");
+		return PTR_ERR(field);
+	}
+	derived_refclk->cmn_plllc_clk1_en_preg = field;
+
+	derived_refclk->hw.init = init;
+
+	clk = devm_clk_register(dev, &derived_refclk->hw);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	sp->output_clks[CDNS_SIERRA_DERIVED_REFCLK] = clk;
+
+	return 0;
+}
+
 static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
 {
 	struct device *dev = sp->dev;
@@ -571,6 +887,12 @@ static
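
The derived reference clock registered above is exported through the driver's existing of_clk_src_onecell provider, so a board can point a consumer at the PHY's CDNS_SIERRA_DERIVED_REFCLK output and drive it with the common clk API. A generic consumer sketch (not taken from this patch):

	struct clk *refclk;

	refclk = devm_clk_get(dev, NULL);	/* clock wired to the PHY in DT */
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);
	/* .enable sets CMN_PLLLC_CLK1_EN and programs the divider for 100MHz */
	return clk_prepare_enable(refclk);
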
int cdns_sierra_clk_register(struct cdns_sierra_phy *sp) return ret; } + ret = cdns_sierra_derived_refclk_register(sp); + if (ret) { + dev_err(dev, "Failed to register derived refclk\n"); + return ret; + } + sp->clk_data.clks = sp->output_clks; sp->clk_data.clk_num = CDNS_SIERRA_OUTPUT_CLOCKS; ret = of_clk_add_provider(node, of_clk_src_onecell_get, &sp->clk_data); @@ -583,20 +905,37 @@ static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp) static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst, struct device_node *child) { + u32 phy_type; + if (of_property_read_u32(child, "reg", &inst->mlane)) return -EINVAL; if (of_property_read_u32(child, "cdns,num-lanes", &inst->num_lanes)) return -EINVAL; - if (of_property_read_u32(child, "cdns,phy-type", &inst->phy_type)) + if (of_property_read_u32(child, "cdns,phy-type", &phy_type)) return -EINVAL; + switch (phy_type) { + case PHY_TYPE_PCIE: + inst->phy_type = TYPE_PCIE; + break; + case PHY_TYPE_USB3: + inst->phy_type = TYPE_USB; + break; + case PHY_TYPE_QSGMII: + inst->phy_type = TYPE_QSGMII; + break; + default: + return -EINVAL; + } + + inst->ssc_mode = EXTERNAL_SSC; + of_property_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode); + return 0; } -static const struct of_device_id cdns_sierra_id_table[]; - static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base, u32 block_offset, u8 reg_offset_shift, const struct regmap_config *config) @@ -656,7 +995,7 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp) sp->cmn_refrcv_refclk_termen_preg[i] = field; } - regmap = sp->regmap_phy_config_ctrl; + regmap = sp->regmap_phy_pcs_common_cdb; field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1); if (IS_ERR(field)) { dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n"); @@ -664,6 +1003,14 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp) } sp->phy_pll_cfg_1 = field; + regmap = sp->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n"); + return PTR_ERR(field); + } + sp->pma_cmn_ready = field; + for (i = 0; i < SIERRA_MAX_LANES; i++) { regmap = sp->regmap_lane_cdb[i]; field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock); @@ -671,7 +1018,17 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp) dev_err(dev, "P%d_ENABLE reg field init failed\n", i); return PTR_ERR(field); } - sp->pllctrl_lock[i] = field; + sp->pllctrl_lock[i] = field; + } + + for (i = 0; i < SIERRA_MAX_LANES; i++) { + regmap = sp->regmap_phy_pcs_lane_cdb[i]; + field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1); + if (IS_ERR(field)) { + dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i); + return PTR_ERR(field); + } + sp->phy_iso_link_ctrl_1[i] = field; } return 0; @@ -708,14 +1065,49 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp, } sp->regmap_common_cdb = regmap; - block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift); + block_offset = SIERRA_PHY_PCS_COMMON_OFFSET(block_offset_shift); + regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift, + &cdns_sierra_phy_pcs_cmn_cdb_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PCS common CDB regmap\n"); + return PTR_ERR(regmap); + } + sp->regmap_phy_pcs_common_cdb = regmap; + + for (i = 0; i < SIERRA_MAX_LANES; i++) { + block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, base, block_offset, + 
reg_offset_shift, + &cdns_sierra_phy_pcs_lane_cdb_config[i]); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n"); + return PTR_ERR(regmap); + } + sp->regmap_phy_pcs_lane_cdb[i] = regmap; + } + + block_offset = SIERRA_PHY_PMA_COMMON_OFFSET(block_offset_shift); regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift, - &cdns_sierra_phy_config_ctrl_config); + &cdns_sierra_phy_pma_cmn_cdb_config); if (IS_ERR(regmap)) { - dev_err(dev, "Failed to init PHY config and control regmap\n"); + dev_err(dev, "Failed to init PHY PMA common CDB regmap\n"); return PTR_ERR(regmap); } - sp->regmap_phy_config_ctrl = regmap; + sp->regmap_phy_pma_common_cdb = regmap; + + for (i = 0; i < SIERRA_MAX_LANES; i++) { + block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, base, block_offset, + reg_offset_shift, + &cdns_sierra_phy_pma_lane_cdb_config[i]); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n"); + return PTR_ERR(regmap); + } + sp->regmap_phy_pma_lane_cdb[i] = regmap; + } return 0; } @@ -824,13 +1216,127 @@ static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp, return 0; } +static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp) +{ + const struct cdns_sierra_data *init_data = sp->init_data; + struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; + enum cdns_sierra_phy_type phy_t1, phy_t2; + struct cdns_sierra_vals *phy_pma_ln_vals; + const struct cdns_reg_pairs *reg_pairs; + struct cdns_sierra_vals *pcs_cmn_vals; + int i, j, node, mlane, num_lanes, ret; + enum cdns_sierra_ssc_mode ssc; + struct regmap *regmap; + u32 num_regs; + + /* Maximum 2 links (subnodes) are supported */ + if (sp->nsubnodes != 2) + return -EINVAL; + + clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000); + clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000); + + /* PHY configured to use both PLL LC and LC1 */ + regmap_field_write(sp->phy_pll_cfg_1, 0x1); + + phy_t1 = sp->phys[0].phy_type; + phy_t2 = sp->phys[1].phy_type; + + /* + * PHY configuration for multi-link operation is done in two steps. + * e.g. Consider a case for a 4 lane PHY with PCIe using 2 lanes and QSGMII other 2 lanes. + * Sierra PHY has 2 PLLs, viz. PLLLC and PLLLC1. So in this case, PLLLC is used for PCIe + * and PLLLC1 is used for QSGMII. PHY is configured in two steps as described below. + * + * [1] For first step, phy_t1 = TYPE_PCIE and phy_t2 = TYPE_QSGMII + * So the register values are selected as [TYPE_PCIE][TYPE_QSGMII][ssc]. + * This will configure PHY registers associated for PCIe (i.e. first protocol) + * involving PLLLC registers and registers for first 2 lanes of PHY. + * [2] In second step, the variables phy_t1 and phy_t2 are swapped. So now, + * phy_t1 = TYPE_QSGMII and phy_t2 = TYPE_PCIE. And the register values are selected as + * [TYPE_QSGMII][TYPE_PCIE][ssc]. + * This will configure PHY registers associated for QSGMII (i.e. second protocol) + * involving PLLLC1 registers and registers for other 2 lanes of PHY. + * + * This completes the PHY configuration for multilink operation. This approach enables + * dividing the large number of PHY register configurations into protocol specific + * smaller groups. + */ + for (node = 0; node < sp->nsubnodes; node++) { + if (node == 1) { + /* + * If first link with phy_t1 is configured, then configure the PHY for + * second link with phy_t2. Get the array values as [phy_t2][phy_t1][ssc]. 
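+			 * For the 4-lane PCIe + QSGMII example above, this second
+			 * pass therefore selects the [TYPE_QSGMII][TYPE_PCIE][ssc]
+			 * entries, i.e. the QSGMII/PLLLC1 values for the remaining
+			 * two lanes.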
+ */ + swap(phy_t1, phy_t2); + } + + mlane = sp->phys[node].mlane; + ssc = sp->phys[node].ssc_mode; + num_lanes = sp->phys[node].num_lanes; + + /* PHY PCS common registers configurations */ + pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc]; + if (pcs_cmn_vals) { + reg_pairs = pcs_cmn_vals->reg_pairs; + num_regs = pcs_cmn_vals->num_regs; + regmap = sp->regmap_phy_pcs_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val); + } + + /* PHY PMA lane registers configurations */ + phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc]; + if (phy_pma_ln_vals) { + reg_pairs = phy_pma_ln_vals->reg_pairs; + num_regs = phy_pma_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = sp->regmap_phy_pma_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val); + } + } + + /* PMA common registers configurations */ + pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc]; + if (pma_cmn_vals) { + reg_pairs = pma_cmn_vals->reg_pairs; + num_regs = pma_cmn_vals->num_regs; + regmap = sp->regmap_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val); + } + + /* PMA lane registers configurations */ + pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc]; + if (pma_ln_vals) { + reg_pairs = pma_ln_vals->reg_pairs; + num_regs = pma_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = sp->regmap_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val); + } + } + + if (phy_t1 == TYPE_QSGMII) + reset_control_deassert(sp->phys[node].lnk_rst); + } + + /* Take the PHY out of reset */ + ret = reset_control_deassert(sp->phy_rst); + if (ret) + return ret; + + return 0; +} + static int cdns_sierra_phy_probe(struct platform_device *pdev) { struct cdns_sierra_phy *sp; struct phy_provider *phy_provider; struct device *dev = &pdev->dev; - const struct of_device_id *match; - struct cdns_sierra_data *data; + const struct cdns_sierra_data *data; unsigned int id_value; int i, ret, node = 0; void __iomem *base; @@ -840,12 +1346,10 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) return -ENODEV; /* Get init data for this PHY */ - match = of_match_device(cdns_sierra_id_table, dev); - if (!match) + data = of_device_get_match_data(dev); + if (!data) return -EINVAL; - data = (struct cdns_sierra_data *)match->data; - sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL); if (!sp) return -ENOMEM; @@ -946,8 +1450,11 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) } /* If more than one subnode, configure the PHY as multilink */ - if (!sp->autoconf && sp->nsubnodes > 1) - regmap_field_write(sp->phy_pll_cfg_1, 0x1); + if (!sp->autoconf && sp->nsubnodes > 1) { + ret = cdns_sierra_phy_configure_multilink(sp); + if (ret) + goto put_child2; + } pm_runtime_enable(dev); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); @@ -991,6 +1498,449 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev) return 0; } +/* QSGMII PHY PMA lane configuration */ +static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = { + {0x9010, SIERRA_PHY_PMA_XCVR_CTRL} +}; + +static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = { + .reg_pairs = qsgmii_phy_pma_ln_regs, + .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs), +}; + +/* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */ +static const struct cdns_reg_pairs 
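
The register tables that follow are protocol- and SSC-mode-specific reference sequences: the qsgmii_* sets run on PLLLC1 while the pcie_* and ml_pcie_* sets run on PLLLC, matching the PLL split described in cdns_sierra_phy_configure_multilink() above.
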
qsgmii_100_no_ssc_plllc1_cmn_regs[] = { + {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG}, + {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG}, + {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG} +}; + +static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x0252, SIERRA_DET_STANDEC_E_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x0FFE, SIERRA_PSC_RX_A0_PREG}, + {0x0011, SIERRA_PLLCTRL_SUBRATE_PREG}, + {0x0001, SIERRA_PLLCTRL_GEN_A_PREG}, + {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG}, + {0x0000, SIERRA_DRVCTRL_ATTEN_PREG}, + {0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG}, + {0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG}, + {0x0000, SIERRA_CREQ_EQ_CTRL_PREG}, + {0x8422, SIERRA_CTLELUT_CTRL_PREG}, + {0x4111, SIERRA_DFE_ECMP_RATESEL_PREG}, + {0x4111, SIERRA_DFE_SMP_RATESEL_PREG}, + {0x0002, SIERRA_DEQ_PHALIGN_CTRL}, + {0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG}, + {0x0186, SIERRA_DEQ_GLUT0}, + {0x0186, SIERRA_DEQ_GLUT1}, + {0x0186, SIERRA_DEQ_GLUT2}, + {0x0186, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x0861, SIERRA_DEQ_ALUT0}, + {0x07E0, SIERRA_DEQ_ALUT1}, + {0x079E, SIERRA_DEQ_ALUT2}, + {0x071D, SIERRA_DEQ_ALUT3}, + {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG}, + {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG}, + {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0033, SIERRA_DEQ_PICTRL_PREG}, + {0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG}, + {0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG}, + {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG}, + {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG} +}; + +static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = { + .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs), +}; + +static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = { + .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs), +}; + +/* PCIE PHY PCS common configuration */ +static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = { + {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1} +}; + +static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = { + .reg_pairs = pcie_phy_pcs_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs), +}; + +/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */ +static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = { + {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, + {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG} +}; + +/* + * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc, + * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz + */ +static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x0004, SIERRA_PSC_LN_A3_PREG}, + {0x0004, SIERRA_PSC_LN_A4_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, 
SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = { + .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs), +}; + +static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = { + .reg_pairs = ml_pcie_100_no_ssc_ln_regs, + .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs), +}; + +/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */ +static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = { + {0x000E, SIERRA_CMN_PLLLC_MODE_PREG}, + {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, + {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, + {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG}, + {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}, + {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG}, + {0x7F80, SIERRA_CMN_PLLLC_SS_PREG}, + {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG}, + {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG}, + {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}, + {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG} +}; + +/* + * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc, + * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz + */ +static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x0004, SIERRA_PSC_LN_A3_PREG}, + {0x0004, SIERRA_PSC_LN_A4_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, 
SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = { + .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs), +}; + +static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = { + .reg_pairs = ml_pcie_100_int_ssc_ln_regs, + .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs), +}; + +/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */ +static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = { + {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, + {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}, + {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG} +}; + +/* + * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc, + * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz + */ +static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x0004, SIERRA_PSC_LN_A3_PREG}, + {0x0004, SIERRA_PSC_LN_A4_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = { + .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs, + 
.num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs), +}; + +static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = { + .reg_pairs = ml_pcie_100_ext_ssc_ln_regs, + .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs), +}; + +/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */ +static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = { + {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, + {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG} +}; + +/* refclk100MHz_32b_PCIe_ln_no_ssc */ +static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = { + .reg_pairs = cdns_pcie_cmn_regs_no_ssc, + .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc), +}; + +static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = { + .reg_pairs = cdns_pcie_ln_regs_no_ssc, + .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc), +}; + +/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */ +static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = { + {0x000E, SIERRA_CMN_PLLLC_MODE_PREG}, + {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, + {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, + {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG}, + {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}, + {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG}, + {0x7F80, SIERRA_CMN_PLLLC_SS_PREG}, + {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG}, + {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG}, + {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}, + {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG} +}; + +/* refclk100MHz_32b_PCIe_ln_int_ssc */ +static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x8047, 
SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = { + .reg_pairs = cdns_pcie_cmn_regs_int_ssc, + .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc), +}; + +static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = { + .reg_pairs = cdns_pcie_ln_regs_int_ssc, + .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc), +}; + /* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */ static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = { {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, @@ -1002,13 +1952,62 @@ static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = { /* refclk100MHz_32b_PCIe_ln_ext_ssc */ static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG}, - {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG} + {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + 
{0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = { + .reg_pairs = cdns_pcie_cmn_regs_ext_ssc, + .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc), +}; + +static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = { + .reg_pairs = cdns_pcie_ln_regs_ext_ssc, + .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc), }; /* refclk100MHz_20b_USB_cmn_pll_ext_ssc */ @@ -1118,32 +2117,167 @@ static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = { {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG} }; +static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = { + .reg_pairs = cdns_usb_cmn_regs_ext_ssc, + .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc), +}; + +static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = { + .reg_pairs = cdns_usb_ln_regs_ext_ssc, + .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc), +}; + static const struct cdns_sierra_data cdns_map_sierra = { - SIERRA_MACRO_ID, - 0x2, - 0x2, - ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc), - ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc), - ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc), - ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc), - cdns_pcie_cmn_regs_ext_ssc, - cdns_pcie_ln_regs_ext_ssc, - cdns_usb_cmn_regs_ext_ssc, - cdns_usb_ln_regs_ext_ssc, + .id_value = SIERRA_MACRO_ID, + .block_offset_shift = 0x2, + .reg_offset_shift = 0x2, + .pcs_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + }, + }, + }, + .pma_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals, + }, + }, + }, + .pma_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_ln_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals, + [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals, + [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals, + }, + }, + }, }; static const struct cdns_sierra_data cdns_ti_map_sierra = { - SIERRA_MACRO_ID, - 0x0, - 0x1, - 
ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc), - ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc), - ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc), - ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc), - cdns_pcie_cmn_regs_ext_ssc, - cdns_pcie_ln_regs_ext_ssc, - cdns_usb_cmn_regs_ext_ssc, - cdns_usb_ln_regs_ext_ssc, + .id_value = SIERRA_MACRO_ID, + .block_offset_shift = 0x0, + .reg_offset_shift = 0x1, + .pcs_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + }, + }, + }, + .phy_pma_ln_vals = { + [TYPE_QSGMII] = { + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_phy_pma_ln_vals, + [EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals, + [INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals, + }, + }, + }, + .pma_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals, + }, + }, + }, + .pma_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_ln_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals, + [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals, + [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals, + }, + }, + }, }; static const struct of_device_id cdns_sierra_id_table[] = { diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 5786166133d3..7c4b8050485f 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -2278,7 +2278,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate; struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; - enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type; + enum cdns_torrent_phy_type phy_t1, phy_t2; struct cdns_torrent_vals *pcs_cmn_vals; int i, j, node, mlane, num_lanes, ret; struct cdns_reg_pairs *reg_pairs; @@ -2304,9 +2304,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) * configure the PHY for second link with phy_t2. * Get the array values as [phy_t2][phy_t1][ssc]. 
*/
-	tmp_phy_type = phy_t1;
-	phy_t1 = phy_t2;
-	phy_t2 = tmp_phy_type;
+	swap(phy_t1, phy_t2);
 	}
 
 	mlane = cdns_phy->phys[node].mlane;
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index 320630ffe3cd..c3669c28ea9f 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -14,3 +14,11 @@ config PHY_MIXEL_MIPI_DPHY
 	help
 	  Enable this to add support for the Mixel DSI PHY as found
 	  on NXP's i.MX8 family of SOCs.
+
+config PHY_FSL_IMX8M_PCIE
+	tristate "Freescale i.MX8M PCIe PHY"
+	depends on OF && HAS_IOMEM
+	select GENERIC_PHY
+	help
+	  Enable this to add support for the PCIe PHY as found on
+	  the i.MX8M family of SOCs.
diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile
index 1d02e3869b45..55d07c742ab0 100644
--- a/drivers/phy/freescale/Makefile
+++ b/drivers/phy/freescale/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_PHY_FSL_IMX8MQ_USB)	+= phy-fsl-imx8mq-usb.o
 obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY)	+= phy-fsl-imx8-mipi-dphy.o
+obj-$(CONFIG_PHY_FSL_IMX8M_PCIE)	+= phy-fsl-imx8m-pcie.o
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
new file mode 100644
index 000000000000..04b1aafb29f4
--- /dev/null
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+
+#define IMX8MM_PCIE_PHY_CMN_REG061	0x184
+#define  ANA_PLL_CLK_OUT_TO_EXT_IO_EN	BIT(0)
+#define IMX8MM_PCIE_PHY_CMN_REG062	0x188
+#define  ANA_PLL_CLK_OUT_TO_EXT_IO_SEL	BIT(3)
+#define IMX8MM_PCIE_PHY_CMN_REG063	0x18C
+#define  AUX_PLL_REFCLK_SEL_SYS_PLL	GENMASK(7, 6)
+#define IMX8MM_PCIE_PHY_CMN_REG064	0x190
+#define  ANA_AUX_RX_TX_SEL_TX		BIT(7)
+#define  ANA_AUX_RX_TERM_GND_EN		BIT(3)
+#define  ANA_AUX_TX_TERM		BIT(2)
+#define IMX8MM_PCIE_PHY_CMN_REG065	0x194
+#define  ANA_AUX_RX_TERM		(BIT(7) | BIT(4))
+#define  ANA_AUX_TX_LVL			GENMASK(3, 0)
+#define IMX8MM_PCIE_PHY_CMN_REG75	0x1D4
+#define  PCIE_PHY_CMN_REG75_PLL_DONE	0x3
+#define PCIE_PHY_TRSV_REG5		0x414
+#define  PCIE_PHY_TRSV_REG5_GEN1_DEEMP	0x2D
+#define PCIE_PHY_TRSV_REG6		0x418
+#define  PCIE_PHY_TRSV_REG6_GEN2_DEEMP	0xF
+
+#define IMX8MM_GPR_PCIE_REF_CLK_SEL	GENMASK(25, 24)
+#define IMX8MM_GPR_PCIE_REF_CLK_PLL	FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x3)
+#define IMX8MM_GPR_PCIE_REF_CLK_EXT	FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x2)
+#define IMX8MM_GPR_PCIE_AUX_EN		BIT(19)
+#define IMX8MM_GPR_PCIE_CMN_RST		BIT(18)
+#define IMX8MM_GPR_PCIE_POWER_OFF	BIT(17)
+#define IMX8MM_GPR_PCIE_SSC_EN		BIT(16)
+#define IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE	BIT(9)
+
+struct imx8_pcie_phy {
+	void __iomem		*base;
+	struct clk		*clk;
+	struct phy		*phy;
+	struct regmap		*iomuxc_gpr;
+	struct reset_control	*reset;
+	u32			refclk_pad_mode;
+	u32			tx_deemph_gen1;
+	u32			tx_deemph_gen2;
+	bool			clkreq_unused;
+};
+
+static int imx8_pcie_phy_init(struct phy *phy)
+{
+	int ret;
+	u32 val, pad_mode;
+	struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+	reset_control_assert(imx8_phy->reset);
+
+	pad_mode = imx8_phy->refclk_pad_mode;
+	/* Set AUX_EN_OVERRIDE to 1'b0 when the CLKREQ# signal isn't hooked up */
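+	/*
+	 * GPR14 is programmed first: the AUX enable/override bits are set
+	 * according to the CLKREQ# wiring, POWER_OFF and SSC_EN are cleared,
+	 * and the reference clock source is selected (the external pad for
+	 * IMX8_PCIE_REFCLK_PAD_INPUT, the internal PLL otherwise).
+	 */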
+	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+			   IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
+			   imx8_phy->clkreq_unused ?
+			   0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
+	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+			   IMX8MM_GPR_PCIE_AUX_EN,
+			   IMX8MM_GPR_PCIE_AUX_EN);
+	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+			   IMX8MM_GPR_PCIE_POWER_OFF, 0);
+	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+			   IMX8MM_GPR_PCIE_SSC_EN, 0);
+
+	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+			   IMX8MM_GPR_PCIE_REF_CLK_SEL,
+			   pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
+			   IMX8MM_GPR_PCIE_REF_CLK_EXT :
+			   IMX8MM_GPR_PCIE_REF_CLK_PLL);
+	usleep_range(100, 200);
+
+	/* Do the PHY common block reset */
+	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+			   IMX8MM_GPR_PCIE_CMN_RST,
+			   IMX8MM_GPR_PCIE_CMN_RST);
+	usleep_range(200, 500);
+
+	if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT) {
+		/* Configure the pad as input */
+		val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+		writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
+		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+	} else if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT) {
+		/* Configure the PHY to output the refclock via pad */
+		writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
+		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+		writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
+		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
+		writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+		val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
+		writel(val | ANA_AUX_RX_TERM_GND_EN,
+		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
+		writel(ANA_AUX_RX_TERM | ANA_AUX_TX_LVL,
+		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG065);
+	}
+
+	/* Tune PHY de-emphasis setting to pass PCIe compliance. */
+	if (imx8_phy->tx_deemph_gen1)
+		writel(imx8_phy->tx_deemph_gen1,
+		       imx8_phy->base + PCIE_PHY_TRSV_REG5);
+	if (imx8_phy->tx_deemph_gen2)
+		writel(imx8_phy->tx_deemph_gen2,
+		       imx8_phy->base + PCIE_PHY_TRSV_REG6);
+
+	reset_control_deassert(imx8_phy->reset);
+
+	/* Poll until the PHY is ready.
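+	 * PCIE_PHY_CMN_REG75 reads back PCIE_PHY_CMN_REG75_PLL_DONE (0x3)
+	 * once the PLL has locked; allow up to 20 ms for that.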
*/ + ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG75, + val, val == PCIE_PHY_CMN_REG75_PLL_DONE, + 10, 20000); + return ret; +} + +static int imx8_pcie_phy_power_on(struct phy *phy) +{ + struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); + + return clk_prepare_enable(imx8_phy->clk); +} + +static int imx8_pcie_phy_power_off(struct phy *phy) +{ + struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); + + clk_disable_unprepare(imx8_phy->clk); + + return 0; +} + +static const struct phy_ops imx8_pcie_phy_ops = { + .init = imx8_pcie_phy_init, + .power_on = imx8_pcie_phy_power_on, + .power_off = imx8_pcie_phy_power_off, + .owner = THIS_MODULE, +}; + +static int imx8_pcie_phy_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct imx8_pcie_phy *imx8_phy; + struct resource *res; + + imx8_phy = devm_kzalloc(dev, sizeof(*imx8_phy), GFP_KERNEL); + if (!imx8_phy) + return -ENOMEM; + + /* get PHY refclk pad mode */ + of_property_read_u32(np, "fsl,refclk-pad-mode", + &imx8_phy->refclk_pad_mode); + + if (of_property_read_u32(np, "fsl,tx-deemph-gen1", + &imx8_phy->tx_deemph_gen1)) + imx8_phy->tx_deemph_gen1 = 0; + + if (of_property_read_u32(np, "fsl,tx-deemph-gen2", + &imx8_phy->tx_deemph_gen2)) + imx8_phy->tx_deemph_gen2 = 0; + + if (of_property_read_bool(np, "fsl,clkreq-unsupported")) + imx8_phy->clkreq_unused = true; + else + imx8_phy->clkreq_unused = false; + + imx8_phy->clk = devm_clk_get(dev, "ref"); + if (IS_ERR(imx8_phy->clk)) { + dev_err(dev, "failed to get imx pcie phy clock\n"); + return PTR_ERR(imx8_phy->clk); + } + + /* Grab GPR config register range */ + imx8_phy->iomuxc_gpr = + syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); + if (IS_ERR(imx8_phy->iomuxc_gpr)) { + dev_err(dev, "unable to find iomuxc registers\n"); + return PTR_ERR(imx8_phy->iomuxc_gpr); + } + + imx8_phy->reset = devm_reset_control_get_exclusive(dev, "pciephy"); + if (IS_ERR(imx8_phy->reset)) { + dev_err(dev, "Failed to get PCIEPHY reset control\n"); + return PTR_ERR(imx8_phy->reset); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + imx8_phy->base = devm_ioremap_resource(dev, res); + if (IS_ERR(imx8_phy->base)) + return PTR_ERR(imx8_phy->base); + + imx8_phy->phy = devm_phy_create(dev, NULL, &imx8_pcie_phy_ops); + if (IS_ERR(imx8_phy->phy)) + return PTR_ERR(imx8_phy->phy); + + phy_set_drvdata(imx8_phy->phy, imx8_phy); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id imx8_pcie_phy_of_match[] = { + {.compatible = "fsl,imx8mm-pcie-phy",}, + { }, +}; +MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match); + +static struct platform_driver imx8_pcie_phy_driver = { + .probe = imx8_pcie_phy_probe, + .driver = { + .name = "imx8-pcie-phy", + .of_match_table = imx8_pcie_phy_of_match, + } +}; +module_platform_driver(imx8_pcie_phy_driver); + +MODULE_DESCRIPTION("FSL IMX8 PCIE PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig index ac42bb2fb394..18a3cc5b98c0 100644 --- a/drivers/phy/intel/Kconfig +++ b/drivers/phy/intel/Kconfig @@ -46,3 +46,13 @@ config PHY_INTEL_LGM_EMMC select GENERIC_PHY help Enable this to support the Intel EMMC PHY + +config PHY_INTEL_THUNDERBAY_EMMC + tristate "Intel Thunder Bay eMMC PHY driver" + depends on OF && (ARCH_THUNDERBAY || COMPILE_TEST) + select GENERIC_PHY + help + This option enables support 
for Intel Thunder Bay SoC eMMC PHY. + + To compile this driver as a module, choose M here: the module + will be called phy-intel-thunderbay-emmc.ko. diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile index 14550981a707..b7321d56b0bb 100644 --- a/drivers/phy/intel/Makefile +++ b/drivers/phy/intel/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_PHY_INTEL_KEEMBAY_EMMC) += phy-intel-keembay-emmc.o obj-$(CONFIG_PHY_INTEL_KEEMBAY_USB) += phy-intel-keembay-usb.o obj-$(CONFIG_PHY_INTEL_LGM_COMBO) += phy-intel-lgm-combo.o obj-$(CONFIG_PHY_INTEL_LGM_EMMC) += phy-intel-lgm-emmc.o +obj-$(CONFIG_PHY_INTEL_THUNDERBAY_EMMC) += phy-intel-thunderbay-emmc.o diff --git a/drivers/phy/intel/phy-intel-thunderbay-emmc.c b/drivers/phy/intel/phy-intel-thunderbay-emmc.c new file mode 100644 index 000000000000..593f6970b81e --- /dev/null +++ b/drivers/phy/intel/phy-intel-thunderbay-emmc.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel ThunderBay eMMC PHY driver + * + * Copyright (C) 2021 Intel Corporation + * + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +/* eMMC/SD/SDIO core/phy configuration registers */ +#define CTRL_CFG_0 0x00 +#define CTRL_CFG_1 0x04 +#define CTRL_PRESET_0 0x08 +#define CTRL_PRESET_1 0x0c +#define CTRL_PRESET_2 0x10 +#define CTRL_PRESET_3 0x14 +#define CTRL_PRESET_4 0x18 +#define CTRL_CFG_2 0x1c +#define CTRL_CFG_3 0x20 +#define PHY_CFG_0 0x24 +#define PHY_CFG_1 0x28 +#define PHY_CFG_2 0x2c +#define PHYBIST_CTRL 0x30 +#define SDHC_STAT3 0x34 +#define PHY_STAT 0x38 +#define PHYBIST_STAT_0 0x3c +#define PHYBIST_STAT_1 0x40 +#define EMMC_AXI 0x44 + +/* CTRL_PRESET_3 */ +#define CTRL_PRESET3_MASK GENMASK(31, 0) +#define CTRL_PRESET3_SHIFT 0 + +/* CTRL_CFG_0 bit fields */ +#define SUPPORT_HS_MASK BIT(26) +#define SUPPORT_HS_SHIFT 26 + +#define SUPPORT_8B_MASK BIT(24) +#define SUPPORT_8B_SHIFT 24 + +/* CTRL_CFG_1 bit fields */ +#define SUPPORT_SDR50_MASK BIT(28) +#define SUPPORT_SDR50_SHIFT 28 +#define SLOT_TYPE_MASK GENMASK(27, 26) +#define SLOT_TYPE_OFFSET 26 +#define SUPPORT_64B_MASK BIT(24) +#define SUPPORT_64B_SHIFT 24 +#define SUPPORT_HS400_MASK BIT(2) +#define SUPPORT_HS400_SHIFT 2 +#define SUPPORT_DDR50_MASK BIT(1) +#define SUPPORT_DDR50_SHIFT 1 +#define SUPPORT_SDR104_MASK BIT(0) +#define SUPPORT_SDR104_SHIFT 0 + +/* PHY_CFG_0 bit fields */ +#define SEL_DLY_TXCLK_MASK BIT(29) +#define SEL_DLY_TXCLK_SHIFT 29 +#define SEL_DLY_RXCLK_MASK BIT(28) +#define SEL_DLY_RXCLK_SHIFT 28 + +#define OTAP_DLY_ENA_MASK BIT(27) +#define OTAP_DLY_ENA_SHIFT 27 +#define OTAP_DLY_SEL_MASK GENMASK(26, 23) +#define OTAP_DLY_SEL_SHIFT 23 +#define ITAP_CHG_WIN_MASK BIT(22) +#define ITAP_CHG_WIN_SHIFT 22 +#define ITAP_DLY_ENA_MASK BIT(21) +#define ITAP_DLY_ENA_SHIFT 21 +#define ITAP_DLY_SEL_MASK GENMASK(20, 16) +#define ITAP_DLY_SEL_SHIFT 16 +#define RET_ENB_MASK BIT(15) +#define RET_ENB_SHIFT 15 +#define RET_EN_MASK BIT(14) +#define RET_EN_SHIFT 14 +#define DLL_IFF_MASK GENMASK(13, 11) +#define DLL_IFF_SHIFT 11 +#define DLL_EN_MASK BIT(10) +#define DLL_EN_SHIFT 10 +#define DLL_TRIM_ICP_MASK GENMASK(9, 6) +#define DLL_TRIM_ICP_SHIFT 6 +#define RETRIM_EN_MASK BIT(5) +#define RETRIM_EN_SHIFT 5 +#define RETRIM_MASK BIT(4) +#define RETRIM_SHIFT 4 +#define DR_TY_MASK GENMASK(3, 1) +#define DR_TY_SHIFT 1 +#define PWR_DOWN_MASK BIT(0) +#define PWR_DOWN_SHIFT 0 + +/* PHY_CFG_1 bit fields */ +#define REN_DAT_MASK 
GENMASK(19, 12)
+#define REN_DAT_SHIFT		12
+#define REN_CMD_MASK		BIT(11)
+#define REN_CMD_SHIFT		11
+#define REN_STRB_MASK		BIT(10)
+#define REN_STRB_SHIFT		10
+#define PU_STRB_MASK		BIT(20)
+#define PU_STRB_SHIFT		20
+
+/* PHY_CFG_2 bit fields */
+#define CLKBUF_MASK		GENMASK(24, 21)
+#define CLKBUF_SHIFT		21
+#define SEL_STRB_MASK		GENMASK(20, 13)
+#define SEL_STRB_SHIFT		13
+#define SEL_FREQ_MASK		GENMASK(12, 10)
+#define SEL_FREQ_SHIFT		10
+
+/* PHY_STAT bit fields */
+#define CAL_DONE		BIT(6)
+#define DLL_RDY			BIT(5)
+
+#define OTAP_DLY		0x0
+#define ITAP_DLY		0x0
+#define STRB			0x33
+
+/* From ACS_eMMC51_16nFFC_RO1100_Userguide_v1p0.pdf p17 */
+#define FREQSEL_200M_170M	0x0
+#define FREQSEL_170M_140M	0x1
+#define FREQSEL_140M_110M	0x2
+#define FREQSEL_110M_80M	0x3
+#define FREQSEL_80M_50M		0x4
+#define FREQSEL_275M_250M	0x5
+#define FREQSEL_250M_225M	0x6
+#define FREQSEL_225M_200M	0x7
+
+/* Phy power status */
+#define PHY_UNINITIALIZED	0
+#define PHY_INITIALIZED		1
+
+/*
+ * During init (400KHz card clock) the PHY settings are applied with a
+ * 200MHz input clock.  To avoid wrongly applying those settings during
+ * init (400KHz), "phy_power_sts" is used.  Whenever the actual clock is
+ * set, the PHY is always powered off once and then powered on again
+ * (sdhci_arasan_set_clock), which lets us tell whether a phy_power_on
+ * call carries the init settings or the actual clock settings:
+ * 0 --> init settings
+ * 1 --> actual settings
+ */
+
+struct thunderbay_emmc_phy {
+	void __iomem	*reg_base;
+	struct clk	*emmcclk;
+	int		phy_power_sts;
+};
+
+static inline void update_reg(struct thunderbay_emmc_phy *tbh_phy, u32 offset,
+			      u32 mask, u32 shift, u32 val)
+{
+	u32 tmp;
+
+	tmp = readl(tbh_phy->reg_base + offset);
+	tmp &= ~mask;
+	tmp |= val << shift;
+	writel(tmp, tbh_phy->reg_base + offset);
+}
+
+static int thunderbay_emmc_phy_power(struct phy *phy, bool power_on)
+{
+	struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+	unsigned int freqsel = FREQSEL_200M_170M;
+	unsigned long rate;
+	static int lock;
+	u32 val;
+	int ret;
+
+	/* Disable DLL */
+	rate = clk_get_rate(tbh_phy->emmcclk);
+	switch (rate) {
+	case 200000000:
+		/* lock the DLL only when it is used, i.e. only if SEL_DLY_TXCLK/RXCLK are 0 */
+		update_reg(tbh_phy, PHY_CFG_0, DLL_EN_MASK, DLL_EN_SHIFT, 0x0);
+		break;
+
+	/* DLL lock not required for other frequencies */
+	case 50000000 ... 52000000:
+	case 400000:
+	default:
+		break;
+	}
+
+	if (!power_on)
+		return 0;
+
+	rate = clk_get_rate(tbh_phy->emmcclk);
+	switch (rate) {
+	case 170000001 ... 200000000:
+		freqsel = FREQSEL_200M_170M;
+		break;
+
+	case 140000001 ... 170000000:
+		freqsel = FREQSEL_170M_140M;
+		break;
+
+	case 110000001 ... 140000000:
+		freqsel = FREQSEL_140M_110M;
+		break;
+
+	case 80000001 ... 110000000:
+		freqsel = FREQSEL_110M_80M;
+		break;
+
+	case 50000000 ... 80000000:
+		freqsel = FREQSEL_80M_50M;
+		break;
+
+	case 250000001 ... 275000000:
+		freqsel = FREQSEL_275M_250M;
+		break;
+
+	case 225000001 ... 250000000:
+		freqsel = FREQSEL_250M_225M;
+		break;
+
+	case 200000001 ... 225000000:
+		freqsel = FREQSEL_225M_200M;
+		break;
+	default:
+		break;
+	}
+	/* The clock rate is only checked against the upper limit; it may fall lower during init */
+	if (rate > 200000000)
+		dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+
+	udelay(5);
+
+	if (lock == 0) {
+		/* PDB will be done only once per boot */
+		update_reg(tbh_phy, PHY_CFG_0, PWR_DOWN_MASK,
+			   PWR_DOWN_SHIFT, 0x1);
+		lock = 1;
+		/*
+		 * According to the user manual, it asks the driver to wait 5us for
+		 * calpad busy trimming. 
However it is documented that this value is + * PVT(A.K.A. process, voltage and temperature) relevant, so some + * failure cases are found which indicates we should be more tolerant + * to calpad busy trimming. + */ + ret = readl_poll_timeout(tbh_phy->reg_base + PHY_STAT, + val, (val & CAL_DONE), 10, 50); + if (ret) { + dev_err(&phy->dev, "caldone failed, ret=%d\n", ret); + return ret; + } + } + rate = clk_get_rate(tbh_phy->emmcclk); + switch (rate) { + case 200000000: + /* Set frequency of the DLL operation */ + update_reg(tbh_phy, PHY_CFG_2, SEL_FREQ_MASK, SEL_FREQ_SHIFT, freqsel); + + /* Enable DLL */ + update_reg(tbh_phy, PHY_CFG_0, DLL_EN_MASK, DLL_EN_SHIFT, 0x1); + + /* + * After enabling analog DLL circuits docs say that we need 10.2 us if + * our source clock is at 50 MHz and that lock time scales linearly + * with clock speed. If we are powering on the PHY and the card clock + * is super slow (like 100kHz) this could take as long as 5.1 ms as + * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms + * hopefully we won't be running at 100 kHz, but we should still make + * sure we wait long enough. + * + * NOTE: There appear to be corner cases where the DLL seems to take + * extra long to lock for reasons that aren't understood. In some + * extreme cases we've seen it take up to over 10ms (!). We'll be + * generous and give it 50ms. + */ + ret = readl_poll_timeout(tbh_phy->reg_base + PHY_STAT, + val, (val & DLL_RDY), 10, 50 * USEC_PER_MSEC); + if (ret) { + dev_err(&phy->dev, "dllrdy failed, ret=%d\n", ret); + return ret; + } + break; + + default: + break; + } + return 0; +} + +static int thunderbay_emmc_phy_init(struct phy *phy) +{ + struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy); + + tbh_phy->emmcclk = clk_get(&phy->dev, "emmcclk"); + + return PTR_ERR_OR_ZERO(tbh_phy->emmcclk); +} + +static int thunderbay_emmc_phy_exit(struct phy *phy) +{ + struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy); + + clk_put(tbh_phy->emmcclk); + + return 0; +} + +static int thunderbay_emmc_phy_power_on(struct phy *phy) +{ + struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy); + unsigned long rate; + + /* Overwrite capability bits configurable in bootloader */ + update_reg(tbh_phy, CTRL_CFG_0, + SUPPORT_HS_MASK, SUPPORT_HS_SHIFT, 0x1); + update_reg(tbh_phy, CTRL_CFG_0, + SUPPORT_8B_MASK, SUPPORT_8B_SHIFT, 0x1); + update_reg(tbh_phy, CTRL_CFG_1, + SUPPORT_SDR50_MASK, SUPPORT_SDR50_SHIFT, 0x1); + update_reg(tbh_phy, CTRL_CFG_1, + SUPPORT_DDR50_MASK, SUPPORT_DDR50_SHIFT, 0x1); + update_reg(tbh_phy, CTRL_CFG_1, + SUPPORT_SDR104_MASK, SUPPORT_SDR104_SHIFT, 0x1); + update_reg(tbh_phy, CTRL_CFG_1, + SUPPORT_HS400_MASK, SUPPORT_HS400_SHIFT, 0x1); + update_reg(tbh_phy, CTRL_CFG_1, + SUPPORT_64B_MASK, SUPPORT_64B_SHIFT, 0x1); + + if (tbh_phy->phy_power_sts == PHY_UNINITIALIZED) { + /* Indicates initialization, settings for init, same as 400KHZ setting */ + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK, SEL_DLY_TXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK, SEL_DLY_RXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK, ITAP_DLY_ENA_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK, ITAP_DLY_SEL_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK, OTAP_DLY_ENA_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK, OTAP_DLY_SEL_SHIFT, 0); + update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK, DLL_TRIM_ICP_SHIFT, 0); + update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, DR_TY_SHIFT, 0x1); + + } else if 
(tbh_phy->phy_power_sts == PHY_INITIALIZED) { + /* Indicates actual clock setting */ + rate = clk_get_rate(tbh_phy->emmcclk); + switch (rate) { + case 200000000: + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK, + SEL_DLY_TXCLK_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK, + SEL_DLY_RXCLK_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK, + ITAP_DLY_ENA_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK, + ITAP_DLY_SEL_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK, + OTAP_DLY_ENA_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK, + OTAP_DLY_SEL_SHIFT, 2); + update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK, + DLL_TRIM_ICP_SHIFT, 0x8); + update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, + DR_TY_SHIFT, 0x1); + /* For HS400 only */ + update_reg(tbh_phy, PHY_CFG_2, SEL_STRB_MASK, + SEL_STRB_SHIFT, STRB); + break; + + case 50000000 ... 52000000: + /* For both HS and DDR52 this setting works */ + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK, + SEL_DLY_TXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK, + SEL_DLY_RXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK, + ITAP_DLY_ENA_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK, + ITAP_DLY_SEL_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK, + OTAP_DLY_ENA_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK, + OTAP_DLY_SEL_SHIFT, 4); + update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK, + DLL_TRIM_ICP_SHIFT, 0x8); + update_reg(tbh_phy, PHY_CFG_0, + DR_TY_MASK, DR_TY_SHIFT, 0x1); + break; + + case 400000: + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK, + SEL_DLY_TXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK, + SEL_DLY_RXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK, + ITAP_DLY_ENA_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK, + ITAP_DLY_SEL_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK, + OTAP_DLY_ENA_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK, + OTAP_DLY_SEL_SHIFT, 0); + update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK, + DLL_TRIM_ICP_SHIFT, 0); + update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, DR_TY_SHIFT, 0x1); + break; + + default: + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK, + SEL_DLY_TXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK, + SEL_DLY_RXCLK_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK, + ITAP_DLY_ENA_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK, + ITAP_DLY_SEL_SHIFT, 0x0); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK, + OTAP_DLY_ENA_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK, + OTAP_DLY_SEL_SHIFT, 2); + update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK, + DLL_TRIM_ICP_SHIFT, 0x8); + update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, + DR_TY_SHIFT, 0x1); + break; + } + /* Reset, init seq called without phy_power_off, this indicates init seq */ + tbh_phy->phy_power_sts = PHY_UNINITIALIZED; + } + + update_reg(tbh_phy, PHY_CFG_0, RETRIM_EN_MASK, RETRIM_EN_SHIFT, 0x1); + update_reg(tbh_phy, PHY_CFG_0, RETRIM_MASK, RETRIM_SHIFT, 0x0); + + return thunderbay_emmc_phy_power(phy, 1); +} + +static int thunderbay_emmc_phy_power_off(struct phy *phy) +{ + struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy); + + tbh_phy->phy_power_sts = PHY_INITIALIZED; + + return thunderbay_emmc_phy_power(phy, 0); +} + +static const struct phy_ops thunderbay_emmc_phy_ops = { + .init = thunderbay_emmc_phy_init, + 
.exit = thunderbay_emmc_phy_exit, + .power_on = thunderbay_emmc_phy_power_on, + .power_off = thunderbay_emmc_phy_power_off, + .owner = THIS_MODULE, +}; + +static const struct of_device_id thunderbay_emmc_phy_of_match[] = { + { .compatible = "intel,thunderbay-emmc-phy", + (void *)&thunderbay_emmc_phy_ops }, + {} +}; +MODULE_DEVICE_TABLE(of, thunderbay_emmc_phy_of_match); + +static int thunderbay_emmc_phy_probe(struct platform_device *pdev) +{ + struct thunderbay_emmc_phy *tbh_phy; + struct phy_provider *phy_provider; + struct device *dev = &pdev->dev; + const struct of_device_id *id; + struct phy *generic_phy; + struct resource *res; + + if (!dev->of_node) + return -ENODEV; + + tbh_phy = devm_kzalloc(dev, sizeof(*tbh_phy), GFP_KERNEL); + if (!tbh_phy) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + tbh_phy->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(tbh_phy->reg_base)) + return PTR_ERR(tbh_phy->reg_base); + + tbh_phy->phy_power_sts = PHY_UNINITIALIZED; + id = of_match_node(thunderbay_emmc_phy_of_match, pdev->dev.of_node); + if (!id) { + dev_err(dev, "failed to get match_node\n"); + return -EINVAL; + } + + generic_phy = devm_phy_create(dev, dev->of_node, id->data); + if (IS_ERR(generic_phy)) { + dev_err(dev, "failed to create PHY\n"); + return PTR_ERR(generic_phy); + } + + phy_set_drvdata(generic_phy, tbh_phy); + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static struct platform_driver thunderbay_emmc_phy_driver = { + .probe = thunderbay_emmc_phy_probe, + .driver = { + .name = "thunderbay-emmc-phy", + .of_match_table = thunderbay_emmc_phy_of_match, + }, +}; +module_platform_driver(thunderbay_emmc_phy_driver); + +MODULE_AUTHOR("Nandhini S <nandhini.srikandan@intel.com>"); +MODULE_AUTHOR("Rashmi A <rashmi.a@intel.com>"); +MODULE_DESCRIPTION("Intel Thunder Bay eMMC PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h new file mode 100644 index 000000000000..500fcdab165d --- /dev/null +++ b/drivers/phy/mediatek/phy-mtk-io.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 MediaTek Inc. 
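+ * + * Small register read-modify-write helpers shared by the MediaTek PHY + * drivers, used e.g. by phy-mtk-tphy.c as: + * + *	mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL, + *			    PA1_RG_VRT_SEL_VAL(instance->eye_vrt));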
+ * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#ifndef __PHY_MTK_H__ +#define __PHY_MTK_H__ + +#include <linux/io.h> + +static inline void mtk_phy_clear_bits(void __iomem *reg, u32 bits) +{ + u32 tmp = readl(reg); + + tmp &= ~bits; + writel(tmp, reg); +} + +static inline void mtk_phy_set_bits(void __iomem *reg, u32 bits) +{ + u32 tmp = readl(reg); + + tmp |= bits; + writel(tmp, reg); +} + +static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val) +{ + u32 tmp = readl(reg); + + tmp &= ~mask; + tmp |= val & mask; + writel(tmp, reg); +} + +#endif diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c index 28ad9403c441..67b005d5b9e3 100644 --- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c +++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c @@ -146,6 +146,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return -ENOMEM; mipi_tx->driver_data = of_device_get_match_data(dev); + if (!mipi_tx->driver_data) + return -ENODEV; mipi_tx->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mipi_tx->regs)) diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index cdcef865fe9e..6d307102f4f6 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -8,16 +8,18 @@ #include <dt-bindings/phy/phy.h> #include <linux/clk.h> #include <linux/delay.h> -#include <linux/io.h> #include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/module.h> +#include <linux/nvmem-consumer.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include "phy-mtk-io.h" + /* version V1 sub-banks offset base address */ /* banks shared by multiple phys */ #define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */ @@ -41,6 +43,9 @@ #define SSUSB_SIFSLV_V2_U3PHYD 0x200 #define SSUSB_SIFSLV_V2_U3PHYA 0x400 +#define U3P_MISC_REG1 0x04 +#define MR1_EFUSE_AUTO_LOAD_DIS BIT(6) + #define U3P_USBPHYACR0 0x000 #define PA0_RG_U2PLL_FORCE_ON BIT(15) #define PA0_USB20_PLL_PREDIV GENMASK(7, 6) @@ -133,6 +138,8 @@ #define P3C_RG_SWRST_U3_PHYD_FORCE_EN BIT(24) #define U3P_U3_PHYA_REG0 0x000 +#define P3A_RG_IEXT_INTR GENMASK(15, 10) +#define P3A_RG_IEXT_INTR_VAL(x) ((0x3f & (x)) << 10) #define P3A_RG_CLKDRV_OFF GENMASK(3, 2) #define P3A_RG_CLKDRV_OFF_VAL(x) ((0x3 & (x)) << 2) @@ -187,6 +194,19 @@ #define P3D_RG_FWAKE_TH GENMASK(21, 16) #define P3D_RG_FWAKE_TH_VAL(x) ((0x3f & (x)) << 16) +#define U3P_U3_PHYD_IMPCAL0 0x010 +#define P3D_RG_FORCE_TX_IMPEL BIT(31) +#define P3D_RG_TX_IMPEL GENMASK(28, 24) +#define P3D_RG_TX_IMPEL_VAL(x) ((0x1f & (x)) << 24) + +#define U3P_U3_PHYD_IMPCAL1 0x014 +#define P3D_RG_FORCE_RX_IMPEL BIT(31) +#define P3D_RG_RX_IMPEL GENMASK(28, 24) +#define P3D_RG_RX_IMPEL_VAL(x) ((0x1f & (x)) << 24) + +#define U3P_U3_PHYD_RSV 0x054 +#define P3D_RG_EFUSE_AUTO_LOAD_DIS BIT(12) + #define U3P_U3_PHYD_CDR1 0x05c #define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24) #define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24) @@ -307,6 +327,11 @@ struct mtk_phy_pdata { * 48M PLL, fix it by switching PLL to 26M from default 48M */ bool sw_pll_48m_to_26m; + /* + * Some SoCs (e.g. mt8195) may drop a bit when the efuse is auto-loaded + * by hardware, so loading the values by software is supported as a + * workaround; v2/v3 can also use the software path optionally. 
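+ * The calibration values come from nvmem cells (see phy_efuse_get()) + * and are written back with efuse auto-load disabled (see + * phy_efuse_set()).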
+ */ + bool sw_efuse_supported; enum mtk_phy_version version; }; @@ -336,6 +361,10 @@ struct mtk_phy_instance { struct regmap *type_sw; u32 type_sw_reg; u32 type_sw_index; + u32 efuse_sw_en; + u32 efuse_intr; + u32 efuse_tx_imp; + u32 efuse_rx_imp; int eye_src; int eye_vrt; int eye_term; @@ -373,15 +402,11 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy, return; /* enable USB ring oscillator */ - tmp = readl(com + U3P_USBPHYACR5); - tmp |= PA5_RG_U2_HSTX_SRCAL_EN; - writel(tmp, com + U3P_USBPHYACR5); + mtk_phy_set_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN); udelay(1); /*enable free run clock */ - tmp = readl(fmreg + U3P_U2FREQ_FMMONR1); - tmp |= P2F_RG_FRCK_EN; - writel(tmp, fmreg + U3P_U2FREQ_FMMONR1); + mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN); /* set cycle count as 1024, and select u2 channel */ tmp = readl(fmreg + U3P_U2FREQ_FMCR0); @@ -393,9 +418,7 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy, writel(tmp, fmreg + U3P_U2FREQ_FMCR0); /* enable frequency meter */ - tmp = readl(fmreg + U3P_U2FREQ_FMCR0); - tmp |= P2F_RG_FREQDET_EN; - writel(tmp, fmreg + U3P_U2FREQ_FMCR0); + mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN); /* ignore return value */ readl_poll_timeout(fmreg + U3P_U2FREQ_FMMONR1, tmp, @@ -404,14 +427,10 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy, fm_out = readl(fmreg + U3P_U2FREQ_VALUE); /* disable frequency meter */ - tmp = readl(fmreg + U3P_U2FREQ_FMCR0); - tmp &= ~P2F_RG_FREQDET_EN; - writel(tmp, fmreg + U3P_U2FREQ_FMCR0); + mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN); /*disable free run clock */ - tmp = readl(fmreg + U3P_U2FREQ_FMMONR1); - tmp &= ~P2F_RG_FRCK_EN; - writel(tmp, fmreg + U3P_U2FREQ_FMMONR1); + mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN); if (fm_out) { /* ( 1024 / FM_OUT ) x reference clock frequency x coef */ @@ -427,63 +446,44 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy, tphy->src_ref_clk, tphy->src_coef); /* set HS slew rate */ - tmp = readl(com + U3P_USBPHYACR5); - tmp &= ~PA5_RG_U2_HSTX_SRCTRL; - tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val); - writel(tmp, com + U3P_USBPHYACR5); + mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL, + PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val)); /* disable USB ring oscillator */ - tmp = readl(com + U3P_USBPHYACR5); - tmp &= ~PA5_RG_U2_HSTX_SRCAL_EN; - writel(tmp, com + U3P_USBPHYACR5); + mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN); } static void u3_phy_instance_init(struct mtk_tphy *tphy, struct mtk_phy_instance *instance) { struct u3phy_banks *u3_banks = &instance->u3_banks; - u32 tmp; /* gating PCIe Analog XTAL clock */ - tmp = readl(u3_banks->spllc + U3P_SPLLC_XTALCTL3); - tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD; - writel(tmp, u3_banks->spllc + U3P_SPLLC_XTALCTL3); + mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3, + XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD); /* gating XSQ */ - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0); - tmp &= ~P3A_RG_XTAL_EXT_EN_U3; - tmp |= P3A_RG_XTAL_EXT_EN_U3_VAL(2); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG0); - - tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG9); - tmp &= ~P3A_RG_RX_DAC_MUX; - tmp |= P3A_RG_RX_DAC_MUX_VAL(4); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG9); - - tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG6); - tmp &= ~P3A_RG_TX_EIDLE_CM; - tmp |= P3A_RG_TX_EIDLE_CM_VAL(0xe); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG6); - - tmp = 
readl(u3_banks->phyd + U3P_U3_PHYD_CDR1); - tmp &= ~(P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1); - tmp |= P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3); - writel(tmp, u3_banks->phyd + U3P_U3_PHYD_CDR1); - - tmp = readl(u3_banks->phyd + U3P_U3_PHYD_LFPS1); - tmp &= ~P3D_RG_FWAKE_TH; - tmp |= P3D_RG_FWAKE_TH_VAL(0x34); - writel(tmp, u3_banks->phyd + U3P_U3_PHYD_LFPS1); - - tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET1); - tmp &= ~P3D_RG_RXDET_STB2_SET; - tmp |= P3D_RG_RXDET_STB2_SET_VAL(0x10); - writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET1); - - tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET2); - tmp &= ~P3D_RG_RXDET_STB2_SET_P3; - tmp |= P3D_RG_RXDET_STB2_SET_P3_VAL(0x10); - writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET2); + mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_DA_REG0, + P3A_RG_XTAL_EXT_EN_U3, P3A_RG_XTAL_EXT_EN_U3_VAL(2)); + + mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG9, + P3A_RG_RX_DAC_MUX, P3A_RG_RX_DAC_MUX_VAL(4)); + + mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG6, + P3A_RG_TX_EIDLE_CM, P3A_RG_TX_EIDLE_CM_VAL(0xe)); + + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_CDR1, + P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1, + P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3)); + + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_LFPS1, + P3D_RG_FWAKE_TH, P3D_RG_FWAKE_TH_VAL(0x34)); + + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1, + P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10)); + + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2, + P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10)); dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index); } @@ -493,26 +493,20 @@ static void u2_phy_pll_26m_set(struct mtk_tphy *tphy, { struct u2phy_banks *u2_banks = &instance->u2_banks; void __iomem *com = u2_banks->com; - u32 tmp; if (!tphy->pdata->sw_pll_48m_to_26m) return; - tmp = readl(com + U3P_USBPHYACR0); - tmp &= ~PA0_USB20_PLL_PREDIV; - tmp |= PA0_USB20_PLL_PREDIV_VAL(0); - writel(tmp, com + U3P_USBPHYACR0); + mtk_phy_update_bits(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV, + PA0_USB20_PLL_PREDIV_VAL(0)); - tmp = readl(com + U3P_USBPHYACR2); - tmp &= ~PA2_RG_U2PLL_BW; - tmp |= PA2_RG_U2PLL_BW_VAL(3); - writel(tmp, com + U3P_USBPHYACR2); + mtk_phy_update_bits(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW, + PA2_RG_U2PLL_BW_VAL(3)); writel(P2R_RG_U2PLL_FBDIV_26M, com + U3P_U2PHYA_RESV); - tmp = readl(com + U3P_U2PHYA_RESV1); - tmp |= P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL; - writel(tmp, com + U3P_U2PHYA_RESV1); + mtk_phy_set_bits(com + U3P_U2PHYA_RESV1, + P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL); } static void u2_phy_instance_init(struct mtk_tphy *tphy, @@ -521,58 +515,40 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy, struct u2phy_banks *u2_banks = &instance->u2_banks; void __iomem *com = u2_banks->com; u32 index = instance->index; - u32 tmp; /* switch to USB function, and enable usb pll */ - tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~(P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM); - tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0); - writel(tmp, com + U3P_U2PHYDTM0); + mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM); + + mtk_phy_update_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN, + P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0)); - tmp = readl(com + U3P_U2PHYDTM1); - tmp &= ~P2C_RG_UART_EN; - writel(tmp, com + U3P_U2PHYDTM1); + mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_UART_EN); - tmp = readl(com + U3P_USBPHYACR0); - tmp |= 
PA0_RG_USB20_INTR_EN; - writel(tmp, com + U3P_USBPHYACR0); + mtk_phy_set_bits(com + U3P_USBPHYACR0, PA0_RG_USB20_INTR_EN); /* disable switch 100uA current to SSUSB */ - tmp = readl(com + U3P_USBPHYACR5); - tmp &= ~PA5_RG_U2_HS_100U_U3_EN; - writel(tmp, com + U3P_USBPHYACR5); - - if (!index) { - tmp = readl(com + U3P_U2PHYACR4); - tmp &= ~P2C_U2_GPIO_CTR_MSK; - writel(tmp, com + U3P_U2PHYACR4); - } + mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HS_100U_U3_EN); + + if (!index) + mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK); if (tphy->pdata->avoid_rx_sen_degradation) { if (!index) { - tmp = readl(com + U3P_USBPHYACR2); - tmp |= PA2_RG_SIF_U2PLL_FORCE_EN; - writel(tmp, com + U3P_USBPHYACR2); + mtk_phy_set_bits(com + U3P_USBPHYACR2, PA2_RG_SIF_U2PLL_FORCE_EN); - tmp = readl(com + U3D_U2PHYDCR0); - tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; - writel(tmp, com + U3D_U2PHYDCR0); + mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON); } else { - tmp = readl(com + U3D_U2PHYDCR0); - tmp |= P2C_RG_SIF_U2PLL_FORCE_ON; - writel(tmp, com + U3D_U2PHYDCR0); + mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON); - tmp = readl(com + U3P_U2PHYDTM0); - tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM; - writel(tmp, com + U3P_U2PHYDTM0); + mtk_phy_set_bits(com + U3P_U2PHYDTM0, + P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM); } } - tmp = readl(com + U3P_USBPHYACR6); - tmp &= ~PA6_RG_U2_BC11_SW_EN; /* DP/DM BC1.1 path Disable */ - tmp &= ~PA6_RG_U2_SQTH; - tmp |= PA6_RG_U2_SQTH_VAL(2); - writel(tmp, com + U3P_USBPHYACR6); + /* DP/DM BC1.1 path Disable */ + mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_BC11_SW_EN); + + mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, PA6_RG_U2_SQTH_VAL(2)); /* Workaround only for mt8195, HW fix it for others (V3) */ u2_phy_pll_26m_set(tphy, instance); @@ -586,30 +562,21 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy, struct u2phy_banks *u2_banks = &instance->u2_banks; void __iomem *com = u2_banks->com; u32 index = instance->index; - u32 tmp; - tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK); - writel(tmp, com + U3P_U2PHYDTM0); + mtk_phy_clear_bits(com + U3P_U2PHYDTM0, + P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK); /* OTG Enable */ - tmp = readl(com + U3P_USBPHYACR6); - tmp |= PA6_RG_U2_OTG_VBUSCMP_EN; - writel(tmp, com + U3P_USBPHYACR6); + mtk_phy_set_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN); + + mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID); - tmp = readl(com + U3P_U2PHYDTM1); - tmp |= P2C_RG_VBUSVALID | P2C_RG_AVALID; - tmp &= ~P2C_RG_SESSEND; - writel(tmp, com + U3P_U2PHYDTM1); + mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND); if (tphy->pdata->avoid_rx_sen_degradation && index) { - tmp = readl(com + U3D_U2PHYDCR0); - tmp |= P2C_RG_SIF_U2PLL_FORCE_ON; - writel(tmp, com + U3D_U2PHYDCR0); + mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON); - tmp = readl(com + U3P_U2PHYDTM0); - tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM; - writel(tmp, com + U3P_U2PHYDTM0); + mtk_phy_set_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM); } dev_dbg(tphy->dev, "%s(%d)\n", __func__, index); } @@ -620,30 +587,20 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy, struct u2phy_banks *u2_banks = &instance->u2_banks; void __iomem *com = u2_banks->com; u32 index = instance->index; - u32 tmp; - tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN); - writel(tmp, com + U3P_U2PHYDTM0); 
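+ /* clear the XCVRSEL/DATAIN values programmed at init time */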
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN); /* OTG Disable */ - tmp = readl(com + U3P_USBPHYACR6); - tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN; - writel(tmp, com + U3P_USBPHYACR6); + mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN); + + mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID); - tmp = readl(com + U3P_U2PHYDTM1); - tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID); - tmp |= P2C_RG_SESSEND; - writel(tmp, com + U3P_U2PHYDTM1); + mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND); if (tphy->pdata->avoid_rx_sen_degradation && index) { - tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~(P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM); - writel(tmp, com + U3P_U2PHYDTM0); + mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM); - tmp = readl(com + U3D_U2PHYDCR0); - tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; - writel(tmp, com + U3D_U2PHYDCR0); + mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON); } dev_dbg(tphy->dev, "%s(%d)\n", __func__, index); @@ -655,16 +612,11 @@ static void u2_phy_instance_exit(struct mtk_tphy *tphy, struct u2phy_banks *u2_banks = &instance->u2_banks; void __iomem *com = u2_banks->com; u32 index = instance->index; - u32 tmp; if (tphy->pdata->avoid_rx_sen_degradation && index) { - tmp = readl(com + U3D_U2PHYDCR0); - tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; - writel(tmp, com + U3D_U2PHYDCR0); + mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON); - tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~P2C_FORCE_SUSPENDM; - writel(tmp, com + U3P_U2PHYDTM0); + mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_SUSPENDM); } } @@ -697,69 +649,50 @@ static void pcie_phy_instance_init(struct mtk_tphy *tphy, struct mtk_phy_instance *instance) { struct u3phy_banks *u3_banks = &instance->u3_banks; - u32 tmp; + void __iomem *phya = u3_banks->phya; if (tphy->pdata->version != MTK_PHY_V1) return; - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0); - tmp &= ~(P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H); - tmp |= P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG0); + mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG0, + P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H, + P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2)); /* ref clk drive */ - tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG1); - tmp &= ~P3A_RG_CLKDRV_AMP; - tmp |= P3A_RG_CLKDRV_AMP_VAL(0x4); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG1); + mtk_phy_update_bits(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP, + P3A_RG_CLKDRV_AMP_VAL(0x4)); - tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG0); - tmp &= ~P3A_RG_CLKDRV_OFF; - tmp |= P3A_RG_CLKDRV_OFF_VAL(0x1); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG0); + mtk_phy_update_bits(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF, + P3A_RG_CLKDRV_OFF_VAL(0x1)); /* SSC delta -5000ppm */ - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG20); - tmp &= ~P3A_RG_PLL_DELTA1_PE2H; - tmp |= P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG20); + mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H, + P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c)); - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG25); - tmp &= ~P3A_RG_PLL_DELTA_PE2H; - tmp |= P3A_RG_PLL_DELTA_PE2H_VAL(0x36); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG25); + mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H, + P3A_RG_PLL_DELTA_PE2H_VAL(0x36)); /* change pll BW 0.6M */ - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG5); - 
tmp &= ~(P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H); - tmp |= P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG5); - - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG4); - tmp &= ~(P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H); - tmp |= P3A_RG_PLL_BC_PE2H_VAL(0x3); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG4); - - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG6); - tmp &= ~P3A_RG_PLL_IR_PE2H; - tmp |= P3A_RG_PLL_IR_PE2H_VAL(0x2); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG6); - - tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG7); - tmp &= ~P3A_RG_PLL_BP_PE2H; - tmp |= P3A_RG_PLL_BP_PE2H_VAL(0xa); - writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG7); + mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG5, + P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H, + P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1)); + + mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG4, + P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H, + P3A_RG_PLL_BC_PE2H_VAL(0x3)); + + mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H, + P3A_RG_PLL_IR_PE2H_VAL(0x2)); + + mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H, + P3A_RG_PLL_BP_PE2H_VAL(0xa)); /* Tx Detect Rx Timing: 10us -> 5us */ - tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET1); - tmp &= ~P3D_RG_RXDET_STB2_SET; - tmp |= P3D_RG_RXDET_STB2_SET_VAL(0x10); - writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET1); + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1, + P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10)); - tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET2); - tmp &= ~P3D_RG_RXDET_STB2_SET_P3; - tmp |= P3D_RG_RXDET_STB2_SET_P3_VAL(0x10); - writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET2); + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2, + P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10)); /* wait for PCIe subsys register to active */ usleep_range(2500, 3000); @@ -770,15 +703,12 @@ static void pcie_phy_instance_power_on(struct mtk_tphy *tphy, struct mtk_phy_instance *instance) { struct u3phy_banks *bank = &instance->u3_banks; - u32 tmp; - tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLD); - tmp &= ~(P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST); - writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLD); + mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD, + P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST); - tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLE); - tmp &= ~(P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD); - writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLE); + mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE, + P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD); } static void pcie_phy_instance_power_off(struct mtk_tphy *tphy, @@ -786,15 +716,12 @@ static void pcie_phy_instance_power_off(struct mtk_tphy *tphy, { struct u3phy_banks *bank = &instance->u3_banks; - u32 tmp; - tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLD); - tmp |= P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST; - writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLD); + mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD, + P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST); - tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLE); - tmp |= P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD; - writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLE); + mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE, + P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD); } static void sata_phy_instance_init(struct mtk_tphy *tphy, @@ -802,55 +729,42 @@ static void sata_phy_instance_init(struct mtk_tphy *tphy, { struct u3phy_banks 
*u3_banks = &instance->u3_banks; void __iomem *phyd = u3_banks->phyd; - u32 tmp; /* charge current adjustment */ - tmp = readl(phyd + ANA_RG_CTRL_SIGNAL6); - tmp &= ~(RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK); - tmp |= RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a); - writel(tmp, phyd + ANA_RG_CTRL_SIGNAL6); - - tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL4); - tmp &= ~RG_CDR_BIRLTD0_GEN1_MSK; - tmp |= RG_CDR_BIRLTD0_GEN1_VAL(0x18); - writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL4); - - tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL5); - tmp &= ~RG_CDR_BIRLTD0_GEN3_MSK; - tmp |= RG_CDR_BIRLTD0_GEN3_VAL(0x06); - writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL5); - - tmp = readl(phyd + ANA_RG_CTRL_SIGNAL4); - tmp &= ~(RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK); - tmp |= RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07); - writel(tmp, phyd + ANA_RG_CTRL_SIGNAL4); - - tmp = readl(phyd + PHYD_CTRL_SIGNAL_MODE4); - tmp &= ~(RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK); - tmp |= RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02); - writel(tmp, phyd + PHYD_CTRL_SIGNAL_MODE4); - - tmp = readl(phyd + PHYD_DESIGN_OPTION2); - tmp &= ~RG_LOCK_CNT_SEL_MSK; - tmp |= RG_LOCK_CNT_SEL_VAL(0x02); - writel(tmp, phyd + PHYD_DESIGN_OPTION2); - - tmp = readl(phyd + PHYD_DESIGN_OPTION9); - tmp &= ~(RG_T2_MIN_MSK | RG_TG_MIN_MSK | - RG_T2_MAX_MSK | RG_TG_MAX_MSK); - tmp |= RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04) | - RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e); - writel(tmp, phyd + PHYD_DESIGN_OPTION9); - - tmp = readl(phyd + ANA_RG_CTRL_SIGNAL1); - tmp &= ~RG_IDRV_0DB_GEN1_MSK; - tmp |= RG_IDRV_0DB_GEN1_VAL(0x20); - writel(tmp, phyd + ANA_RG_CTRL_SIGNAL1); - - tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL1); - tmp &= ~RG_EQ_DLEQ_LFI_GEN1_MSK; - tmp |= RG_EQ_DLEQ_LFI_GEN1_VAL(0x03); - writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL1); + mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL6, + RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK, + RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a)); + + mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK, + RG_CDR_BIRLTD0_GEN1_VAL(0x18)); + + mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK, + RG_CDR_BIRLTD0_GEN3_VAL(0x06)); + + mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL4, + RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK, + RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07)); + + mtk_phy_update_bits(phyd + PHYD_CTRL_SIGNAL_MODE4, + RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK, + RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02)); + + mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK, + RG_LOCK_CNT_SEL_VAL(0x02)); + + mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9, + RG_T2_MIN_MSK | RG_TG_MIN_MSK, + RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04)); + + mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9, + RG_T2_MAX_MSK | RG_TG_MAX_MSK, + RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e)); + + mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK, + RG_IDRV_0DB_GEN1_VAL(0x20)); + + mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK, + RG_EQ_DLEQ_LFI_GEN1_VAL(0x03)); dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index); } @@ -938,48 +852,29 @@ static void u2_phy_props_set(struct mtk_tphy *tphy, { struct u2phy_banks *u2_banks = &instance->u2_banks; void __iomem *com = u2_banks->com; - u32 tmp; - if (instance->bc12_en) { - tmp = readl(com + U3P_U2PHYBC12C); - tmp |= P2C_RG_CHGDT_EN; /* BC1.2 path Enable */ - writel(tmp, com + U3P_U2PHYBC12C); - } + if 
(instance->bc12_en) /* BC1.2 path Enable */ + mtk_phy_set_bits(com + U3P_U2PHYBC12C, P2C_RG_CHGDT_EN); - if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src) { - tmp = readl(com + U3P_USBPHYACR5); - tmp &= ~PA5_RG_U2_HSTX_SRCTRL; - tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src); - writel(tmp, com + U3P_USBPHYACR5); - } + if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src) + mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL, + PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src)); - if (instance->eye_vrt) { - tmp = readl(com + U3P_USBPHYACR1); - tmp &= ~PA1_RG_VRT_SEL; - tmp |= PA1_RG_VRT_SEL_VAL(instance->eye_vrt); - writel(tmp, com + U3P_USBPHYACR1); - } + if (instance->eye_vrt) + mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL, + PA1_RG_VRT_SEL_VAL(instance->eye_vrt)); - if (instance->eye_term) { - tmp = readl(com + U3P_USBPHYACR1); - tmp &= ~PA1_RG_TERM_SEL; - tmp |= PA1_RG_TERM_SEL_VAL(instance->eye_term); - writel(tmp, com + U3P_USBPHYACR1); - } + if (instance->eye_term) + mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL, + PA1_RG_TERM_SEL_VAL(instance->eye_term)); - if (instance->intr) { - tmp = readl(com + U3P_USBPHYACR1); - tmp &= ~PA1_RG_INTR_CAL; - tmp |= PA1_RG_INTR_CAL_VAL(instance->intr); - writel(tmp, com + U3P_USBPHYACR1); - } + if (instance->intr) + mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL, + PA1_RG_INTR_CAL_VAL(instance->intr)); - if (instance->discth) { - tmp = readl(com + U3P_USBPHYACR6); - tmp &= ~PA6_RG_U2_DISCTH; - tmp |= PA6_RG_U2_DISCTH_VAL(instance->discth); - writel(tmp, com + U3P_USBPHYACR6); - } + if (instance->discth) + mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH, + PA6_RG_U2_DISCTH_VAL(instance->discth)); } /* type switch for usb3/pcie/sgmii/sata */ @@ -1040,6 +935,117 @@ static int phy_type_set(struct mtk_phy_instance *instance) return 0; } +static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instance) +{ + struct device *dev = &instance->phy->dev; + int ret = 0; + + /* tphy v1 doesn't support sw efuse, skip it */ + if (!tphy->pdata->sw_efuse_supported) { + instance->efuse_sw_en = 0; + return 0; + } + + /* software efuse is optional */ + instance->efuse_sw_en = device_property_read_bool(dev, "nvmem-cells"); + if (!instance->efuse_sw_en) + return 0; + + switch (instance->type) { + case PHY_TYPE_USB2: + ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr); + if (ret) { + dev_err(dev, "fail to get u2 intr efuse, %d\n", ret); + break; + } + + /* no efuse, ignore it */ + if (!instance->efuse_intr) { + dev_warn(dev, "no u2 intr efuse, but dts enables it\n"); + instance->efuse_sw_en = 0; + break; + } + + dev_dbg(dev, "u2 efuse - intr %x\n", instance->efuse_intr); + break; + + case PHY_TYPE_USB3: + case PHY_TYPE_PCIE: + ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr); + if (ret) { + dev_err(dev, "fail to get u3 intr efuse, %d\n", ret); + break; + } + + ret = nvmem_cell_read_variable_le_u32(dev, "rx_imp", &instance->efuse_rx_imp); + if (ret) { + dev_err(dev, "fail to get u3 rx_imp efuse, %d\n", ret); + break; + } + + ret = nvmem_cell_read_variable_le_u32(dev, "tx_imp", &instance->efuse_tx_imp); + if (ret) { + dev_err(dev, "fail to get u3 tx_imp efuse, %d\n", ret); + break; + } + + /* no efuse, ignore it */ + if (!instance->efuse_intr && + !instance->efuse_rx_imp && + !instance->efuse_tx_imp) { + dev_warn(dev, "no u3 intr efuse, but dts enables it\n"); + instance->efuse_sw_en = 0; + break; + } + + dev_dbg(dev, "u3 efuse - intr %x, rx_imp %x, tx_imp %x\n", + instance->efuse_intr, instance->efuse_rx_imp, instance->efuse_tx_imp); + break; + default: + dev_err(dev, "no sw efuse for type %d\n", instance->type); + ret = -EINVAL; + } + + return ret; +} +
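+/* + * The values cached by phy_efuse_get() above are programmed into the PHY + * from mtk_phy_init() via phy_efuse_set() below: efuse auto-load is + * disabled first, then the calibration words are forced in by software. + * The cells are declared per PHY instance in the device tree, along these + * lines (node and labels are hypothetical; the cell names "intr", "rx_imp" + * and "tx_imp" are the ones read above): + * + *	u3port0: usb-phy@700 { + *		... + *		nvmem-cells = <&u3_intr>, <&u3_rx_imp>, <&u3_tx_imp>; + *		nvmem-cell-names = "intr", "rx_imp", "tx_imp"; + *	}; + */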
+static void phy_efuse_set(struct mtk_phy_instance *instance) +{ + struct device *dev = &instance->phy->dev; + struct u2phy_banks *u2_banks = &instance->u2_banks; + struct u3phy_banks *u3_banks = &instance->u3_banks; + + if (!instance->efuse_sw_en) + return; + + switch (instance->type) { + case PHY_TYPE_USB2: + mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1, MR1_EFUSE_AUTO_LOAD_DIS); + + mtk_phy_update_bits(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL, + PA1_RG_INTR_CAL_VAL(instance->efuse_intr)); + break; + case PHY_TYPE_USB3: + case PHY_TYPE_PCIE: + mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_RSV, P3D_RG_EFUSE_AUTO_LOAD_DIS); + + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL, + P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp)); + mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL); + + mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL, + P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp)); + mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL); + + mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR, + P3A_RG_IEXT_INTR_VAL(instance->efuse_intr)); + break; + default: + dev_warn(dev, "no sw efuse for type %d\n", instance->type); + break; + } +} + static int mtk_phy_init(struct phy *phy) { struct mtk_phy_instance *instance = phy_get_drvdata(phy); @@ -1050,6 +1056,8 @@ static int mtk_phy_init(struct phy *phy) if (ret) return ret; + phy_efuse_set(instance); + switch (instance->type) { case PHY_TYPE_USB2: u2_phy_instance_init(tphy, instance); @@ -1134,6 +1142,7 @@ static struct phy *mtk_phy_xlate(struct device *dev, struct mtk_phy_instance *instance = NULL; struct device_node *phy_np = args->np; int index; + int ret; if (args->args_count != 1) { dev_err(dev, "invalid number of cells in 'phy' property\n"); @@ -1174,6 +1183,10 @@ static struct phy *mtk_phy_xlate(struct device *dev, return ERR_PTR(-EINVAL); } + ret = phy_efuse_get(tphy, instance); + if (ret) + return ERR_PTR(ret); + phy_parse_property(tphy, instance); phy_type_set(instance); @@ -1196,10 +1209,12 @@ static const struct mtk_phy_pdata tphy_v1_pdata = { static const struct mtk_phy_pdata tphy_v2_pdata = { .avoid_rx_sen_degradation = false, + .sw_efuse_supported = true, .version = MTK_PHY_V2, }; static const struct mtk_phy_pdata tphy_v3_pdata = { + .sw_efuse_supported = true, .version = MTK_PHY_V3, }; @@ -1210,6 +1225,7 @@ static const struct mtk_phy_pdata mt8173_pdata = { static const struct mtk_phy_pdata mt8195_pdata = { .sw_pll_48m_to_26m = true, + .sw_efuse_supported = true, .version = MTK_PHY_V3, }; diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c index 8c51131945c0..c0cdb78f77fa 100644 --- a/drivers/phy/mediatek/phy-mtk-xsphy.c +++ b/drivers/phy/mediatek/phy-mtk-xsphy.c @@ -10,13 +10,14 @@ #include <dt-bindings/phy/phy.h> #include <linux/clk.h> #include <linux/delay.h> -#include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include "phy-mtk-io.h" + /* u2 phy banks */ #define SSUSB_SIFSLV_MISC 0x000 #define SSUSB_SIFSLV_U2FREQ 0x100 @@ -126,26 +127,18 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy, return; /* enable USB ring 
oscillator */ - tmp = readl(pbase + XSP_USBPHYACR5); - tmp |= P2A5_RG_HSTX_SRCAL_EN; - writel(tmp, pbase + XSP_USBPHYACR5); + mtk_phy_set_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN); udelay(1); /* wait clock stable */ /* enable free run clock */ - tmp = readl(pbase + XSP_U2FREQ_FMMONR1); - tmp |= P2F_RG_FRCK_EN; - writel(tmp, pbase + XSP_U2FREQ_FMMONR1); + mtk_phy_set_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN); /* set cycle count as 1024 */ - tmp = readl(pbase + XSP_U2FREQ_FMCR0); - tmp &= ~(P2F_RG_CYCLECNT); - tmp |= P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT); - writel(tmp, pbase + XSP_U2FREQ_FMCR0); + mtk_phy_update_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT, + P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT)); /* enable frequency meter */ - tmp = readl(pbase + XSP_U2FREQ_FMCR0); - tmp |= P2F_RG_FREQDET_EN; - writel(tmp, pbase + XSP_U2FREQ_FMCR0); + mtk_phy_set_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN); /* ignore return value */ readl_poll_timeout(pbase + XSP_U2FREQ_FMMONR1, tmp, @@ -154,14 +147,10 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy, fm_out = readl(pbase + XSP_U2FREQ_MMONR0); /* disable frequency meter */ - tmp = readl(pbase + XSP_U2FREQ_FMCR0); - tmp &= ~P2F_RG_FREQDET_EN; - writel(tmp, pbase + XSP_U2FREQ_FMCR0); + mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN); /* disable free run clock */ - tmp = readl(pbase + XSP_U2FREQ_FMMONR1); - tmp &= ~P2F_RG_FRCK_EN; - writel(tmp, pbase + XSP_U2FREQ_FMMONR1); + mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN); if (fm_out) { /* (1024 / FM_OUT) x reference clock frequency x coefficient */ @@ -177,31 +166,22 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy, xsphy->src_ref_clk, xsphy->src_coef); /* set HS slew rate */ - tmp = readl(pbase + XSP_USBPHYACR5); - tmp &= ~P2A5_RG_HSTX_SRCTRL; - tmp |= P2A5_RG_HSTX_SRCTRL_VAL(calib_val); - writel(tmp, pbase + XSP_USBPHYACR5); + mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL, + P2A5_RG_HSTX_SRCTRL_VAL(calib_val)); /* disable USB ring oscillator */ - tmp = readl(pbase + XSP_USBPHYACR5); - tmp &= ~P2A5_RG_HSTX_SRCAL_EN; - writel(tmp, pbase + XSP_USBPHYACR5); + mtk_phy_clear_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN); } static void u2_phy_instance_init(struct mtk_xsphy *xsphy, struct xsphy_instance *inst) { void __iomem *pbase = inst->port_base; - u32 tmp; /* DP/DM BC1.1 path Disable */ - tmp = readl(pbase + XSP_USBPHYACR6); - tmp &= ~P2A6_RG_BC11_SW_EN; - writel(tmp, pbase + XSP_USBPHYACR6); + mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_BC11_SW_EN); - tmp = readl(pbase + XSP_USBPHYACR0); - tmp |= P2A0_RG_INTR_EN; - writel(tmp, pbase + XSP_USBPHYACR0); + mtk_phy_set_bits(pbase + XSP_USBPHYACR0, P2A0_RG_INTR_EN); } static void u2_phy_instance_power_on(struct mtk_xsphy *xsphy, @@ -209,16 +189,12 @@ static void u2_phy_instance_power_on(struct mtk_xsphy *xsphy, { void __iomem *pbase = inst->port_base; u32 index = inst->index; - u32 tmp; - tmp = readl(pbase + XSP_USBPHYACR6); - tmp |= P2A6_RG_OTG_VBUSCMP_EN; - writel(tmp, pbase + XSP_USBPHYACR6); + mtk_phy_set_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN); - tmp = readl(pbase + XSP_U2PHYDTM1); - tmp |= P2D_RG_VBUSVALID | P2D_RG_AVALID; - tmp &= ~P2D_RG_SESSEND; - writel(tmp, pbase + XSP_U2PHYDTM1); + mtk_phy_update_bits(pbase + XSP_U2PHYDTM1, + P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND, + P2D_RG_VBUSVALID | P2D_RG_AVALID); dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index); } @@ -228,16 +204,12 @@ static void 
u2_phy_instance_power_off(struct mtk_xsphy *xsphy, { void __iomem *pbase = inst->port_base; u32 index = inst->index; - u32 tmp; - tmp = readl(pbase + XSP_USBPHYACR6); - tmp &= ~P2A6_RG_OTG_VBUSCMP_EN; - writel(tmp, pbase + XSP_USBPHYACR6); + mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN); - tmp = readl(pbase + XSP_U2PHYDTM1); - tmp &= ~(P2D_RG_VBUSVALID | P2D_RG_AVALID); - tmp |= P2D_RG_SESSEND; - writel(tmp, pbase + XSP_U2PHYDTM1); + mtk_phy_update_bits(pbase + XSP_U2PHYDTM1, + P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND, + P2D_RG_SESSEND); dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index); } @@ -306,63 +278,43 @@ static void u2_phy_props_set(struct mtk_xsphy *xsphy, struct xsphy_instance *inst) { void __iomem *pbase = inst->port_base; - u32 tmp; - if (inst->efuse_intr) { - tmp = readl(pbase + XSP_USBPHYACR1); - tmp &= ~P2A1_RG_INTR_CAL; - tmp |= P2A1_RG_INTR_CAL_VAL(inst->efuse_intr); - writel(tmp, pbase + XSP_USBPHYACR1); - } + if (inst->efuse_intr) + mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL, + P2A1_RG_INTR_CAL_VAL(inst->efuse_intr)); - if (inst->eye_src) { - tmp = readl(pbase + XSP_USBPHYACR5); - tmp &= ~P2A5_RG_HSTX_SRCTRL; - tmp |= P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src); - writel(tmp, pbase + XSP_USBPHYACR5); - } + if (inst->eye_src) + mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL, + P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src)); - if (inst->eye_vrt) { - tmp = readl(pbase + XSP_USBPHYACR1); - tmp &= ~P2A1_RG_VRT_SEL; - tmp |= P2A1_RG_VRT_SEL_VAL(inst->eye_vrt); - writel(tmp, pbase + XSP_USBPHYACR1); - } + if (inst->eye_vrt) + mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL, + P2A1_RG_VRT_SEL_VAL(inst->eye_vrt)); - if (inst->eye_term) { - tmp = readl(pbase + XSP_USBPHYACR1); - tmp &= ~P2A1_RG_TERM_SEL; - tmp |= P2A1_RG_TERM_SEL_VAL(inst->eye_term); - writel(tmp, pbase + XSP_USBPHYACR1); - } + if (inst->eye_term) + mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL, + P2A1_RG_TERM_SEL_VAL(inst->eye_term)); } static void u3_phy_props_set(struct mtk_xsphy *xsphy, struct xsphy_instance *inst) { void __iomem *pbase = inst->port_base; - u32 tmp; - if (inst->efuse_intr) { - tmp = readl(xsphy->glb_base + SSPXTP_PHYA_GLB_00); - tmp &= ~RG_XTP_GLB_BIAS_INTR_CTRL; - tmp |= RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr); - writel(tmp, xsphy->glb_base + SSPXTP_PHYA_GLB_00); - } + if (inst->efuse_intr) + mtk_phy_update_bits(xsphy->glb_base + SSPXTP_PHYA_GLB_00, + RG_XTP_GLB_BIAS_INTR_CTRL, + RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr)); - if (inst->efuse_tx_imp) { - tmp = readl(pbase + SSPXTP_PHYA_LN_04); - tmp &= ~RG_XTP_LN0_TX_IMPSEL; - tmp |= RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp); - writel(tmp, pbase + SSPXTP_PHYA_LN_04); - } + if (inst->efuse_tx_imp) + mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_04, + RG_XTP_LN0_TX_IMPSEL, + RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp)); - if (inst->efuse_rx_imp) { - tmp = readl(pbase + SSPXTP_PHYA_LN_14); - tmp &= ~RG_XTP_LN0_RX_IMPSEL; - tmp |= RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp); - writel(tmp, pbase + SSPXTP_PHYA_LN_14); - } + if (inst->efuse_rx_imp) + mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_14, + RG_XTP_LN0_RX_IMPSEL, + RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp)); } static int mtk_phy_init(struct phy *phy) diff --git a/drivers/phy/microchip/Kconfig b/drivers/phy/microchip/Kconfig index 3728a284bf64..38039ed0754c 100644 --- a/drivers/phy/microchip/Kconfig +++ b/drivers/phy/microchip/Kconfig @@ -11,3 +11,11 @@ config PHY_SPARX5_SERDES depends on HAS_IOMEM 
help Enable this for support of the 10G/25G SerDes on Microchip Sparx5. + +config PHY_LAN966X_SERDES + tristate "SerDes PHY driver for Microchip LAN966X" + select GENERIC_PHY + depends on OF + depends on MFD_SYSCON + help + Enable this for support of the SerDes muxing on Microchip LAN966X. diff --git a/drivers/phy/microchip/Makefile b/drivers/phy/microchip/Makefile index 7b98345712aa..fd73b87960a5 100644 --- a/drivers/phy/microchip/Makefile +++ b/drivers/phy/microchip/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_PHY_SPARX5_SERDES) := sparx5_serdes.o +obj-$(CONFIG_PHY_LAN966X_SERDES) += lan966x_serdes.o diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c new file mode 100644 index 000000000000..e86a879b92b5 --- /dev/null +++ b/drivers/phy/microchip/lan966x_serdes.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +#include <dt-bindings/phy/phy-lan966x-serdes.h> +#include "lan966x_serdes_regs.h" + +#define PLL_CONF_MASK GENMASK(4, 3) +#define PLL_CONF_25MHZ 0 +#define PLL_CONF_125MHZ 1 +#define PLL_CONF_SERDES_125MHZ 2 +#define PLL_CONF_BYPASS 3 + +#define lan_offset_(id, tinst, tcnt, \ + gbase, ginst, gcnt, gwidth, \ + raddr, rinst, rcnt, rwidth) \ + (gbase + ((ginst) * gwidth) + raddr + ((rinst) * rwidth)) +#define lan_offset(...) lan_offset_(__VA_ARGS__) + +#define lan_rmw(val, mask, reg, off) \ + lan_rmw_(val, mask, reg, lan_offset(off)) + +#define SERDES_MUX(_idx, _port, _mode, _submode, _mask, _mux) { \ + .idx = _idx, \ + .port = _port, \ + .mode = _mode, \ + .submode = _submode, \ + .mask = _mask, \ + .mux = _mux, \ +} + +#define SERDES_MUX_GMII(i, p, m, c) \ + SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_GMII, m, c) +#define SERDES_MUX_SGMII(i, p, m, c) \ + SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_SGMII, m, c) +#define SERDES_MUX_QSGMII(i, p, m, c) \ + SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c) +#define SERDES_MUX_RGMII(i, p, m, c) \ + SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c) + +static void lan_rmw_(u32 val, u32 mask, void __iomem *mem, u32 offset) +{ + u32 v; + + v = readl(mem + offset); + v = (v & ~mask) | (val & mask); + writel(v, mem + offset); +} + +struct serdes_mux { + u8 idx; + u8 port; + enum phy_mode mode; + int submode; + u32 mask; + u32 mux; +}; + +static const struct serdes_mux lan966x_serdes_muxes[] = { + SERDES_MUX_QSGMII(SERDES6G(1), 0, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))), + SERDES_MUX_QSGMII(SERDES6G(1), 1, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))), + SERDES_MUX_QSGMII(SERDES6G(1), 2, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))), + SERDES_MUX_QSGMII(SERDES6G(1), 3, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))), + + SERDES_MUX_QSGMII(SERDES6G(2), 4, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))), + SERDES_MUX_QSGMII(SERDES6G(2), 5, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))), + SERDES_MUX_QSGMII(SERDES6G(2), 6, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))), + SERDES_MUX_QSGMII(SERDES6G(2), 7, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))), + + SERDES_MUX_GMII(CU(0), 0, HSIO_HW_CFG_GMII_ENA, + HSIO_HW_CFG_GMII_ENA_SET(BIT(0))), + SERDES_MUX_GMII(CU(1), 1, HSIO_HW_CFG_GMII_ENA, 
HSIO_HW_CFG_GMII_ENA_SET(BIT(1))), + + SERDES_MUX_SGMII(SERDES6G(0), 0, HSIO_HW_CFG_SD6G_0_CFG, 0), + SERDES_MUX_SGMII(SERDES6G(1), 1, HSIO_HW_CFG_SD6G_1_CFG, 0), + SERDES_MUX_SGMII(SERDES6G(0), 2, HSIO_HW_CFG_SD6G_0_CFG, + HSIO_HW_CFG_SD6G_0_CFG_SET(1)), + SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG, + HSIO_HW_CFG_SD6G_1_CFG_SET(1)), + + SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG | + HSIO_HW_CFG_RGMII_ENA, + HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) | + HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))), + SERDES_MUX_RGMII(RGMII(1), 3, HSIO_HW_CFG_RGMII_1_CFG | + HSIO_HW_CFG_RGMII_ENA, + HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) | + HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))), + SERDES_MUX_RGMII(RGMII(0), 5, HSIO_HW_CFG_RGMII_0_CFG | + HSIO_HW_CFG_RGMII_ENA, + HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) | + HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))), + SERDES_MUX_RGMII(RGMII(1), 6, HSIO_HW_CFG_RGMII_1_CFG | + HSIO_HW_CFG_RGMII_ENA, + HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) | + HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))), +}; + +struct serdes_ctrl { + void __iomem *regs; + struct device *dev; + struct phy *phys[SERDES_MAX]; + int ref125; +}; + +struct serdes_macro { + u8 idx; + int port; + struct serdes_ctrl *ctrl; + int speed; + phy_interface_t mode; +}; + +enum lan966x_sd6g40_mode { + LAN966X_SD6G40_MODE_QSGMII, + LAN966X_SD6G40_MODE_SGMII, +}; + +enum lan966x_sd6g40_ltx2rx { + LAN966X_SD6G40_TX2RX_LOOP_NONE, + LAN966X_SD6G40_LTX2RX +}; + +struct lan966x_sd6g40_setup_args { + enum lan966x_sd6g40_mode mode; + enum lan966x_sd6g40_ltx2rx tx2rx_loop; + bool txinvert; + bool rxinvert; + bool refclk125M; + bool mute; +}; + +struct lan966x_sd6g40_mode_args { + enum lan966x_sd6g40_mode mode; + u8 lane_10bit_sel; + u8 mpll_multiplier; + u8 ref_clkdiv2; + u8 tx_rate; + u8 rx_rate; +}; + +struct lan966x_sd6g40_setup { + u8 rx_term_en; + u8 lane_10bit_sel; + u8 tx_invert; + u8 rx_invert; + u8 mpll_multiplier; + u8 lane_loopbk_en; + u8 ref_clkdiv2; + u8 tx_rate; + u8 rx_rate; +}; + +static int lan966x_sd6g40_reg_cfg(struct serdes_macro *macro, + struct lan966x_sd6g40_setup *res_struct, + u32 idx) +{ + u32 value; + + /* Note: SerDes HSIO is configured in 1G_LAN mode */ + lan_rmw(HSIO_SD_CFG_LANE_10BIT_SEL_SET(res_struct->lane_10bit_sel) | + HSIO_SD_CFG_RX_RATE_SET(res_struct->rx_rate) | + HSIO_SD_CFG_TX_RATE_SET(res_struct->tx_rate) | + HSIO_SD_CFG_TX_INVERT_SET(res_struct->tx_invert) | + HSIO_SD_CFG_RX_INVERT_SET(res_struct->rx_invert) | + HSIO_SD_CFG_LANE_LOOPBK_EN_SET(res_struct->lane_loopbk_en) | + HSIO_SD_CFG_RX_RESET_SET(0) | + HSIO_SD_CFG_TX_RESET_SET(0), + HSIO_SD_CFG_LANE_10BIT_SEL | + HSIO_SD_CFG_RX_RATE | + HSIO_SD_CFG_TX_RATE | + HSIO_SD_CFG_TX_INVERT | + HSIO_SD_CFG_RX_INVERT | + HSIO_SD_CFG_LANE_LOOPBK_EN | + HSIO_SD_CFG_RX_RESET | + HSIO_SD_CFG_TX_RESET, + macro->ctrl->regs, HSIO_SD_CFG(idx)); + + lan_rmw(HSIO_MPLL_CFG_MPLL_MULTIPLIER_SET(res_struct->mpll_multiplier) | + HSIO_MPLL_CFG_REF_CLKDIV2_SET(res_struct->ref_clkdiv2), + HSIO_MPLL_CFG_MPLL_MULTIPLIER | + HSIO_MPLL_CFG_REF_CLKDIV2, + macro->ctrl->regs, HSIO_MPLL_CFG(idx)); + + lan_rmw(HSIO_SD_CFG_RX_TERM_EN_SET(res_struct->rx_term_en), + HSIO_SD_CFG_RX_TERM_EN, + macro->ctrl->regs, HSIO_SD_CFG(idx)); + + lan_rmw(HSIO_MPLL_CFG_REF_SSP_EN_SET(1), + HSIO_MPLL_CFG_REF_SSP_EN, + macro->ctrl->regs, HSIO_MPLL_CFG(idx)); + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + lan_rmw(HSIO_SD_CFG_PHY_RESET_SET(0), + HSIO_SD_CFG_PHY_RESET, + macro->ctrl->regs, HSIO_SD_CFG(idx)); + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + lan_rmw(HSIO_MPLL_CFG_MPLL_EN_SET(1), + 
HSIO_MPLL_CFG_MPLL_EN, + macro->ctrl->regs, HSIO_MPLL_CFG(idx)); + + usleep_range(7 * USEC_PER_MSEC, 8 * USEC_PER_MSEC); + + value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx))); + value = HSIO_SD_STAT_MPLL_STATE_GET(value); + if (value != 0x1) { + dev_err(macro->ctrl->dev, + "sd_sd_stat[%u] mpll_state: expected 0x1, got 0x%x\n", + idx, value); + return -EIO; + } + + lan_rmw(HSIO_SD_CFG_TX_CM_EN_SET(1), + HSIO_SD_CFG_TX_CM_EN, + macro->ctrl->regs, HSIO_SD_CFG(idx)); + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx))); + value = HSIO_SD_STAT_TX_CM_STATE_GET(value); + if (value != 0x1) { + dev_err(macro->ctrl->dev, + "sd_sd_stat[%u] tx_cm_state: expected 0x1, got 0x%x\n", + idx, value); + return -EIO; + } + + lan_rmw(HSIO_SD_CFG_RX_PLL_EN_SET(1) | + HSIO_SD_CFG_TX_EN_SET(1), + HSIO_SD_CFG_RX_PLL_EN | + HSIO_SD_CFG_TX_EN, + macro->ctrl->regs, HSIO_SD_CFG(idx)); + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + /* Waiting for the serdes rx DPLL to lock... */ + value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx))); + value = HSIO_SD_STAT_RX_PLL_STATE_GET(value); + if (value != 0x1) { + dev_err(macro->ctrl->dev, + "sd_sd_stat[%u] rx_pll_state: expected 0x1, got 0x%x\n", + idx, value); + return -EIO; + } + + /* Waiting for the serdes tx to become operational... */ + value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx))); + value = HSIO_SD_STAT_TX_STATE_GET(value); + if (value != 0x1) { + dev_err(macro->ctrl->dev, + "sd_sd_stat[%u] tx_state: expected 0x1, got 0x%x\n", + idx, value); + return -EIO; + } + + lan_rmw(HSIO_SD_CFG_TX_DATA_EN_SET(1) | + HSIO_SD_CFG_RX_DATA_EN_SET(1), + HSIO_SD_CFG_TX_DATA_EN | + HSIO_SD_CFG_RX_DATA_EN, + macro->ctrl->regs, HSIO_SD_CFG(idx)); + + return 0; +} + +static int lan966x_sd6g40_get_conf_from_mode(struct serdes_macro *macro, + enum lan966x_sd6g40_mode f_mode, + bool ref125M, + struct lan966x_sd6g40_mode_args *ret_val) +{ + switch (f_mode) { + case LAN966X_SD6G40_MODE_QSGMII: + ret_val->lane_10bit_sel = 0; + if (ref125M) { + ret_val->mpll_multiplier = 40; + ret_val->ref_clkdiv2 = 0x1; + ret_val->tx_rate = 0x0; + ret_val->rx_rate = 0x0; + } else { + ret_val->mpll_multiplier = 100; + ret_val->ref_clkdiv2 = 0x0; + ret_val->tx_rate = 0x0; + ret_val->rx_rate = 0x0; + } + break; + + case LAN966X_SD6G40_MODE_SGMII: + ret_val->lane_10bit_sel = 1; + if (ref125M) { + ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 50 : 40; + ret_val->ref_clkdiv2 = 0x1; + ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2; + ret_val->rx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2; + } else { + ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 125 : 100; + ret_val->ref_clkdiv2 = 0x0; + ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2; + ret_val->rx_rate = macro->speed == SPEED_2500 ? 
0x1 : 0x2; + } + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int lan966x_calc_sd6g40_setup_lane(struct serdes_macro *macro, + struct lan966x_sd6g40_setup_args config, + struct lan966x_sd6g40_setup *ret_val) +{ + struct lan966x_sd6g40_mode_args sd6g40_mode; + struct lan966x_sd6g40_mode_args *mode_args = &sd6g40_mode; + int ret; + + ret = lan966x_sd6g40_get_conf_from_mode(macro, config.mode, + config.refclk125M, mode_args); + if (ret) + return ret; + + ret_val->lane_10bit_sel = mode_args->lane_10bit_sel; + ret_val->rx_rate = mode_args->rx_rate; + ret_val->tx_rate = mode_args->tx_rate; + ret_val->mpll_multiplier = mode_args->mpll_multiplier; + ret_val->ref_clkdiv2 = mode_args->ref_clkdiv2; + ret_val->rx_term_en = 0; + + if (config.tx2rx_loop == LAN966X_SD6G40_LTX2RX) + ret_val->lane_loopbk_en = 1; + else + ret_val->lane_loopbk_en = 0; + + ret_val->tx_invert = !!config.txinvert; + ret_val->rx_invert = !!config.rxinvert; + + return 0; +} + +static int lan966x_sd6g40_setup_lane(struct serdes_macro *macro, + struct lan966x_sd6g40_setup_args config, + u32 idx) +{ + struct lan966x_sd6g40_setup calc_results = {}; + int ret; + + ret = lan966x_calc_sd6g40_setup_lane(macro, config, &calc_results); + if (ret) + return ret; + + return lan966x_sd6g40_reg_cfg(macro, &calc_results, idx); +} + +static int lan966x_sd6g40_setup(struct serdes_macro *macro, u32 idx, int mode) +{ + struct lan966x_sd6g40_setup_args conf = {}; + + conf.refclk125M = macro->ctrl->ref125; + + if (mode == PHY_INTERFACE_MODE_QSGMII) + conf.mode = LAN966X_SD6G40_MODE_QSGMII; + else + conf.mode = LAN966X_SD6G40_MODE_SGMII; + + return lan966x_sd6g40_setup_lane(macro, conf, idx); +} + +static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode) +{ + struct serdes_macro *macro = phy_get_drvdata(phy); + unsigned int i; + int val; + + /* As of now only PHY_MODE_ETHERNET is supported */ + if (mode != PHY_MODE_ETHERNET) + return -EOPNOTSUPP; + + if (submode == PHY_INTERFACE_MODE_2500BASEX) + macro->speed = SPEED_2500; + else + macro->speed = SPEED_1000; + + if (submode == PHY_INTERFACE_MODE_1000BASEX || + submode == PHY_INTERFACE_MODE_2500BASEX) + submode = PHY_INTERFACE_MODE_SGMII; + + for (i = 0; i < ARRAY_SIZE(lan966x_serdes_muxes); i++) { + if (macro->idx != lan966x_serdes_muxes[i].idx || + mode != lan966x_serdes_muxes[i].mode || + submode != lan966x_serdes_muxes[i].submode || + macro->port != lan966x_serdes_muxes[i].port) + continue; + + val = readl(macro->ctrl->regs + lan_offset(HSIO_HW_CFG)); + val |= lan966x_serdes_muxes[i].mux; + lan_rmw(val, lan966x_serdes_muxes[i].mask, + macro->ctrl->regs, HSIO_HW_CFG); + + macro->mode = lan966x_serdes_muxes[i].submode; + + if (macro->idx < CU_MAX) + return 0; + + if (macro->idx < SERDES6G_MAX) + return lan966x_sd6g40_setup(macro, + macro->idx - (CU_MAX + 1), + macro->mode); + + if (macro->idx < RGMII_MAX) + return 0; + + return -EOPNOTSUPP; + } + + return -EINVAL; +} + +static const struct phy_ops serdes_ops = { + .set_mode = serdes_set_mode, + .owner = THIS_MODULE, +}; + +static struct phy *serdes_simple_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct serdes_ctrl *ctrl = dev_get_drvdata(dev); + unsigned int port, idx, i; + + if (args->args_count != 2) + return ERR_PTR(-EINVAL); + + port = args->args[0]; + idx = args->args[1]; + + for (i = 0; i < SERDES_MAX; i++) { + struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]); + + if (idx != macro->idx) + continue; + + macro->port = port; + return ctrl->phys[i]; + } + + return 
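/* No macro with a matching index was found. The xlate contract assumed here is #phy-cells = <2>: cell 0 is the switch port, cell 1 the serdes macro index, e.g. phys = <&serdes 0 SERDES6G(0)> (illustrative DT snippet, not part of this patch). */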
ERR_PTR(-ENODEV); +} + +static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy) +{ + struct serdes_macro *macro; + + *phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops); + if (IS_ERR(*phy)) + return PTR_ERR(*phy); + + macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL); + if (!macro) + return -ENOMEM; + + macro->idx = idx; + macro->ctrl = ctrl; + macro->port = -1; + + phy_set_drvdata(*phy, macro); + + return 0; +} + +static int serdes_probe(struct platform_device *pdev) +{ + struct phy_provider *provider; + struct serdes_ctrl *ctrl; + void __iomem *hw_stat; + unsigned int i; + u32 val; + int ret; + + ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + ctrl->dev = &pdev->dev; + ctrl->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(ctrl->regs)) + return PTR_ERR(ctrl->regs); + + hw_stat = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); + if (IS_ERR(hw_stat)) + return PTR_ERR(hw_stat); + + for (i = 0; i < SERDES_MAX; i++) { + ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]); + if (ret) + return ret; + } + + val = readl(hw_stat); + val = FIELD_GET(PLL_CONF_MASK, val); + ctrl->ref125 = (val == PLL_CONF_125MHZ || + val == PLL_CONF_SERDES_125MHZ); + + dev_set_drvdata(&pdev->dev, ctrl); + + provider = devm_of_phy_provider_register(ctrl->dev, + serdes_simple_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id serdes_ids[] = { + { .compatible = "microchip,lan966x-serdes", }, + {}, +}; +MODULE_DEVICE_TABLE(of, serdes_ids); + +static struct platform_driver mscc_lan966x_serdes = { + .probe = serdes_probe, + .driver = { + .name = "microchip,lan966x-serdes", + .of_match_table = of_match_ptr(serdes_ids), + }, +}; + +module_platform_driver(mscc_lan966x_serdes); + +MODULE_DESCRIPTION("Microchip lan966x switch serdes driver"); +MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/microchip/lan966x_serdes_regs.h b/drivers/phy/microchip/lan966x_serdes_regs.h new file mode 100644 index 000000000000..ea30f64ffd5c --- /dev/null +++ b/drivers/phy/microchip/lan966x_serdes_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _LAN966X_SERDES_REGS_H_ +#define _LAN966X_SERDES_REGS_H_ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/bug.h> + +enum lan966x_target { + TARGET_HSIO = 32, + NUM_TARGETS = 66 +}; + +#define __REG(...) 
__VA_ARGS__ + +/* HSIO:SD:SD_CFG */ +#define HSIO_SD_CFG(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 0, 0, 1, 4) + +#define HSIO_SD_CFG_PHY_RESET BIT(27) +#define HSIO_SD_CFG_PHY_RESET_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_PHY_RESET, x) +#define HSIO_SD_CFG_PHY_RESET_GET(x)\ + FIELD_GET(HSIO_SD_CFG_PHY_RESET, x) + +#define HSIO_SD_CFG_TX_RESET BIT(18) +#define HSIO_SD_CFG_TX_RESET_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_TX_RESET, x) +#define HSIO_SD_CFG_TX_RESET_GET(x)\ + FIELD_GET(HSIO_SD_CFG_TX_RESET, x) + +#define HSIO_SD_CFG_TX_RATE GENMASK(17, 16) +#define HSIO_SD_CFG_TX_RATE_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_TX_RATE, x) +#define HSIO_SD_CFG_TX_RATE_GET(x)\ + FIELD_GET(HSIO_SD_CFG_TX_RATE, x) + +#define HSIO_SD_CFG_TX_INVERT BIT(15) +#define HSIO_SD_CFG_TX_INVERT_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_TX_INVERT, x) +#define HSIO_SD_CFG_TX_INVERT_GET(x)\ + FIELD_GET(HSIO_SD_CFG_TX_INVERT, x) + +#define HSIO_SD_CFG_TX_EN BIT(14) +#define HSIO_SD_CFG_TX_EN_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_TX_EN, x) +#define HSIO_SD_CFG_TX_EN_GET(x)\ + FIELD_GET(HSIO_SD_CFG_TX_EN, x) + +#define HSIO_SD_CFG_TX_DATA_EN BIT(12) +#define HSIO_SD_CFG_TX_DATA_EN_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_TX_DATA_EN, x) +#define HSIO_SD_CFG_TX_DATA_EN_GET(x)\ + FIELD_GET(HSIO_SD_CFG_TX_DATA_EN, x) + +#define HSIO_SD_CFG_TX_CM_EN BIT(11) +#define HSIO_SD_CFG_TX_CM_EN_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_TX_CM_EN, x) +#define HSIO_SD_CFG_TX_CM_EN_GET(x)\ + FIELD_GET(HSIO_SD_CFG_TX_CM_EN, x) + +#define HSIO_SD_CFG_LANE_10BIT_SEL BIT(10) +#define HSIO_SD_CFG_LANE_10BIT_SEL_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_LANE_10BIT_SEL, x) +#define HSIO_SD_CFG_LANE_10BIT_SEL_GET(x)\ + FIELD_GET(HSIO_SD_CFG_LANE_10BIT_SEL, x) + +#define HSIO_SD_CFG_RX_TERM_EN BIT(9) +#define HSIO_SD_CFG_RX_TERM_EN_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_RX_TERM_EN, x) +#define HSIO_SD_CFG_RX_TERM_EN_GET(x)\ + FIELD_GET(HSIO_SD_CFG_RX_TERM_EN, x) + +#define HSIO_SD_CFG_RX_RESET BIT(8) +#define HSIO_SD_CFG_RX_RESET_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_RX_RESET, x) +#define HSIO_SD_CFG_RX_RESET_GET(x)\ + FIELD_GET(HSIO_SD_CFG_RX_RESET, x) + +#define HSIO_SD_CFG_RX_RATE GENMASK(7, 6) +#define HSIO_SD_CFG_RX_RATE_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_RX_RATE, x) +#define HSIO_SD_CFG_RX_RATE_GET(x)\ + FIELD_GET(HSIO_SD_CFG_RX_RATE, x) + +#define HSIO_SD_CFG_RX_PLL_EN BIT(5) +#define HSIO_SD_CFG_RX_PLL_EN_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_RX_PLL_EN, x) +#define HSIO_SD_CFG_RX_PLL_EN_GET(x)\ + FIELD_GET(HSIO_SD_CFG_RX_PLL_EN, x) + +#define HSIO_SD_CFG_RX_INVERT BIT(3) +#define HSIO_SD_CFG_RX_INVERT_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_RX_INVERT, x) +#define HSIO_SD_CFG_RX_INVERT_GET(x)\ + FIELD_GET(HSIO_SD_CFG_RX_INVERT, x) + +#define HSIO_SD_CFG_RX_DATA_EN BIT(2) +#define HSIO_SD_CFG_RX_DATA_EN_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_RX_DATA_EN, x) +#define HSIO_SD_CFG_RX_DATA_EN_GET(x)\ + FIELD_GET(HSIO_SD_CFG_RX_DATA_EN, x) + +#define HSIO_SD_CFG_LANE_LOOPBK_EN BIT(0) +#define HSIO_SD_CFG_LANE_LOOPBK_EN_SET(x)\ + FIELD_PREP(HSIO_SD_CFG_LANE_LOOPBK_EN, x) +#define HSIO_SD_CFG_LANE_LOOPBK_EN_GET(x)\ + FIELD_GET(HSIO_SD_CFG_LANE_LOOPBK_EN, x) + +/* HSIO:SD:MPLL_CFG */ +#define HSIO_MPLL_CFG(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 8, 0, 1, 4) + +#define HSIO_MPLL_CFG_REF_SSP_EN BIT(18) +#define HSIO_MPLL_CFG_REF_SSP_EN_SET(x)\ + FIELD_PREP(HSIO_MPLL_CFG_REF_SSP_EN, x) +#define HSIO_MPLL_CFG_REF_SSP_EN_GET(x)\ + FIELD_GET(HSIO_MPLL_CFG_REF_SSP_EN, x) + +#define HSIO_MPLL_CFG_REF_CLKDIV2 BIT(17) +#define HSIO_MPLL_CFG_REF_CLKDIV2_SET(x)\ + FIELD_PREP(HSIO_MPLL_CFG_REF_CLKDIV2, x) +#define HSIO_MPLL_CFG_REF_CLKDIV2_GET(x)\ 
+ FIELD_GET(HSIO_MPLL_CFG_REF_CLKDIV2, x) + +#define HSIO_MPLL_CFG_MPLL_EN BIT(16) +#define HSIO_MPLL_CFG_MPLL_EN_SET(x)\ + FIELD_PREP(HSIO_MPLL_CFG_MPLL_EN, x) +#define HSIO_MPLL_CFG_MPLL_EN_GET(x)\ + FIELD_GET(HSIO_MPLL_CFG_MPLL_EN, x) + +#define HSIO_MPLL_CFG_MPLL_MULTIPLIER GENMASK(6, 0) +#define HSIO_MPLL_CFG_MPLL_MULTIPLIER_SET(x)\ + FIELD_PREP(HSIO_MPLL_CFG_MPLL_MULTIPLIER, x) +#define HSIO_MPLL_CFG_MPLL_MULTIPLIER_GET(x)\ + FIELD_GET(HSIO_MPLL_CFG_MPLL_MULTIPLIER, x) + +/* HSIO:SD:SD_STAT */ +#define HSIO_SD_STAT(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 12, 0, 1, 4) + +#define HSIO_SD_STAT_MPLL_STATE BIT(6) +#define HSIO_SD_STAT_MPLL_STATE_SET(x)\ + FIELD_PREP(HSIO_SD_STAT_MPLL_STATE, x) +#define HSIO_SD_STAT_MPLL_STATE_GET(x)\ + FIELD_GET(HSIO_SD_STAT_MPLL_STATE, x) + +#define HSIO_SD_STAT_TX_STATE BIT(5) +#define HSIO_SD_STAT_TX_STATE_SET(x)\ + FIELD_PREP(HSIO_SD_STAT_TX_STATE, x) +#define HSIO_SD_STAT_TX_STATE_GET(x)\ + FIELD_GET(HSIO_SD_STAT_TX_STATE, x) + +#define HSIO_SD_STAT_TX_CM_STATE BIT(2) +#define HSIO_SD_STAT_TX_CM_STATE_SET(x)\ + FIELD_PREP(HSIO_SD_STAT_TX_CM_STATE, x) +#define HSIO_SD_STAT_TX_CM_STATE_GET(x)\ + FIELD_GET(HSIO_SD_STAT_TX_CM_STATE, x) + +#define HSIO_SD_STAT_RX_PLL_STATE BIT(0) +#define HSIO_SD_STAT_RX_PLL_STATE_SET(x)\ + FIELD_PREP(HSIO_SD_STAT_RX_PLL_STATE, x) +#define HSIO_SD_STAT_RX_PLL_STATE_GET(x)\ + FIELD_GET(HSIO_SD_STAT_RX_PLL_STATE, x) + +/* HSIO:HW_CFGSTAT:HW_CFG */ +#define HSIO_HW_CFG __REG(TARGET_HSIO, 0, 1, 104, 0, 1, 52, 0, 0, 1, 4) + +#define HSIO_HW_CFG_RGMII_1_CFG BIT(15) +#define HSIO_HW_CFG_RGMII_1_CFG_SET(x)\ + (((x) << 15) & GENMASK(15, 15)) +#define HSIO_HW_CFG_RGMII_1_CFG_GET(x)\ + FIELD_GET(HSIO_HW_CFG_RGMII_1_CFG, x) + +#define HSIO_HW_CFG_RGMII_0_CFG BIT(14) +#define HSIO_HW_CFG_RGMII_0_CFG_SET(x)\ + (((x) << 14) & GENMASK(14, 14)) +#define HSIO_HW_CFG_RGMII_0_CFG_GET(x)\ + FIELD_GET(HSIO_HW_CFG_RGMII_0_CFG, x) + +#define HSIO_HW_CFG_RGMII_ENA GENMASK(13, 12) +#define HSIO_HW_CFG_RGMII_ENA_SET(x)\ + (((x) << 12) & GENMASK(13, 12)) +#define HSIO_HW_CFG_RGMII_ENA_GET(x)\ + FIELD_GET(HSIO_HW_CFG_RGMII_ENA, x) + +#define HSIO_HW_CFG_SD6G_0_CFG BIT(11) +#define HSIO_HW_CFG_SD6G_0_CFG_SET(x)\ + (((x) << 11) & GENMASK(11, 11)) +#define HSIO_HW_CFG_SD6G_0_CFG_GET(x)\ + FIELD_GET(HSIO_HW_CFG_SD6G_0_CFG, x) + +#define HSIO_HW_CFG_SD6G_1_CFG BIT(10) +#define HSIO_HW_CFG_SD6G_1_CFG_SET(x)\ + (((x) << 10) & GENMASK(10, 10)) +#define HSIO_HW_CFG_SD6G_1_CFG_GET(x)\ + FIELD_GET(HSIO_HW_CFG_SD6G_1_CFG, x) + +#define HSIO_HW_CFG_GMII_ENA GENMASK(9, 2) +#define HSIO_HW_CFG_GMII_ENA_SET(x)\ + (((x) << 2) & GENMASK(9, 2)) +#define HSIO_HW_CFG_GMII_ENA_GET(x)\ + FIELD_GET(HSIO_HW_CFG_GMII_ENA, x) + +#define HSIO_HW_CFG_QSGMII_ENA GENMASK(1, 0) +#define HSIO_HW_CFG_QSGMII_ENA_SET(x)\ + ((x) & GENMASK(1, 0)) +#define HSIO_HW_CFG_QSGMII_ENA_GET(x)\ + FIELD_GET(HSIO_HW_CFG_QSGMII_ENA, x) + +#endif /* _LAN966X_SERDES_REGS_H_ */ diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c index c2cb93b4df71..6f3fe37dee0e 100644 --- a/drivers/phy/phy-can-transceiver.c +++ b/drivers/phy/phy-can-transceiver.c @@ -110,14 +110,14 @@ static int can_transceiver_phy_probe(struct platform_device *pdev) can_transceiver_phy->generic_phy = phy; if (drvdata->flags & CAN_TRANSCEIVER_STB_PRESENT) { - standby_gpio = devm_gpiod_get(dev, "standby", GPIOD_OUT_HIGH); + standby_gpio = devm_gpiod_get_optional(dev, "standby", GPIOD_OUT_HIGH); if (IS_ERR(standby_gpio)) return PTR_ERR(standby_gpio); can_transceiver_phy->standby_gpio = standby_gpio; } if
(drvdata->flags & CAN_TRANSCEIVER_EN_PRESENT) { - enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); + enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(enable_gpio)) return PTR_ERR(enable_gpio); can_transceiver_phy->enable_gpio = enable_gpio; diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig index 7f6fcb8ec5ba..5c98850f5a36 100644 --- a/drivers/phy/qualcomm/Kconfig +++ b/drivers/phy/qualcomm/Kconfig @@ -18,6 +18,16 @@ config PHY_QCOM_APQ8064_SATA depends on OF select GENERIC_PHY +config PHY_QCOM_EDP + tristate "Qualcomm eDP PHY driver" + depends on ARCH_QCOM || COMPILE_TEST + depends on OF + depends on COMMON_CLK + select GENERIC_PHY + help + Enable this driver to support the Qualcomm eDP PHY found in various + Qualcomm chipsets. + config PHY_QCOM_IPQ4019_USB tristate "Qualcomm IPQ4019 USB PHY driver" depends on OF && (ARCH_QCOM || COMPILE_TEST) diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile index 47acbd7daa3a..e9e3b1a4dbb0 100644 --- a/drivers/phy/qualcomm/Makefile +++ b/drivers/phy/qualcomm/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PHY_ATH79_USB) += phy-ath79-usb.o obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o +obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c new file mode 100644 index 000000000000..a8ecd2e8442d --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-edp.c @@ -0,0 +1,674 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Linaro Ltd. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include <dt-bindings/phy/phy.h> + +#include "phy-qcom-qmp.h" + +/* EDP_PHY registers */ +#define DP_PHY_CFG 0x0010 +#define DP_PHY_CFG_1 0x0014 +#define DP_PHY_PD_CTL 0x001c +#define DP_PHY_MODE 0x0020 + +#define DP_PHY_AUX_CFG0 0x0024 +#define DP_PHY_AUX_CFG1 0x0028 +#define DP_PHY_AUX_CFG2 0x002C +#define DP_PHY_AUX_CFG3 0x0030 +#define DP_PHY_AUX_CFG4 0x0034 +#define DP_PHY_AUX_CFG5 0x0038 +#define DP_PHY_AUX_CFG6 0x003C +#define DP_PHY_AUX_CFG7 0x0040 +#define DP_PHY_AUX_CFG8 0x0044 +#define DP_PHY_AUX_CFG9 0x0048 + +#define DP_PHY_AUX_INTERRUPT_MASK 0x0058 + +#define DP_PHY_VCO_DIV 0x0074 +#define DP_PHY_TX0_TX1_LANE_CTL 0x007c +#define DP_PHY_TX2_TX3_LANE_CTL 0x00a0 + +#define DP_PHY_STATUS 0x00e0 + +/* LANE_TXn registers */ +#define TXn_CLKBUF_ENABLE 0x0000 +#define TXn_TX_EMP_POST1_LVL 0x0004 + +#define TXn_TX_DRV_LVL 0x0014 +#define TXn_TX_DRV_LVL_OFFSET 0x0018 +#define TXn_RESET_TSYNC_EN 0x001c +#define TXn_LDO_CONFIG 0x0084 +#define TXn_TX_BAND 0x0028 + +#define TXn_RES_CODE_LANE_OFFSET_TX0 0x0044 +#define TXn_RES_CODE_LANE_OFFSET_TX1 0x0048 + +#define TXn_TRANSCEIVER_BIAS_EN 0x0054 +#define TXn_HIGHZ_DRVR_EN 0x0058 +#define TXn_TX_POL_INV 0x005c +#define TXn_LANE_MODE_1 0x0064 + +#define TXn_TRAN_DRVR_EMP_EN 0x0078 + +struct qcom_edp { + struct device *dev; + + struct phy *phy; + + void __iomem *edp; + void __iomem *tx0; + void __iomem *tx1; + void __iomem *pll; + + struct clk_hw dp_link_hw; + struct clk_hw dp_pixel_hw; + + struct phy_configure_opts_dp dp_opts; + + struct clk_bulk_data clks[2]; + struct regulator_bulk_data supplies[2]; +}; + +static int qcom_edp_phy_init(struct phy *phy) +{ + struct qcom_edp *edp = phy_get_drvdata(phy); + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(edp->supplies), edp->supplies); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(edp->clks), edp->clks); + if (ret) + goto out_disable_supplies; + + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, + edp->edp + DP_PHY_PD_CTL); + + /* Turn on BIAS current for PHY/PLL */ + writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN); + + writel(DP_PHY_PD_CTL_PSR_PWRDN, edp->edp + DP_PHY_PD_CTL); + msleep(20); + + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, + edp->edp + DP_PHY_PD_CTL); + + writel(0x00, edp->edp + DP_PHY_AUX_CFG0); + writel(0x13, edp->edp + DP_PHY_AUX_CFG1); + writel(0x24, edp->edp + DP_PHY_AUX_CFG2); + writel(0x00, edp->edp + DP_PHY_AUX_CFG3); + writel(0x0a, edp->edp + DP_PHY_AUX_CFG4); + writel(0x26, edp->edp + DP_PHY_AUX_CFG5); + writel(0x0a, edp->edp + DP_PHY_AUX_CFG6); + writel(0x03, edp->edp + DP_PHY_AUX_CFG7); + writel(0x37, edp->edp + DP_PHY_AUX_CFG8); + writel(0x03, edp->edp + DP_PHY_AUX_CFG9); + + writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | + PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | + PHY_AUX_REQ_ERR_MASK, edp->edp + DP_PHY_AUX_INTERRUPT_MASK); + + msleep(20); + + return 0; + 
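/* Error path: release in reverse order of acquisition. Only the regulator enable precedes the clock enable above, so a clock failure has just the supplies to back out. */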
+out_disable_supplies: + regulator_bulk_disable(ARRAY_SIZE(edp->supplies), edp->supplies); + + return ret; +} + +static int qcom_edp_phy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + const struct phy_configure_opts_dp *dp_opts = &opts->dp; + struct qcom_edp *edp = phy_get_drvdata(phy); + + memcpy(&edp->dp_opts, dp_opts, sizeof(*dp_opts)); + + return 0; +} + +static int qcom_edp_configure_ssc(const struct qcom_edp *edp) +{ + const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts; + u32 step1; + u32 step2; + + switch (dp_opts->link_rate) { + case 1620: + case 2700: + case 8100: + step1 = 0x45; + step2 = 0x06; + break; + + case 5400: + step1 = 0x5c; + step2 = 0x08; + break; + + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + + writel(0x01, edp->pll + QSERDES_V4_COM_SSC_EN_CENTER); + writel(0x00, edp->pll + QSERDES_V4_COM_SSC_ADJ_PER1); + writel(0x36, edp->pll + QSERDES_V4_COM_SSC_PER1); + writel(0x01, edp->pll + QSERDES_V4_COM_SSC_PER2); + writel(step1, edp->pll + QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0); + writel(step2, edp->pll + QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0); + + return 0; +} + +static int qcom_edp_configure_pll(const struct qcom_edp *edp) +{ + const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts; + u32 div_frac_start2_mode0; + u32 div_frac_start3_mode0; + u32 dec_start_mode0; + u32 lock_cmp1_mode0; + u32 lock_cmp2_mode0; + u32 hsclk_sel; + + switch (dp_opts->link_rate) { + case 1620: + hsclk_sel = 0x5; + dec_start_mode0 = 0x69; + div_frac_start2_mode0 = 0x80; + div_frac_start3_mode0 = 0x07; + lock_cmp1_mode0 = 0x6f; + lock_cmp2_mode0 = 0x08; + break; + + case 2700: + hsclk_sel = 0x3; + dec_start_mode0 = 0x69; + div_frac_start2_mode0 = 0x80; + div_frac_start3_mode0 = 0x07; + lock_cmp1_mode0 = 0x0f; + lock_cmp2_mode0 = 0x0e; + break; + + case 5400: + hsclk_sel = 0x1; + dec_start_mode0 = 0x8c; + div_frac_start2_mode0 = 0x00; + div_frac_start3_mode0 = 0x0a; + lock_cmp1_mode0 = 0x1f; + lock_cmp2_mode0 = 0x1c; + break; + + case 8100: + hsclk_sel = 0x0; + dec_start_mode0 = 0x69; + div_frac_start2_mode0 = 0x80; + div_frac_start3_mode0 = 0x07; + lock_cmp1_mode0 = 0x2f; + lock_cmp2_mode0 = 0x2a; + break; + + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + + writel(0x01, edp->pll + QSERDES_V4_COM_SVS_MODE_CLK_SEL); + writel(0x0b, edp->pll + QSERDES_V4_COM_SYSCLK_EN_SEL); + writel(0x02, edp->pll + QSERDES_V4_COM_SYS_CLK_CTRL); + writel(0x0c, edp->pll + QSERDES_V4_COM_CLK_ENABLE1); + writel(0x06, edp->pll + QSERDES_V4_COM_SYSCLK_BUF_ENABLE); + writel(0x30, edp->pll + QSERDES_V4_COM_CLK_SELECT); + writel(hsclk_sel, edp->pll + QSERDES_V4_COM_HSCLK_SEL); + writel(0x0f, edp->pll + QSERDES_V4_COM_PLL_IVCO); + writel(0x08, edp->pll + QSERDES_V4_COM_LOCK_CMP_EN); + writel(0x36, edp->pll + QSERDES_V4_COM_PLL_CCTRL_MODE0); + writel(0x16, edp->pll + QSERDES_V4_COM_PLL_RCTRL_MODE0); + writel(0x06, edp->pll + QSERDES_V4_COM_CP_CTRL_MODE0); + writel(dec_start_mode0, edp->pll + QSERDES_V4_COM_DEC_START_MODE0); + writel(0x00, edp->pll + QSERDES_V4_COM_DIV_FRAC_START1_MODE0); + writel(div_frac_start2_mode0, edp->pll + QSERDES_V4_COM_DIV_FRAC_START2_MODE0); + writel(div_frac_start3_mode0, edp->pll + QSERDES_V4_COM_DIV_FRAC_START3_MODE0); + writel(0x02, edp->pll + QSERDES_V4_COM_CMN_CONFIG); + writel(0x3f, edp->pll + QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0); + writel(0x00, edp->pll + QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0); + writel(0x00, edp->pll + QSERDES_V4_COM_VCO_TUNE_MAP); + writel(lock_cmp1_mode0, edp->pll + 
QSERDES_V4_COM_LOCK_CMP1_MODE0); + writel(lock_cmp2_mode0, edp->pll + QSERDES_V4_COM_LOCK_CMP2_MODE0); + + writel(0x0a, edp->pll + QSERDES_V4_COM_BG_TIMER); + writel(0x14, edp->pll + QSERDES_V4_COM_CORECLK_DIV_MODE0); + writel(0x00, edp->pll + QSERDES_V4_COM_VCO_TUNE_CTRL); + writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN); + writel(0x0f, edp->pll + QSERDES_V4_COM_CORE_CLK_EN); + writel(0xa0, edp->pll + QSERDES_V4_COM_VCO_TUNE1_MODE0); + writel(0x03, edp->pll + QSERDES_V4_COM_VCO_TUNE2_MODE0); + + return 0; +} + +static int qcom_edp_set_vco_div(const struct qcom_edp *edp) +{ + const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts; + unsigned long pixel_freq; + u32 vco_div; + + switch (dp_opts->link_rate) { + case 1620: + vco_div = 0x1; + pixel_freq = 1620000000UL / 2; + break; + + case 2700: + vco_div = 0x1; + pixel_freq = 2700000000UL / 2; + break; + + case 5400: + vco_div = 0x2; + pixel_freq = 5400000000UL / 4; + break; + + case 8100: + vco_div = 0x0; + pixel_freq = 8100000000UL / 6; + break; + + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + + writel(vco_div, edp->edp + DP_PHY_VCO_DIV); + + clk_set_rate(edp->dp_link_hw.clk, dp_opts->link_rate * 100000); + clk_set_rate(edp->dp_pixel_hw.clk, pixel_freq); + + return 0; +} + +static int qcom_edp_phy_power_on(struct phy *phy) +{ + const struct qcom_edp *edp = phy_get_drvdata(phy); + int timeout; + int ret; + u32 val; + + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, + edp->edp + DP_PHY_PD_CTL); + writel(0xfc, edp->edp + DP_PHY_MODE); + + timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_CMN_STATUS, + val, val & BIT(7), 5, 200); + if (timeout) + return timeout; + + writel(0x01, edp->tx0 + TXn_LDO_CONFIG); + writel(0x01, edp->tx1 + TXn_LDO_CONFIG); + writel(0x00, edp->tx0 + TXn_LANE_MODE_1); + writel(0x00, edp->tx1 + TXn_LANE_MODE_1); + + ret = qcom_edp_configure_ssc(edp); + if (ret) + return ret; + + ret = qcom_edp_configure_pll(edp); + if (ret) + return ret; + + /* TX Lane configuration */ + writel(0x05, edp->edp + DP_PHY_TX0_TX1_LANE_CTL); + writel(0x05, edp->edp + DP_PHY_TX2_TX3_LANE_CTL); + + /* TX-0 register configuration */ + writel(0x03, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN); + writel(0x0f, edp->tx0 + TXn_CLKBUF_ENABLE); + writel(0x03, edp->tx0 + TXn_RESET_TSYNC_EN); + writel(0x01, edp->tx0 + TXn_TRAN_DRVR_EMP_EN); + writel(0x04, edp->tx0 + TXn_TX_BAND); + + /* TX-1 register configuration */ + writel(0x03, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN); + writel(0x0f, edp->tx1 + TXn_CLKBUF_ENABLE); + writel(0x03, edp->tx1 + TXn_RESET_TSYNC_EN); + writel(0x01, edp->tx1 + TXn_TRAN_DRVR_EMP_EN); + writel(0x04, edp->tx1 + TXn_TX_BAND); + + ret = qcom_edp_set_vco_div(edp); + if (ret) + return ret; + + writel(0x01, edp->edp + DP_PHY_CFG); + writel(0x05, edp->edp + DP_PHY_CFG); + writel(0x01, edp->edp + DP_PHY_CFG); + writel(0x09, edp->edp + DP_PHY_CFG); + + writel(0x20, edp->pll + QSERDES_V4_COM_RESETSM_CNTRL); + + timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_C_READY_STATUS, + val, val & BIT(0), 500, 10000); + if (timeout) + return timeout; + + writel(0x19, edp->edp + DP_PHY_CFG); + writel(0x1f, edp->tx0 + TXn_HIGHZ_DRVR_EN); + writel(0x04, edp->tx0 + TXn_HIGHZ_DRVR_EN); + writel(0x00, edp->tx0 + TXn_TX_POL_INV); + writel(0x1f, edp->tx1 + TXn_HIGHZ_DRVR_EN); + writel(0x04, edp->tx1 + TXn_HIGHZ_DRVR_EN); + writel(0x00, edp->tx1 + TXn_TX_POL_INV); + writel(0x10, edp->tx0 
+ TXn_TX_DRV_LVL_OFFSET); + writel(0x10, edp->tx1 + TXn_TX_DRV_LVL_OFFSET); + writel(0x11, edp->tx0 + TXn_RES_CODE_LANE_OFFSET_TX0); + writel(0x11, edp->tx0 + TXn_RES_CODE_LANE_OFFSET_TX1); + writel(0x11, edp->tx1 + TXn_RES_CODE_LANE_OFFSET_TX0); + writel(0x11, edp->tx1 + TXn_RES_CODE_LANE_OFFSET_TX1); + + writel(0x10, edp->tx0 + TXn_TX_EMP_POST1_LVL); + writel(0x10, edp->tx1 + TXn_TX_EMP_POST1_LVL); + writel(0x1f, edp->tx0 + TXn_TX_DRV_LVL); + writel(0x1f, edp->tx1 + TXn_TX_DRV_LVL); + + writel(0x4, edp->tx0 + TXn_HIGHZ_DRVR_EN); + writel(0x3, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN); + writel(0x4, edp->tx1 + TXn_HIGHZ_DRVR_EN); + writel(0x0, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN); + writel(0x3, edp->edp + DP_PHY_CFG_1); + + writel(0x18, edp->edp + DP_PHY_CFG); + usleep_range(100, 1000); + + writel(0x19, edp->edp + DP_PHY_CFG); + + return readl_poll_timeout(edp->edp + DP_PHY_STATUS, + val, val & BIT(1), 500, 10000); +} + +static int qcom_edp_phy_power_off(struct phy *phy) +{ + const struct qcom_edp *edp = phy_get_drvdata(phy); + + writel(DP_PHY_PD_CTL_PSR_PWRDN, edp->edp + DP_PHY_PD_CTL); + + return 0; +} + +static int qcom_edp_phy_exit(struct phy *phy) +{ + struct qcom_edp *edp = phy_get_drvdata(phy); + + clk_bulk_disable_unprepare(ARRAY_SIZE(edp->clks), edp->clks); + regulator_bulk_disable(ARRAY_SIZE(edp->supplies), edp->supplies); + + return 0; +} + +static const struct phy_ops qcom_edp_ops = { + .init = qcom_edp_phy_init, + .configure = qcom_edp_phy_configure, + .power_on = qcom_edp_phy_power_on, + .power_off = qcom_edp_phy_power_off, + .exit = qcom_edp_phy_exit, + .owner = THIS_MODULE, +}; + +/* + * Embedded Display Port PLL driver block diagram for branch clocks + * + * +------------------------------+ + * | EDP_VCO_CLK | + * | | + * | +-------------------+ | + * | | (EDP PLL/VCO) | | + * | +---------+---------+ | + * | v | + * | +----------+-----------+ | + * | | hsclk_divsel_clk_src | | + * | +----------+-----------+ | + * +------------------------------+ + * | + * +---------<---------v------------>----------+ + * | | + * +--------v----------------+ | + * | edp_phy_pll_link_clk | | + * | link_clk | | + * +--------+----------------+ | + * | | + * | | + * v v + * Input to DISPCC block | + * for link clk, crypto clk | + * and interface clock | + * | + * | + * +--------<------------+-----------------+---<---+ + * | | | + * +----v---------+ +--------v-----+ +--------v------+ + * | vco_divided | | vco_divided | | vco_divided | + * | _clk_src | | _clk_src | | _clk_src | + * | | | | | | + * |divsel_six | | divsel_two | | divsel_four | + * +-------+------+ +-----+--------+ +--------+------+ + * | | | + * v---->----------v-------------<------v + * | + * +----------+-----------------+ + * | edp_phy_pll_vco_div_clk | + * +---------+------------------+ + * | + * v + * Input to DISPCC block + * for EDP pixel clock + * + */ +static int qcom_edp_dp_pixel_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + switch (req->rate) { + case 1620000000UL / 2: + case 2700000000UL / 2: + /* 5.4 and 8.1 GHz are same link rate as 2.7GHz, i.e. 
div 4 and div 6 */ + return 0; + + default: + return -EINVAL; + } +} + +static unsigned long +qcom_edp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + const struct qcom_edp *edp = container_of(hw, struct qcom_edp, dp_pixel_hw); + const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts; + + switch (dp_opts->link_rate) { + case 1620: + return 1620000000UL / 2; + case 2700: + return 2700000000UL / 2; + case 5400: + return 5400000000UL / 4; + case 8100: + return 8100000000UL / 6; + default: + return 0; + } +} + +static const struct clk_ops qcom_edp_dp_pixel_clk_ops = { + .determine_rate = qcom_edp_dp_pixel_clk_determine_rate, + .recalc_rate = qcom_edp_dp_pixel_clk_recalc_rate, +}; + +static int qcom_edp_dp_link_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + switch (req->rate) { + case 162000000: + case 270000000: + case 540000000: + case 810000000: + return 0; + + default: + return -EINVAL; + } +} + +static unsigned long +qcom_edp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + const struct qcom_edp *edp = container_of(hw, struct qcom_edp, dp_link_hw); + const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts; + + switch (dp_opts->link_rate) { + case 1620: + case 2700: + case 5400: + case 8100: + return dp_opts->link_rate * 100000; + + default: + return 0; + } +} + +static const struct clk_ops qcom_edp_dp_link_clk_ops = { + .determine_rate = qcom_edp_dp_link_clk_determine_rate, + .recalc_rate = qcom_edp_dp_link_clk_recalc_rate, +}; + +static int qcom_edp_clks_register(struct qcom_edp *edp, struct device_node *np) +{ + struct clk_hw_onecell_data *data; + struct clk_init_data init = { }; + int ret; + + data = devm_kzalloc(edp->dev, struct_size(data, hws, 2), GFP_KERNEL); + if (!data) + return -ENOMEM; + + init.ops = &qcom_edp_dp_link_clk_ops; + init.name = "edp_phy_pll_link_clk"; + edp->dp_link_hw.init = &init; + ret = devm_clk_hw_register(edp->dev, &edp->dp_link_hw); + if (ret) + return ret; + + init.ops = &qcom_edp_dp_pixel_clk_ops; + init.name = "edp_phy_pll_vco_div_clk"; + edp->dp_pixel_hw.init = &init; + ret = devm_clk_hw_register(edp->dev, &edp->dp_pixel_hw); + if (ret) + return ret; + + data->hws[0] = &edp->dp_link_hw; + data->hws[1] = &edp->dp_pixel_hw; + data->num = 2; + + return devm_of_clk_add_hw_provider(edp->dev, of_clk_hw_onecell_get, data); +} + +static int qcom_edp_phy_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct device *dev = &pdev->dev; + struct qcom_edp *edp; + int ret; + + edp = devm_kzalloc(dev, sizeof(*edp), GFP_KERNEL); + if (!edp) + return -ENOMEM; + + edp->dev = dev; + + edp->edp = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(edp->edp)) + return PTR_ERR(edp->edp); + + edp->tx0 = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(edp->tx0)) + return PTR_ERR(edp->tx0); + + edp->tx1 = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(edp->tx1)) + return PTR_ERR(edp->tx1); + + edp->pll = devm_platform_ioremap_resource(pdev, 3); + if (IS_ERR(edp->pll)) + return PTR_ERR(edp->pll); + + edp->clks[0].id = "aux"; + edp->clks[1].id = "cfg_ahb"; + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(edp->clks), edp->clks); + if (ret) + return ret; + + edp->supplies[0].supply = "vdda-phy"; + edp->supplies[1].supply = "vdda-pll"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(edp->supplies), edp->supplies); + if (ret) + return ret; + + ret = qcom_edp_clks_register(edp, pdev->dev.of_node); + if (ret) + return ret; + + edp->phy = devm_phy_create(dev, 
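/* The two clk_hws registered above are published through of_clk_hw_onecell_get, so the display clock controller can consume them by index from DT, e.g. clocks = <&edp_phy 0> for edp_phy_pll_link_clk and <&edp_phy 1> for edp_phy_pll_vco_div_clk (illustrative consumer snippet, the phandle name is assumed). */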
pdev->dev.of_node, &qcom_edp_ops); + if (IS_ERR(edp->phy)) { + dev_err(dev, "failed to register phy\n"); + return PTR_ERR(edp->phy); + } + + phy_set_drvdata(edp->phy, edp); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id qcom_edp_phy_match_table[] = { + { .compatible = "qcom,sc8180x-edp-phy" }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_edp_phy_match_table); + +static struct platform_driver qcom_edp_phy_driver = { + .probe = qcom_edp_phy_probe, + .driver = { + .name = "qcom-edp-phy", + .of_match_table = qcom_edp_phy_match_table, + }, +}; + +module_platform_driver(qcom_edp_phy_driver); + +MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm eDP QMP PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index c96639d5f581..8ea87c69f463 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -2866,6 +2866,215 @@ static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88), }; +static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde), + 
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08), + 
QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6), + QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x0c), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xcc), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xcc), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0xc5), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xad), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xb6), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xfb), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xc7), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xef), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x81), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37), + + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x05), + + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f), + + 
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f), + + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f), +}; + +/* Register names should be validated, they might be different for this PHY */ +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e), +}; + struct qmp_phy; /* struct qmp_phy_cfg - per-PHY initialization config */ @@ -3094,6 +3303,10 @@ static const char * const qmp_v4_sm8250_usbphy_clk_l[] = { "aux", "ref_clk_src", "com_aux" }; +static const char * const sm8450_ufs_phy_clk_l[] = { + "qref", "ref", "ref_aux", +}; + static const char * const sdm845_ufs_phy_clk_l[] = { "ref", "ref_aux", }; @@ -4090,6 +4303,94 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = { .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; +static const struct qmp_phy_cfg sm8450_ufsphy_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 2, + + .serdes_tbl = sm8350_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl), + .tx_tbl = sm8350_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl), + .rx_tbl = sm8350_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl), + .pcs_tbl = sm8350_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl), + .clk_list = sm8450_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8150_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 1, + + .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl), + .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl, + .tx_tbl_num = 
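/* These per-block tables are data rather than code: the common QMP init path is assumed to walk each table and program one register per entry, roughly for (i = 0; i < tbl_num; i++) writel(tbl[i].val, base + tbl[i].offset); so new SoC support largely reduces to supplying tables plus a qmp_phy_cfg like this one. */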
ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl), + .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl), + .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl), + .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + +static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 2, + + .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl), + .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl), + .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl), + .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl), + .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS_4_20, + + .is_dual_lane_phy = true, + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -5749,6 +6050,18 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,sm8350-qmp-usb3-uni-phy", .data = &sm8350_usb3_uniphy_cfg, }, { + .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy", + .data = &sm8450_qmp_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy", + .data = &sm8450_qmp_gen4x2_pciephy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-ufs-phy", + .data = &sm8450_ufsphy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-usb3-phy", + .data = &sm8350_usb3phy_cfg, + }, { .compatible = "qcom,qcm2290-qmp-usb3-phy", .data = &qcm2290_usb3phy_cfg, }, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index e15f461065bb..06b2556ed93a 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -551,6 +551,7 @@ /* Only for QMP V4 PHY - QSERDES COM registers */ #define QSERDES_V4_COM_BG_TIMER 0x00c #define QSERDES_V4_COM_SSC_EN_CENTER 0x010 +#define QSERDES_V4_COM_SSC_ADJ_PER1 0x014 #define QSERDES_V4_COM_SSC_PER1 0x01c #define QSERDES_V4_COM_SSC_PER2 0x020 #define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024 @@ -1069,6 +1070,16 @@ #define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828 /* Only for QMP V5 PHY - QSERDES COM registers */ +#define QSERDES_V5_COM_SSC_EN_CENTER 0x010 +#define QSERDES_V5_COM_SSC_PER1 0x01c +#define 
QSERDES_V5_COM_SSC_PER2 0x020 +#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0 0x024 +#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0 0x028 +#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1 0x030 +#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1 0x034 +#define QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN 0x044 +#define QSERDES_V5_COM_CLK_ENABLE1 0x048 +#define QSERDES_V5_COM_SYSCLK_BUF_ENABLE 0x050 #define QSERDES_V5_COM_PLL_IVCO 0x058 #define QSERDES_V5_COM_CP_CTRL_MODE0 0x074 #define QSERDES_V5_COM_CP_CTRL_MODE1 0x078 @@ -1078,16 +1089,35 @@ #define QSERDES_V5_COM_PLL_CCTRL_MODE1 0x088 #define QSERDES_V5_COM_SYSCLK_EN_SEL 0x094 #define QSERDES_V5_COM_LOCK_CMP_EN 0x0a4 +#define QSERDES_V5_COM_LOCK_CMP_CFG 0x0a8 #define QSERDES_V5_COM_LOCK_CMP1_MODE0 0x0ac #define QSERDES_V5_COM_LOCK_CMP2_MODE0 0x0b0 #define QSERDES_V5_COM_LOCK_CMP1_MODE1 0x0b4 #define QSERDES_V5_COM_DEC_START_MODE0 0x0bc #define QSERDES_V5_COM_LOCK_CMP2_MODE1 0x0b8 #define QSERDES_V5_COM_DEC_START_MODE1 0x0c4 +#define QSERDES_V5_COM_DIV_FRAC_START1_MODE0 0x0cc +#define QSERDES_V5_COM_DIV_FRAC_START2_MODE0 0x0d0 +#define QSERDES_V5_COM_DIV_FRAC_START3_MODE0 0x0d4 +#define QSERDES_V5_COM_DIV_FRAC_START1_MODE1 0x0d8 +#define QSERDES_V5_COM_DIV_FRAC_START2_MODE1 0x0dc +#define QSERDES_V5_COM_DIV_FRAC_START3_MODE1 0x0e0 #define QSERDES_V5_COM_VCO_TUNE_MAP 0x10c +#define QSERDES_V5_COM_VCO_TUNE1_MODE0 0x110 +#define QSERDES_V5_COM_VCO_TUNE2_MODE0 0x114 +#define QSERDES_V5_COM_VCO_TUNE1_MODE1 0x118 +#define QSERDES_V5_COM_VCO_TUNE2_MODE1 0x11c #define QSERDES_V5_COM_VCO_TUNE_INITVAL2 0x124 +#define QSERDES_V5_COM_CLK_SELECT 0x154 #define QSERDES_V5_COM_HSCLK_SEL 0x158 #define QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL 0x15c +#define QSERDES_V5_COM_CORECLK_DIV_MODE0 0x168 +#define QSERDES_V5_COM_CORECLK_DIV_MODE1 0x16c +#define QSERDES_V5_COM_CORE_CLK_EN 0x174 +#define QSERDES_V5_COM_CMN_CONFIG 0x17c +#define QSERDES_V5_COM_CMN_MISC1 0x19c +#define QSERDES_V5_COM_CMN_MODE 0x1a4 +#define QSERDES_V5_COM_VCO_DC_LEVEL_CTRL 0x1a8 #define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac #define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0 #define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4 @@ -1112,6 +1142,12 @@ #define QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x180 #define QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x184 +/* Only for QMP V5_20 PHY - TX registers */ +#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX 0x30 +#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX 0x34 +#define QSERDES_V5_20_TX_LANE_MODE_1 0x78 +#define QSERDES_V5_20_TX_LANE_MODE_2 0x7c + /* Only for QMP V5 PHY - RX registers */ #define QSERDES_V5_RX_UCDR_FO_GAIN 0x008 #define QSERDES_V5_RX_UCDR_SO_GAIN 0x014 @@ -1130,6 +1166,7 @@ #define QSERDES_V5_RX_AC_JTAG_ENABLE 0x068 #define QSERDES_V5_RX_AC_JTAG_MODE 0x078 #define QSERDES_V5_RX_RX_TERM_BW 0x080 +#define QSERDES_V5_RX_TX_ADAPT_POST_THRESH 0x0cc #define QSERDES_V5_RX_VGA_CAL_CNTRL1 0x0d4 #define QSERDES_V5_RX_VGA_CAL_CNTRL2 0x0d8 #define QSERDES_V5_RX_GM_CAL 0x0dc @@ -1167,6 +1204,73 @@ #define QSERDES_V5_RX_DCC_CTRL1 0x1a8 #define QSERDES_V5_RX_VTH_CODE 0x1b0 +/* Only for QMP V5_20 PHY - RX registers */ +#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008 +#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c +#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020 +#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c +#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030 +#define QSERDES_V5_20_RX_RX_IDAC_SAOFFSET 0x07c +#define QSERDES_V5_20_RX_DFE_3 0x090 +#define QSERDES_V5_20_RX_DFE_DAC_ENABLE1 0x0b4 +#define 
QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1 0x0c4 +#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2 0x0c8 +#define QSERDES_V5_20_RX_VGA_CAL_MAN_VAL 0x0dc +#define QSERDES_V5_20_RX_GM_CAL 0x0ec +#define QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4 0x108 +#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1 0x164 +#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2 0x168 +#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3 0x16c +#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5 0x174 +#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6 0x178 +#define QSERDES_V5_20_RX_RX_MODE_RATE2_B0 0x17c +#define QSERDES_V5_20_RX_RX_MODE_RATE2_B1 0x180 +#define QSERDES_V5_20_RX_RX_MODE_RATE2_B2 0x184 +#define QSERDES_V5_20_RX_RX_MODE_RATE2_B3 0x188 +#define QSERDES_V5_20_RX_RX_MODE_RATE2_B4 0x18c +#define QSERDES_V5_20_RX_RX_MODE_RATE2_B5 0x190 +#define QSERDES_V5_20_RX_RX_MODE_RATE2_B6 0x194 +#define QSERDES_V5_20_RX_RX_MODE_RATE3_B0 0x198 +#define QSERDES_V5_20_RX_RX_MODE_RATE3_B1 0x19c +#define QSERDES_V5_20_RX_RX_MODE_RATE3_B2 0x1a0 +#define QSERDES_V5_20_RX_RX_MODE_RATE3_B3 0x1a4 +#define QSERDES_V5_20_RX_RX_MODE_RATE3_B4 0x1a8 +#define QSERDES_V5_20_RX_RX_MODE_RATE3_B5 0x1ac +#define QSERDES_V5_20_RX_RX_MODE_RATE3_B6 0x1b0 +#define QSERDES_V5_20_RX_PHPRE_CTRL 0x1b4 +#define QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x1c0 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210 0x1f4 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3 0x1f8 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210 0x1fc +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3 0x200 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210 0x204 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3 0x208 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218 +#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220 + +/* Only for QMP V5 PHY - USB/PCIe PCS registers */ +#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc +#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170 +#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198 +#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0 +#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4 + +/* Only for QMP V5 PHY - PCS_PCIE registers */ +#define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20 +#define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54 +#define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94 +#define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8 + +/* Only for QMP V5_20 PHY - PCIe PCS registers */ +#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c +#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090 +#define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0 +#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108 +#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c +#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184 + /* Only for QMP V5 PHY - UFS PCS registers */ #define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c #define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010 diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 1938365abbb3..eca77e44a4c1 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -204,6 +204,7 @@ struct rockchip_usb2phy_port { * @dcd_retries: The retry count used to track Data contact * detection process. * @edev: extcon device for notification registration + * @irq: muxed interrupt for single irq configuration * @phy_cfg: phy register configuration, assigned by driver data. * @ports: phy port instance. 
*/ @@ -218,6 +219,7 @@ struct rockchip_usb2phy { enum power_supply_type chg_type; u8 dcd_retries; struct extcon_dev *edev; + int irq; const struct rockchip_usb2phy_cfg *phy_cfg; struct rockchip_usb2phy_port ports[USB2PHY_NUM_PORTS]; }; @@ -750,7 +752,6 @@ static void rockchip_chg_detect_work(struct work_struct *work) fallthrough; case USB_CHG_STATE_SECONDARY_DONE: rphy->chg_state = USB_CHG_STATE_DETECTED; - delay = 0; fallthrough; case USB_CHG_STATE_DETECTED: /* put the controller in normal mode */ @@ -927,6 +928,102 @@ static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data) return IRQ_NONE; } +static irqreturn_t rockchip_usb2phy_irq(int irq, void *data) +{ + struct rockchip_usb2phy *rphy = data; + struct rockchip_usb2phy_port *rport; + irqreturn_t ret = IRQ_NONE; + unsigned int index; + + for (index = 0; index < rphy->phy_cfg->num_ports; index++) { + rport = &rphy->ports[index]; + if (!rport->phy) + continue; + + /* Handle linestate irq for both otg port and host port */ + ret = rockchip_usb2phy_linestate_irq(irq, rport); + } + + return ret; +} + +static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy, + struct rockchip_usb2phy_port *rport, + struct device_node *child_np) +{ + int ret; + + /* + * If the usb2 phy uses a combined irq for the otg and host ports, + * there is no need to init the otg and host port irqs separately. + */ + if (rphy->irq > 0) + return 0; + + switch (rport->port_id) { + case USB2PHY_PORT_HOST: + rport->ls_irq = of_irq_get_byname(child_np, "linestate"); + if (rport->ls_irq < 0) { + dev_err(rphy->dev, "no linestate irq provided\n"); + return rport->ls_irq; + } + + ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL, + rockchip_usb2phy_linestate_irq, + IRQF_ONESHOT, + "rockchip_usb2phy", rport); + if (ret) { + dev_err(rphy->dev, "failed to request linestate irq handle\n"); + return ret; + } + break; + case USB2PHY_PORT_OTG: + /* + * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate + * interrupts muxed together, so probe the otg-mux interrupt first, + * if not found, then look for the regular interrupts one by one.
+ */ + rport->otg_mux_irq = of_irq_get_byname(child_np, "otg-mux"); + if (rport->otg_mux_irq > 0) { + ret = devm_request_threaded_irq(rphy->dev, rport->otg_mux_irq, + NULL, + rockchip_usb2phy_otg_mux_irq, + IRQF_ONESHOT, + "rockchip_usb2phy_otg", + rport); + if (ret) { + dev_err(rphy->dev, + "failed to request otg-mux irq handle\n"); + return ret; + } + } else { + rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid"); + if (rport->bvalid_irq < 0) { + dev_err(rphy->dev, "no vbus valid irq provided\n"); + ret = rport->bvalid_irq; + return ret; + } + + ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq, + NULL, + rockchip_usb2phy_bvalid_irq, + IRQF_ONESHOT, + "rockchip_usb2phy_bvalid", + rport); + if (ret) { + dev_err(rphy->dev, + "failed to request otg-bvalid irq handle\n"); + return ret; + } + } + break; + default: + return -EINVAL; + } + + return 0; +} + static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy, struct rockchip_usb2phy_port *rport, struct device_node *child_np) @@ -940,18 +1037,9 @@ static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy, mutex_init(&rport->mutex); INIT_DELAYED_WORK(&rport->sm_work, rockchip_usb2phy_sm_work); - rport->ls_irq = of_irq_get_byname(child_np, "linestate"); - if (rport->ls_irq < 0) { - dev_err(rphy->dev, "no linestate irq provided\n"); - return rport->ls_irq; - } - - ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL, - rockchip_usb2phy_linestate_irq, - IRQF_ONESHOT, - "rockchip_usb2phy", rport); + ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np); if (ret) { - dev_err(rphy->dev, "failed to request linestate irq handle\n"); + dev_err(rphy->dev, "failed to setup host irq\n"); return ret; } @@ -1000,43 +1088,10 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy, INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work); INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work); - /* - * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate - * interrupts muxed together, so probe the otg-mux interrupt first, - * if not found, then look for the regular interrupts one by one. 
- */ - rport->otg_mux_irq = of_irq_get_byname(child_np, "otg-mux"); - if (rport->otg_mux_irq > 0) { - ret = devm_request_threaded_irq(rphy->dev, rport->otg_mux_irq, - NULL, - rockchip_usb2phy_otg_mux_irq, - IRQF_ONESHOT, - "rockchip_usb2phy_otg", - rport); - if (ret) { - dev_err(rphy->dev, - "failed to request otg-mux irq handle\n"); - goto out; - } - } else { - rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid"); - if (rport->bvalid_irq < 0) { - dev_err(rphy->dev, "no vbus valid irq provided\n"); - ret = rport->bvalid_irq; - goto out; - } - - ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq, - NULL, - rockchip_usb2phy_bvalid_irq, - IRQF_ONESHOT, - "rockchip_usb2phy_bvalid", - rport); - if (ret) { - dev_err(rphy->dev, - "failed to request otg-bvalid irq handle\n"); - goto out; - } + ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np); + if (ret) { + dev_err(rphy->dev, "failed to init irq for otg port\n"); + goto out; + } if (!IS_ERR(rphy->edev)) { @@ -1074,12 +1129,19 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) return -EINVAL; } - if (!dev->parent || !dev->parent->of_node) - return -EINVAL; + if (!dev->parent || !dev->parent->of_node) { + rphy->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,usbgrf"); + if (IS_ERR(rphy->grf)) { + dev_err(dev, "failed to locate usbgrf\n"); + return PTR_ERR(rphy->grf); + } + } - rphy->grf = syscon_node_to_regmap(dev->parent->of_node); - if (IS_ERR(rphy->grf)) - return PTR_ERR(rphy->grf); + else { + rphy->grf = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(rphy->grf)) + return PTR_ERR(rphy->grf); + } if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) { rphy->usbgrf = @@ -1091,16 +1153,26 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) rphy->usbgrf = NULL; } - if (of_property_read_u32(np, "reg", &reg)) { + if (of_property_read_u32_index(np, "reg", 0, &reg)) { dev_err(dev, "the reg property is not assigned in %pOFn node\n", np); return -EINVAL; } + /* support address_cells=2 */ + if (reg == 0) { + if (of_property_read_u32_index(np, "reg", 1, &reg)) { + dev_err(dev, "the reg property is not assigned in %pOFn node\n", + np); + return -EINVAL; + } + } + rphy->dev = dev; phy_cfgs = match->data; rphy->chg_state = USB_CHG_STATE_UNDEFINED; rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN; + rphy->irq = platform_get_irq_optional(pdev, 0); platform_set_drvdata(pdev, rphy); ret = rockchip_usb2phy_extcon_register(rphy); @@ -1180,6 +1252,20 @@ next_child: } provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + if (rphy->irq > 0) { + ret = devm_request_threaded_irq(rphy->dev, rphy->irq, NULL, + rockchip_usb2phy_irq, + IRQF_ONESHOT, + "rockchip_usb2phy", + rphy); + if (ret) { + dev_err(rphy->dev, + "failed to request usb2phy irq handle\n"); + goto put_child; + } + } + return PTR_ERR_OR_ZERO(provider); put_child: @@ -1418,6 +1504,69 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = { { /* sentinel */ } }; +static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = { + { + .reg = 0xfe8a0000, + .num_ports = 2, + .clkout_ctl = { 0x0008, 4, 4, 1, 0 }, + .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0x0000, 8, 0, 0, 0x1d1 }, + .bvalid_det_en = { 0x0080, 2, 2, 0, 1 }, + .bvalid_det_st = { 0x0084, 2, 2, 0, 1 }, + .bvalid_det_clr = { 0x0088, 2, 2, 0, 1 }, + .utmi_avalid = { 0x00c0, 10, 10, 0, 1 }, + .utmi_bvalid = { 0x00c0, 9, 9, 0, 1 }, + }, + [USB2PHY_PORT_HOST] = { + /* Select suspend control from controller */ + .phy_sus = { 0x0004, 8, 0, 0x1d2, 0x1d2 }, +
.ls_det_en = { 0x0080, 1, 1, 0, 1 }, + .ls_det_st = { 0x0084, 1, 1, 0, 1 }, + .ls_det_clr = { 0x0088, 1, 1, 0, 1 }, + .utmi_ls = { 0x00c0, 17, 16, 0, 1 }, + .utmi_hstdet = { 0x00c0, 19, 19, 0, 1 } + } + }, + .chg_det = { + .opmode = { 0x0000, 3, 0, 5, 1 }, + .cp_det = { 0x00c0, 24, 24, 0, 1 }, + .dcp_det = { 0x00c0, 23, 23, 0, 1 }, + .dp_det = { 0x00c0, 25, 25, 0, 1 }, + .idm_sink_en = { 0x0008, 8, 8, 0, 1 }, + .idp_sink_en = { 0x0008, 7, 7, 0, 1 }, + .idp_src_en = { 0x0008, 9, 9, 0, 1 }, + .rdm_pdwn_en = { 0x0008, 10, 10, 0, 1 }, + .vdm_src_en = { 0x0008, 12, 12, 0, 1 }, + .vdp_src_en = { 0x0008, 11, 11, 0, 1 }, + }, + }, + { + .reg = 0xfe8b0000, + .num_ports = 2, + .clkout_ctl = { 0x0008, 4, 4, 1, 0 }, + .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0x0000, 8, 0, 0x1d2, 0x1d1 }, + .ls_det_en = { 0x0080, 0, 0, 0, 1 }, + .ls_det_st = { 0x0084, 0, 0, 0, 1 }, + .ls_det_clr = { 0x0088, 0, 0, 0, 1 }, + .utmi_ls = { 0x00c0, 5, 4, 0, 1 }, + .utmi_hstdet = { 0x00c0, 7, 7, 0, 1 } + }, + [USB2PHY_PORT_HOST] = { + .phy_sus = { 0x0004, 8, 0, 0x1d2, 0x1d1 }, + .ls_det_en = { 0x0080, 1, 1, 0, 1 }, + .ls_det_st = { 0x0084, 1, 1, 0, 1 }, + .ls_det_clr = { 0x0088, 1, 1, 0, 1 }, + .utmi_ls = { 0x00c0, 17, 16, 0, 1 }, + .utmi_hstdet = { 0x00c0, 19, 19, 0, 1 } + } + }, + }, + { /* sentinel */ } +}; + static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = { { .reg = 0x100, @@ -1467,6 +1616,7 @@ static const struct of_device_id rockchip_usb2phy_dt_match[] = { { .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs }, { .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs }, { .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs }, + { .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs }, { .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs }, {} }; diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig index a3970e0f89da..8ae644756352 100644 --- a/drivers/phy/socionext/Kconfig +++ b/drivers/phy/socionext/Kconfig @@ -43,4 +43,4 @@ config PHY_UNIPHIER_AHCI select GENERIC_PHY help Enable this to support PHY implemented in AHCI controller - on UniPhier SoCs. This driver supports PXs2 and PXs3 SoCs. + on UniPhier SoCs. This driver supports Pro4, PXs2 and PXs3 SoCs. 
diff --git a/drivers/phy/socionext/phy-uniphier-ahci.c b/drivers/phy/socionext/phy-uniphier-ahci.c index 7427c40bf4ae..28cf3efe0695 100644 --- a/drivers/phy/socionext/phy-uniphier-ahci.c +++ b/drivers/phy/socionext/phy-uniphier-ahci.c @@ -19,8 +19,9 @@ struct uniphier_ahciphy_priv { struct device *dev; void __iomem *base; - struct clk *clk, *clk_parent; - struct reset_control *rst, *rst_parent; + struct clk *clk, *clk_parent, *clk_parent_gio; + struct reset_control *rst, *rst_parent, *rst_parent_gio; + struct reset_control *rst_pm, *rst_tx, *rst_rx; const struct uniphier_ahciphy_soc_data *data; }; @@ -28,10 +29,30 @@ struct uniphier_ahciphy_soc_data { int (*init)(struct uniphier_ahciphy_priv *priv); int (*power_on)(struct uniphier_ahciphy_priv *priv); int (*power_off)(struct uniphier_ahciphy_priv *priv); + bool is_legacy; bool is_ready_high; bool is_phy_clk; }; +/* for Pro4 */ +#define CKCTRL0 0x0 +#define CKCTRL0_CK_OFF BIT(9) +#define CKCTRL0_NCY_MASK GENMASK(8, 4) +#define CKCTRL0_NCY5_MASK GENMASK(3, 2) +#define CKCTRL0_PRESCALE_MASK GENMASK(1, 0) +#define CKCTRL1 0x4 +#define CKCTRL1_LOS_LVL_MASK GENMASK(20, 16) +#define CKCTRL1_TX_LVL_MASK GENMASK(12, 8) +#define RXTXCTRL 0x8 +#define RXTXCTRL_RX_EQ_VALL_MASK GENMASK(31, 29) +#define RXTXCTRL_RX_DPLL_MODE_MASK GENMASK(28, 26) +#define RXTXCTRL_TX_ATTEN_MASK GENMASK(14, 12) +#define RXTXCTRL_TX_BOOST_MASK GENMASK(11, 8) +#define RXTXCTRL_TX_EDGERATE_MASK GENMASK(3, 2) +#define RXTXCTRL_TX_CKO_EN BIT(0) +#define RSTPWR 0x30 +#define RSTPWR_RX_EN_VAL BIT(18) + /* for PXs2/PXs3 */ #define CKCTRL 0x0 #define CKCTRL_P0_READY BIT(15) @@ -50,6 +71,128 @@ struct uniphier_ahciphy_soc_data { #define RXCTRL_LOS_BIAS_MASK GENMASK(10, 8) #define RXCTRL_RX_EQ_MASK GENMASK(2, 0) +static int uniphier_ahciphy_pro4_init(struct uniphier_ahciphy_priv *priv) +{ + u32 val; + + /* set phy MPLL parameters */ + val = readl(priv->base + CKCTRL0); + val &= ~CKCTRL0_NCY_MASK; + val |= FIELD_PREP(CKCTRL0_NCY_MASK, 0x6); + val &= ~CKCTRL0_NCY5_MASK; + val |= FIELD_PREP(CKCTRL0_NCY5_MASK, 0x2); + val &= ~CKCTRL0_PRESCALE_MASK; + val |= FIELD_PREP(CKCTRL0_PRESCALE_MASK, 0x1); + writel(val, priv->base + CKCTRL0); + + /* setup phy control parameters */ + val = readl(priv->base + CKCTRL1); + val &= ~CKCTRL1_LOS_LVL_MASK; + val |= FIELD_PREP(CKCTRL1_LOS_LVL_MASK, 0x10); + val &= ~CKCTRL1_TX_LVL_MASK; + val |= FIELD_PREP(CKCTRL1_TX_LVL_MASK, 0x06); + writel(val, priv->base + CKCTRL1); + + val = readl(priv->base + RXTXCTRL); + val &= ~RXTXCTRL_RX_EQ_VALL_MASK; + val |= FIELD_PREP(RXTXCTRL_RX_EQ_VALL_MASK, 0x6); + val &= ~RXTXCTRL_RX_DPLL_MODE_MASK; + val |= FIELD_PREP(RXTXCTRL_RX_DPLL_MODE_MASK, 0x3); + val &= ~RXTXCTRL_TX_ATTEN_MASK; + val |= FIELD_PREP(RXTXCTRL_TX_ATTEN_MASK, 0x3); + val &= ~RXTXCTRL_TX_BOOST_MASK; + val |= FIELD_PREP(RXTXCTRL_TX_BOOST_MASK, 0x5); + val &= ~RXTXCTRL_TX_EDGERATE_MASK; + val |= FIELD_PREP(RXTXCTRL_TX_EDGERATE_MASK, 0x0); + writel(val, priv->base + RXTXCTRL); + + return 0; +} + +static int uniphier_ahciphy_pro4_power_on(struct uniphier_ahciphy_priv *priv) +{ + u32 val; + int ret; + + /* enable reference clock for phy */ + val = readl(priv->base + CKCTRL0); + val &= ~CKCTRL0_CK_OFF; + writel(val, priv->base + CKCTRL0); + + /* enable TX clock */ + val = readl(priv->base + RXTXCTRL); + val |= RXTXCTRL_TX_CKO_EN; + writel(val, priv->base + RXTXCTRL); + + /* wait until RX is ready */ + ret = readl_poll_timeout(priv->base + RSTPWR, val, + !(val & RSTPWR_RX_EN_VAL), 200, 2000); + if (ret) { + dev_err(priv->dev, "Failed to check whether Rx is 
ready\n"); + goto out_disable_clock; + } + + /* release all reset */ + ret = reset_control_deassert(priv->rst_pm); + if (ret) { + dev_err(priv->dev, "Failed to release PM reset\n"); + goto out_disable_clock; + } + + ret = reset_control_deassert(priv->rst_tx); + if (ret) { + dev_err(priv->dev, "Failed to release Tx reset\n"); + goto out_reset_pm_assert; + } + + ret = reset_control_deassert(priv->rst_rx); + if (ret) { + dev_err(priv->dev, "Failed to release Rx reset\n"); + goto out_reset_tx_assert; + } + + return 0; + +out_reset_tx_assert: + reset_control_assert(priv->rst_tx); +out_reset_pm_assert: + reset_control_assert(priv->rst_pm); + +out_disable_clock: + /* disable TX clock */ + val = readl(priv->base + RXTXCTRL); + val &= ~RXTXCTRL_TX_CKO_EN; + writel(val, priv->base + RXTXCTRL); + + /* disable reference clock for phy */ + val = readl(priv->base + CKCTRL0); + val |= CKCTRL0_CK_OFF; + writel(val, priv->base + CKCTRL0); + + return ret; +} + +static int uniphier_ahciphy_pro4_power_off(struct uniphier_ahciphy_priv *priv) +{ + u32 val; + + reset_control_assert(priv->rst_rx); + reset_control_assert(priv->rst_tx); + reset_control_assert(priv->rst_pm); + + /* disable TX clock */ + val = readl(priv->base + RXTXCTRL); + val &= ~RXTXCTRL_TX_CKO_EN; + writel(val, priv->base + RXTXCTRL); + + /* disable reference clock for phy */ + val = readl(priv->base + CKCTRL0); + val |= CKCTRL0_CK_OFF; + writel(val, priv->base + CKCTRL0); + + return 0; +} + static void uniphier_ahciphy_pxs2_enable(struct uniphier_ahciphy_priv *priv, bool enable) { @@ -142,14 +285,22 @@ static int uniphier_ahciphy_init(struct phy *phy) struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy); int ret; - ret = clk_prepare_enable(priv->clk_parent); + ret = clk_prepare_enable(priv->clk_parent_gio); if (ret) return ret; - ret = reset_control_deassert(priv->rst_parent); + ret = clk_prepare_enable(priv->clk_parent); + if (ret) + goto out_clk_gio_disable; + + ret = reset_control_deassert(priv->rst_parent_gio); if (ret) goto out_clk_disable; + ret = reset_control_deassert(priv->rst_parent); + if (ret) + goto out_rst_gio_assert; + if (priv->data->init) { ret = priv->data->init(priv); if (ret) @@ -160,8 +311,12 @@ static int uniphier_ahciphy_init(struct phy *phy) out_rst_assert: reset_control_assert(priv->rst_parent); +out_rst_gio_assert: + reset_control_assert(priv->rst_parent_gio); out_clk_disable: clk_disable_unprepare(priv->clk_parent); +out_clk_gio_disable: + clk_disable_unprepare(priv->clk_parent_gio); return ret; } @@ -171,7 +326,9 @@ static int uniphier_ahciphy_exit(struct phy *phy) struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy); reset_control_assert(priv->rst_parent); + reset_control_assert(priv->rst_parent_gio); clk_disable_unprepare(priv->clk_parent); + clk_disable_unprepare(priv->clk_parent_gio); return 0; } @@ -265,6 +422,28 @@ static int uniphier_ahciphy_probe(struct platform_device *pdev) if (IS_ERR(priv->rst)) return PTR_ERR(priv->rst); + if (priv->data->is_legacy) { + priv->clk_parent_gio = devm_clk_get(dev, "gio"); + if (IS_ERR(priv->clk_parent_gio)) + return PTR_ERR(priv->clk_parent_gio); + priv->rst_parent_gio = + devm_reset_control_get_shared(dev, "gio"); + if (IS_ERR(priv->rst_parent_gio)) + return PTR_ERR(priv->rst_parent_gio); + + priv->rst_pm = devm_reset_control_get_shared(dev, "pm"); + if (IS_ERR(priv->rst_pm)) + return PTR_ERR(priv->rst_pm); + + priv->rst_tx = devm_reset_control_get_shared(dev, "tx"); + if (IS_ERR(priv->rst_tx)) + return PTR_ERR(priv->rst_tx); + + priv->rst_rx = 
devm_reset_control_get_shared(dev, "rx"); + if (IS_ERR(priv->rst_rx)) + return PTR_ERR(priv->rst_rx); + } + phy = devm_phy_create(dev, dev->of_node, &uniphier_ahciphy_ops); if (IS_ERR(phy)) { dev_err(dev, "failed to create phy\n"); @@ -279,9 +458,18 @@ static int uniphier_ahciphy_probe(struct platform_device *pdev) return 0; } +static const struct uniphier_ahciphy_soc_data uniphier_pro4_data = { + .init = uniphier_ahciphy_pro4_init, + .power_on = uniphier_ahciphy_pro4_power_on, + .power_off = uniphier_ahciphy_pro4_power_off, + .is_legacy = true, + .is_phy_clk = false, +}; + static const struct uniphier_ahciphy_soc_data uniphier_pxs2_data = { .power_on = uniphier_ahciphy_pxs2_power_on, .power_off = uniphier_ahciphy_pxs2_power_off, + .is_legacy = false, .is_ready_high = false, .is_phy_clk = false, }; @@ -290,12 +478,17 @@ static const struct uniphier_ahciphy_soc_data uniphier_pxs3_data = { .init = uniphier_ahciphy_pxs3_init, .power_on = uniphier_ahciphy_pxs2_power_on, .power_off = uniphier_ahciphy_pxs2_power_off, + .is_legacy = false, .is_ready_high = true, .is_phy_clk = true, }; static const struct of_device_id uniphier_ahciphy_match[] = { { + .compatible = "socionext,uniphier-pro4-ahci-phy", + .data = &uniphier_pro4_data, + }, + { .compatible = "socionext,uniphier-pxs2-ahci-phy", .data = &uniphier_pxs2_data, }, diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c index 6bdbd1f214dd..ebca296ef123 100644 --- a/drivers/phy/socionext/phy-uniphier-pcie.c +++ b/drivers/phy/socionext/phy-uniphier-pcie.c @@ -27,6 +27,7 @@ #define TESTI_DAT_MASK GENMASK(13, 6) #define TESTI_ADR_MASK GENMASK(5, 1) #define TESTI_WR_EN BIT(0) +#define TESTIO_PHY_SHIFT 16 #define PCL_PHY_TEST_O 0x2004 #define TESTO_DAT_MASK GENMASK(7, 0) @@ -39,6 +40,10 @@ #define SG_USBPCIESEL 0x590 #define SG_USBPCIESEL_PCIE BIT(0) +/* SC */ +#define SC_US3SRCSEL 0x2244 +#define SC_US3SRCSEL_2LANE GENMASK(9, 8) + #define PCL_PHY_R00 0 #define RX_EQ_ADJ_EN BIT(3) /* enable for EQ adjustment */ #define PCL_PHY_R06 6 @@ -47,6 +52,9 @@ #define PCL_PHY_R26 26 #define VCO_CTRL GENMASK(7, 4) /* Tx VCO adjustment value */ #define VCO_CTRL_INIT_VAL 5 +#define PCL_PHY_R28 28 +#define VCOPLL_CLMP GENMASK(3, 2) /* Tx VCOPLL clamp mode */ +#define VCOPLL_CLMP_VAL 0 struct uniphier_pciephy_priv { void __iomem *base; @@ -58,43 +66,57 @@ struct uniphier_pciephy_priv { struct uniphier_pciephy_soc_data { bool is_legacy; + bool is_dual_phy; void (*set_phymode)(struct regmap *regmap); }; static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv, - u32 data) + int id, u32 data) { + if (id) + data <<= TESTIO_PHY_SHIFT; + /* need to read TESTO twice after accessing TESTI */ writel(data, priv->base + PCL_PHY_TEST_I); readl(priv->base + PCL_PHY_TEST_O); readl(priv->base + PCL_PHY_TEST_O); } +static u32 uniphier_pciephy_testio_read(struct uniphier_pciephy_priv *priv, int id) +{ + u32 val = readl(priv->base + PCL_PHY_TEST_O); + + if (id) + val >>= TESTIO_PHY_SHIFT; + + return val & TESTO_DAT_MASK; +} + static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv, - u32 reg, u32 mask, u32 param) + int id, u32 reg, u32 mask, u32 param) { u32 val; /* read previous data */ val = FIELD_PREP(TESTI_DAT_MASK, 1); val |= FIELD_PREP(TESTI_ADR_MASK, reg); - uniphier_pciephy_testio_write(priv, val); - val = readl(priv->base + PCL_PHY_TEST_O) & TESTO_DAT_MASK; + uniphier_pciephy_testio_write(priv, id, val); + val = uniphier_pciephy_testio_read(priv, id); /* update value */ val &= ~mask; val |= 
mask & param; val = FIELD_PREP(TESTI_DAT_MASK, val); val |= FIELD_PREP(TESTI_ADR_MASK, reg); - uniphier_pciephy_testio_write(priv, val); - uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN); - uniphier_pciephy_testio_write(priv, val); + uniphier_pciephy_testio_write(priv, id, val); + uniphier_pciephy_testio_write(priv, id, val | TESTI_WR_EN); + uniphier_pciephy_testio_write(priv, id, val); /* read current data as dummy */ val = FIELD_PREP(TESTI_DAT_MASK, 1); val |= FIELD_PREP(TESTI_ADR_MASK, reg); - uniphier_pciephy_testio_write(priv, val); - readl(priv->base + PCL_PHY_TEST_O); + uniphier_pciephy_testio_write(priv, id, val); + uniphier_pciephy_testio_read(priv, id); } static void uniphier_pciephy_assert(struct uniphier_pciephy_priv *priv) @@ -120,7 +142,7 @@ static int uniphier_pciephy_init(struct phy *phy) { struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy); u32 val; - int ret; + int ret, id; ret = clk_prepare_enable(priv->clk); if (ret) @@ -148,12 +170,16 @@ static int uniphier_pciephy_init(struct phy *phy) if (priv->data->is_legacy) return 0; - uniphier_pciephy_set_param(priv, PCL_PHY_R00, + for (id = 0; id < (priv->data->is_dual_phy ? 2 : 1); id++) { + uniphier_pciephy_set_param(priv, id, PCL_PHY_R00, RX_EQ_ADJ_EN, RX_EQ_ADJ_EN); - uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ, + uniphier_pciephy_set_param(priv, id, PCL_PHY_R06, RX_EQ_ADJ, FIELD_PREP(RX_EQ_ADJ, RX_EQ_ADJ_VAL)); - uniphier_pciephy_set_param(priv, PCL_PHY_R26, VCO_CTRL, + uniphier_pciephy_set_param(priv, id, PCL_PHY_R26, VCO_CTRL, FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL)); + uniphier_pciephy_set_param(priv, id, PCL_PHY_R28, VCOPLL_CLMP, + FIELD_PREP(VCOPLL_CLMP, VCOPLL_CLMP_VAL)); + } usleep_range(1, 10); uniphier_pciephy_deassert(priv); @@ -261,17 +287,31 @@ static void uniphier_pciephy_ld20_setmode(struct regmap *regmap) SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE); } +static void uniphier_pciephy_nx1_setmode(struct regmap *regmap) +{ + regmap_update_bits(regmap, SC_US3SRCSEL, + SC_US3SRCSEL_2LANE, SC_US3SRCSEL_2LANE); +} + static const struct uniphier_pciephy_soc_data uniphier_pro5_data = { .is_legacy = true, }; static const struct uniphier_pciephy_soc_data uniphier_ld20_data = { .is_legacy = false, + .is_dual_phy = false, .set_phymode = uniphier_pciephy_ld20_setmode, }; static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = { .is_legacy = false, + .is_dual_phy = false, +}; + +static const struct uniphier_pciephy_soc_data uniphier_nx1_data = { + .is_legacy = false, + .is_dual_phy = true, + .set_phymode = uniphier_pciephy_nx1_setmode, }; static const struct of_device_id uniphier_pciephy_match[] = { @@ -287,6 +327,10 @@ static const struct of_device_id uniphier_pciephy_match[] = { .compatible = "socionext,uniphier-pxs3-pcie-phy", .data = &uniphier_pxs3_data, }, + { + .compatible = "socionext,uniphier-nx1-pcie-phy", + .data = &uniphier_nx1_data, + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, uniphier_pciephy_match); diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c index a9bc74121f38..8c8673df0084 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3hs.c +++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c @@ -447,6 +447,10 @@ static const struct of_device_id uniphier_u3hsphy_match[] = { .compatible = "socionext,uniphier-pxs3-usb3-hsphy", .data = &uniphier_pxs3_data, }, + { + .compatible = "socionext,uniphier-nx1-usb3-hsphy", + .data = &uniphier_pxs3_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, uniphier_u3hsphy_match); diff 
--git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c index 6700645bcbe6..f402ed8732fd 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3ss.c +++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c @@ -22,11 +22,13 @@ #include <linux/reset.h> #define SSPHY_TESTI 0x0 -#define SSPHY_TESTO 0x4 #define TESTI_DAT_MASK GENMASK(13, 6) #define TESTI_ADR_MASK GENMASK(5, 1) #define TESTI_WR_EN BIT(0) +#define SSPHY_TESTO 0x4 +#define TESTO_DAT_MASK GENMASK(7, 0) + #define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) } #define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */ @@ -84,12 +86,12 @@ static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv, val = FIELD_PREP(TESTI_DAT_MASK, 1); val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no); uniphier_u3ssphy_testio_write(priv, val); - val = readl(priv->base + SSPHY_TESTO); + val = readl(priv->base + SSPHY_TESTO) & TESTO_DAT_MASK; /* update value */ - val &= ~FIELD_PREP(TESTI_DAT_MASK, field_mask); + val &= ~field_mask; data = field_mask & (p->value << p->field.lsb); - val = FIELD_PREP(TESTI_DAT_MASK, data); + val = FIELD_PREP(TESTI_DAT_MASK, data | val); val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no); uniphier_u3ssphy_testio_write(priv, val); uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN); @@ -328,6 +330,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = { .compatible = "socionext,uniphier-pxs3-usb3-ssphy", .data = &uniphier_ld20_data, }, + { + .compatible = "socionext,uniphier-nx1-usb3-ssphy", + .data = &uniphier_ld20_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, uniphier_u3ssphy_match); diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index e4f4a9be5132..2ce9bfd783d4 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -672,17 +672,15 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) usbphyc->vdda1v1 = devm_regulator_get(dev, "vdda1v1"); if (IS_ERR(usbphyc->vdda1v1)) { - ret = PTR_ERR(usbphyc->vdda1v1); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get vdda1v1 supply: %d\n", ret); + ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v1), + "failed to get vdda1v1 supply\n"); goto clk_disable; } usbphyc->vdda1v8 = devm_regulator_get(dev, "vdda1v8"); if (IS_ERR(usbphyc->vdda1v8)) { - ret = PTR_ERR(usbphyc->vdda1v8); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get vdda1v8 supply: %d\n", ret); + ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v8), + "failed to get vdda1v8 supply\n"); goto clk_disable; } diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 963de5913e50..aa5237eacd29 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -455,7 +455,7 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, name = kasprintf(GFP_KERNEL, "%s-%u", type, index); if (!name) { of_node_put(ports); - return ERR_PTR(-ENOMEM); + return NULL; } np = of_get_child_by_name(ports, name); kfree(name); diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c index 47482f106fab..76c5595f0859 100644 --- a/drivers/phy/ti/phy-omap-control.c +++ b/drivers/phy/ti/phy-omap-control.c @@ -26,7 +26,7 @@ void omap_control_pcie_pcs(struct device *dev, u8 delay) u32 val; struct omap_control_phy *control_phy; - if (IS_ERR(dev) || !dev) { + if (IS_ERR_OR_NULL(dev)) { pr_err("%s: invalid device\n", __func__); return; } @@ -61,7 +61,7 @@ void omap_control_phy_power(struct device *dev, int on) unsigned 
long rate; struct omap_control_phy *control_phy; - if (IS_ERR(dev) || !dev) { + if (IS_ERR_OR_NULL(dev)) { pr_err("%s: invalid device\n", __func__); return; } @@ -202,7 +202,7 @@ void omap_control_usb_set_mode(struct device *dev, { struct omap_control_phy *ctrl_phy; - if (IS_ERR(dev) || !dev) + if (IS_ERR_OR_NULL(dev)) return; ctrl_phy = dev_get_drvdata(dev); diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig index 8ac149173c64..d421e1482395 100644 --- a/drivers/platform/mips/Kconfig +++ b/drivers/platform/mips/Kconfig @@ -30,4 +30,10 @@ config RS780E_ACPI help Loongson RS780E PCH ACPI Controller driver. +config LS2K_RESET + bool "Loongson-2K1000 Reset Controller" + depends on MACH_LOONGSON64 || COMPILE_TEST + help + Loongson-2K1000 Reset Controller driver. + endif # MIPS_PLATFORM_DEVICES diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile index 178149098777..4c71444e453a 100644 --- a/drivers/platform/mips/Makefile +++ b/drivers/platform/mips/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o obj-$(CONFIG_RS780E_ACPI) += rs780e-acpi.o +obj-$(CONFIG_LS2K_RESET) += ls2k-reset.o diff --git a/drivers/platform/mips/ls2k-reset.c b/drivers/platform/mips/ls2k-reset.c new file mode 100644 index 000000000000..8f42d5d16480 --- /dev/null +++ b/drivers/platform/mips/ls2k-reset.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021, Qing Zhang <zhangqing@loongson.cn> + * Loongson-2K1000 reset support + */ + +#include <linux/of_address.h> +#include <linux/pm.h> +#include <asm/reboot.h> + +#define PM1_STS 0x0c /* Power Management 1 Status Register */ +#define PM1_CNT 0x14 /* Power Management 1 Control Register */ +#define RST_CNT 0x30 /* Reset Control Register */ + +static void __iomem *base; + +static void ls2k_restart(char *command) +{ + writel(0x1, base + RST_CNT); +} + +static void ls2k_poweroff(void) +{ + /* Clear */ + writel((readl(base + PM1_STS) & 0xffffffff), base + PM1_STS); + /* Sleep Enable | Soft Off */ + writel(GENMASK(12, 10) | BIT(13), base + PM1_CNT); +} + +static int ls2k_reset_init(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "loongson,ls2k-pm"); + if (!np) { + pr_info("Failed to get PM node\n"); + return -ENODEV; + } + + base = of_iomap(np, 0); + of_node_put(np); + if (!base) { + pr_info("Failed to map PM register base address\n"); + return -ENOMEM; + } + + _machine_restart = ls2k_restart; + pm_power_off = ls2k_poweroff; + + return 0; +} + +arch_initcall(ls2k_reset_init); diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 669ef4700c1a..f7e86ae9f72f 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -160,7 +160,7 @@ static int pnp_dock_thread(void *unused) * No dock to manage */ case PNP_FUNCTION_NOT_SUPPORTED: - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); case PNP_SYSTEM_NOT_DOCKED: d = 0; break; @@ -170,7 +170,7 @@ static int pnp_dock_thread(void *unused) default: pnpbios_print_status("pnp_dock_thread", status); printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n"); - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); } if (d != docked) { if (pnp_dock_event(d, &now) == 0) { @@ -183,7 +183,7 @@ static int pnp_dock_thread(void *unused) } } } - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); } static int pnpbios_get_resources(struct pnp_dev *dev) diff --git
a/drivers/pwm/core.c b/drivers/pwm/core.c index 93772ab8d7e3..c7552df32082 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -548,6 +548,73 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, } } +static int pwm_apply_legacy(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + struct pwm_state initial_state = pwm->state; + + if (state->polarity != pwm->state.polarity) { + if (!chip->ops->set_polarity) + return -EINVAL; + + /* + * Changing the polarity of a running PWM is only allowed when + * the PWM driver implements ->apply(). + */ + if (pwm->state.enabled) { + chip->ops->disable(chip, pwm); + + /* + * Update pwm->state already here in case + * .set_polarity() or another callback depend on that. + */ + pwm->state.enabled = false; + } + + err = chip->ops->set_polarity(chip, pwm, state->polarity); + if (err) + goto rollback; + + pwm->state.polarity = state->polarity; + } + + if (!state->enabled) { + if (pwm->state.enabled) + chip->ops->disable(chip, pwm); + + return 0; + } + + /* + * We cannot skip calling ->config even if state->period == + * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle + * because we might have exited early in the last call to + * pwm_apply_state because of !state->enabled and so the two values in + * pwm->state might not be configured in hardware. + */ + err = chip->ops->config(pwm->chip, pwm, + state->duty_cycle, + state->period); + if (err) + goto rollback; + + pwm->state.period = state->period; + pwm->state.duty_cycle = state->duty_cycle; + + if (!pwm->state.enabled) { + err = chip->ops->enable(chip, pwm); + if (err) + goto rollback; + } + + return 0; + +rollback: + pwm->state = initial_state; + return err; +} + /** * pwm_apply_state() - atomically apply a new state to a PWM device * @pwm: PWM device @@ -580,70 +647,22 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state) state->usage_power == pwm->state.usage_power) return 0; - if (chip->ops->apply) { + if (chip->ops->apply) err = chip->ops->apply(chip, pwm, state); - if (err) - return err; - - trace_pwm_apply(pwm, state); - - pwm->state = *state; - - /* - * only do this after pwm->state was applied as some - * implementations of .get_state depend on this - */ - pwm_apply_state_debug(pwm, state); - } else { - /* - * FIXME: restore the initial state in case of error. - */ - if (state->polarity != pwm->state.polarity) { - if (!chip->ops->set_polarity) - return -EINVAL; + else + err = pwm_apply_legacy(chip, pwm, state); + if (err) + return err; - /* - * Changing the polarity of a running PWM is - * only allowed when the PWM driver implements - * ->apply(). 
- */ - if (pwm->state.enabled) { - chip->ops->disable(chip, pwm); - pwm->state.enabled = false; - } + trace_pwm_apply(pwm, state); - err = chip->ops->set_polarity(chip, pwm, - state->polarity); - if (err) - return err; + pwm->state = *state; - pwm->state.polarity = state->polarity; - } - - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - err = chip->ops->config(pwm->chip, pwm, - state->duty_cycle, - state->period); - if (err) - return err; - - pwm->state.duty_cycle = state->duty_cycle; - pwm->state.period = state->period; - } - - if (state->enabled != pwm->state.enabled) { - if (state->enabled) { - err = chip->ops->enable(chip, pwm); - if (err) - return err; - } else { - chip->ops->disable(chip, pwm); - } - - pwm->state.enabled = state->enabled; - } - } + /* + * only do this after pwm->state was applied as some + * implementations of .get_state depend on this + */ + pwm_apply_state_debug(pwm, state); return 0; } diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index f97f82548293..5996049f66ec 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -128,11 +128,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty = DIV_ROUND_UP(timebase * duty_ns, period_ns); - ret = pm_runtime_get_sync(chip->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(chip->dev); + ret = pm_runtime_resume_and_get(chip->dev); + if (ret < 0) return ret; - } val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm)); @@ -184,10 +182,33 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) pm_runtime_put_autosuspend(chip->dev); } +static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + img_pwm_disable(chip, pwm); + + return 0; + } + + err = img_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = img_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops img_pwm_ops = { - .config = img_pwm_config, - .enable = img_pwm_enable, - .disable = img_pwm_disable, + .apply = img_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c index 203194f2c92e..86567add79db 100644 --- a/drivers/pwm/pwm-twl.c +++ b/drivers/pwm/pwm-twl.c @@ -58,9 +58,9 @@ static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip) } static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) + u64 duty_ns, u64 period_ns) { - int duty_cycle = DIV_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1; + int duty_cycle = DIV64_U64_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1; u8 pwm_config[2] = { 1, 0 }; int base, ret; @@ -279,19 +279,65 @@ out: mutex_unlock(&twl->mutex); } +static int twl4030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + twl4030_pwm_disable(chip, pwm); + + return 0; + } + + err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = twl4030_pwm_enable(chip, pwm); + + return err; +} + +static int twl6030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct 
pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + twl6030_pwm_disable(chip, pwm); + + return 0; + } + + err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = twl6030_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops twl4030_pwm_ops = { - .config = twl_pwm_config, - .enable = twl4030_pwm_enable, - .disable = twl4030_pwm_disable, + .apply = twl4030_pwm_apply, .request = twl4030_pwm_request, .free = twl4030_pwm_free, .owner = THIS_MODULE, }; static const struct pwm_ops twl6030_pwm_ops = { - .config = twl_pwm_config, - .enable = twl6030_pwm_enable, - .disable = twl6030_pwm_disable, + .apply = twl6030_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index 480bfc29782f..7170a315535b 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -70,7 +70,7 @@ static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 b } static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) + u64 duty_ns, u64 period_ns) { struct vt8500_chip *vt8500 = to_vt8500_chip(chip); unsigned long long c; @@ -102,8 +102,8 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, } c = (unsigned long long)pv * duty_ns; - do_div(c, period_ns); - dc = c; + + dc = div64_u64(c, period_ns); writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm)); vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE); @@ -176,11 +176,54 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip, return 0; } +static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + bool enabled = pwm->state.enabled; + + if (state->polarity != pwm->state.polarity) { + /* + * Changing the polarity of a running PWM is only allowed when + * the PWM driver implements ->apply(). + */ + if (enabled) { + vt8500_pwm_disable(chip, pwm); + + enabled = false; + } + + err = vt8500_pwm_set_polarity(chip, pwm, state->polarity); + if (err) + return err; + } + + if (!state->enabled) { + if (enabled) + vt8500_pwm_disable(chip, pwm); + + return 0; + } + + /* + * We cannot skip calling ->config even if state->period == + * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle + * because we might have exited early in the last call to + * pwm_apply_state because of !state->enabled and so the two values in + * pwm->state might not be configured in hardware. + */ + err = vt8500_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!enabled) + err = vt8500_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops vt8500_pwm_ops = { - .enable = vt8500_pwm_enable, - .disable = vt8500_pwm_disable, - .config = vt8500_pwm_config, - .set_polarity = vt8500_pwm_set_polarity, + .apply = vt8500_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig index 3e18f9c51e29..02771ba3e54f 100644 --- a/drivers/rapidio/switches/Kconfig +++ b/drivers/rapidio/switches/Kconfig @@ -2,22 +2,11 @@ # # RapidIO switches configuration # -config RAPIDIO_TSI57X - tristate "IDT Tsi57x SRIO switches support" - help - Includes support for IDT Tsi57x family of serial RapidIO switches. 
- config RAPIDIO_CPS_XX tristate "IDT CPS-xx SRIO switches support" help Includes support for IDT CPS-16/12/10/8 serial RapidIO switches. -config RAPIDIO_TSI568 - tristate "Tsi568 SRIO switch support" - default n - help - Includes support for IDT Tsi568 serial RapidIO switch. - config RAPIDIO_CPS_GEN2 tristate "IDT CPS Gen.2 SRIO switch support" default n diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile index 69e7de31e41c..ef1749a79c2b 100644 --- a/drivers/rapidio/switches/Makefile +++ b/drivers/rapidio/switches/Makefile @@ -3,8 +3,6 @@ # Makefile for RIO switches # -obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o -obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c deleted file mode 100644 index 103b48a24980..000000000000 --- a/drivers/rapidio/switches/tsi568.c +++ /dev/null @@ -1,195 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * RapidIO Tsi568 switch support - * - * Copyright 2009-2010 Integrated Device Technology, Inc. - * Alexandre Bounine <alexandre.bounine@idt.com> - * - Added EM support - * - Modified switch operations initialization. - * - * Copyright 2005 MontaVista Software, Inc. - * Matt Porter <mporter@kernel.crashing.org> - */ - -#include <linux/rio.h> -#include <linux/rio_drv.h> -#include <linux/rio_ids.h> -#include <linux/delay.h> -#include <linux/module.h> -#include "../rio.h" - -/* Global (broadcast) route registers */ -#define SPBC_ROUTE_CFG_DESTID 0x10070 -#define SPBC_ROUTE_CFG_PORT 0x10074 - -/* Per port route registers */ -#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) -#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) - -#define TSI568_SP_MODE(n) (0x11004 + 0x100*n) -#define TSI568_SP_MODE_PW_DIS 0x08000000 - -static int -tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 route_port) -{ - if (table == RIO_GLOBAL_TABLE) { - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_DESTID, route_destid); - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_PORT, route_port); - } else { - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_DESTID(table), - route_destid); - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_PORT(table), route_port); - } - - udelay(10); - - return 0; -} - -static int -tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 *route_port) -{ - int ret = 0; - u32 result; - - if (table == RIO_GLOBAL_TABLE) { - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_DESTID, route_destid); - rio_mport_read_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_PORT, &result); - } else { - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_DESTID(table), - route_destid); - rio_mport_read_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_PORT(table), &result); - } - - *route_port = result; - if (*route_port > 15) - ret = -1; - - return ret; -} - -static int -tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, - u16 table) -{ - u32 route_idx; - u32 lut_size; - - lut_size = (mport->sys_size) ? 
0x1ff : 0xff; - - if (table == RIO_GLOBAL_TABLE) { - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_DESTID, 0x80000000); - for (route_idx = 0; route_idx <= lut_size; route_idx++) - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_PORT, - RIO_INVALID_ROUTE); - } else { - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_DESTID(table), - 0x80000000); - for (route_idx = 0; route_idx <= lut_size; route_idx++) - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_PORT(table), - RIO_INVALID_ROUTE); - } - - return 0; -} - -static int -tsi568_em_init(struct rio_dev *rdev) -{ - u32 regval; - int portnum; - - pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); - - /* Make sure that Port-Writes are disabled (for all ports) */ - for (portnum = 0; - portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { - rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval); - rio_write_config_32(rdev, TSI568_SP_MODE(portnum), - regval | TSI568_SP_MODE_PW_DIS); - } - - return 0; -} - -static struct rio_switch_ops tsi568_switch_ops = { - .owner = THIS_MODULE, - .add_entry = tsi568_route_add_entry, - .get_entry = tsi568_route_get_entry, - .clr_table = tsi568_route_clr_table, - .set_domain = NULL, - .get_domain = NULL, - .em_init = tsi568_em_init, - .em_handle = NULL, -}; - -static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id) -{ - pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); - - spin_lock(&rdev->rswitch->lock); - - if (rdev->rswitch->ops) { - spin_unlock(&rdev->rswitch->lock); - return -EINVAL; - } - - rdev->rswitch->ops = &tsi568_switch_ops; - spin_unlock(&rdev->rswitch->lock); - return 0; -} - -static void tsi568_remove(struct rio_dev *rdev) -{ - pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); - spin_lock(&rdev->rswitch->lock); - if (rdev->rswitch->ops != &tsi568_switch_ops) { - spin_unlock(&rdev->rswitch->lock); - return; - } - rdev->rswitch->ops = NULL; - spin_unlock(&rdev->rswitch->lock); -} - -static const struct rio_device_id tsi568_id_table[] = { - {RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)}, - { 0, } /* terminate list */ -}; - -static struct rio_driver tsi568_driver = { - .name = "tsi568", - .id_table = tsi568_id_table, - .probe = tsi568_probe, - .remove = tsi568_remove, -}; - -static int __init tsi568_init(void) -{ - return rio_register_driver(&tsi568_driver); -} - -static void __exit tsi568_exit(void) -{ - rio_unregister_driver(&tsi568_driver); -} - -device_initcall(tsi568_init); -module_exit(tsi568_exit); - -MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver"); -MODULE_AUTHOR("Integrated Device Technology, Inc."); -MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c deleted file mode 100644 index 271762046f8c..000000000000 --- a/drivers/rapidio/switches/tsi57x.c +++ /dev/null @@ -1,365 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * RapidIO Tsi57x switch family support - * - * Copyright 2009-2010 Integrated Device Technology, Inc. - * Alexandre Bounine <alexandre.bounine@idt.com> - * - Added EM support - * - Modified switch operations initialization. - * - * Copyright 2005 MontaVista Software, Inc.
- * Matt Porter <mporter@kernel.crashing.org> - */ - -#include <linux/rio.h> -#include <linux/rio_drv.h> -#include <linux/rio_ids.h> -#include <linux/delay.h> -#include <linux/module.h> -#include "../rio.h" - -/* Global (broadcast) route registers */ -#define SPBC_ROUTE_CFG_DESTID 0x10070 -#define SPBC_ROUTE_CFG_PORT 0x10074 - -/* Per port route registers */ -#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) -#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) - -#define TSI578_SP_MODE(n) (0x11004 + n*0x100) -#define TSI578_SP_MODE_GLBL 0x10004 -#define TSI578_SP_MODE_PW_DIS 0x08000000 -#define TSI578_SP_MODE_LUT_512 0x01000000 - -#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100) -#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100) -#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100) -#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100) - -#define TSI578_GLBL_ROUTE_BASE 0x10078 - -static int -tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 route_port) -{ - if (table == RIO_GLOBAL_TABLE) { - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_DESTID, route_destid); - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_PORT, route_port); - } else { - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_DESTID(table), route_destid); - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_PORT(table), route_port); - } - - udelay(10); - - return 0; -} - -static int -tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 *route_port) -{ - int ret = 0; - u32 result; - - if (table == RIO_GLOBAL_TABLE) { - /* Use local RT of the ingress port to avoid possible - race condition */ - rio_mport_read_config_32(mport, destid, hopcount, - RIO_SWP_INFO_CAR, &result); - table = (result & RIO_SWP_INFO_PORT_NUM_MASK); - } - - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_DESTID(table), route_destid); - rio_mport_read_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_PORT(table), &result); - - *route_port = (u8)result; - if (*route_port > 15) - ret = -1; - - return ret; -} - -static int -tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, - u16 table) -{ - u32 route_idx; - u32 lut_size; - - lut_size = (mport->sys_size) ? 
0x1ff : 0xff; - - if (table == RIO_GLOBAL_TABLE) { - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_DESTID, 0x80000000); - for (route_idx = 0; route_idx <= lut_size; route_idx++) - rio_mport_write_config_32(mport, destid, hopcount, - SPBC_ROUTE_CFG_PORT, - RIO_INVALID_ROUTE); - } else { - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_DESTID(table), 0x80000000); - for (route_idx = 0; route_idx <= lut_size; route_idx++) - rio_mport_write_config_32(mport, destid, hopcount, - SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE); - } - - return 0; -} - -static int -tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, - u8 sw_domain) -{ - u32 regval; - - /* - * Switch domain configuration operates only at global level - */ - - /* Turn off flat (LUT_512) mode */ - rio_mport_read_config_32(mport, destid, hopcount, - TSI578_SP_MODE_GLBL, &regval); - rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL, - regval & ~TSI578_SP_MODE_LUT_512); - /* Set switch domain base */ - rio_mport_write_config_32(mport, destid, hopcount, - TSI578_GLBL_ROUTE_BASE, - (u32)(sw_domain << 24)); - return 0; -} - -static int -tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, - u8 *sw_domain) -{ - u32 regval; - - /* - * Switch domain configuration operates only at global level - */ - rio_mport_read_config_32(mport, destid, hopcount, - TSI578_GLBL_ROUTE_BASE, &regval); - - *sw_domain = (u8)(regval >> 24); - - return 0; -} - -static int -tsi57x_em_init(struct rio_dev *rdev) -{ - u32 regval; - int portnum; - - pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); - - for (portnum = 0; - portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { - /* Make sure that Port-Writes are enabled (for all ports) */ - rio_read_config_32(rdev, - TSI578_SP_MODE(portnum), &regval); - rio_write_config_32(rdev, - TSI578_SP_MODE(portnum), - regval & ~TSI578_SP_MODE_PW_DIS); - - /* Clear all pending interrupts */ - rio_read_config_32(rdev, - RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), - &regval); - rio_write_config_32(rdev, - RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), - regval & 0x07120214); - - rio_read_config_32(rdev, - TSI578_SP_INT_STATUS(portnum), &regval); - rio_write_config_32(rdev, - TSI578_SP_INT_STATUS(portnum), - regval & 0x000700bd); - - /* Enable all interrupts to allow ports to send a port-write */ - rio_read_config_32(rdev, - TSI578_SP_CTL_INDEP(portnum), &regval); - rio_write_config_32(rdev, - TSI578_SP_CTL_INDEP(portnum), - regval | 0x000b0000); - - /* Skip next (odd) port if the current port is in x4 mode */ - rio_read_config_32(rdev, - RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), - &regval); - if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) - portnum++; - } - - /* set TVAL = ~50us */ - rio_write_config_32(rdev, - rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8); - - return 0; -} - -static int -tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) -{ - struct rio_mport *mport = rdev->net->hport; - u32 intstat, err_status; - int sendcount, checkcount; - u8 route_port; - u32 regval; - - rio_read_config_32(rdev, - RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), - &err_status); - - if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) && - (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | - RIO_PORT_N_ERR_STS_INP_ES))) { - /* Remove any queued packets by locking/unlocking port */ - rio_read_config_32(rdev, - RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), - &regval); - if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { - rio_write_config_32(rdev, -
RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), - regval | RIO_PORT_N_CTL_LOCKOUT); - udelay(50); - rio_write_config_32(rdev, - RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), - regval); - } - - /* Read from link maintenance response register to clear - * valid bit - */ - rio_read_config_32(rdev, - RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum), - &regval); - - /* Send a Packet-Not-Accepted/Link-Request-Input-Status control - * symbol to recover from IES/OES - */ - sendcount = 3; - while (sendcount) { - rio_write_config_32(rdev, - TSI578_SP_CS_TX(portnum), 0x40fc8000); - checkcount = 3; - while (checkcount--) { - udelay(50); - rio_read_config_32(rdev, - RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, - portnum), - &regval); - if (regval & RIO_PORT_N_MNT_RSP_RVAL) - goto exit_es; - } - - sendcount--; - } - } - -exit_es: - /* Clear implementation specific error status bits */ - rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat); - pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n", - rdev->destid, rdev->hopcount, portnum, intstat); - - if (intstat & 0x10000) { - rio_read_config_32(rdev, - TSI578_SP_LUT_PEINF(portnum), &regval); - regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24); - route_port = rdev->rswitch->route_table[regval]; - pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n", - rio_name(rdev), portnum, regval); - tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount, - RIO_GLOBAL_TABLE, regval, route_port); - } - - rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum), - intstat & 0x000700bd); - - return 0; -} - -static struct rio_switch_ops tsi57x_switch_ops = { - .owner = THIS_MODULE, - .add_entry = tsi57x_route_add_entry, - .get_entry = tsi57x_route_get_entry, - .clr_table = tsi57x_route_clr_table, - .set_domain = tsi57x_set_domain, - .get_domain = tsi57x_get_domain, - .em_init = tsi57x_em_init, - .em_handle = tsi57x_em_handler, -}; - -static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id) -{ - pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); - - spin_lock(&rdev->rswitch->lock); - - if (rdev->rswitch->ops) { - spin_unlock(&rdev->rswitch->lock); - return -EINVAL; - } - rdev->rswitch->ops = &tsi57x_switch_ops; - - if (rdev->do_enum) { - /* Ensure that default routing is disabled on startup */ - rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, - RIO_INVALID_ROUTE); - } - - spin_unlock(&rdev->rswitch->lock); - return 0; -} - -static void tsi57x_remove(struct rio_dev *rdev) -{ - pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); - spin_lock(&rdev->rswitch->lock); - if (rdev->rswitch->ops != &tsi57x_switch_ops) { - spin_unlock(&rdev->rswitch->lock); - return; - } - rdev->rswitch->ops = NULL; - spin_unlock(&rdev->rswitch->lock); -} - -static const struct rio_device_id tsi57x_id_table[] = { - {RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)}, - {RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)}, - {RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)}, - {RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)}, - { 0, } /* terminate list */ -}; - -static struct rio_driver tsi57x_driver = { - .name = "tsi57x", - .id_table = tsi57x_id_table, - .probe = tsi57x_probe, - .remove = tsi57x_remove, -}; - -static int __init tsi57x_init(void) -{ - return rio_register_driver(&tsi57x_driver); -} - -static void __exit tsi57x_exit(void) -{ - rio_unregister_driver(&tsi57x_driver); -} - -device_initcall(tsi57x_init); -module_exit(tsi57x_exit); - -MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver"); -MODULE_AUTHOR("Integrated Device Technology, Inc."); -MODULE_LICENSE("GPL"); diff --git
a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index f2e961f998ca..3ddd426fc969 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -283,6 +283,17 @@ config QCOM_WCNSS_PIL verified and booted with the help of the Peripheral Authentication System (PAS) in TrustZone. +config RCAR_REMOTEPROC + tristate "Renesas R-Car Gen3 remoteproc support" + depends on ARCH_RENESAS || COMPILE_TEST + help + Say y here to support R-Car realtime processor via the + remote processor framework. An ELF firmware can be loaded + thanks to sysfs remoteproc entries. The remote processor + can be started and stopped. + This can be either built-in or a loadable module. + If compiled as module (M), the module name is rcar_rproc. + config ST_REMOTEPROC tristate "ST remoteproc support" depends on ARCH_STI diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 0ac256b6c977..5478c7cb9e07 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o qcom_wcnss_pil-y += qcom_wcnss.o qcom_wcnss_pil-y += qcom_wcnss_iris.o +obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index ff8170dbbc3c..7a096f1891e6 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -34,7 +34,8 @@ #define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \ | IMX7D_SW_M4C_RST) -#define IMX7D_M4_STOP IMX7D_SW_M4C_NON_SCLR_RST +#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \ + IMX7D_SW_M4C_NON_SCLR_RST) /* Address: 0x020D8000 */ #define IMX6SX_SRC_SCR 0x00 @@ -45,7 +46,8 @@ #define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \ | IMX6SX_SW_M4C_RST) -#define IMX6SX_M4_STOP IMX6SX_SW_M4C_NON_SCLR_RST +#define IMX6SX_M4_STOP (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4C_RST | \ + IMX6SX_SW_M4C_NON_SCLR_RST) #define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \ | IMX6SX_SW_M4C_NON_SCLR_RST \ | IMX6SX_SW_M4C_RST) @@ -684,7 +686,7 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv) return ret; } - if (!(val & dcfg->src_stop)) + if ((val & dcfg->src_mask) != dcfg->src_stop) priv->rproc->state = RPROC_DETACHED; return 0; @@ -804,6 +806,7 @@ static int imx_rproc_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk); rproc_del(rproc); imx_rproc_free_mbox(rproc); + destroy_workqueue(priv->workqueue); rproc_free(rproc); return 0; diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c index a356738160a4..9902cce28692 100644 --- a/drivers/remoteproc/ingenic_rproc.c +++ b/drivers/remoteproc/ingenic_rproc.c @@ -218,14 +218,13 @@ static int ingenic_rproc_probe(struct platform_device *pdev) if (vpu->irq < 0) return vpu->irq; - ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, 0, "VPU", rproc); + ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, IRQF_NO_AUTOEN, + "VPU", rproc); if (ret < 0) { dev_err(dev, "Failed to request IRQ\n"); return ret; } - disable_irq(vpu->irq); - ret = devm_rproc_add(dev, rproc); if (ret) { dev_err(dev, "Failed to register remote processor\n"); diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c index 6dc955ecab80..00f041ebcde6 100644 --- a/drivers/remoteproc/mtk_scp_ipi.c +++ b/drivers/remoteproc/mtk_scp_ipi.c @@ -23,7 
+23,7 @@ * * Register an ipi function to receive ipi interrupt from SCP. * - * Returns 0 if ipi registers successfully, -error on error. + * Return: 0 if ipi registers successfully, -error on error. */ int scp_ipi_register(struct mtk_scp *scp, u32 id, @@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(scp_ipi_unlock); * When the processing completes, IPI handler registered * by scp_ipi_register will be called in interrupt context. * - * Returns 0 if sending data successfully, -error on error. + * Return: 0 if sending data successfully, -error on error. **/ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len, unsigned int wait) diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c index 7c007dd7b200..aca21560e20b 100644 --- a/drivers/remoteproc/qcom_pil_info.c +++ b/drivers/remoteproc/qcom_pil_info.c @@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size) return -ENOMEM; found_unused: - memcpy_toio(entry, image, PIL_RELOC_NAME_LEN); + memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN)); found_existing: /* Use two writel() as base is only aligned to 4 bytes on odd entries */ writel(base, entry + PIL_RELOC_NAME_LEN); diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 03857dc9cdc1..184bb7cdf95a 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -524,6 +524,23 @@ static const struct adsp_data sdm845_adsp_resource_init = { .ssctl_id = 0x14, }; +static const struct adsp_data sm6350_adsp_resource = { + .crash_reason_smem = 423, + .firmware_name = "adsp.mdt", + .pas_id = 1, + .has_aggre2_clk = false, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "lcx", + "lmx", + NULL + }, + .load_state = "adsp", + .ssr_name = "lpass", + .sysmon_name = "adsp", + .ssctl_id = 0x14, +}; + static const struct adsp_data sm8150_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", @@ -612,6 +629,23 @@ static const struct adsp_data sdm845_cdsp_resource_init = { .ssctl_id = 0x17, }; +static const struct adsp_data sm6350_cdsp_resource = { + .crash_reason_smem = 601, + .firmware_name = "cdsp.mdt", + .pas_id = 18, + .has_aggre2_clk = false, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "cx", + "mx", + NULL + }, + .load_state = "cdsp", + .ssr_name = "cdsp", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, +}; + static const struct adsp_data sm8150_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", @@ -652,6 +686,7 @@ static const struct adsp_data sm8350_cdsp_resource = { .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", + "mxc", NULL }, .load_state = "cdsp", @@ -804,6 +839,9 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init}, { .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init}, { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource}, + { .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource}, + { .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource}, + { .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init}, { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource}, { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource}, { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init}, diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c new file mode 100644 index 000000000000..aa86154109c7 
--- /dev/null +++ b/drivers/remoteproc/rcar_rproc.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) IoT.bzh 2021 + */ + +#include <linux/limits.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/pm_runtime.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/soc/renesas/rcar-rst.h> + +#include "remoteproc_internal.h" + +struct rcar_rproc { + struct reset_control *rst; +}; + +static int rcar_rproc_mem_alloc(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + struct device *dev = &rproc->dev; + void *va; + + dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len); + va = ioremap_wc(mem->dma, mem->len); + if (!va) { + dev_err(dev, "Unable to map memory region: %pa+%zx\n", + &mem->dma, mem->len); + return -ENOMEM; + } + + /* Update memory entry va */ + mem->va = va; + + return 0; +} + +static int rcar_rproc_mem_release(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + dev_dbg(&rproc->dev, "unmap memory: %pa\n", &mem->dma); + iounmap(mem->va); + + return 0; +} + +static int rcar_rproc_prepare(struct rproc *rproc) +{ + struct device *dev = rproc->dev.parent; + struct device_node *np = dev->of_node; + struct of_phandle_iterator it; + struct rproc_mem_entry *mem; + struct reserved_mem *rmem; + u32 da; + + /* Register associated reserved memory regions */ + of_phandle_iterator_init(&it, np, "memory-region", NULL, 0); + while (of_phandle_iterator_next(&it) == 0) { + + rmem = of_reserved_mem_lookup(it.node); + if (!rmem) { + dev_err(&rproc->dev, + "unable to acquire memory-region\n"); + return -EINVAL; + } + + if (rmem->base > U32_MAX) + return -EINVAL; + + /* No need to translate pa to da, R-Car uses the same map */ + da = rmem->base; + mem = rproc_mem_entry_init(dev, NULL, + rmem->base, + rmem->size, da, + rcar_rproc_mem_alloc, + rcar_rproc_mem_release, + it.node->name); + + if (!mem) + return -ENOMEM; + + rproc_add_carveout(rproc, mem); + } + + return 0; +} + +static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + int ret; + + ret = rproc_elf_load_rsc_table(rproc, fw); + if (ret) + dev_info(&rproc->dev, "No resource table in elf\n"); + + return 0; +} + +static int rcar_rproc_start(struct rproc *rproc) +{ + struct rcar_rproc *priv = rproc->priv; + int err; + + if (!rproc->bootaddr) + return -EINVAL; + + err = rcar_rst_set_rproc_boot_addr(rproc->bootaddr); + if (err) { + dev_err(&rproc->dev, "failed to set rproc boot addr\n"); + return err; + } + + err = reset_control_deassert(priv->rst); + if (err) + dev_err(&rproc->dev, "failed to deassert reset\n"); + + return err; +} + +static int rcar_rproc_stop(struct rproc *rproc) +{ + struct rcar_rproc *priv = rproc->priv; + int err; + + err = reset_control_assert(priv->rst); + if (err) + dev_err(&rproc->dev, "failed to assert reset\n"); + + return err; +} + +static struct rproc_ops rcar_rproc_ops = { + .prepare = rcar_rproc_prepare, + .start = rcar_rproc_start, + .stop = rcar_rproc_stop, + .load = rproc_elf_load_segments, + .parse_fw = rcar_rproc_parse_fw, + .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .sanity_check = rproc_elf_sanity_check, + .get_boot_addr = rproc_elf_get_boot_addr, + +}; + +static int rcar_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct rcar_rproc *priv; + struct rproc *rproc; + int ret; + + rproc = devm_rproc_alloc(dev, np->name, &rcar_rproc_ops, + NULL, sizeof(*priv)); + if (!rproc)
return -ENOMEM; + + priv = rproc->priv; + + priv->rst = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(priv->rst)) { + ret = PTR_ERR(priv->rst); + dev_err_probe(dev, ret, "failed to acquire rproc reset\n"); + return ret; + } + + pm_runtime_enable(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) { + dev_err(dev, "failed to power up\n"); + return ret; + } + + dev_set_drvdata(dev, rproc); + + /* Manually start the rproc */ + rproc->auto_boot = false; + + ret = devm_rproc_add(dev, rproc); + if (ret) { + dev_err(dev, "rproc_add failed\n"); + goto pm_disable; + } + + return 0; + +pm_disable: + pm_runtime_disable(dev); + + return ret; +} + +static int rcar_rproc_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + pm_runtime_disable(dev); + + return 0; +} + +static const struct of_device_id rcar_rproc_of_match[] = { + { .compatible = "renesas,rcar-cr7" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, rcar_rproc_of_match); + +static struct platform_driver rcar_rproc_driver = { + .probe = rcar_rproc_probe, + .remove = rcar_rproc_remove, + .driver = { + .name = "rcar-rproc", + .of_match_table = rcar_rproc_of_match, + }, +}; + +module_platform_driver(rcar_rproc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Renesas R-Car Gen3 remote processor control driver"); +MODULE_AUTHOR("Julien Massot <julien.massot@iot.bzh>"); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 775df165eb45..69f51acf235e 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -577,8 +577,8 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr, dma_get_mask(rproc->dev.parent)); if (ret) { dev_warn(dev, - "Failed to set DMA mask %llx. Trying to continue... %x\n", - dma_get_mask(rproc->dev.parent), ret); + "Failed to set DMA mask %llx. Trying to continue... (%pe)\n", + dma_get_mask(rproc->dev.parent), ERR_PTR(ret)); } /* parse the vrings */ diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c index c892f433a323..4b093420d98a 100644 --- a/drivers/remoteproc/remoteproc_coredump.c +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -166,7 +166,7 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest, memset(dest, 0xff, size); } else { if (is_iomem) - memcpy_fromio(dest, ptr, size); + memcpy_fromio(dest, (void const __iomem *)ptr, size); else memcpy(dest, ptr, size); } diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c index 22096adc1ad3..4ed9467897e5 100644 --- a/drivers/remoteproc/st_slim_rproc.c +++ b/drivers/remoteproc/st_slim_rproc.c @@ -216,7 +216,7 @@ static const struct rproc_ops slim_rproc_ops = { * obtains and enables any clocks required by the SLIM core and also * ioremaps the various IO. * - * Returns st_slim_rproc pointer or PTR_ERR() on error. + * Return: st_slim_rproc pointer or PTR_ERR() on error.
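+ *
+ * A minimal usage sketch (illustrative only; the firmware-name argument
+ * and the error handling are assumptions, not shown in this hunk):
+ *
+ *	slim = st_slim_rproc_alloc(pdev, "slim_fw.elf");	// name is illustrative
+ *	if (IS_ERR(slim))
+ *		return PTR_ERR(slim);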
*/ struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c index b643efcf995a..7d782ed9e589 100644 --- a/drivers/remoteproc/stm32_rproc.c +++ b/drivers/remoteproc/stm32_rproc.c @@ -494,7 +494,7 @@ static int stm32_rproc_stop(struct rproc *rproc) int err, idx; /* request shutdown of the remote processor */ - if (rproc->state != RPROC_OFFLINE) { + if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) { idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN); if (idx >= 0 && ddata->mb[idx].chan) { err = mbox_send_message(ddata->mb[idx].chan, "detach"); diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index c352fa277c8d..939c5d90b562 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -767,6 +767,7 @@ static const struct k3_dsp_dev_data c71_data = { static const struct of_device_id k3_dsp_of_match[] = { { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, }, { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, }, + { .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, k3_dsp_of_match); diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 6499302d00c3..969531c05b13 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -1535,7 +1535,7 @@ static const struct k3_r5_soc_data am65_j721e_soc_data = { .single_cpu_mode = false, }; -static const struct k3_r5_soc_data j7200_soc_data = { +static const struct k3_r5_soc_data j7200_j721s2_soc_data = { .tcm_is_double = true, .tcm_ecc_autoinit = true, .single_cpu_mode = false, @@ -1550,8 +1550,9 @@ static const struct k3_r5_soc_data am64_soc_data = { static const struct of_device_id k3_r5_of_match[] = { { .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, }, { .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, }, - { .compatible = "ti,j7200-r5fss", .data = &j7200_soc_data, }, + { .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, }, { .compatible = "ti,am64-r5fss", .data = &am64_soc_data, }, + { .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, k3_r5_of_match); diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 3f377a795b33..1030cfa80e04 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -427,7 +427,7 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink, * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote. * Will return with refcount held, regardless of outcome. * - * Returns 0 on success, negative errno otherwise. + * Return: 0 on success, negative errno otherwise. */ static int qcom_glink_send_open_req(struct qcom_glink *glink, struct glink_channel *channel) diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 8da1b5cb31b3..540e027f08c4 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1467,7 +1467,7 @@ ATTRIBUTE_GROUPS(qcom_smd_edge); * @parent: parent device for the edge * @node: device_node describing the edge * - * Returns an edge reference, or negative ERR_PTR() on failure. + * Return: an edge reference, or negative ERR_PTR() on failure. 
*/ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, struct device_node *node) diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index b5907b80727c..d6214cb66026 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -9,6 +9,9 @@ * Based on rpmsg performance statistics driver by Michal Simek, which in turn * was based on TI & Google OMX rpmsg driver. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/cdev.h> #include <linux/device.h> #include <linux/fs.h> @@ -550,7 +553,7 @@ static int rpmsg_chrdev_init(void) ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg"); if (ret < 0) { - pr_err("rpmsg: failed to allocate char dev region\n"); + pr_err("failed to allocate char dev region\n"); return ret; } @@ -563,7 +566,7 @@ static int rpmsg_chrdev_init(void) ret = register_rpmsg_driver(&rpmsg_chrdev_driver); if (ret < 0) { - pr_err("rpmsgchr: failed to register rpmsg driver\n"); + pr_err("failed to register rpmsg driver\n"); class_destroy(rpmsg_class); unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); } diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index d3eb60059ef1..d9e612f4f0f2 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -26,7 +26,7 @@ * @rpdev: rpmsg device * @chinfo: channel_info to bind * - * Returns a pointer to the new rpmsg device on success, or NULL on error. + * Return: a pointer to the new rpmsg device on success, or NULL on error. */ struct rpmsg_device *rpmsg_create_channel(struct rpmsg_device *rpdev, struct rpmsg_channel_info *chinfo) @@ -48,7 +48,7 @@ EXPORT_SYMBOL(rpmsg_create_channel); * @rpdev: rpmsg device * @chinfo: channel_info to bind * - * Returns 0 on success or an appropriate error value. + * Return: 0 on success or an appropriate error value. */ int rpmsg_release_channel(struct rpmsg_device *rpdev, struct rpmsg_channel_info *chinfo) @@ -102,7 +102,7 @@ EXPORT_SYMBOL(rpmsg_release_channel); * dynamically assign them an available rpmsg address (drivers should have * a very good reason why not to always use RPMSG_ADDR_ANY here). * - * Returns a pointer to the endpoint on success, or NULL on error. + * Return: a pointer to the endpoint on success, or NULL on error. */ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv, @@ -146,7 +146,7 @@ EXPORT_SYMBOL(rpmsg_destroy_ept); * * Can only be called from process context (for now). * - * Returns 0 on success and an appropriate error value on failure. + * Return: 0 on success and an appropriate error value on failure. */ int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) { @@ -175,7 +175,7 @@ EXPORT_SYMBOL(rpmsg_send); * * Can only be called from process context (for now). * - * Returns 0 on success and an appropriate error value on failure. + * Return: 0 on success and an appropriate error value on failure. */ int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) { @@ -206,7 +206,7 @@ EXPORT_SYMBOL(rpmsg_sendto); * * Can only be called from process context (for now). * - * Returns 0 on success and an appropriate error value on failure. + * Return: 0 on success and an appropriate error value on failure. */ int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len) @@ -235,7 +235,7 @@ EXPORT_SYMBOL(rpmsg_send_offchannel); * * Can only be called from process context (for now). * - * Returns 0 on success and an appropriate error value on failure. 
+ * Return: 0 on success and an appropriate error value on failure. */ int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) { @@ -263,7 +263,7 @@ EXPORT_SYMBOL(rpmsg_trysend); * * Can only be called from process context (for now). * - * Returns 0 on success and an appropriate error value on failure. + * Return: 0 on success and an appropriate error value on failure. */ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) { @@ -282,7 +282,7 @@ EXPORT_SYMBOL(rpmsg_trysendto); * @filp: file for poll_wait() * @wait: poll_table for poll_wait() * - * Returns mask representing the current state of the endpoint's send buffers + * Return: mask representing the current state of the endpoint's send buffers */ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait) @@ -313,7 +313,7 @@ EXPORT_SYMBOL(rpmsg_poll); * * Can only be called from process context (for now). * - * Returns 0 on success and an appropriate error value on failure. + * Return: 0 on success and an appropriate error value on failure. */ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len) @@ -540,13 +540,25 @@ static int rpmsg_dev_probe(struct device *dev) err = rpdrv->probe(rpdev); if (err) { dev_err(dev, "%s: failed: %d\n", __func__, err); - if (ept) - rpmsg_destroy_ept(ept); - goto out; + goto destroy_ept; } - if (ept && rpdev->ops->announce_create) + if (ept && rpdev->ops->announce_create) { err = rpdev->ops->announce_create(rpdev); + if (err) { + dev_err(dev, "failed to announce creation\n"); + goto remove_rpdev; + } + } + + return 0; + +remove_rpdev: + if (rpdrv->remove) + rpdrv->remove(rpdev); +destroy_ept: + if (ept) + rpmsg_destroy_ept(ept); out: return err; } @@ -623,7 +635,7 @@ EXPORT_SYMBOL(rpmsg_unregister_device); * @rpdrv: pointer to a struct rpmsg_driver * @owner: owning module/driver * - * Returns 0 on success, and an appropriate error value on failure. + * Return: 0 on success, and an appropriate error value on failure. */ int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner) { @@ -637,7 +649,7 @@ EXPORT_SYMBOL(__register_rpmsg_driver); * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus * @rpdrv: pointer to a struct rpmsg_driver * - * Returns 0 on success, and an appropriate error value on failure. + * Return: 0 on success, and an appropriate error value on failure. */ void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv) { diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 9c112aa65040..ac764e04c898 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -547,7 +547,7 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp) * should use the appropriate rpmsg_{try}send{to, _offchannel} API * (see include/linux/rpmsg.h). * - * Returns 0 on success and an appropriate error value on failure. + * Return: 0 on success and an appropriate error value on failure. 
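+ * Return: 0 on success and an appropriate error value on failure.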
*/ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev, u32 src, u32 dst, @@ -1024,7 +1024,7 @@ static void rpmsg_remove(struct virtio_device *vdev) size_t total_buf_space = vrp->num_bufs * vrp->buf_size; int ret; - vdev->config->reset(vdev); + virtio_reset_device(vdev); ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device); if (ret) diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 058e56a10ab8..d85a3c31347c 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1216,6 +1216,17 @@ config RTC_DRV_V3020 This driver can also be built as a module. If so, the module will be called rtc-v3020. +config RTC_DRV_GAMECUBE + tristate "Nintendo GameCube, Wii and Wii U RTC" + depends on GAMECUBE || WII || COMPILE_TEST + select REGMAP + help + If you say yes here you will get support for the RTC subsystem + of the Nintendo GameCube, Wii and Wii U. + + This driver can also be built as a module. If so, the module + will be called "rtc-gamecube". + config RTC_DRV_WM831X tristate "Wolfson Microelectronics WM831x RTC" depends on MFD_WM831X @@ -1444,6 +1455,19 @@ config RTC_DRV_SH To compile this driver as a module, choose M here: the module will be called rtc-sh. +config RTC_DRV_SUNPLUS + tristate "Sunplus SP7021 RTC" + depends on SOC_SP7021 + help + Say 'yes' to get support for the real-time clock present in + Sunplus SP7021 - a SoC for industrial applications. It provides + RTC status check, timer/alarm functionalities, user data + reservation with the battery over 2.5V, RTC power status check + and battery charge. + + This driver can also be built as a module. If so, the module + will be called rtc-sunplus. + config RTC_DRV_VR41XX tristate "NEC VR41XX" depends on CPU_VR41XX || COMPILE_TEST diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 678a8ef4abae..e92f3e943245 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -111,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o +obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o @@ -165,6 +166,7 @@ obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o +obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c index e104972a28fd..69325aeede1a 100644 --- a/drivers/rtc/dev.c +++ b/drivers/rtc/dev.c @@ -391,14 +391,14 @@ static long rtc_dev_ioctl(struct file *file, } switch(param.param) { - long offset; case RTC_PARAM_FEATURES: if (param.index != 0) err = -EINVAL; param.uvalue = rtc->features[0]; break; - case RTC_PARAM_CORRECTION: + case RTC_PARAM_CORRECTION: { + long offset; mutex_unlock(&rtc->ops_lock); if (param.index != 0) return -EINVAL; @@ -407,7 +407,7 @@ static long rtc_dev_ioctl(struct file *file, if (err == 0) param.svalue = offset; break; - + } default: if (rtc->ops->param_get) err = rtc->ops->param_get(rtc->dev.parent, ¶m); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 4eb53412b808..7c006c2b125f 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, 
unsigned char addr) static int cmos_read_time(struct device *dev, struct rtc_time *t) { + int ret; + /* * If pm_trace abused the RTC for storage, set the timespec to 0, * which tells the caller that this RTC value is unusable. @@ -229,7 +231,12 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t) if (!pm_trace_rtc_valid()) return -EIO; - mc146818_get_time(t); + ret = mc146818_get_time(t); + if (ret < 0) { + dev_err_ratelimited(dev, "unable to read current time\n"); + return ret; + } + return 0; } @@ -242,10 +249,46 @@ static int cmos_set_time(struct device *dev, struct rtc_time *t) return mc146818_set_time(t); } +struct cmos_read_alarm_callback_param { + struct cmos_rtc *cmos; + struct rtc_time *time; + unsigned char rtc_control; +}; + +static void cmos_read_alarm_callback(unsigned char __always_unused seconds, + void *param_in) +{ + struct cmos_read_alarm_callback_param *p = + (struct cmos_read_alarm_callback_param *)param_in; + struct rtc_time *time = p->time; + + time->tm_sec = CMOS_READ(RTC_SECONDS_ALARM); + time->tm_min = CMOS_READ(RTC_MINUTES_ALARM); + time->tm_hour = CMOS_READ(RTC_HOURS_ALARM); + + if (p->cmos->day_alrm) { + /* ignore upper bits on readback per ACPI spec */ + time->tm_mday = CMOS_READ(p->cmos->day_alrm) & 0x3f; + if (!time->tm_mday) + time->tm_mday = -1; + + if (p->cmos->mon_alrm) { + time->tm_mon = CMOS_READ(p->cmos->mon_alrm); + if (!time->tm_mon) + time->tm_mon = -1; + } + } + + p->rtc_control = CMOS_READ(RTC_CONTROL); +} + static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control; + struct cmos_read_alarm_callback_param p = { + .cmos = cmos, + .time = &t->time, + }; /* This is not only an rtc_op, but is also called directly */ if (!is_valid_irq(cmos->irq)) @@ -256,28 +299,18 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) * the future. */ - spin_lock_irq(&rtc_lock); - t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM); - t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM); - t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM); - - if (cmos->day_alrm) { - /* ignore upper bits on readback per ACPI spec */ - t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f; - if (!t->time.tm_mday) - t->time.tm_mday = -1; - - if (cmos->mon_alrm) { - t->time.tm_mon = CMOS_READ(cmos->mon_alrm); - if (!t->time.tm_mon) - t->time.tm_mon = -1; - } - } - - rtc_control = CMOS_READ(RTC_CONTROL); - spin_unlock_irq(&rtc_lock); + /* Some Intel chipsets disconnect the alarm registers when the clock + * update is in progress - during this time reads return bogus values + * and writes may fail silently. See for example "7th Generation Intel® + * Processor Family I/O for U/Y Platforms [...] Datasheet", section + * 27.7.1 + * + * Use the mc146818_avoid_UIP() function to avoid this.
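+	 *
+	 * The callback contract, per mc146818_avoid_UIP() in
+	 * rtc-mc146818-lib.c: the callback runs under rtc_lock with UIP
+	 * clear and may be invoked more than once, so it should be
+	 * idempotent and limited to RTC register accesses. A minimal
+	 * sketch (my_cb and my_param are illustrative names, not part of
+	 * this patch):
+	 *
+	 *	static void my_cb(unsigned char seconds, void *param)
+	 *	{
+	 *		// rtc_lock held, no sleeping; may execute twice
+	 *		((struct my_param *)param)->ctrl = CMOS_READ(RTC_CONTROL);
+	 *	}
+	 *
+	 *	if (!mc146818_avoid_UIP(my_cb, &params))
+	 *		return -EIO;	// RTC never left its update cycle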
+ */ + if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p)) + return -EIO; - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { if (((unsigned)t->time.tm_sec) < 0x60) t->time.tm_sec = bcd2bin(t->time.tm_sec); else @@ -306,7 +339,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) } } - t->enabled = !!(rtc_control & RTC_AIE); + t->enabled = !!(p.rtc_control & RTC_AIE); t->pending = 0; return 0; @@ -437,10 +470,57 @@ static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +struct cmos_set_alarm_callback_param { + struct cmos_rtc *cmos; + unsigned char mon, mday, hrs, min, sec; + struct rtc_wkalrm *t; +}; + +/* Note: this function may be executed by mc146818_avoid_UIP() more then + * once + */ +static void cmos_set_alarm_callback(unsigned char __always_unused seconds, + void *param_in) +{ + struct cmos_set_alarm_callback_param *p = + (struct cmos_set_alarm_callback_param *)param_in; + + /* next rtc irq must not be from previous alarm setting */ + cmos_irq_disable(p->cmos, RTC_AIE); + + /* update alarm */ + CMOS_WRITE(p->hrs, RTC_HOURS_ALARM); + CMOS_WRITE(p->min, RTC_MINUTES_ALARM); + CMOS_WRITE(p->sec, RTC_SECONDS_ALARM); + + /* the system may support an "enhanced" alarm */ + if (p->cmos->day_alrm) { + CMOS_WRITE(p->mday, p->cmos->day_alrm); + if (p->cmos->mon_alrm) + CMOS_WRITE(p->mon, p->cmos->mon_alrm); + } + + if (use_hpet_alarm()) { + /* + * FIXME the HPET alarm glue currently ignores day_alrm + * and mon_alrm ... + */ + hpet_set_alarm_time(p->t->time.tm_hour, p->t->time.tm_min, + p->t->time.tm_sec); + } + + if (p->t->enabled) + cmos_irq_enable(p->cmos, RTC_AIE); +} + static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char mon, mday, hrs, min, sec, rtc_control; + struct cmos_set_alarm_callback_param p = { + .cmos = cmos, + .t = t + }; + unsigned char rtc_control; int ret; /* This not only a rtc_op, but also called directly */ @@ -451,52 +531,33 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) if (ret < 0) return ret; - mon = t->time.tm_mon + 1; - mday = t->time.tm_mday; - hrs = t->time.tm_hour; - min = t->time.tm_min; - sec = t->time.tm_sec; + p.mon = t->time.tm_mon + 1; + p.mday = t->time.tm_mday; + p.hrs = t->time.tm_hour; + p.min = t->time.tm_min; + p.sec = t->time.tm_sec; + spin_lock_irq(&rtc_lock); rtc_control = CMOS_READ(RTC_CONTROL); + spin_unlock_irq(&rtc_lock); + if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { /* Writing 0xff means "don't care" or "match all". */ - mon = (mon <= 12) ? bin2bcd(mon) : 0xff; - mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff; - hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff; - min = (min < 60) ? bin2bcd(min) : 0xff; - sec = (sec < 60) ? bin2bcd(sec) : 0xff; - } - - spin_lock_irq(&rtc_lock); - - /* next rtc irq must not be from previous alarm setting */ - cmos_irq_disable(cmos, RTC_AIE); - - /* update alarm */ - CMOS_WRITE(hrs, RTC_HOURS_ALARM); - CMOS_WRITE(min, RTC_MINUTES_ALARM); - CMOS_WRITE(sec, RTC_SECONDS_ALARM); - - /* the system may support an "enhanced" alarm */ - if (cmos->day_alrm) { - CMOS_WRITE(mday, cmos->day_alrm); - if (cmos->mon_alrm) - CMOS_WRITE(mon, cmos->mon_alrm); + p.mon = (p.mon <= 12) ? bin2bcd(p.mon) : 0xff; + p.mday = (p.mday >= 1 && p.mday <= 31) ? bin2bcd(p.mday) : 0xff; + p.hrs = (p.hrs < 24) ? bin2bcd(p.hrs) : 0xff; + p.min = (p.min < 60) ? bin2bcd(p.min) : 0xff; + p.sec = (p.sec < 60) ? 
bin2bcd(p.sec) : 0xff; } - if (use_hpet_alarm()) { - /* - * FIXME the HPET alarm glue currently ignores day_alrm - * and mon_alrm ... - */ - hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, - t->time.tm_sec); - } - - if (t->enabled) - cmos_irq_enable(cmos, RTC_AIE); - - spin_unlock_irq(&rtc_lock); + /* + * Some Intel chipsets disconnect the alarm registers when the clock + * update is in progress - during this time writes fail silently. + * + * Use mc146818_avoid_UIP() to avoid this. + */ + if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p)) + return -EIO; cmos->alarm_expires = rtc_tm_to_time64(&t->time); @@ -790,16 +851,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); - spin_lock_irq(&rtc_lock); - - /* Ensure that the RTC is accessible. Bit 6 must be 0! */ - if ((CMOS_READ(RTC_VALID) & 0x40) != 0) { - spin_unlock_irq(&rtc_lock); - dev_warn(dev, "not accessible\n"); + if (!mc146818_does_rtc_work()) { + dev_warn(dev, "broken or not accessible\n"); retval = -ENXIO; goto cleanup1; } + spin_lock_irq(&rtc_lock); + if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) { /* force periodic irq to CMOS reset default of 1024Hz; * diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index d4b72a9fa2ba..ee2efb496174 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c @@ -475,12 +475,14 @@ static int da9063_rtc_probe(struct platform_device *pdev) da9063_data_to_tm(data, &rtc->alarm_time, rtc); rtc->rtc_sync = false; - /* - * TODO: some models have alarms on a minute boundary but still support - * real hardware interrupts. Add this once the core supports it. - */ - if (config->rtc_data_start != RTC_SEC) - rtc->rtc_dev->uie_unsupported = 1; + if (config->rtc_data_start != RTC_SEC) { + set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc_dev->features); + /* + * TODO: some models have alarms on a minute boundary but still + * support real hardware interrupts. + */ + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc_dev->features); + } irq_alarm = platform_get_irq_byname(pdev, "ALARM"); if (irq_alarm < 0) @@ -494,6 +496,8 @@ static int da9063_rtc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", irq_alarm, ret); + device_init_wakeup(&pdev->dev, true); + return devm_rtc_register_device(rtc->rtc_dev); } diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c index ad3add5db4c8..53bb08fe1cd4 100644 --- a/drivers/rtc/rtc-ftrtc010.c +++ b/drivers/rtc/rtc-ftrtc010.c @@ -141,11 +141,9 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev) } } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) - return -ENODEV; - - rtc->rtc_irq = res->start; + rtc->rtc_irq = platform_get_irq(pdev, 0); + if (rtc->rtc_irq < 0) + return rtc->rtc_irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c new file mode 100644 index 000000000000..f717b36f4738 --- /dev/null +++ b/drivers/rtc/rtc-gamecube.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Nintendo GameCube, Wii and Wii U RTC driver + * + * This driver is for the MX23L4005, more specifically its real-time clock and + * SRAM storage. The value returned by the RTC counter must be added to the + * offset stored in a bias register in SRAM (on the GameCube and Wii) or in + * /config/rtc.xml (on the Wii U).
The latter being very impractical to access + * from Linux, this driver assumes the bootloader has read it and stored it in + * SRAM like for the other two consoles. + * + * This device sits on a bus named EXI (which is similar to SPI), channel 0, + * device 1. This driver assumes no other user of the EXI bus, which is + * currently the case but would have to be reworked to add support for other + * GameCube hardware exposed on this bus. + * + * References: + * - https://wiiubrew.org/wiki/Hardware/RTC + * - https://wiibrew.org/wiki/MX23L4005 + * + * Copyright (C) 2018 rw-r-r-0644 + * Copyright (C) 2021 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr> + * + * Based on rtc-gcn.c + * Copyright (C) 2004-2009 The GameCube Linux Team + * Copyright (C) 2005,2008,2009 Albert Herranz + * Based on gamecube_time.c from Torben Nielsen. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/rtc.h> +#include <linux/time.h> + +/* EXI registers */ +#define EXICSR 0 +#define EXICR 12 +#define EXIDATA 16 + +/* EXI register values */ +#define EXICSR_DEV 0x380 + #define EXICSR_DEV1 0x100 +#define EXICSR_CLK 0x070 + #define EXICSR_CLK_1MHZ 0x000 + #define EXICSR_CLK_2MHZ 0x010 + #define EXICSR_CLK_4MHZ 0x020 + #define EXICSR_CLK_8MHZ 0x030 + #define EXICSR_CLK_16MHZ 0x040 + #define EXICSR_CLK_32MHZ 0x050 +#define EXICSR_INT 0x008 + #define EXICSR_INTSET 0x008 + +#define EXICR_TSTART 0x001 +#define EXICR_TRSMODE 0x002 + #define EXICR_TRSMODE_IMM 0x000 +#define EXICR_TRSTYPE 0x00C + #define EXICR_TRSTYPE_R 0x000 + #define EXICR_TRSTYPE_W 0x004 +#define EXICR_TLEN 0x030 + #define EXICR_TLEN32 0x030 + +/* EXI registers values to access the RTC */ +#define RTC_EXICSR (EXICSR_DEV1 | EXICSR_CLK_8MHZ | EXICSR_INTSET) +#define RTC_EXICR_W (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_W | EXICR_TLEN32) +#define RTC_EXICR_R (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_R | EXICR_TLEN32) +#define RTC_EXIDATA_W 0x80000000 + +/* RTC registers */ +#define RTC_COUNTER 0x200000 +#define RTC_SRAM 0x200001 +#define RTC_SRAM_BIAS 0x200004 +#define RTC_SNAPSHOT 0x204000 +#define RTC_ONTMR 0x210000 +#define RTC_OFFTMR 0x210001 +#define RTC_TEST0 0x210004 +#define RTC_TEST1 0x210005 +#define RTC_TEST2 0x210006 +#define RTC_TEST3 0x210007 +#define RTC_CONTROL0 0x21000c +#define RTC_CONTROL1 0x21000d + +/* RTC flags */ +#define RTC_CONTROL0_UNSTABLE_POWER 0x00000800 +#define RTC_CONTROL0_LOW_BATTERY 0x00000200 + +struct priv { + struct regmap *regmap; + void __iomem *iob; + u32 rtc_bias; +}; + +static int exi_read(void *context, u32 reg, u32 *data) +{ + struct priv *d = (struct priv *)context; + void __iomem *iob = d->iob; + + /* The spin loops here loop about 15~16 times each, so there is no need + * to use a more expensive sleep method. 
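+	 *
+	 * For reference, each immediate-mode transaction below follows the
+	 * same sequence (a summary of the code underneath, with "word"
+	 * standing in for the staged value):
+	 *
+	 *	iowrite32be(RTC_EXICSR, iob + EXICSR);	// select device 1, 8 MHz
+	 *	iowrite32be(word, iob + EXIDATA);	// stage the 32-bit word
+	 *	iowrite32be(RTC_EXICR_W, iob + EXICR);	// kick the transfer
+	 *	while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+	 *		cpu_relax();			// spin until complete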
+ */ + + /* Write register offset */ + iowrite32be(RTC_EXICSR, iob + EXICSR); + iowrite32be(reg << 8, iob + EXIDATA); + iowrite32be(RTC_EXICR_W, iob + EXICR); + while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) + cpu_relax(); + + /* Read data */ + iowrite32be(RTC_EXICSR, iob + EXICSR); + iowrite32be(RTC_EXICR_R, iob + EXICR); + while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) + cpu_relax(); + *data = ioread32be(iob + EXIDATA); + + /* Clear channel parameters */ + iowrite32be(0, iob + EXICSR); + + return 0; +} + +static int exi_write(void *context, u32 reg, u32 data) +{ + struct priv *d = (struct priv *)context; + void __iomem *iob = d->iob; + + /* The spin loops here loop about 15~16 times each, so there is no need + * to use a more expensive sleep method. + */ + + /* Write register offset */ + iowrite32be(RTC_EXICSR, iob + EXICSR); + iowrite32be(RTC_EXIDATA_W | (reg << 8), iob + EXIDATA); + iowrite32be(RTC_EXICR_W, iob + EXICR); + while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) + cpu_relax(); + + /* Write data */ + iowrite32be(RTC_EXICSR, iob + EXICSR); + iowrite32be(data, iob + EXIDATA); + iowrite32be(RTC_EXICR_W, iob + EXICR); + while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) + cpu_relax(); + + /* Clear channel parameters */ + iowrite32be(0, iob + EXICSR); + + return 0; +} + +static const struct regmap_bus exi_bus = { + /* TODO: is that true? Not that it matters here, but still. */ + .fast_io = true, + .reg_read = exi_read, + .reg_write = exi_write, +}; + +static int gamecube_rtc_read_time(struct device *dev, struct rtc_time *t) +{ + struct priv *d = dev_get_drvdata(dev); + int ret; + u32 counter; + time64_t timestamp; + + ret = regmap_read(d->regmap, RTC_COUNTER, &counter); + if (ret) + return ret; + + /* Add the counter and the bias to obtain the timestamp */ + timestamp = (time64_t)d->rtc_bias + counter; + rtc_time64_to_tm(timestamp, t); + + return 0; +} + +static int gamecube_rtc_set_time(struct device *dev, struct rtc_time *t) +{ + struct priv *d = dev_get_drvdata(dev); + time64_t timestamp; + + /* Subtract the timestamp and the bias to obtain the counter value */ + timestamp = rtc_tm_to_time64(t); + return regmap_write(d->regmap, RTC_COUNTER, timestamp - d->rtc_bias); +} + +static int gamecube_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + struct priv *d = dev_get_drvdata(dev); + int value; + int control0; + int ret; + + switch (cmd) { + case RTC_VL_READ: + ret = regmap_read(d->regmap, RTC_CONTROL0, &control0); + if (ret) + return ret; + + value = 0; + if (control0 & RTC_CONTROL0_UNSTABLE_POWER) + value |= RTC_VL_DATA_INVALID; + if (control0 & RTC_CONTROL0_LOW_BATTERY) + value |= RTC_VL_BACKUP_LOW; + return put_user(value, (unsigned int __user *)arg); + + default: + return -ENOIOCTLCMD; + } +} + +static const struct rtc_class_ops gamecube_rtc_ops = { + .read_time = gamecube_rtc_read_time, + .set_time = gamecube_rtc_set_time, + .ioctl = gamecube_rtc_ioctl, +}; + +static int gamecube_rtc_read_offset_from_sram(struct priv *d) +{ + struct device_node *np; + int ret; + struct resource res; + void __iomem *hw_srnprot; + u32 old; + + np = of_find_compatible_node(NULL, NULL, "nintendo,latte-srnprot"); + if (!np) + np = of_find_compatible_node(NULL, NULL, + "nintendo,hollywood-srnprot"); + if (!np) { + pr_info("HW_SRNPROT not found, assuming a GameCube\n"); + return regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias); + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + pr_err("no io memory range found\n"); + return -1; + } + + hw_srnprot 
= ioremap(res.start, resource_size(&res)); + old = ioread32be(hw_srnprot); + + /* TODO: figure out why we use this magic constant. I obtained it by + * reading the leftover value after boot, after IOSU already ran. + * + * On my Wii U, setting this register to 1 prevents the console from + * rebooting properly, so wiiubrew.org must be missing something. + * + * See https://wiiubrew.org/wiki/Hardware/Latte_registers + */ + if (old != 0x7bf) + iowrite32be(0x7bf, hw_srnprot); + + /* Get the offset from RTC SRAM. + * + * Its default location on the GameCube and on the Wii is in the SRAM, + * while on the Wii U the bootloader needs to fill it with the contents + * of /config/rtc.xml on the SLC (the eMMC). We don’t do that from + * Linux since it requires implementing a proprietary filesystem and + * doing file decryption, instead we require the bootloader to fill the + * same SRAM address as on previous consoles. + */ + ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias); + if (ret) { + pr_err("failed to get the RTC bias\n"); + return -1; + } + + /* Reset SRAM access to how it was before, our job here is done. */ + if (old != 0x7bf) + iowrite32be(old, hw_srnprot); + iounmap(hw_srnprot); + + return 0; +} + +static const struct regmap_range rtc_rd_ranges[] = { + regmap_reg_range(0x200000, 0x200010), + regmap_reg_range(0x204000, 0x204000), + regmap_reg_range(0x210000, 0x210001), + regmap_reg_range(0x210004, 0x210007), + regmap_reg_range(0x21000c, 0x21000d), +}; + +static const struct regmap_access_table rtc_rd_regs = { + .yes_ranges = rtc_rd_ranges, + .n_yes_ranges = ARRAY_SIZE(rtc_rd_ranges), +}; + +static const struct regmap_range rtc_wr_ranges[] = { + regmap_reg_range(0x200000, 0x200010), + regmap_reg_range(0x204000, 0x204000), + regmap_reg_range(0x210000, 0x210001), + regmap_reg_range(0x21000d, 0x21000d), +}; + +static const struct regmap_access_table rtc_wr_regs = { + .yes_ranges = rtc_wr_ranges, + .n_yes_ranges = ARRAY_SIZE(rtc_wr_ranges), +}; + +static const struct regmap_config gamecube_rtc_regmap_config = { + .reg_bits = 24, + .val_bits = 32, + .rd_table = &rtc_rd_regs, + .wr_table = &rtc_wr_regs, + .max_register = 0x21000d, + .name = "gamecube-rtc", +}; + +static int gamecube_rtc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rtc_device *rtc; + struct priv *d; + int ret; + + d = devm_kzalloc(dev, sizeof(struct priv), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->iob = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(d->iob)) + return PTR_ERR(d->iob); + + d->regmap = devm_regmap_init(dev, &exi_bus, d, + &gamecube_rtc_regmap_config); + if (IS_ERR(d->regmap)) + return PTR_ERR(d->regmap); + + ret = gamecube_rtc_read_offset_from_sram(d); + if (ret) + return ret; + dev_dbg(dev, "SRAM bias: 0x%x", d->rtc_bias); + + dev_set_drvdata(dev, d); + + rtc = devm_rtc_allocate_device(dev); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + /* We can represent further than that, but it depends on the stored + * bias and we can’t modify it persistently on all supported consoles, + * so here we pretend to be limited to 2106.
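+	 *
+	 * The arithmetic behind that limit: rtc_time64_to_tm() counts
+	 * seconds from the 1970 Unix epoch, so with range_max = U32_MAX,
+	 *
+	 *	2^32 s = 4294967296 s ~= 136 years,
+	 *
+	 * which runs out early in the year 2106.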
+ */ + rtc->range_min = 0; + rtc->range_max = U32_MAX; + rtc->ops = &gamecube_rtc_ops; + + devm_rtc_register_device(rtc); + + return 0; +} + +static const struct of_device_id gamecube_rtc_of_match[] = { + {.compatible = "nintendo,latte-exi" }, + {.compatible = "nintendo,hollywood-exi" }, + {.compatible = "nintendo,flipper-exi" }, + { } +}; +MODULE_DEVICE_TABLE(of, gamecube_rtc_of_match); + +static struct platform_driver gamecube_rtc_driver = { + .probe = gamecube_rtc_probe, + .driver = { + .name = "rtc-gamecube", + .of_match_table = gamecube_rtc_of_match, + }, +}; +module_platform_driver(gamecube_rtc_driver); + +MODULE_AUTHOR("Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>"); +MODULE_DESCRIPTION("Nintendo GameCube, Wii and Wii U RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index dcfaf09946ee..ae9f131b43c0 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -8,48 +8,100 @@ #include <linux/acpi.h> #endif -unsigned int mc146818_get_time(struct rtc_time *time) +/* + * Execute a function while the UIP (Update-in-progress) bit of the RTC is + * unset. + * + * Warning: callback may be executed more than once. + */ +bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param), + void *param) { - unsigned char ctrl; + int i; unsigned long flags; - unsigned char century = 0; - bool retry; + unsigned char seconds; -#ifdef CONFIG_MACH_DECSTATION - unsigned int real_year; -#endif + for (i = 0; i < 10; i++) { + spin_lock_irqsave(&rtc_lock, flags); -again: - spin_lock_irqsave(&rtc_lock, flags); - /* Ensure that the RTC is accessible. Bit 6 must be 0! */ - if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x40) != 0)) { - spin_unlock_irqrestore(&rtc_lock, flags); - memset(time, 0xff, sizeof(*time)); - return 0; - } + /* + * Check whether there is an update in progress during which the + * readout is unspecified. The maximum update time is ~2ms. Poll + * every msec for completion. + * + * Store the second value before checking UIP so a long lasting + * NMI which happens to hit after the UIP check cannot make + * an update cycle invisible. + */ + seconds = CMOS_READ(RTC_SECONDS); - /* - * Check whether there is an update in progress during which the - * readout is unspecified. The maximum update time is ~2ms. Poll - * every msec for completion. - * - * Store the second value before checking UIP so a long lasting NMI - * which happens to hit after the UIP check cannot make an update - * cycle invisible. - */ - time->tm_sec = CMOS_READ(RTC_SECONDS); + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) { + spin_unlock_irqrestore(&rtc_lock, flags); + mdelay(1); + continue; + } - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) { - spin_unlock_irqrestore(&rtc_lock, flags); - mdelay(1); - goto again; - } + /* Revalidate the above readout */ + if (seconds != CMOS_READ(RTC_SECONDS)) { + spin_unlock_irqrestore(&rtc_lock, flags); + continue; + } - /* Revalidate the above readout */ - if (time->tm_sec != CMOS_READ(RTC_SECONDS)) { + if (callback) + callback(seconds, param); + + /* + * Check for the UIP bit again. If it is set now then + * the above values may contain garbage. + */ + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) { + spin_unlock_irqrestore(&rtc_lock, flags); + mdelay(1); + continue; + } + + /* + * A NMI might have interrupted the above sequence so check + * whether the seconds value has changed which indicates that + * the NMI took longer than the UIP bit was set. Unlikely, but + * possible and there is also virt...
+ */ + if (seconds != CMOS_READ(RTC_SECONDS)) { + spin_unlock_irqrestore(&rtc_lock, flags); + continue; + } spin_unlock_irqrestore(&rtc_lock, flags); - goto again; + + return true; } + return false; +} +EXPORT_SYMBOL_GPL(mc146818_avoid_UIP); + +/* + * If the UIP (Update-in-progress) bit of the RTC is set for more than + * 10ms, the RTC is apparently broken or not present. + */ +bool mc146818_does_rtc_work(void) +{ + return mc146818_avoid_UIP(NULL, NULL); +} +EXPORT_SYMBOL_GPL(mc146818_does_rtc_work); + +struct mc146818_get_time_callback_param { + struct rtc_time *time; + unsigned char ctrl; +#ifdef CONFIG_ACPI + unsigned char century; +#endif +#ifdef CONFIG_MACH_DECSTATION + unsigned int real_year; +#endif +}; + +static void mc146818_get_time_callback(unsigned char seconds, void *param_in) +{ + struct mc146818_get_time_callback_param *p = param_in; /* * Only the values that we read from the RTC are set. We leave @@ -57,39 +109,39 @@ again: * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated * by the RTC when initially set to a non-zero value. */ - time->tm_min = CMOS_READ(RTC_MINUTES); - time->tm_hour = CMOS_READ(RTC_HOURS); - time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); - time->tm_mon = CMOS_READ(RTC_MONTH); - time->tm_year = CMOS_READ(RTC_YEAR); + p->time->tm_sec = seconds; + p->time->tm_min = CMOS_READ(RTC_MINUTES); + p->time->tm_hour = CMOS_READ(RTC_HOURS); + p->time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); + p->time->tm_mon = CMOS_READ(RTC_MONTH); + p->time->tm_year = CMOS_READ(RTC_YEAR); #ifdef CONFIG_MACH_DECSTATION - real_year = CMOS_READ(RTC_DEC_YEAR); + p->real_year = CMOS_READ(RTC_DEC_YEAR); #endif #ifdef CONFIG_ACPI if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && - acpi_gbl_FADT.century) - century = CMOS_READ(acpi_gbl_FADT.century); + acpi_gbl_FADT.century) { + p->century = CMOS_READ(acpi_gbl_FADT.century); + } else { + p->century = 0; + } #endif - ctrl = CMOS_READ(RTC_CONTROL); - /* - * Check for the UIP bit again. If it is set now then - * the above values may contain garbage. - */ - retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP; - /* - * A NMI might have interrupted the above sequence so check whether - * the seconds value has changed which indicates that the NMI took - * longer than the UIP bit was set. Unlikely, but possible and - * there is also virt...
- */ - retry |= time->tm_sec != CMOS_READ(RTC_SECONDS); - spin_unlock_irqrestore(&rtc_lock, flags); + p->ctrl = CMOS_READ(RTC_CONTROL); +} - if (retry) - goto again; +int mc146818_get_time(struct rtc_time *time) +{ + struct mc146818_get_time_callback_param p = { + .time = time + }; + + if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) { + memset(time, 0, sizeof(*time)); + return -EIO; + } - if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { time->tm_sec = bcd2bin(time->tm_sec); time->tm_min = bcd2bin(time->tm_min); @@ -97,15 +149,19 @@ again: time->tm_mday = bcd2bin(time->tm_mday); time->tm_mon = bcd2bin(time->tm_mon); time->tm_year = bcd2bin(time->tm_year); - century = bcd2bin(century); +#ifdef CONFIG_ACPI + p.century = bcd2bin(p.century); +#endif } #ifdef CONFIG_MACH_DECSTATION - time->tm_year += real_year - 72; + time->tm_year += p.real_year - 72; #endif - if (century > 20) - time->tm_year += (century - 19) * 100; +#ifdef CONFIG_ACPI + if (p.century > 19) + time->tm_year += (p.century - 19) * 100; +#endif /* * Account for differences between how the RTC uses the values @@ -116,7 +172,7 @@ again: time->tm_mon--; - return RTC_24H; + return 0; } EXPORT_SYMBOL_GPL(mc146818_get_time); diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 56c58b055dff..81a5b1f2e68c 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -748,7 +748,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap, /* * Enable timestamp function and store timestamp of first trigger - * event until TSF1 and TFS2 interrupt flags are cleared. + * event until TSF1 and TSF2 interrupt flags are cleared. */ ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_TS_CTRL, PCF2127_BIT_TS_CTRL_TSOFF | diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 15e50bb10cf0..df2b072c394d 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -514,21 +514,56 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063) } #endif -static const struct pcf85063_config pcf85063tp_config = { - .regmap = { - .reg_bits = 8, - .val_bits = 8, - .max_register = 0x0a, +enum pcf85063_type { + PCF85063, + PCF85063TP, + PCF85063A, + RV8263, + PCF85063_LAST_ID +}; + +static struct pcf85063_config pcf85063_cfg[] = { + [PCF85063] = { + .regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x0a, + }, + }, + [PCF85063TP] = { + .regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x0a, + }, + }, + [PCF85063A] = { + .regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x11, + }, + .has_alarms = 1, + }, + [RV8263] = { + .regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x11, + }, + .has_alarms = 1, + .force_cap_7000 = 1, }, }; +static const struct i2c_device_id pcf85063_ids[]; + static int pcf85063_probe(struct i2c_client *client) { struct pcf85063 *pcf85063; unsigned int tmp; int err; - const struct pcf85063_config *config = &pcf85063tp_config; - const void *data = of_device_get_match_data(&client->dev); + const struct pcf85063_config *config; struct nvmem_config nvmem_cfg = { .name = "pcf85063_nvram", .reg_read = pcf85063_nvmem_read, @@ -544,8 +579,17 @@ static int pcf85063_probe(struct i2c_client *client) if (!pcf85063) return -ENOMEM; - if (data) - config = data; + if (client->dev.of_node) { + config = of_device_get_match_data(&client->dev); + if (!config) + return -ENODEV; + } else { + enum pcf85063_type type = + i2c_match_id(pcf85063_ids, 
client)->driver_data; + if (type >= PCF85063_LAST_ID) + return -ENODEV; + config = &pcf85063_cfg[type]; + } pcf85063->regmap = devm_regmap_init_i2c(client, &config->regmap); if (IS_ERR(pcf85063->regmap)) @@ -604,31 +648,21 @@ static int pcf85063_probe(struct i2c_client *client) return devm_rtc_register_device(pcf85063->rtc); } -#ifdef CONFIG_OF -static const struct pcf85063_config pcf85063a_config = { - .regmap = { - .reg_bits = 8, - .val_bits = 8, - .max_register = 0x11, - }, - .has_alarms = 1, -}; - -static const struct pcf85063_config rv8263_config = { - .regmap = { - .reg_bits = 8, - .val_bits = 8, - .max_register = 0x11, - }, - .has_alarms = 1, - .force_cap_7000 = 1, +static const struct i2c_device_id pcf85063_ids[] = { + { "pcf85063", PCF85063 }, + { "pcf85063tp", PCF85063TP }, + { "pcf85063a", PCF85063A }, + { "rv8263", RV8263 }, + {} }; +MODULE_DEVICE_TABLE(i2c, pcf85063_ids); +#ifdef CONFIG_OF static const struct of_device_id pcf85063_of_match[] = { - { .compatible = "nxp,pcf85063", .data = &pcf85063tp_config }, - { .compatible = "nxp,pcf85063tp", .data = &pcf85063tp_config }, - { .compatible = "nxp,pcf85063a", .data = &pcf85063a_config }, - { .compatible = "microcrystal,rv8263", .data = &rv8263_config }, + { .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] }, + { .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] }, + { .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] }, + { .compatible = "microcrystal,rv8263", .data = &pcf85063_cfg[RV8263] }, {} }; MODULE_DEVICE_TABLE(of, pcf85063_of_match); @@ -640,6 +674,7 @@ static struct i2c_driver pcf85063_driver = { .of_match_table = of_match_ptr(pcf85063_of_match), }, .probe_new = pcf85063_probe, + .id_table = pcf85063_ids, }; module_i2c_driver(pcf85063_driver); diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index d2f1d8f754bf..cf8119b6d320 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c @@ -330,6 +330,10 @@ static int __init pxa_rtc_probe(struct platform_device *pdev) if (sa1100_rtc->irq_alarm < 0) return -ENXIO; + sa1100_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(sa1100_rtc->rtc)) + return PTR_ERR(sa1100_rtc->rtc); + pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start, resource_size(pxa_rtc->ress)); if (!pxa_rtc->base) { diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index 80980414890c..cb15983383f5 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c @@ -28,8 +28,10 @@ #define RS5C372_REG_MONTH 5 #define RS5C372_REG_YEAR 6 #define RS5C372_REG_TRIM 7 -# define RS5C372_TRIM_XSL 0x80 +# define RS5C372_TRIM_XSL 0x80 /* only if RS5C372[a|b] */ # define RS5C372_TRIM_MASK 0x7F +# define R2221TL_TRIM_DEV (1 << 7) /* only if R2221TL */ +# define RS5C372_TRIM_DECR (1 << 6) #define RS5C_REG_ALARM_A_MIN 8 /* or ALARM_W */ #define RS5C_REG_ALARM_A_HOURS 9 @@ -324,8 +326,12 @@ static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim) struct rs5c372 *rs5c372 = i2c_get_clientdata(client); u8 tmp = rs5c372->regs[RS5C372_REG_TRIM]; - if (osc) - *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768; + if (osc) { + if (rs5c372->type == rtc_rs5c372a || rs5c372->type == rtc_rs5c372b) + *osc = (tmp & RS5C372_TRIM_XSL) ? 
32000 : 32768; + else + *osc = 32768; + } if (trim) { dev_dbg(&client->dev, "%s: raw trim=%x\n", __func__, tmp); @@ -485,6 +491,176 @@ static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq) #define rs5c372_rtc_proc NULL #endif +#ifdef CONFIG_RTC_INTF_DEV +static int rs5c372_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev)); + unsigned char ctrl2; + int addr; + unsigned int flags; + + dev_dbg(dev, "%s: cmd=%x\n", __func__, cmd); + + addr = RS5C_ADDR(RS5C_REG_CTRL2); + ctrl2 = i2c_smbus_read_byte_data(rs5c->client, addr); + + switch (cmd) { + case RTC_VL_READ: + flags = 0; + + switch (rs5c->type) { + case rtc_r2025sd: + case rtc_r2221tl: + if ((rs5c->type == rtc_r2025sd && !(ctrl2 & R2x2x_CTRL2_XSTP)) || + (rs5c->type == rtc_r2221tl && (ctrl2 & R2x2x_CTRL2_XSTP))) { + flags |= RTC_VL_DATA_INVALID; + } + if (ctrl2 & R2x2x_CTRL2_VDET) + flags |= RTC_VL_BACKUP_LOW; + break; + default: + if (ctrl2 & RS5C_CTRL2_XSTP) + flags |= RTC_VL_DATA_INVALID; + break; + } + + return put_user(flags, (unsigned int __user *)arg); + case RTC_VL_CLR: + /* clear VDET bit */ + if (rs5c->type == rtc_r2025sd || rs5c->type == rtc_r2221tl) { + ctrl2 &= ~R2x2x_CTRL2_VDET; + if (i2c_smbus_write_byte_data(rs5c->client, addr, ctrl2) < 0) { + dev_dbg(&rs5c->client->dev, "%s: write error in line %i\n", + __func__, __LINE__); + return -EIO; + } + } + return 0; + default: + return -ENOIOCTLCMD; + } + return 0; +} +#else +#define rs5c372_ioctl NULL +#endif + +static int rs5c372_read_offset(struct device *dev, long *offset) +{ + struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev)); + u8 val = rs5c->regs[RS5C372_REG_TRIM]; + long ppb_per_step = 0; + bool decr = val & RS5C372_TRIM_DECR; + + switch (rs5c->type) { + case rtc_r2221tl: + ppb_per_step = val & R2221TL_TRIM_DEV ? 1017 : 3051; + break; + case rtc_rs5c372a: + case rtc_rs5c372b: + ppb_per_step = val & RS5C372_TRIM_XSL ? 3125 : 3051; + break; + default: + ppb_per_step = 3051; + break; + } + + /* Only bits[0:5] represent the time counts */ + val &= 0x3F; + + /* If bits[1:5] are all 0, it means no increment or decrement */ + if (!(val & 0x3E)) { + *offset = 0; + } else { + if (decr) + *offset = -(((~val) & 0x3F) + 1) * ppb_per_step; + else + *offset = (val - 1) * ppb_per_step; + } + + return 0; +} + +static int rs5c372_set_offset(struct device *dev, long offset) +{ + struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev)); + int addr = RS5C_ADDR(RS5C372_REG_TRIM); + u8 val = 0; + u8 tmp = 0; + long ppb_per_step = 3051; + long steps = LONG_MIN; + + switch (rs5c->type) { + case rtc_rs5c372a: + case rtc_rs5c372b: + tmp = rs5c->regs[RS5C372_REG_TRIM]; + if (tmp & RS5C372_TRIM_XSL) { + ppb_per_step = 3125; + val |= RS5C372_TRIM_XSL; + } + break; + case rtc_r2221tl: + /* + * Check if it is possible to use high resolution mode (DEV=1). + * In this mode, the minimum resolution is 2 / (32768 * 20 * 3), + * which is about 1017 ppb. + */ + steps = DIV_ROUND_CLOSEST(offset, 1017); + if (steps >= -0x3E && steps <= 0x3E) { + ppb_per_step = 1017; + val |= R2221TL_TRIM_DEV; + } else { + /* + * offset is out of the range of high resolution mode. + * Try to use low resolution mode (DEV=0). In this mode, + * the minimum resolution is 2 / (32768 * 20), which is + * about 3051 ppb. + */ + steps = LONG_MIN; + } + break; + default: + break; + } + + if (steps == LONG_MIN) { + steps = DIV_ROUND_CLOSEST(offset, ppb_per_step); + if (steps > 0x3E || steps < -0x3E) + return -ERANGE; + } + + if (steps > 0) { + val |= steps + 1; + } else { + val |= RS5C372_TRIM_DECR; + val |= (~(-steps - 1)) & 0x3F; + } + + if (!steps || !(val & 0x3E)) { + /* + * if offset is too small, set oscillation adjustment register + * or time trimming register with its default value which means + * no increment or decrement. But for rs5c372[a|b], the XSL bit + * should be kept unchanged. + */ + if (rs5c->type == rtc_rs5c372a || rs5c->type == rtc_rs5c372b) + val &= RS5C372_TRIM_XSL; + else + val = 0; + } + + dev_dbg(&rs5c->client->dev, "write 0x%x for offset %ld\n", val, offset); + + if (i2c_smbus_write_byte_data(rs5c->client, addr, val) < 0) { + dev_err(&rs5c->client->dev, "failed to write 0x%x to reg %d\n", val, addr); + return -EIO; + } + + rs5c->regs[RS5C372_REG_TRIM] = val; + + return 0; +} +
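The trimming-register encoding that rs5c372_read_offset() and rs5c372_set_offset() implement above is compact enough to warrant a worked example: positive step counts are stored as steps + 1 in bits [0:5], negative counts set the DECR bit (bit 6) and store a complement-style pattern, and a value whose bits [1:5] are all clear means no adjustment. A minimal user-space round-trip sketch, not part of the patch, assuming the default granularity of 3051 ppb per step (DEV=0, XSL=0):

#include <assert.h>

#define TRIM_DECR	0x40	/* bit 6, as RS5C372_TRIM_DECR above */
#define PPB_PER_STEP	3051L	/* 2 / (32768 * 20) in ppb, DEV=0 */

/* Encode a non-zero step count the way rs5c372_set_offset() does. */
static unsigned char encode_steps(long steps)
{
	if (steps > 0)
		return steps + 1;
	return TRIM_DECR | (~(-steps - 1) & 0x3F);
}

/* Decode a register value the way rs5c372_read_offset() does. */
static long decode_offset(unsigned char val)
{
	int decr = val & TRIM_DECR;

	val &= 0x3F;
	if (!(val & 0x3E))	/* bits [1:5] clear: no adjustment */
		return 0;
	if (decr)
		return -(((~val) & 0x3F) + 1) * PPB_PER_STEP;
	return (val - 1) * PPB_PER_STEP;
}

int main(void)
{
	/* +2 steps encodes as 0x03, -2 steps as 0x7E; both round-trip. */
	assert(encode_steps(2) == 0x03);
	assert(encode_steps(-2) == (TRIM_DECR | 0x3E));
	assert(decode_offset(encode_steps(2)) == 2 * PPB_PER_STEP);
	assert(decode_offset(encode_steps(-2)) == -2 * PPB_PER_STEP);
	return 0;
}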
static const struct rtc_class_ops rs5c372_rtc_ops = { .proc = rs5c372_rtc_proc, .read_time = rs5c372_rtc_read_time, @@ -492,6 +668,9 @@ static const struct rtc_class_ops rs5c372_rtc_ops = { .read_alarm = rs5c_read_alarm, .set_alarm = rs5c_set_alarm, .alarm_irq_enable = rs5c_rtc_alarm_irq_enable, + .ioctl = rs5c372_ioctl, + .read_offset = rs5c372_read_offset, + .set_offset = rs5c372_set_offset, }; #if IS_ENABLED(CONFIG_RTC_INTF_SYSFS) diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 0d5ed38bf60c..f69e0b1137cd 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -55,6 +55,7 @@ enum rv8803_type { rv_8803, + rx_8804, rx_8900 }; @@ -601,6 +602,7 @@ static int rv8803_probe(struct i2c_client *client, static const struct i2c_device_id rv8803_id[] = { { "rv8803", rv_8803 }, + { "rv8804", rx_8804 }, { "rx8803", rv_8803 }, { "rx8900", rx_8900 }, { } @@ -617,6 +619,10 @@ static const __maybe_unused struct of_device_id rv8803_of_match[] = { .data = (void *)rv_8803 }, { + .compatible = "epson,rx8804", + .data = (void *)rx_8804 + }, + { .compatible = "epson,rx8900", .data = (void *)rx_8900 }, diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c new file mode 100644 index 000000000000..e8e2ab1103fc --- /dev/null +++ b/drivers/rtc/rtc-sunplus.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * The RTC driver for Sunplus SP7021 + * + * Copyright (C) 2019 Sunplus Technology Inc., All rights reserved.
+ */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/ktime.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/rtc.h> + +#define RTC_REG_NAME "rtc" + +#define RTC_CTRL 0x40 +#define TIMER_FREEZE_MASK_BIT BIT(5 + 16) +#define TIMER_FREEZE BIT(5) +#define DIS_SYS_RST_RTC_MASK_BIT BIT(4 + 16) +#define DIS_SYS_RST_RTC BIT(4) +#define RTC32K_MODE_RESET_MASK_BIT BIT(3 + 16) +#define RTC32K_MODE_RESET BIT(3) +#define ALARM_EN_OVERDUE_MASK_BIT BIT(2 + 16) +#define ALARM_EN_OVERDUE BIT(2) +#define ALARM_EN_PMC_MASK_BIT BIT(1 + 16) +#define ALARM_EN_PMC BIT(1) +#define ALARM_EN_MASK_BIT BIT(0 + 16) +#define ALARM_EN BIT(0) +#define RTC_TIMER_OUT 0x44 +#define RTC_DIVIDER 0x48 +#define RTC_TIMER_SET 0x4c +#define RTC_ALARM_SET 0x50 +#define RTC_USER_DATA 0x54 +#define RTC_RESET_RECORD 0x58 +#define RTC_BATT_CHARGE_CTRL 0x5c +#define BAT_CHARGE_RSEL_MASK_BIT GENMASK(3 + 16, 2 + 16) +#define BAT_CHARGE_RSEL_MASK GENMASK(3, 2) +#define BAT_CHARGE_RSEL_2K_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 0) +#define BAT_CHARGE_RSEL_250_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 1) +#define BAT_CHARGE_RSEL_50_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 2) +#define BAT_CHARGE_RSEL_0_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 3) +#define BAT_CHARGE_DSEL_MASK_BIT BIT(1 + 16) +#define BAT_CHARGE_DSEL_MASK GENMASK(1, 1) +#define BAT_CHARGE_DSEL_ON FIELD_PREP(BAT_CHARGE_DSEL_MASK, 0) +#define BAT_CHARGE_DSEL_OFF FIELD_PREP(BAT_CHARGE_DSEL_MASK, 1) +#define BAT_CHARGE_EN_MASK_BIT BIT(0 + 16) +#define BAT_CHARGE_EN BIT(0) +#define RTC_TRIM_CTRL 0x60 + +struct sunplus_rtc { + struct rtc_device *rtc; + struct resource *res; + struct clk *rtcclk; + struct reset_control *rstc; + void __iomem *reg_base; + int irq; +}; + +static void sp_get_seconds(struct device *dev, unsigned long *secs) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev); + + *secs = (unsigned long)readl(sp_rtc->reg_base + RTC_TIMER_OUT); +} + +static void sp_set_seconds(struct device *dev, unsigned long secs) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev); + + writel((u32)secs, sp_rtc->reg_base + RTC_TIMER_SET); +} + +static int sp_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long secs; + + sp_get_seconds(dev, &secs); + rtc_time64_to_tm(secs, tm); + + return 0; +} + +static int sp_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long secs; + + secs = rtc_tm_to_time64(tm); + dev_dbg(dev, "%s, secs = %lu\n", __func__, secs); + sp_set_seconds(dev, secs); + + return 0; +} + +static int sp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev); + unsigned long alarm_time; + + alarm_time = rtc_tm_to_time64(&alrm->time); + dev_dbg(dev, "%s, alarm_time: %u\n", __func__, (u32)(alarm_time)); + writel((u32)alarm_time, sp_rtc->reg_base + RTC_ALARM_SET); + + return 0; +} + +static int sp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev); + unsigned int alarm_time; + + alarm_time = readl(sp_rtc->reg_base + RTC_ALARM_SET); + dev_dbg(dev, "%s, alarm_time: %u\n", __func__, alarm_time); + + if (alarm_time == 0) + alrm->enabled = 0; + else + alrm->enabled = 1; + + rtc_time64_to_tm((unsigned long)(alarm_time), &alrm->time); + + return 0; +} + +static int sp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev); + + if (enabled) + writel((TIMER_FREEZE_MASK_BIT | DIS_SYS_RST_RTC_MASK_BIT | + RTC32K_MODE_RESET_MASK_BIT | ALARM_EN_OVERDUE_MASK_BIT | + ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) | + (DIS_SYS_RST_RTC | ALARM_EN_OVERDUE | ALARM_EN_PMC | ALARM_EN), + sp_rtc->reg_base + RTC_CTRL); + else + writel((ALARM_EN_OVERDUE_MASK_BIT | ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) | + 0x0, sp_rtc->reg_base + RTC_CTRL); + + return 0; +} +
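One convention in this driver is worth spelling out: every writel() in sp_rtc_alarm_irq_enable() above and sp_rtc_set_trickle_charger() below ORs *_MASK_BIT values (BIT(n + 16)) together with the data bits in the low halfword. The register layout suggests the SP7021 treats the upper 16 bits of these registers as per-bit write enables, so only fields whose mask bits are set get updated; that is why the disable path above can clear the three alarm-enable bits by writing their mask bits with a data value of 0x0. A minimal sketch of that convention; the per-bit write-enable behaviour is an assumption read off the register layout, not taken from a datasheet:

#include <assert.h>
#include <stdint.h>

/*
 * Model of a mask-write register: bits [31:16] select which of the
 * low 16 bits are actually updated; unselected bits keep their value.
 */
static uint16_t mask_write(uint16_t old, uint32_t wval)
{
	uint16_t mask = wval >> 16;
	uint16_t val = wval & 0xFFFF;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	/* Set bit 0 (ALARM_EN) without disturbing bit 4 (DIS_SYS_RST_RTC). */
	uint16_t reg = 1 << 4;

	reg = mask_write(reg, (1u << (0 + 16)) | (1u << 0));
	assert(reg == ((1 << 4) | (1 << 0)));

	/* Clear bit 0 only: mask bit set, data bit zero. */
	reg = mask_write(reg, (1u << (0 + 16)) | 0x0);
	assert(reg == (1 << 4));
	return 0;
}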
+static const struct rtc_class_ops sp_rtc_ops = { + .read_time = sp_rtc_read_time, + .set_time = sp_rtc_set_time, + .set_alarm = sp_rtc_set_alarm, + .read_alarm = sp_rtc_read_alarm, + .alarm_irq_enable = sp_rtc_alarm_irq_enable, +}; + +static irqreturn_t sp_rtc_irq_handler(int irq, void *dev_id) +{ + struct platform_device *plat_dev = dev_id; + struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev); + + rtc_update_irq(sp_rtc->rtc, 1, RTC_IRQF | RTC_AF); + dev_dbg(&plat_dev->dev, "[RTC] ALARM INT\n"); + + return IRQ_HANDLED; +} + +/* + * ------------------------------------------------------------------------------------- + * bat_charge_rsel bat_charge_dsel bat_charge_en Remarks + * x x 0 Disable + * 0 0 1 0.86mA (2K Ohm with diode) + * 1 0 1 1.81mA (250 Ohm with diode) + * 2 0 1 2.07mA (50 Ohm with diode) + * 3 0 1 16.0mA (0 Ohm with diode) + * 0 1 1 1.36mA (2K Ohm without diode) + * 1 1 1 3.99mA (250 Ohm without diode) + * 2 1 1 4.41mA (50 Ohm without diode) + * 3 1 1 16.0mA (0 Ohm without diode) + * ------------------------------------------------------------------------------------- + */ +static void sp_rtc_set_trickle_charger(struct device dev) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(&dev); + u32 ohms, rsel; + u32 chargeable; + + if (of_property_read_u32(dev.of_node, "trickle-resistor-ohms", &ohms) || + of_property_read_u32(dev.of_node, "aux-voltage-chargeable", &chargeable)) { + dev_warn(&dev, "battery charger disabled\n"); + return; + } + + switch (ohms) { + case 2000: + rsel = BAT_CHARGE_RSEL_2K_OHM; + break; + case 250: + rsel = BAT_CHARGE_RSEL_250_OHM; + break; + case 50: + rsel = BAT_CHARGE_RSEL_50_OHM; + break; + case 0: + rsel = BAT_CHARGE_RSEL_0_OHM; + break; + default: + dev_err(&dev, "invalid charger resistor value (%d)\n", ohms); + return; + } + + writel(BAT_CHARGE_RSEL_MASK_BIT | rsel, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL); + + switch (chargeable) { + case 0: + writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_OFF, + sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL); + break; + case 1: + writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_ON, + sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL); + break; + default: + dev_err(&dev, "invalid aux-voltage-chargeable value (%d)\n", chargeable); + return; + } + + writel(BAT_CHARGE_EN_MASK_BIT | BAT_CHARGE_EN, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL); +} + +static int sp_rtc_probe(struct platform_device *plat_dev) +{ + struct sunplus_rtc *sp_rtc; + int ret; + + sp_rtc = devm_kzalloc(&plat_dev->dev, sizeof(*sp_rtc), GFP_KERNEL); + if (!sp_rtc) + return -ENOMEM; + + sp_rtc->res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, RTC_REG_NAME); + sp_rtc->reg_base = devm_ioremap_resource(&plat_dev->dev, sp_rtc->res); + if (IS_ERR(sp_rtc->reg_base)) + return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base), + "%s devm_ioremap_resource fail\n", RTC_REG_NAME); + dev_dbg(&plat_dev->dev, "res = 0x%x, reg_base = 0x%lx\n", + sp_rtc->res->start, (unsigned long)sp_rtc->reg_base); + + sp_rtc->irq = platform_get_irq(plat_dev, 0); + if (sp_rtc->irq < 0) + return dev_err_probe(&plat_dev->dev, sp_rtc->irq,
"platform_get_irq failed\n"); + + ret = devm_request_irq(&plat_dev->dev, sp_rtc->irq, sp_rtc_irq_handler, + IRQF_TRIGGER_RISING, "rtc irq", plat_dev); + if (ret) + return dev_err_probe(&plat_dev->dev, ret, "devm_request_irq failed:\n"); + + sp_rtc->rtcclk = devm_clk_get(&plat_dev->dev, NULL); + if (IS_ERR(sp_rtc->rtcclk)) + return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rtcclk), + "devm_clk_get fail\n"); + + sp_rtc->rstc = devm_reset_control_get_exclusive(&plat_dev->dev, NULL); + if (IS_ERR(sp_rtc->rstc)) + return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rstc), + "failed to retrieve reset controller\n"); + + ret = clk_prepare_enable(sp_rtc->rtcclk); + if (ret) + goto free_clk; + + ret = reset_control_deassert(sp_rtc->rstc); + if (ret) + goto free_reset_assert; + + device_init_wakeup(&plat_dev->dev, 1); + dev_set_drvdata(&plat_dev->dev, sp_rtc); + + sp_rtc->rtc = devm_rtc_allocate_device(&plat_dev->dev); + if (IS_ERR(sp_rtc->rtc)) { + ret = PTR_ERR(sp_rtc->rtc); + goto free_reset_assert; + } + + sp_rtc->rtc->range_max = U32_MAX; + sp_rtc->rtc->range_min = 0; + sp_rtc->rtc->ops = &sp_rtc_ops; + + ret = devm_rtc_register_device(sp_rtc->rtc); + if (ret) + goto free_reset_assert; + + /* Setup trickle charger */ + if (plat_dev->dev.of_node) + sp_rtc_set_trickle_charger(plat_dev->dev); + + /* Keep RTC from system reset */ + writel(DIS_SYS_RST_RTC_MASK_BIT | DIS_SYS_RST_RTC, sp_rtc->reg_base + RTC_CTRL); + + return 0; + +free_reset_assert: + reset_control_assert(sp_rtc->rstc); +free_clk: + clk_disable_unprepare(sp_rtc->rtcclk); + + return ret; +} + +static int sp_rtc_remove(struct platform_device *plat_dev) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev); + + device_init_wakeup(&plat_dev->dev, 0); + reset_control_assert(sp_rtc->rstc); + clk_disable_unprepare(sp_rtc->rtcclk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sp_rtc_suspend(struct device *dev) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(sp_rtc->irq); + + return 0; +} + +static int sp_rtc_resume(struct device *dev) +{ + struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(sp_rtc->irq); + + return 0; +} +#endif + +static const struct of_device_id sp_rtc_of_match[] = { + { .compatible = "sunplus,sp7021-rtc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sp_rtc_of_match); + +static SIMPLE_DEV_PM_OPS(sp_rtc_pm_ops, sp_rtc_suspend, sp_rtc_resume); + +static struct platform_driver sp_rtc_driver = { + .probe = sp_rtc_probe, + .remove = sp_rtc_remove, + .driver = { + .name = "sp7021-rtc", + .of_match_table = sp_rtc_of_match, + .pm = &sp_rtc_pm_ops, + }, +}; +module_platform_driver(sp_rtc_driver); + +MODULE_AUTHOR("Vincent Shih <vincent.sunplus@gmail.com>"); +MODULE_DESCRIPTION("Sunplus RTC driver"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index 564ade03b530..d02eb5b213d0 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -904,13 +904,11 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc /** * inia100_queue_lck - queue command with host * @cmd: Command block - * @done: Completion function * * Called by the mid layer to queue a command. 
Process the command * block, build the host specific scb structures and if there is room * queue the command down to the controller */ - static int inia100_queue_lck(struct scsi_cmnd *cmd) { struct orc_scb *scb; diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index dcd6fae65a88..7143418d690f 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -614,7 +614,6 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id) /** * atp870u_queuecommand_lck - Queue SCSI command * @req_p: request block - * @done: completion function * * Queue a command to the ATP queue. Called with the host lock held. */ diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index c8b947c16069..f46989bd083c 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -981,7 +981,7 @@ const struct attribute_group *bfad_im_host_groups[] = { NULL }; -struct attribute *bfad_im_vport_attrs[] = { +static struct attribute *bfad_im_vport_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_model.attr, &dev_attr_model_description.attr, diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 6fa300daa31e..908854869864 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -239,7 +239,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data) u_char *buffer; int result; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if(!buffer) return -ENOMEM; @@ -297,7 +297,7 @@ ch_readconfig(scsi_changer *ch) int result,id,lun,i; u_int elem; - buffer = kzalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kzalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -783,7 +783,7 @@ static long ch_ioctl(struct file *file, return -EINVAL; elem = ch->firsts[cge.cge_type] + cge.cge_unit; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; mutex_lock(&ch->lock); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 9b8796c9e634..c11916b8ae00 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -946,7 +946,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, * layer, invoke 'done' on completion * * @cmd: pointer to scsi command object - * @done: function pointer to be invoked on completion * * Returns 1 if the adapter (host) is busy, else returns 0. One * reason for an adapter to be busy is that the number @@ -959,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave") * and is expected to be held on return. 
* - **/ + */ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) { void (*done)(struct scsi_cmnd *) = scsi_done; diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c index b2b61bc45f12..ae62fc3c9ee3 100644 --- a/drivers/scsi/elx/efct/efct_driver.c +++ b/drivers/scsi/elx/efct/efct_driver.c @@ -261,7 +261,7 @@ efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len, dma.size = FW_WRITE_BUFSIZE; dma.virt = dma_alloc_coherent(&efct->pci->dev, - dma.size, &dma.phys, GFP_DMA); + dma.size, &dma.phys, GFP_KERNEL); if (!dma.virt) return -ENOMEM; diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c index ba8256b4c782..d4bb37960a3c 100644 --- a/drivers/scsi/elx/efct/efct_hw.c +++ b/drivers/scsi/elx/efct/efct_hw.c @@ -516,7 +516,7 @@ efct_hw_setup_io(struct efct_hw *hw) dma = &hw->xfer_rdy; dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io; dma->virt = dma_alloc_coherent(&efct->pci->dev, - dma->size, &dma->phys, GFP_DMA); + dma->size, &dma->phys, GFP_KERNEL); if (!dma->virt) return -ENOMEM; } @@ -562,7 +562,7 @@ efct_hw_setup_io(struct efct_hw *hw) sizeof(struct sli4_sge); dma->virt = dma_alloc_coherent(&efct->pci->dev, dma->size, &dma->phys, - GFP_DMA); + GFP_KERNEL); if (!dma->virt) { efc_log_err(hw->os, "dma_alloc fail %d\n", i); memset(&io->def_sgl, 0, @@ -618,7 +618,7 @@ efct_hw_init_prereg_io(struct efct_hw *hw) memset(&req, 0, sizeof(struct efc_dma)); req.size = 32 + sgls_per_request * 16; req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys, - GFP_DMA); + GFP_KERNEL); if (!req.virt) { kfree(sgls); return -ENOMEM; @@ -1063,7 +1063,7 @@ efct_hw_init(struct efct_hw *hw) dma = &hw->loop_map; dma->size = SLI4_MIN_LOOP_MAP_BYTES; dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys, - GFP_DMA); + GFP_KERNEL); if (!dma->virt) return -EIO; @@ -1192,7 +1192,7 @@ efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count, prq->dma.virt = dma_alloc_coherent(&efct->pci->dev, prq->dma.size, &prq->dma.phys, - GFP_DMA); + GFP_KERNEL); if (!prq->dma.virt) { efc_log_err(hw->os, "DMA allocation failed\n"); kfree(rq_buf); diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c index 71e21655916a..c3247b951a76 100644 --- a/drivers/scsi/elx/efct/efct_io.c +++ b/drivers/scsi/elx/efct/efct_io.c @@ -48,7 +48,7 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl) io->rspbuf.size = SCSI_RSP_BUF_LENGTH; io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev, io->rspbuf.size, - &io->rspbuf.phys, GFP_DMA); + &io->rspbuf.phys, GFP_KERNEL); if (!io->rspbuf.virt) { efc_log_err(efct, "dma_alloc rspbuf failed\n"); efct_io_pool_free(io_pool); diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c index f8665d48904a..da4ac8a4ce12 100644 --- a/drivers/scsi/elx/libefc/efc_cmds.c +++ b/drivers/scsi/elx/libefc/efc_cmds.c @@ -179,7 +179,7 @@ efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport) nport->dma.size = EFC_SPARAM_DMA_SZ; nport->dma.virt = dma_alloc_coherent(&efc->pci->dev, nport->dma.size, &nport->dma.phys, - GFP_DMA); + GFP_KERNEL); if (!nport->dma.virt) { efc_log_err(efc, "Failed to allocate DMA memory\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); @@ -466,7 +466,7 @@ efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf) domain->dma.size = EFC_SPARAM_DMA_SZ; domain->dma.virt = dma_alloc_coherent(&efc->pci->dev, domain->dma.size, - &domain->dma.phys, GFP_DMA); + &domain->dma.phys, 
GFP_KERNEL); if (!domain->dma.virt) { efc_log_err(efc, "Failed to allocate DMA memory\n"); return -EIO; diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c index 24db0accb256..7bb4f9aad2c8 100644 --- a/drivers/scsi/elx/libefc/efc_els.c +++ b/drivers/scsi/elx/libefc/efc_els.c @@ -71,7 +71,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen) /* now allocate DMA for request and response */ els->io.req.size = reqlen; els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size, - &els->io.req.phys, GFP_DMA); + &els->io.req.phys, GFP_KERNEL); if (!els->io.req.virt) { mempool_free(els, efc->els_io_pool); spin_unlock_irqrestore(&node->els_ios_lock, flags); @@ -80,7 +80,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen) els->io.rsp.size = rsplen; els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size, - &els->io.rsp.phys, GFP_DMA); + &els->io.rsp.phys, GFP_KERNEL); if (!els->io.rsp.virt) { dma_free_coherent(&efc->pci->dev, els->io.req.size, els->io.req.virt, els->io.req.phys); diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c index 907d67aeac23..3ea57bd6fb0a 100644 --- a/drivers/scsi/elx/libefc_sli/sli4.c +++ b/drivers/scsi/elx/libefc_sli/sli4.c @@ -445,7 +445,7 @@ sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs, dma->size = payload_size; dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, - &dma->phys, GFP_DMA); + &dma->phys, GFP_KERNEL); if (!dma->virt) return -EIO; @@ -508,7 +508,7 @@ __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype, q->dma.size = size * n_entries; q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size, - &q->dma.phys, GFP_DMA); + &q->dma.phys, GFP_KERNEL); if (!q->dma.virt) { memset(&q->dma, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]); @@ -849,7 +849,7 @@ static int sli_cmd_cq_set_create(struct sli4 *sli4, dma->size = payload_size; dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, - &dma->phys, GFP_DMA); + &dma->phys, GFP_KERNEL); if (!dma->virt) return -EIO; @@ -4413,7 +4413,7 @@ sli_get_ctrl_attributes(struct sli4 *sli4) psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes); data.size = psize; data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size, - &data.phys, GFP_DMA); + &data.phys, GFP_KERNEL); if (!data.virt) { memset(&data, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n"); @@ -4653,7 +4653,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, */ sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe); sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size, - &sli4->bmbx.phys, GFP_DMA); + &sli4->bmbx.phys, GFP_KERNEL); if (!sli4->bmbx.virt) { memset(&sli4->bmbx, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "bootstrap mailbox allocation failed\n"); @@ -4674,7 +4674,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev, sli4->vpd_data.size, &sli4->vpd_data.phys, - GFP_DMA); + GFP_KERNEL); if (!sli4->vpd_data.virt) { memset(&sli4->vpd_data, 0, sizeof(struct efc_dma)); /* Note that failure isn't fatal in this specific case */ @@ -5070,7 +5070,7 @@ sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma, payload_dma->size = payload_size; payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev, payload_dma->size, - &payload_dma->phys, GFP_DMA); + &payload_dma->phys, GFP_KERNEL); if 
(!payload_dma->virt) { memset(payload_dma, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "mbox payload memory allocation fail\n"); diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 2213a91923a5..15a58c955516 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -8,7 +8,6 @@ #define _HISI_SAS_H_ #include <linux/acpi.h> -#include <linux/async.h> #include <linux/blk-mq.h> #include <linux/blk-mq-pci.h> #include <linux/clk.h> @@ -134,6 +133,11 @@ struct hisi_sas_rst { bool done; }; +struct hisi_sas_internal_abort { + unsigned int flag; + unsigned int tag; +}; + #define HISI_SAS_RST_WORK_INIT(r, c) \ { .hisi_hba = hisi_hba, \ .completion = &c, \ @@ -154,6 +158,7 @@ enum hisi_sas_bit_err_type { enum hisi_sas_phy_event { HISI_PHYE_PHY_UP = 0U, HISI_PHYE_LINK_RESET, + HISI_PHYE_PHY_UP_PM, HISI_PHYES_NUM, }; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index f206c433de32..f46f679fe825 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -158,7 +158,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) { void *bitmap = hisi_hba->slot_index_tags; - clear_bit(slot_idx, bitmap); + __clear_bit(slot_idx, bitmap); } static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) @@ -175,7 +175,7 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) { void *bitmap = hisi_hba->slot_index_tags; - set_bit(slot_idx, bitmap); + __set_bit(slot_idx, bitmap); } static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, @@ -206,14 +206,6 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, return index; } -static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) -{ - int i; - - for (i = 0; i < hisi_hba->slot_index_count; ++i) - hisi_sas_slot_index_clear(hisi_hba, i); -} - void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { @@ -273,11 +265,11 @@ static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, } static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, - int device_id, int abort_flag, int tag_to_abort) + struct hisi_sas_internal_abort *abort, + struct hisi_sas_slot *slot, int device_id) { hisi_hba->hw->prep_abort(hisi_hba, slot, - device_id, abort_flag, tag_to_abort); + device_id, abort->flag, abort->tag); } static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, @@ -403,95 +395,20 @@ err_out_dif_dma_unmap: return rc; } -static int hisi_sas_task_prep(struct sas_task *task, - struct hisi_sas_dq **dq_pointer, - bool is_tmf, struct hisi_sas_tmf_task *tmf, - int *pass) +static +void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_dq *dq, + struct hisi_sas_device *sas_dev, + struct hisi_sas_internal_abort *abort, + struct hisi_sas_tmf_task *tmf) { - struct domain_device *device = task->dev; - struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); - struct hisi_sas_device *sas_dev = device->lldd_dev; - struct hisi_sas_port *port; - struct hisi_sas_slot *slot; - struct hisi_sas_cmd_hdr *cmd_hdr_base; - struct asd_sas_port *sas_port = device->port; - struct device *dev = hisi_hba->dev; - int dlvry_queue_slot, dlvry_queue, rc, slot_idx; - int n_elem = 0, n_elem_dif = 0, n_elem_req = 0; - struct scsi_cmnd *scmd = NULL; - struct hisi_sas_dq *dq; + struct hisi_sas_cmd_hdr *cmd_hdr_base; + int dlvry_queue_slot, dlvry_queue; 
+ struct sas_task *task = slot->task; unsigned long flags; int wr_q_index; - if (DEV_IS_GONE(sas_dev)) { - if (sas_dev) - dev_info(dev, "task prep: device %d not ready\n", - sas_dev->device_id); - else - dev_info(dev, "task prep: device %016llx not ready\n", - SAS_ADDR(device->sas_addr)); - - return -ECOMM; - } - - if (task->uldd_task) { - struct ata_queued_cmd *qc; - - if (dev_is_sata(device)) { - qc = task->uldd_task; - scmd = qc->scsicmd; - } else { - scmd = task->uldd_task; - } - } - - if (scmd) { - unsigned int dq_index; - u32 blk_tag; - - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); - dq_index = blk_mq_unique_tag_to_hwq(blk_tag); - *dq_pointer = dq = &hisi_hba->dq[dq_index]; - } else { - struct Scsi_Host *shost = hisi_hba->shost; - struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; - int queue = qmap->mq_map[raw_smp_processor_id()]; - - *dq_pointer = dq = &hisi_hba->dq[queue]; - } - - port = to_hisi_sas_port(sas_port); - if (port && !port->port_attached) { - dev_info(dev, "task prep: %s port%d not attach device\n", - (dev_is_sata(device)) ? - "SATA/STP" : "SAS", - device->port->id); - - return -ECOMM; - } - - rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, - &n_elem_req); - if (rc < 0) - goto prep_out; - - if (!sas_protocol_ata(task->task_proto)) { - rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); - if (rc < 0) - goto err_out_dma_unmap; - } - - if (hisi_hba->hw->slot_index_alloc) - rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); - else - rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); - - if (rc < 0) - goto err_out_dif_dma_unmap; - - slot_idx = rc; - slot = &hisi_hba->slot_info[slot_idx]; - spin_lock(&dq->lock); wr_q_index = dq->wr_point; dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; @@ -505,16 +422,13 @@ static int hisi_sas_task_prep(struct sas_task *task, dlvry_queue_slot = wr_q_index; slot->device_id = sas_dev->device_id; - slot->n_elem = n_elem; - slot->n_elem_dif = n_elem_dif; slot->dlvry_queue = dlvry_queue; slot->dlvry_queue_slot = dlvry_queue_slot; cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; - slot->task = task; - slot->port = port; + slot->tmf = tmf; - slot->is_internal = is_tmf; + slot->is_internal = tmf; task->lldd_task = slot; memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); @@ -534,8 +448,14 @@ static int hisi_sas_task_prep(struct sas_task *task, case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: hisi_sas_task_prep_ata(hisi_hba, slot); break; + case SAS_PROTOCOL_NONE: + if (abort) { + hisi_sas_task_prep_abort(hisi_hba, abort, slot, sas_dev->device_id); + break; + } + fallthrough; default: - dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", + dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", task->task_proto); break; } @@ -544,32 +464,27 @@ static int hisi_sas_task_prep(struct sas_task *task, task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); - ++(*pass); WRITE_ONCE(slot->ready, 1); - return 0; - -err_out_dif_dma_unmap: - if (!sas_protocol_ata(task->task_proto)) - hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); -err_out_dma_unmap: - hisi_sas_dma_unmap(hisi_hba, task, n_elem, - n_elem_req); -prep_out: - dev_err(dev, "task prep: failed[%d]!\n", rc); - return rc; + spin_lock(&dq->lock); + hisi_hba->hw->start_delivery(dq); + spin_unlock(&dq->lock); } static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, - bool is_tmf, struct hisi_sas_tmf_task *tmf) + struct 
hisi_sas_tmf_task *tmf) { - u32 rc; - u32 pass = 0; - struct hisi_hba *hisi_hba; - struct device *dev; + int n_elem = 0, n_elem_dif = 0, n_elem_req = 0; struct domain_device *device = task->dev; struct asd_sas_port *sas_port = device->port; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct scsi_cmnd *scmd = NULL; struct hisi_sas_dq *dq = NULL; + struct hisi_sas_port *port; + struct hisi_hba *hisi_hba; + struct hisi_sas_slot *slot; + struct device *dev; + int rc; if (!sas_port) { struct task_status_struct *ts = &task->task_status; @@ -596,17 +511,94 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, up(&hisi_hba->sem); } - /* protect task_prep and start_delivery sequence */ - rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); - if (rc) - dev_err(dev, "task exec: failed[%d]!\n", rc); + if (DEV_IS_GONE(sas_dev)) { + if (sas_dev) + dev_info(dev, "task prep: device %d not ready\n", + sas_dev->device_id); + else + dev_info(dev, "task prep: device %016llx not ready\n", + SAS_ADDR(device->sas_addr)); + + return -ECOMM; + } + + if (task->uldd_task) { + struct ata_queued_cmd *qc; + + if (dev_is_sata(device)) { + qc = task->uldd_task; + scmd = qc->scsicmd; + } else { + scmd = task->uldd_task; + } + } - if (likely(pass)) { - spin_lock(&dq->lock); - hisi_hba->hw->start_delivery(dq); - spin_unlock(&dq->lock); + if (scmd) { + unsigned int dq_index; + u32 blk_tag; + + blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + dq_index = blk_mq_unique_tag_to_hwq(blk_tag); + dq = &hisi_hba->dq[dq_index]; + } else { + struct Scsi_Host *shost = hisi_hba->shost; + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + int queue = qmap->mq_map[raw_smp_processor_id()]; + + dq = &hisi_hba->dq[queue]; + } + + port = to_hisi_sas_port(sas_port); + if (port && !port->port_attached) { + dev_info(dev, "task prep: %s port%d not attach device\n", + (dev_is_sata(device)) ? 
+ "SATA/STP" : "SAS", + device->port->id); + + return -ECOMM; } + rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, + &n_elem_req); + if (rc < 0) + goto prep_out; + + if (!sas_protocol_ata(task->task_proto)) { + rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); + if (rc < 0) + goto err_out_dma_unmap; + } + + if (hisi_hba->hw->slot_index_alloc) + rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); + else + rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); + + if (rc < 0) + goto err_out_dif_dma_unmap; + + slot = &hisi_hba->slot_info[rc]; + slot->n_elem = n_elem; + slot->n_elem_dif = n_elem_dif; + slot->task = task; + slot->port = port; + + slot->tmf = tmf; + slot->is_internal = tmf; + + /* protect task_prep and start_delivery sequence */ + hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf); + + return 0; + +err_out_dif_dma_unmap: + if (!sas_protocol_ata(task->task_proto)) + hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); +err_out_dma_unmap: + hisi_sas_dma_unmap(hisi_hba, task, n_elem, + n_elem_req); +prep_out: + dev_err(dev, "task exec: failed[%d]!\n", rc); return rc; } @@ -619,12 +611,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, if (!phy->phy_attached) return; - if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) && - !sas_phy->suspended) { - dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no); - return; - } - sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); if (sas_phy->phy) { @@ -860,10 +846,11 @@ int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) } EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); -static void hisi_sas_phyup_work(struct work_struct *work) +static void hisi_sas_phyup_work_common(struct work_struct *work, + enum hisi_sas_phy_event event) { struct hisi_sas_phy *phy = - container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); + container_of(work, typeof(*phy), works[event]); struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; int phy_no = sas_phy->id; @@ -874,6 +861,11 @@ static void hisi_sas_phyup_work(struct work_struct *work) hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); } +static void hisi_sas_phyup_work(struct work_struct *work) +{ + hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); +} + static void hisi_sas_linkreset_work(struct work_struct *work) { struct hisi_sas_phy *phy = @@ -883,9 +875,21 @@ static void hisi_sas_linkreset_work(struct work_struct *work) hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); } +static void hisi_sas_phyup_pm_work(struct work_struct *work) +{ + struct hisi_sas_phy *phy = + container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + + hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); + pm_runtime_put_sync(dev); +} + static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, + [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, }; bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, @@ -917,10 +921,14 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct device *dev = hisi_hba->dev; + unsigned long flags; dev_dbg(dev, "phy%d OOB ready\n", phy_no); - if (phy->phy_attached) + spin_lock_irqsave(&phy->lock, flags); + if (phy->phy_attached) { + spin_unlock_irqrestore(&phy->lock, flags); return; + } if (!timer_pending(&phy->timer)) { if 
(phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { @@ -928,13 +936,17 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT; add_timer(&phy->timer); - } else { - dev_warn(dev, "phy%d failed to come up %d times, giving up\n", - phy_no, phy->wait_phyup_cnt); - phy->wait_phyup_cnt = 0; + spin_unlock_irqrestore(&phy->lock, flags); + return; } + + dev_warn(dev, "phy%d failed to come up %d times, giving up\n", + phy_no, phy->wait_phyup_cnt); + phy->wait_phyup_cnt = 0; } + spin_unlock_irqrestore(&phy->lock, flags); } + EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) @@ -1105,7 +1117,7 @@ static void hisi_sas_dev_gone(struct domain_device *device) static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) { - return hisi_sas_task_exec(task, gfp_flags, 0, NULL); + return hisi_sas_task_exec(task, gfp_flags, NULL); } static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, @@ -1156,6 +1168,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, u8 sts = phy->phy_attached; int ret = 0; + down(&hisi_hba->sem); phy->reset_completion = &completion; switch (func) { @@ -1199,6 +1212,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, out: phy->reset_completion = NULL; + up(&hisi_hba->sem); return ret; } @@ -1259,8 +1273,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, task->slow_task->timer.expires = jiffies + TASK_TIMEOUT; add_timer(&task->slow_task->timer); - res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); - + res = hisi_sas_task_exec(task, GFP_KERNEL, tmf); if (res) { del_timer_sync(&task->slow_task->timer); dev_err(dev, "abort tmf: executing internal task failed: %d\n", @@ -1433,11 +1446,13 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) sas_port = device->port; port = to_hisi_sas_port(sas_port); + spin_lock(&sas_port->phy_list_lock); list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) if (state & BIT(sas_phy->id)) { phy = sas_phy->lldd_phy; break; } + spin_unlock(&sas_port->phy_list_lock); if (phy) { port->id = phy->port_id; @@ -1514,22 +1529,25 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, struct ata_link *link; u8 fis[20] = {0}; u32 state; + int i; state = hisi_hba->hw->get_phys_state(hisi_hba); - list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) { + for (i = 0; i < hisi_hba->n_phy; i++) { if (!(state & BIT(sas_phy->id))) continue; + if (!(sas_port->phy_mask & BIT(i))) + continue; ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); - tmf_task.phy_id = sas_phy->id; + tmf_task.phy_id = i; hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf_task); if (rc != TMF_RESP_FUNC_COMPLETE) { dev_err(dev, "phy%d ata reset failed rc=%d\n", - sas_phy->id, rc); + i, rc); break; } } @@ -1581,7 +1599,6 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) { struct Scsi_Host *shost = hisi_hba->shost; - down(&hisi_hba->sem); hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); scsi_block_requests(shost); @@ -1606,9 +1623,9 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) if (hisi_hba->reject_stp_links_msk) hisi_sas_terminate_stp_reject(hisi_hba); hisi_sas_reset_init_all_devices(hisi_hba); - up(&hisi_hba->sem); scsi_unblock_requests(shost); 
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + up(&hisi_hba->sem); hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); } @@ -1619,8 +1636,11 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) if (!hisi_hba->hw->soft_reset) return -1; - if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) + down(&hisi_hba->sem); + if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { + up(&hisi_hba->sem); return -1; + } if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); @@ -2021,19 +2041,17 @@ static int hisi_sas_query_task(struct sas_task *task) static int hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, - struct sas_task *task, int abort_flag, - int task_tag, struct hisi_sas_dq *dq) + struct hisi_sas_internal_abort *abort, + struct sas_task *task, + struct hisi_sas_dq *dq) { struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct device *dev = hisi_hba->dev; struct hisi_sas_port *port; - struct hisi_sas_slot *slot; struct asd_sas_port *sas_port = device->port; - struct hisi_sas_cmd_hdr *cmd_hdr_base; - int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; - unsigned long flags; - int wr_q_index; + struct hisi_sas_slot *slot; + int slot_idx; if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) return -EINVAL; @@ -2044,59 +2062,24 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, port = to_hisi_sas_port(sas_port); /* simply get a slot and send abort command */ - rc = hisi_sas_slot_index_alloc(hisi_hba, NULL); - if (rc < 0) + slot_idx = hisi_sas_slot_index_alloc(hisi_hba, NULL); + if (slot_idx < 0) goto err_out; - slot_idx = rc; slot = &hisi_hba->slot_info[slot_idx]; - - spin_lock(&dq->lock); - wr_q_index = dq->wr_point; - dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; - list_add_tail(&slot->delivery, &dq->list); - spin_unlock(&dq->lock); - spin_lock(&sas_dev->lock); - list_add_tail(&slot->entry, &sas_dev->list); - spin_unlock(&sas_dev->lock); - - dlvry_queue = dq->id; - dlvry_queue_slot = wr_q_index; - - slot->device_id = sas_dev->device_id; - slot->n_elem = n_elem; - slot->dlvry_queue = dlvry_queue; - slot->dlvry_queue_slot = dlvry_queue_slot; - cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; - slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; + slot->n_elem = 0; slot->task = task; slot->port = port; slot->is_internal = true; - task->lldd_task = slot; - - memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); - memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); - memset(hisi_sas_status_buf_addr_mem(slot), 0, - sizeof(struct hisi_sas_err_record)); - hisi_sas_task_prep_abort(hisi_hba, slot, device_id, - abort_flag, task_tag); - - spin_lock_irqsave(&task->task_state_lock, flags); - task->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock_irqrestore(&task->task_state_lock, flags); - WRITE_ONCE(slot->ready, 1); - /* send abort command to the chip */ - spin_lock(&dq->lock); - hisi_hba->hw->start_delivery(dq); - spin_unlock(&dq->lock); + hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL); return 0; err_out: - dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); + dev_err(dev, "internal abort task prep: failed[%d]!\n", slot_idx); - return rc; + return slot_idx; } /** @@ -2118,9 +2101,12 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, { struct sas_task *task; struct hisi_sas_device *sas_dev = device->lldd_dev; + 
struct hisi_sas_internal_abort abort = { + .flag = abort_flag, + .tag = tag, + }; struct device *dev = hisi_hba->dev; int res; - /* * The interface is not realized means this HW don't support internal * abort, or don't need to do internal abort. Then here, we return @@ -2138,14 +2124,14 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, return -ENOMEM; task->dev = device; - task->task_proto = device->tproto; + task->task_proto = SAS_PROTOCOL_NONE; task->task_done = hisi_sas_task_done; task->slow_task->timer.function = hisi_sas_tmf_timedout; task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT; add_timer(&task->slow_task->timer); res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, - task, abort_flag, tag, dq); + &abort, task, dq); if (res) { del_timer_sync(&task->slow_task->timer); dev_err(dev, "internal task abort: executing internal task failed: %d\n", @@ -2516,9 +2502,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) if (!hisi_hba->breakpoint) goto err_out; - hisi_hba->slot_index_count = max_command_entries; - s = hisi_hba->slot_index_count / BITS_PER_BYTE; - hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); + s = hisi_hba->slot_index_count = max_command_entries; + hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); if (!hisi_hba->slot_index_tags) goto err_out; @@ -2536,7 +2521,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) if (!hisi_hba->sata_breakpoint) goto err_out; - hisi_sas_slot_index_init(hisi_hba); hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 0ef6c21bf081..a45ef9a5e12e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -1484,7 +1484,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; - del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); @@ -1561,9 +1560,18 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) } phy->port_id = port_id; - phy->phy_attached = 1; - hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); + + /* Call pm_runtime_put_sync() with pairs in hisi_sas_phyup_pm_work() */ + pm_runtime_get_noresume(dev); + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM); + res = IRQ_HANDLED; + + spin_lock(&phy->lock); + /* Delete timer and set phy_attached atomically */ + del_timer(&phy->timer); + phy->phy_attached = 1; + spin_unlock(&phy->lock); end: if (phy->reset_completion) complete(phy->reset_completion); @@ -4775,6 +4783,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) scsi_scan_host(shost); + pm_runtime_set_autosuspend_delay(dev, 5000); + pm_runtime_use_autosuspend(dev); /* * For the situation that there are ATA disks connected with SAS * controller, it additionally creates ata_port which will affect the @@ -4848,6 +4858,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) int rc; dev_info(dev, "FLR prepare\n"); + down(&hisi_hba->sem); set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); hisi_sas_controller_reset_prepare(hisi_hba); @@ -4897,6 +4908,8 @@ static int _suspend_v3_hw(struct device *device) if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) return -1; + dev_warn(dev, "entering suspend state\n"); + 
scsi_block_requests(shost); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); flush_workqueue(hisi_hba->wq); @@ -4912,11 +4925,11 @@ static int _suspend_v3_hw(struct device *device) hisi_sas_init_mem(hisi_hba); - dev_warn(dev, "entering suspend state\n"); - hisi_sas_release_tasks(hisi_hba); sas_suspend_ha(sha); + + dev_warn(dev, "end of suspending controller\n"); return 0; } @@ -4943,9 +4956,19 @@ static int _resume_v3_hw(struct device *device) return rc; } phys_init_v3_hw(hisi_hba); - sas_resume_ha(sha); + + /* + * If a directly-attached disk is removed during suspend, a deadlock + * may occur, as the PHYE_RESUME_TIMEOUT processing will require the + * hisi_hba->device to be active, which can only happen when resume + * completes. So don't wait for the HA event workqueue to drain upon + * resume. + */ + sas_resume_ha_no_sync(sha); clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + dev_warn(dev, "end of resuming controller\n"); + return 0; } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 8049b00b6766..f69b77cbf538 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -61,6 +61,7 @@ static void scsi_host_cls_release(struct device *dev) static struct class shost_class = { .name = "scsi_host", .dev_release = scsi_host_cls_release, + .dev_groups = scsi_shost_groups, }; /** @@ -377,7 +378,7 @@ static struct device_type scsi_host_type = { struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) { struct Scsi_Host *shost; - int index, i, j = 0; + int index; shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL); if (!shost) @@ -483,17 +484,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->shost_dev.parent = &shost->shost_gendev; shost->shost_dev.class = &shost_class; dev_set_name(&shost->shost_dev, "host%d", shost->host_no); - shost->shost_dev.groups = shost->shost_dev_attr_groups; - shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group; - if (sht->shost_groups) { - for (i = 0; sht->shost_groups[i] && - j < ARRAY_SIZE(shost->shost_dev_attr_groups); - i++, j++) { - shost->shost_dev_attr_groups[j] = - sht->shost_groups[i]; - } - } - WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups)); + shost->shost_dev.groups = sht->shost_groups; shost->ehandler = kthread_run(scsi_error_handler, shost, "scsi_eh_%d", shost->host_no); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index cdf3328cc065..a47bcce3c9c7 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4354,7 +4354,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) int i, ndevs_to_allocate; int raid_ctlr_position; bool physical_device; - DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL); physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); @@ -4368,7 +4367,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) dev_err(&h->pdev->dev, "out of memory\n"); goto out; } - memset(lunzerobits, 0, sizeof(lunzerobits)); h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */ diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index fd6da96bc51a..5f96ac47d7fd 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -2602,13 +2602,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c /** * i91u_queuecommand_lck - Queue a new command if possible * @cmd: SCSI command block from the mid layer - * @done: Completion handler * * Attempts to queue a new command with the host adapter. 
Will return * zero if successful or indicate a host busy condition if not (which * will cause the mid layer to call us again later with the command) */ - static int i91u_queuecommand_lck(struct scsi_cmnd *cmd) { struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; @@ -2849,7 +2847,8 @@ static int initio_probe_one(struct pci_dev *pdev, for (; num_scb >= MAX_TARGETS + 3; num_scb--) { i = num_scb * sizeof(struct scsi_ctrl_blk); - if ((scb = kzalloc(i, GFP_DMA)) != NULL) + scb = kzalloc(i, GFP_KERNEL); + if (scb) break; } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 12e1e36d7c04..758213694091 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -8,7 +8,6 @@ #include <linux/scatterlist.h> #include <linux/slab.h> -#include <linux/async.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include "sas_internal.h" diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index f703115e7a25..3613b9b315bc 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c @@ -41,12 +41,25 @@ static int sas_queue_event(int event, struct sas_work *work, return rc; } - -void __sas_drain_work(struct sas_ha_struct *ha) +void sas_queue_deferred_work(struct sas_ha_struct *ha) { struct sas_work *sw, *_sw; int ret; + spin_lock_irq(&ha->lock); + list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { + list_del_init(&sw->drain_node); + ret = sas_queue_work(ha, sw); + if (ret != 1) { + pm_runtime_put(ha->dev); + sas_free_event(to_asd_sas_event(&sw->work)); + } + } + spin_unlock_irq(&ha->lock); +} + +void __sas_drain_work(struct sas_ha_struct *ha) +{ set_bit(SAS_HA_DRAINING, &ha->state); /* flush submitters */ spin_lock_irq(&ha->lock); @@ -55,16 +68,8 @@ void __sas_drain_work(struct sas_ha_struct *ha) drain_workqueue(ha->event_q); drain_workqueue(ha->disco_q); - spin_lock_irq(&ha->lock); clear_bit(SAS_HA_DRAINING, &ha->state); - list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { - list_del_init(&sw->drain_node); - ret = sas_queue_work(ha, sw); - if (ret != 1) - sas_free_event(to_asd_sas_event(&sw->work)); - - } - spin_unlock_irq(&ha->lock); + sas_queue_deferred_work(ha); } int sas_drain_work(struct sas_ha_struct *ha) @@ -104,11 +109,15 @@ void sas_enable_revalidation(struct sas_ha_struct *ha) if (!test_and_clear_bit(ev, &d->pending)) continue; - if (list_empty(&port->phy_list)) + spin_lock(&port->phy_list_lock); + if (list_empty(&port->phy_list)) { + spin_unlock(&port->phy_list_lock); continue; + } sas_phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el); + spin_unlock(&port->phy_list_lock); sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_KERNEL); } @@ -119,19 +128,43 @@ void sas_enable_revalidation(struct sas_ha_struct *ha) static void sas_port_event_worker(struct work_struct *work) { struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *ha = phy->ha; sas_port_event_fns[ev->event](work); + pm_runtime_put(ha->dev); sas_free_event(ev); } static void sas_phy_event_worker(struct work_struct *work) { struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *ha = phy->ha; sas_phy_event_fns[ev->event](work); + pm_runtime_put(ha->dev); sas_free_event(ev); } +/* defer works of new phys during suspend */ +static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev) +{ + struct sas_ha_struct *ha = 
phy->ha; + unsigned long flags; + bool deferred = false; + + spin_lock_irqsave(&ha->lock, flags); + if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) { + struct sas_work *sw = &ev->work; + + list_add_tail(&sw->drain_node, &ha->defer_q); + deferred = true; + } + spin_unlock_irqrestore(&ha->lock, flags); + return deferred; +} + int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, gfp_t gfp_flags) { @@ -145,11 +178,19 @@ int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, if (!ev) return -ENOMEM; + /* Call pm_runtime_put() with pairs in sas_port_event_worker() */ + pm_runtime_get_noresume(ha->dev); + INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event); + if (sas_defer_event(phy, ev)) + return 0; + ret = sas_queue_event(event, &ev->work, ha); - if (ret != 1) + if (ret != 1) { + pm_runtime_put(ha->dev); sas_free_event(ev); + } return ret; } @@ -168,11 +209,19 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, if (!ev) return -ENOMEM; + /* Call pm_runtime_put() with pairs in sas_phy_event_worker() */ + pm_runtime_get_noresume(ha->dev); + INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event); + if (sas_defer_event(phy, ev)) + return 0; + ret = sas_queue_event(event, &ev->work, ha); - if (ret != 1) + if (ret != 1) { + pm_runtime_put(ha->dev); sas_free_event(ev); + } return ret; } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index c2150a818423..6abce9dfc17b 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -58,7 +58,9 @@ static int smp_execute_task_sg(struct domain_device *dev, struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); + struct sas_ha_struct *ha = dev->port->ha; + pm_runtime_get_sync(ha->dev); mutex_lock(&dev->ex_dev.cmd_mutex); for (retry = 0; retry < 3; retry++) { if (test_bit(SAS_DEV_GONE, &dev->state)) { @@ -131,6 +133,7 @@ static int smp_execute_task_sg(struct domain_device *dev, } } mutex_unlock(&dev->ex_dev.cmd_mutex); + pm_runtime_put_sync(ha->dev); BUG_ON(retry == 3 && task != NULL); sas_free_task(task); diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index b640e09af6a4..dc35f0f8eae3 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -362,6 +362,7 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha) int i; set_bit(SAS_HA_REGISTERED, &ha->state); + set_bit(SAS_HA_RESUMING, &ha->state); /* clear out any stale link events/data from the suspension path */ for (i = 0; i < ha->num_phys; i++) { @@ -387,7 +388,31 @@ static int phys_suspended(struct sas_ha_struct *ha) return rc; } -void sas_resume_ha(struct sas_ha_struct *ha) +static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha) +{ + int i; + + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + struct domain_device *dev = port->port_dev; + + if (dev && dev_is_expander(dev->dev_type)) { + struct asd_sas_phy *first_phy; + + spin_lock(&port->phy_list_lock); + first_phy = list_first_entry_or_null( + &port->phy_list, struct asd_sas_phy, + port_phy_el); + spin_unlock(&port->phy_list_lock); + + if (first_phy) + sas_notify_port_event(first_phy, + PORTE_BROADCAST_RCVD, GFP_KERNEL); + } + } +} + +static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain) { const unsigned long tmo = msecs_to_jiffies(25000); int i; @@ -417,10 +442,30 @@ void sas_resume_ha(struct sas_ha_struct *ha) * flush out disks that did not 
return */ scsi_unblock_requests(ha->core.shost); - sas_drain_work(ha); + if (drain) + sas_drain_work(ha); + clear_bit(SAS_HA_RESUMING, &ha->state); + + sas_queue_deferred_work(ha); + /* send event PORTE_BROADCAST_RCVD to identify some new inserted + * disks for expander + */ + sas_resume_insert_broadcast_ha(ha); +} + +void sas_resume_ha(struct sas_ha_struct *ha) +{ + _sas_resume_ha(ha, true); } EXPORT_SYMBOL(sas_resume_ha); +/* A no-sync variant, which does not call sas_drain_ha(). */ +void sas_resume_ha_no_sync(struct sas_ha_struct *ha) +{ + _sas_resume_ha(ha, false); +} +EXPORT_SYMBOL(sas_resume_ha_no_sync); + void sas_suspend_ha(struct sas_ha_struct *ha) { int i; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index d7a1fb5c10c6..acd515c01861 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -14,6 +14,7 @@ #include <scsi/scsi_transport_sas.h> #include <scsi/libsas.h> #include <scsi/sas_ata.h> +#include <linux/pm_runtime.h> #ifdef pr_fmt #undef pr_fmt @@ -56,6 +57,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha); void sas_disable_revalidation(struct sas_ha_struct *ha); void sas_enable_revalidation(struct sas_ha_struct *ha); +void sas_queue_deferred_work(struct sas_ha_struct *ha); void __sas_drain_work(struct sas_ha_struct *ha); void sas_deform_port(struct asd_sas_phy *phy, int gone); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index d337fdf1b9ca..fb19e739a39c 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -37,7 +37,8 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) { struct task_status_struct *ts = &task->task_status; - int hs = 0, stat = 0; + enum scsi_host_status hs = DID_OK; + enum exec_status stat = SAS_SAM_STAT_GOOD; if (ts->resp == SAS_TASK_UNDELIVERED) { /* transport error */ @@ -82,10 +83,10 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) case SAS_ABORTED_TASK: hs = DID_ABORT; break; - case SAM_STAT_CHECK_CONDITION: + case SAS_SAM_STAT_CHECK_CONDITION: memcpy(sc->sense_buffer, ts->buf, min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); - stat = SAM_STAT_CHECK_CONDITION; + stat = SAS_SAM_STAT_CHECK_CONDITION; break; default: stat = ts->stat; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 2f8e6d0a926f..4878c94761f9 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -496,52 +496,50 @@ struct lpfc_cgn_info { __le32 cgn_alarm_hr[24]; __le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS]; - /* Start of congestion statistics */ - uint8_t cgn_stat_npm; /* Notifications per minute */ - - /* Start Time */ - uint8_t cgn_stat_month; - uint8_t cgn_stat_day; - uint8_t cgn_stat_year; - uint8_t cgn_stat_hour; - uint8_t cgn_stat_minute; - uint8_t cgn_pad2[2]; - - __le32 cgn_notification; - __le32 cgn_peer_notification; - __le32 link_integ_notification; - __le32 delivery_notification; - - uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */ - uint8_t cgn_stat_cgn_day; - uint8_t cgn_stat_cgn_year; - uint8_t cgn_stat_cgn_hour; - uint8_t cgn_stat_cgn_min; - uint8_t cgn_stat_cgn_sec; - - uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */ - uint8_t cgn_stat_peer_day; - uint8_t cgn_stat_peer_year; - uint8_t cgn_stat_peer_hour; - uint8_t cgn_stat_peer_min; - uint8_t cgn_stat_peer_sec; - - uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */ - uint8_t cgn_stat_lnk_day; - uint8_t cgn_stat_lnk_year; - uint8_t 
cgn_stat_lnk_hour; - uint8_t cgn_stat_lnk_min; - uint8_t cgn_stat_lnk_sec; - - uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */ - uint8_t cgn_stat_del_day; - uint8_t cgn_stat_del_year; - uint8_t cgn_stat_del_hour; - uint8_t cgn_stat_del_min; - uint8_t cgn_stat_del_sec; -#define LPFC_CGN_STAT_SIZE 48 -#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \ - LPFC_CGN_STAT_SIZE - sizeof(uint32_t)) + struct_group(cgn_stat, + uint8_t cgn_stat_npm; /* Notifications per minute */ + + /* Start Time */ + uint8_t cgn_stat_month; + uint8_t cgn_stat_day; + uint8_t cgn_stat_year; + uint8_t cgn_stat_hour; + uint8_t cgn_stat_minute; + uint8_t cgn_pad2[2]; + + __le32 cgn_notification; + __le32 cgn_peer_notification; + __le32 link_integ_notification; + __le32 delivery_notification; + + uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */ + uint8_t cgn_stat_cgn_day; + uint8_t cgn_stat_cgn_year; + uint8_t cgn_stat_cgn_hour; + uint8_t cgn_stat_cgn_min; + uint8_t cgn_stat_cgn_sec; + + uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */ + uint8_t cgn_stat_peer_day; + uint8_t cgn_stat_peer_year; + uint8_t cgn_stat_peer_hour; + uint8_t cgn_stat_peer_min; + uint8_t cgn_stat_peer_sec; + + uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */ + uint8_t cgn_stat_lnk_day; + uint8_t cgn_stat_lnk_year; + uint8_t cgn_stat_lnk_hour; + uint8_t cgn_stat_lnk_min; + uint8_t cgn_stat_lnk_sec; + + uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */ + uint8_t cgn_stat_del_day; + uint8_t cgn_stat_del_year; + uint8_t cgn_stat_del_hour; + uint8_t cgn_stat_del_min; + uint8_t cgn_stat_del_sec; + ); __le32 cgn_info_crc; #define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41 @@ -669,8 +667,6 @@ struct lpfc_vport { struct timer_list els_tmofunc; struct timer_list delayed_disc_tmo; - int unreg_vpi_cmpl; - uint8_t load_flag; #define FC_LOADING 0x1 /* HBA in process of loading drvr */ #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ @@ -1023,7 +1019,6 @@ struct lpfc_hba { #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ -#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ #define HBA_FORCED_LINK_SPEED 0x40000 /* * Firmware supports Forced Link Speed @@ -1031,7 +1026,7 @@ struct lpfc_hba { */ #define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ -#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */ +#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */ #define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */ #define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ #define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ @@ -1040,6 +1035,7 @@ struct lpfc_hba { #define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */ #define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */ + struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -1604,6 +1600,7 @@ struct lpfc_hba { #define LPFC_MAX_RXMONITOR_ENTRY 800 #define LPFC_MAX_RXMONITOR_DUMP 32 struct rxtable_entry { + uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */ uint64_t total_bytes; /* Total no of read bytes requested */ uint64_t 
rcv_bytes; /* Total no of read bytes completed */ uint64_t avg_io_size; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index dd4c51b6ef4e..7a7f17d71811 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1709,25 +1709,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) before_fc_flag = phba->pport->fc_flag; sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn; - /* Disable SR-IOV virtual functions if enabled */ - if (phba->cfg_sriov_nr_virtfn) { - pci_disable_sriov(pdev); - phba->cfg_sriov_nr_virtfn = 0; - } + if (opcode == LPFC_FW_DUMP) { + init_completion(&online_compl); + phba->fw_dump_cmpl = &online_compl; + } else { + /* Disable SR-IOV virtual functions if enabled */ + if (phba->cfg_sriov_nr_virtfn) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } - if (opcode == LPFC_FW_DUMP) - phba->hba_flag |= HBA_FW_DUMP_OP; + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + if (status != 0) + return status; - if (status != 0) { - phba->hba_flag &= ~HBA_FW_DUMP_OP; - return status; + /* wait for the device to be quiesced before firmware reset */ + msleep(100); } - /* wait for the device to be quiesced before firmware reset */ - msleep(100); - reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); @@ -1756,24 +1756,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3153 Fail to perform the requested " "access: x%x\n", reg_val); + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; return rc; } /* keep the original port state */ - if (before_fc_flag & FC_OFFLINE_MODE) - goto out; - - init_completion(&online_compl); - job_posted = lpfc_workq_post_event(phba, &status, &online_compl, - LPFC_EVT_ONLINE); - if (!job_posted) + if (before_fc_flag & FC_OFFLINE_MODE) { + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; goto out; + } - wait_for_completion(&online_compl); + /* Firmware dump will trigger an HA_ERATT event, and + * lpfc_handle_eratt_s4 routine already handles bringing the port back + * online. + */ + if (opcode == LPFC_FW_DUMP) { + wait_for_completion(phba->fw_dump_cmpl); + } else { + init_completion(&online_compl); + job_posted = lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + if (!job_posted) + goto out; + wait_for_completion(&online_compl); + } out: /* in any case, restore the virtual functions enabled as before */ if (sriov_nr_virtfn) { + /* If fw_dump was performed, first disable to clean up */ + if (opcode == LPFC_FW_DUMP) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } + sriov_err = lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); if (!sriov_err) diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 08b2e85dcd7d..30fac2f6fb06 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -5484,7 +5484,7 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes, if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) { len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "Truncated . . 
.\n"); - break; + goto out; } len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "%03x: %08x %08x %08x %08x " @@ -5495,6 +5495,17 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes, cnt += 32; ptr += 8; } + if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) { + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Truncated . . .\n"); + goto out; + } + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Parameter Data\n"); + ptr = (uint32_t *)&phba->cgn_p; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "%08x %08x %08x %08x\n", + *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3)); out: return simple_read_from_buffer(buf, nbytes, ppos, buffer, len); } @@ -5561,22 +5572,24 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes, start = tail; len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, - " MaxBPI\t Total Data Cmd Total Data Cmpl " - " Latency(us) Avg IO Size\tMax IO Size IO cnt " - "Info BWutil(ms)\n"); + " MaxBPI Tot_Data_CMF Tot_Data_Cmd " + "Tot_Data_Cmpl Lat(us) Avg_IO Max_IO " + "Bsy IO_cnt Info BWutil(ms)\n"); get_table: for (i = start; i < last; i++) { entry = &phba->rxtable[i]; len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, - "%3d:%12lld %12lld\t%12lld\t" - "%8lldus\t%8lld\t%10lld " - "%8d %2d %2d(%2d)\n", + "%3d:%12lld %12lld %12lld %12lld " + "%7lldus %8lld %7lld " + "%2d %4d %2d %2d(%2d)\n", i, entry->max_bytes_per_interval, + entry->cmf_bytes, entry->total_bytes, entry->rcv_bytes, entry->avg_io_latency, entry->avg_io_size, entry->max_read_cnt, + entry->cmf_busy, entry->io_cnt, entry->cmf_info, entry->timer_utilization, diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index a5bf71b34972..6dd361c1fd31 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -282,7 +282,7 @@ struct lpfc_idiag { void *ptr_private; }; -#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY) +#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY) struct lpfc_rx_monitor_debug { char *i_private; char *buffer; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index e83453bea2ae..db5ccae1b63d 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3538,11 +3538,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) return 1; } - /* This will cause the callback-function lpfc_cmpl_els_cmd to - * trigger the release of node. 
- */ - if (!(vport->fc_flag & FC_PT2PT)) - lpfc_nlp_put(ndlp); return 0; } @@ -6899,6 +6894,7 @@ static int lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) { LPFC_MBOXQ_t *mbox = NULL; + struct lpfc_dmabuf *mp; int rc; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -6914,8 +6910,11 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) + if (rc == MBX_NOT_FINISHED) { + mp = (struct lpfc_dmabuf *)mbox->ctx_buf; + lpfc_mbuf_free(phba, mp->virt, mp->phys); goto issue_mbox_fail; + } return 0; @@ -10974,10 +10973,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_can_disctmo(vport); } + if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { + /* Wake up lpfc_vport_delete if waiting...*/ + if (ndlp->logo_waitq) + wake_up(ndlp->logo_waitq); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); + ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; + spin_unlock_irq(&ndlp->lock); + } + /* Safe to release resources now. */ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); - vport->unreg_vpi_cmpl = VPORT_ERROR; } /** diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 9fe6e5b386ce..816fc406135b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -869,10 +869,16 @@ lpfc_work_done(struct lpfc_hba *phba) if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) lpfc_sli4_post_async_mbox(phba); - if (ha_copy & HA_ERATT) + if (ha_copy & HA_ERATT) { /* Handle the error attention event */ lpfc_handle_eratt(phba); + if (phba->fw_dump_cmpl) { + complete(phba->fw_dump_cmpl); + phba->fw_dump_cmpl = NULL; + } + } + if (ha_copy & HA_MBATT) lpfc_sli_handle_mb_event(phba); @@ -3928,7 +3934,6 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport->vpi_state &= ~LPFC_VPI_REGISTERED; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); - vport->unreg_vpi_cmpl = VPORT_OK; mempool_free(pmb, phba->mbox_mem_pool); lpfc_cleanup_vports_rrqs(vport, NULL); /* @@ -3958,7 +3963,6 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1800 Could not issue unreg_vpi\n"); mempool_free(mbox, phba->mbox_mem_pool); - vport->unreg_vpi_cmpl = VPORT_ERROR; return rc; } return 0; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 634f8fff7425..4461c3d6fc4f 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -3675,19 +3675,26 @@ union sli_var { }; typedef struct { + struct_group_tagged(MAILBOX_word0, bits, + union { + struct { #ifdef __BIG_ENDIAN_BITFIELD - uint16_t mbxStatus; - uint8_t mbxCommand; - uint8_t mbxReserved:6; - uint8_t mbxHc:1; - uint8_t mbxOwner:1; /* Low order bit first word */ + uint16_t mbxStatus; + uint8_t mbxCommand; + uint8_t mbxReserved:6; + uint8_t mbxHc:1; + uint8_t mbxOwner:1; /* Low order bit first word */ #else /* __LITTLE_ENDIAN_BITFIELD */ - uint8_t mbxOwner:1; /* Low order bit first word */ - uint8_t mbxHc:1; - uint8_t mbxReserved:6; - uint8_t mbxCommand; - uint16_t mbxStatus; + uint8_t mbxOwner:1; /* Low order bit first word */ + uint8_t mbxHc:1; + uint8_t mbxReserved:6; + uint8_t mbxCommand; + uint16_t mbxStatus; #endif + }; + u32 word0; + }; + ); MAILVARIANTS un; union sli_var us; @@ -3746,7 +3753,7 @@ typedef struct { 
#define IOERR_ILLEGAL_COMMAND 0x06 #define IOERR_XCHG_DROPPED 0x07 #define IOERR_ILLEGAL_FIELD 0x08 -#define IOERR_BAD_CONTINUE 0x09 +#define IOERR_RPI_SUSPENDED 0x09 #define IOERR_TOO_MANY_BUFFERS 0x0A #define IOERR_RCV_BUFFER_WAITING 0x0B #define IOERR_NO_CONNECTION 0x0C diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ba17a8f740a9..a56f01f659f8 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -5373,8 +5373,10 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, */ if (!(phba->hba_flag & HBA_FCOE_MODE)) { rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) + if (rc == MBX_NOT_FINISHED) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); goto out_free_dmabuf; + } return; } /* @@ -5925,7 +5927,7 @@ lpfc_cmf_timer(struct hrtimer *timer) uint32_t io_cnt; uint32_t head, tail; uint32_t busy, max_read; - uint64_t total, rcv, lat, mbpi, extra; + uint64_t total, rcv, lat, mbpi, extra, cnt; int timer_interval = LPFC_CMF_INTERVAL; uint32_t ms; struct lpfc_cgn_stat *cgs; @@ -5996,20 +5998,28 @@ lpfc_cmf_timer(struct hrtimer *timer) /* Calculate any extra bytes needed to account for the * timer accuracy. If we are less than LPFC_CMF_INTERVAL - * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL - * add an extra 2%. The goal is to equalize total with a - * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1 + * calculate the adjustment needed for total to reflect + * a full LPFC_CMF_INTERVAL. */ - if (ms == LPFC_CMF_INTERVAL) - extra = div_u64(total, 50); - else if (ms < LPFC_CMF_INTERVAL) - extra = div_u64(total, 33); + if (ms && ms < LPFC_CMF_INTERVAL) { + cnt = div_u64(total, ms); /* bytes per ms */ + cnt *= LPFC_CMF_INTERVAL; /* what total should be */ + + /* If the timeout is scheduled to be shorter, + * this value may skew the data, so cap it at mbpi. + */ + if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi) + cnt = mbpi; + + extra = cnt - total; + } lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); } else { /* For Monitor mode or link down we want mbpi * to be the full link speed */ mbpi = phba->cmf_link_byte_count; + extra = 0; } phba->cmf_timer_cnt++; @@ -6040,6 +6050,7 @@ lpfc_cmf_timer(struct hrtimer *timer) LPFC_RXMONITOR_TABLE_IN_USE); entry = &phba->rxtable[head]; entry->total_bytes = total; + entry->cmf_bytes = total + extra; entry->rcv_bytes = rcv; entry->cmf_busy = busy; entry->cmf_info = phba->cmf_active_info; @@ -6082,6 +6093,8 @@ lpfc_cmf_timer(struct hrtimer *timer) /* Each minute save Fabric and Driver congestion information */ lpfc_cgn_save_evt_cnt(phba); + phba->hba_flag &= ~HBA_SHORT_CMF; + /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the * minute, adjust our next timer interval, if needed, to ensure a * 1 minute granularity when we get the next timer interrupt. @@ -6092,6 +6105,8 @@ lpfc_cmf_timer(struct hrtimer *timer) jiffies); if (timer_interval <= 0) timer_interval = LPFC_CMF_INTERVAL; + else + phba->hba_flag |= HBA_SHORT_CMF; /* If we adjust timer_interval, max_bytes_per_interval * needs to be adjusted as well. 
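The lpfc_cmf_timer() hunk above drops the fixed 2%/3% slop factors in favor of proportional extrapolation: when the timer fires after only ms milliseconds of the nominal LPFC_CMF_INTERVAL, the observed byte count is scaled up to what a full interval would have seen, and the result is capped at mbpi when HBA_SHORT_CMF marks a deliberately shortened timeout. Below is a minimal user-space sketch of that arithmetic; the helper name and the interval constant's value are illustrative, not taken from the driver, and plain 64-bit division stands in for the kernel's div_u64().

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define CMF_INTERVAL_MS 90U	/* stand-in for LPFC_CMF_INTERVAL; illustrative value */

static uint64_t extrapolate_total(uint64_t total, uint32_t ms,
				  uint64_t mbpi, bool short_cmf)
{
	uint64_t cnt;

	if (ms == 0 || ms >= CMF_INTERVAL_MS)
		return total;		/* full interval observed: no adjustment */

	cnt = total / ms;		/* bytes per ms (div_u64() in the kernel) */
	cnt *= CMF_INTERVAL_MS;		/* what total should be over a full interval */

	/* If the timeout was deliberately shortened, cap at mbpi so the
	 * extrapolation cannot skew the data.
	 */
	if (short_cmf && cnt > mbpi)
		cnt = mbpi;

	return cnt;			/* i.e. total + extra in the driver */
}

int main(void)
{
	/* 6 MB seen in 60 ms of a 90 ms interval extrapolates to 9 MB */
	printf("%llu\n", (unsigned long long)
	       extrapolate_total(6000000ULL, 60, 12000000ULL, false));
	return 0;
}

Note that the rxtable hunk records both views: entry->total_bytes keeps the raw observation, while entry->cmf_bytes stores total + extra, which is what the CMF_SYNC_WQE is issued with.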
@@ -6337,8 +6352,10 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) } rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) + if (rc == MBX_NOT_FINISHED) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); goto out_free_dmabuf; + } return; out_free_dmabuf: @@ -12709,7 +12726,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) cpumask_clear(&eqhdl->aff_mask); cpumask_set_cpu(cpu, &eqhdl->aff_mask); irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); - irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); + irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); } /** @@ -12998,7 +13015,6 @@ cfg_fail_out: for (--index; index >= 0; index--) { eqhdl = lpfc_get_eq_hdl(index); lpfc_irq_clear_aff(eqhdl); - irq_set_affinity_hint(eqhdl->irq, NULL); free_irq(eqhdl->irq, eqhdl); } @@ -13159,7 +13175,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba) for (index = 0; index < phba->cfg_irq_chann; index++) { eqhdl = lpfc_get_eq_hdl(index); lpfc_irq_clear_aff(eqhdl); - irq_set_affinity_hint(eqhdl->irq, NULL); free_irq(eqhdl->irq, eqhdl); } } else { @@ -13466,7 +13481,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba) phba->cgn_evt_minute = 0; phba->hba_flag &= ~HBA_CGN_DAY_WRAP; - memset(cp, 0xff, LPFC_CGN_DATA_SIZE); + memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); cp->cgn_info_version = LPFC_CGN_INFO_V3; @@ -13525,7 +13540,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba) return; cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; - memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE); + memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); ktime_get_real_ts64(&cmpl_time); time64_to_tm(cmpl_time.tv_sec, 0, &broken); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 27263f02ab9f..7d717a4ac14d 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -322,6 +322,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; + struct lpfc_dmabuf *mp; uint64_t nlp_portwwn = 0; uint32_t *lp; IOCB_t *icmd; @@ -571,6 +572,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * a default RPI. 
*/ if (phba->sli_rev == LPFC_SLI_REV4) { + mp = (struct lpfc_dmabuf *)login_mbox->ctx_buf; + if (mp) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } mempool_free(login_mbox, phba->mbox_mem_pool); login_mbox = NULL; } else { diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 6ccf573acdec..5a3da38a9067 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4393,6 +4393,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, if (lpfc_cmd->result == IOERR_INVALID_RPI || lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_ABORT_REQUESTED || + lpfc_cmd->result == IOERR_RPI_SUSPENDED || lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { cmd->result = DID_REQUEUE << 16; break; @@ -4448,10 +4449,11 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "9039 Iodone <%d/%llu> cmd x%px, error " - "x%x SNS x%x x%x Data: x%x x%x\n", + "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n", cmd->device->id, cmd->device->lun, cmd, - cmd->result, *lp, *(lp + 3), cmd->retries, - scsi_get_resid(cmd)); + cmd->result, *lp, *(lp + 3), + (u64)scsi_get_lba(cmd), + cmd->retries, scsi_get_resid(cmd)); } lpfc_update_stats(vport, lpfc_cmd); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 5dedb3de271d..cd26c0f8c281 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4749,7 +4749,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) { uint32_t __iomem *resp_buf; uint32_t __iomem *mbox_buf; - volatile uint32_t mbox; + volatile struct MAILBOX_word0 mbox; uint32_t hc_copy, ha_copy, resp_data; int i; uint8_t hdrtype; @@ -4783,13 +4783,13 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) phba->pport->stopped = 1; } - mbox = 0; - ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; - ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; + mbox.word0 = 0; + mbox.mbxCommand = MBX_KILL_BOARD; + mbox.mbxOwner = OWN_CHIP; writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); mbox_buf = phba->MBslimaddr; - writel(mbox, mbox_buf); + writel(mbox.word0, mbox_buf); for (i = 0; i < 50; i++) { if (lpfc_readl((resp_buf + 1), &resp_data)) @@ -4810,12 +4810,12 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) goto clear_errat; } - ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; + mbox.mbxOwner = OWN_HOST; resp_data = 0; for (i = 0; i < 500; i++) { if (lpfc_readl(resp_buf, &resp_data)) return; - if (resp_data != mbox) + if (resp_data != mbox.word0) mdelay(1); else break; @@ -5046,12 +5046,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); - /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ - if (phba->hba_flag & HBA_FW_DUMP_OP) { - phba->hba_flag &= ~HBA_FW_DUMP_OP; - return rc; - } - /* Now physically reset the device */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0389 Performing PCI function reset!\n"); @@ -5091,9 +5085,8 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) static int lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) { - MAILBOX_t *mb; + volatile struct MAILBOX_word0 mb; struct lpfc_sli *psli; - volatile uint32_t word0; void __iomem *to_slim; uint32_t hba_aer_enabled; @@ -5110,24 +5103,23 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) (phba->pport) ? 
phba->pport->port_state : 0, psli->sli_flag); - word0 = 0; - mb = (MAILBOX_t *) &word0; - mb->mbxCommand = MBX_RESTART; - mb->mbxHc = 1; + mb.word0 = 0; + mb.mbxCommand = MBX_RESTART; + mb.mbxHc = 1; lpfc_reset_barrier(phba); to_slim = phba->MBslimaddr; - writel(*(uint32_t *) mb, to_slim); + writel(mb.word0, to_slim); readl(to_slim); /* flush */ /* Only skip post after fc_ffinit is completed */ if (phba->pport && phba->pport->port_state) - word0 = 1; /* This is really setting up word1 */ + mb.word0 = 1; /* This is really setting up word1 */ else - word0 = 0; /* This is really setting up word1 */ + mb.word0 = 0; /* This is really setting up word1 */ to_slim = phba->MBslimaddr + sizeof (uint32_t); - writel(*(uint32_t *) mb, to_slim); + writel(mb.word0, to_slim); readl(to_slim); /* flush */ lpfc_sli_brdreset(phba); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 5a4d3b24fbce..2e9348a6897c 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.0.0.3" +#define LPFC_DRIVER_VERSION "14.0.0.4" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index da9a1f72d938..d694d0cff5a5 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -486,22 +486,67 @@ error_out: } static int +lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + int rc; + struct lpfc_hba *phba = vport->phba; + + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); + + spin_lock_irq(&ndlp->lock); + if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) && + !ndlp->logo_waitq) { + ndlp->logo_waitq = &waitq; + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag |= NLP_ISSUE_LOGO; + ndlp->save_flags |= NLP_WAIT_FOR_LOGO; + } + spin_unlock_irq(&ndlp->lock); + rc = lpfc_issue_els_npiv_logo(vport, ndlp); + if (!rc) { + wait_event_timeout(waitq, + (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)), + msecs_to_jiffies(phba->fc_ratov * 2000)); + + if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)) + goto logo_cmpl; + /* LOGO wait failed. Correct status. */ + rc = -EINTR; + } else { + rc = -EIO; + } + + /* Error - clean up node flags. */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; + ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; + spin_unlock_irq(&ndlp->lock); + + logo_cmpl: + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, + "1824 Issue LOGO completes with status %d\n", + rc); + spin_lock_irq(&ndlp->lock); + ndlp->logo_waitq = NULL; + spin_unlock_irq(&ndlp->lock); + return rc; +} + +static int disable_vport(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; - long timeout; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + /* Can't disable during an outstanding delete. 
*/ + if (vport->load_flag & FC_UNLOADING) + return 0; + ndlp = lpfc_findnode_did(vport, Fabric_DID); - if (ndlp && phba->link_state >= LPFC_LINK_UP) { - vport->unreg_vpi_cmpl = VPORT_INVAL; - timeout = msecs_to_jiffies(phba->fc_ratov * 2000); - if (!lpfc_issue_els_npiv_logo(vport, ndlp)) - while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) - timeout = schedule_timeout(timeout); - } + if (ndlp && phba->link_state >= LPFC_LINK_UP) + (void)lpfc_send_npiv_logo(vport, ndlp); lpfc_sli_host_down(vport); @@ -600,7 +645,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; - long timeout; + int rc; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -665,15 +710,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport) phba->fc_topology != LPFC_TOPOLOGY_LOOP) { if (vport->cfg_enable_da_id) { /* Send DA_ID and wait for a completion. */ - timeout = msecs_to_jiffies(phba->fc_ratov * 2000); - if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) - while (vport->ct_flags && timeout) - timeout = schedule_timeout(timeout); - else + rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0); + if (rc) { lpfc_printf_log(vport->phba, KERN_WARNING, LOG_VPORT, "1829 CT command failed to " - "delete objects on fabric\n"); + "delete objects on fabric, " + "rc %d\n", rc); + } } /* @@ -688,11 +732,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport) ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) goto skip_logo; - vport->unreg_vpi_cmpl = VPORT_INVAL; - timeout = msecs_to_jiffies(phba->fc_ratov * 2000); - if (!lpfc_issue_els_npiv_logo(vport, ndlp)) - while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) - timeout = schedule_timeout(timeout); + + rc = lpfc_send_npiv_logo(vport, ndlp); + if (rc) + goto skip_logo; } if (!(phba->pport->load_flag & FC_UNLOADING)) diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 14f930d27ca1..2a339d4a7e9d 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -1431,7 +1431,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb) /** * megaraid_queue_command_lck - generic queue entry point for all LLDs * @scp : pointer to the scsi command to be executed - * @done : callback routine to be called after the cmd has be completed * * Queue entry point for mailbox based controllers. 
*/ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index aeb95f409826..82e1e24257bc 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5720,7 +5720,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) "Failed to register IRQ for vector %d.\n", i); for (j = 0; j < i; j++) { if (j < instance->low_latency_index_start) - irq_set_affinity_hint( + irq_update_affinity_hint( pci_irq_vector(pdev, j), NULL); free_irq(pci_irq_vector(pdev, j), &instance->irq_context[j]); @@ -5763,7 +5763,7 @@ megasas_destroy_irqs(struct megasas_instance *instance) { if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { if (i < instance->low_latency_index_start) - irq_set_affinity_hint( + irq_update_affinity_hint( pci_irq_vector(instance->pdev, i), NULL); free_irq(pci_irq_vector(instance->pdev, i), &instance->irq_context[i]); @@ -5894,22 +5894,25 @@ int megasas_get_device_list(struct megasas_instance *instance) } /** - * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues - * @instance: Adapter soft state - * return: void + * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint + * for high IOPS queues + * @instance: Adapter soft state + * return: void */ static inline void -megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance) +megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) { int i; - int local_numa_node; + unsigned int irq; + const struct cpumask *mask; if (instance->perf_mode == MR_BALANCED_PERF_MODE) { - local_numa_node = dev_to_node(&instance->pdev->dev); + mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); - for (i = 0; i < instance->low_latency_index_start; i++) - irq_set_affinity_hint(pci_irq_vector(instance->pdev, i), - cpumask_of_node(local_numa_node)); + for (i = 0; i < instance->low_latency_index_start; i++) { + irq = pci_irq_vector(instance->pdev, i); + irq_set_affinity_and_hint(irq, mask); + } } } @@ -5998,7 +6001,7 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance) instance->msix_vectors = 0; if (instance->smp_affinity_enable) - megasas_set_high_iops_queue_affinity_hint(instance); + megasas_set_high_iops_queue_affinity_and_hint(instance); } /** diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h index d43bbecef651..5e1f6ced0e71 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h @@ -8,7 +8,7 @@ #define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00) #define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01) #define MPI3_CONFIG_PAGETYPE_IOC (0x02) -#define MPI3_CONFIG_PAGETYPE_UEFI_BSD (0x03) +#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03) #define MPI3_CONFIG_PAGETYPE_SECURITY (0x04) #define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11) #define MPI3_CONFIG_PAGETYPE_DEVICE (0x12) @@ -181,8 +181,17 @@ struct mpi3_config_page_header { #define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a) #define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b) #define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c) -#define MPI3_SLOT_INVALID (0xffff) -#define MPI3_SLOT_INDEX_INVALID (0xffff) +#define MPI3_SLOT_INVALID (0xffff) +#define MPI3_SLOT_INDEX_INVALID (0xffff) +#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff) +#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff) +#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0) +#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1) +#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2) +#define 
MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3) +#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000) +#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5) +#define MPI3_MFGPAGE_DEVID_SAS4016 (0x00a7) struct mpi3_man_page0 { struct mpi3_config_page_header header; u8 chip_revision[8]; @@ -195,7 +204,7 @@ struct mpi3_man_page0 { __le32 reserved98; u8 oem; u8 sub_oem; - __le16 reserved9e; + __le16 flags; u8 board_mfg_day; u8 board_mfg_month; __le16 board_mfg_year; @@ -208,6 +217,8 @@ struct mpi3_man_page0 { }; #define MPI3_MAN0_PAGEVERSION (0x00) +#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002) +#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001) #define MPI3_MAN1_VPD_SIZE (512) struct mpi3_man_page1 { struct mpi3_config_page_header header; @@ -236,7 +247,7 @@ struct mpi3_man_page5 { #define MPI3_MAN5_PAGEVERSION (0x00) struct mpi3_man6_gpio_entry { u8 function_code; - u8 reserved01; + u8 function_flags; __le16 flags; u8 param1; u8 param2; @@ -253,7 +264,6 @@ struct mpi3_man6_gpio_entry { #define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06) #define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07) #define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08) -#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_MUX_RESET (0x09) #define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a) #define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b) #define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c) @@ -263,6 +273,10 @@ struct mpi3_man6_gpio_entry { #define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10) #define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11) #define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12) +#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13) +#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01) +#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00) +#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01) #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0) #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00) #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10) @@ -275,8 +289,6 @@ struct mpi3_man6_gpio_entry { #define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00) #define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01) #define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02) -#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_SPEC_MUX (0x00) -#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_ALL_MUXES (0x01) #define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00) #define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100) #define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100) @@ -353,6 +365,7 @@ struct mpi3_man8_phy_info { __le32 reserved0c; }; +#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_HOST_PHY (0xff) #ifndef MPI3_MAN8_PHY_INFO_MAX #define MPI3_MAN8_PHY_INFO_MAX (1) #endif @@ -373,20 +386,22 @@ struct mpi3_man9_rsrc_entry { }; enum mpi3_man9_resources { - MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0, - MPI3_MAN9_RSRC_TARGET_CMDS = 1, - MPI3_MAN9_RSRC_SAS_TARGETS = 2, - MPI3_MAN9_RSRC_PCIE_TARGETS = 3, - MPI3_MAN9_RSRC_INITIATORS = 4, - MPI3_MAN9_RSRC_VDS = 5, - MPI3_MAN9_RSRC_ENCLOSURES = 6, - MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7, - MPI3_MAN9_RSRC_EXPANDERS = 8, - MPI3_MAN9_RSRC_PCIE_SWITCHES = 9, - MPI3_MAN9_RSRC_PDS = 10, - MPI3_MAN9_RSRC_HOST_PDS = 11, - MPI3_MAN9_RSRC_ADV_HOST_PDS = 12, - MPI3_MAN9_RSRC_RAID_PDS = 13, + MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0, + MPI3_MAN9_RSRC_TARGET_CMDS = 1, + MPI3_MAN9_RSRC_RESERVED02 = 2, + MPI3_MAN9_RSRC_NVME = 3, + MPI3_MAN9_RSRC_INITIATORS = 4, + 
MPI3_MAN9_RSRC_VDS = 5, + MPI3_MAN9_RSRC_ENCLOSURES = 6, + MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7, + MPI3_MAN9_RSRC_EXPANDERS = 8, + MPI3_MAN9_RSRC_PCIE_SWITCHES = 9, + MPI3_MAN9_RSRC_RESERVED10 = 10, + MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11, + MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12, + MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13, + MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14, + MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15, MPI3_MAN9_RSRC_NUM_RESOURCES }; @@ -402,6 +417,7 @@ enum mpi3_man9_resources { #define MPI3_MAN9_MIN_ENCLOSURES (0) #define MPI3_MAN9_MAX_ENCLOSURES (65535) #define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0) +#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1) #define MPI3_MAN9_MIN_EXPANDERS (0) #define MPI3_MAN9_MAX_EXPANDERS (65535) #define MPI3_MAN9_MIN_PCIE_SWITCHES (0) @@ -422,9 +438,14 @@ struct mpi3_man_page9 { struct mpi3_man10_istwi_ctrlr_entry { __le16 slave_address; __le16 flags; - __le32 reserved04; + u8 scl_low_override; + u8 scl_high_override; + __le16 reserved06; }; +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c) +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000) +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004) #define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002) #define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001) #ifndef MPI3_MAN10_ISTWI_CTRLR_MAX @@ -451,10 +472,13 @@ struct mpi3_man11_temp_sensor_device_format { u8 temp_channel[4]; }; -#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00) -#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01) -#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02) -#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01) +#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00) +#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01) +#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02) +#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03) +#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0) +#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5) +#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01) struct mpi3_man11_seeprom_device_format { u8 size; u8 page_write_size; @@ -495,31 +519,40 @@ struct mpi3_man11_bkplane_spec_ubm_format { #define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4) #define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f) #define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0) -struct mpi3_man11_bkplane_spec_vpp_format { +struct mpi3_man11_bkplane_spec_non_ubm_format { __le16 flags; - __le16 reserved02; + u8 reserved02; + u8 type; }; -#define MPI3_MAN11_BKPLANE_VPP_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0040) -#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_MASK (0x0030) -#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_GPIO (0x0000) -#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_REG (0x0010) -#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_MASK (0x000f) -#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_SHIFT (0) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0) +#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00) union mpi3_man11_bkplane_spec_format { - struct mpi3_man11_bkplane_spec_ubm_format 
ubm; - struct mpi3_man11_bkplane_spec_vpp_format vpp; + struct mpi3_man11_bkplane_spec_ubm_format ubm; + struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm; }; struct mpi3_man11_bkplane_mgmt_device_format { u8 type; u8 receptacle_id; - __le16 reserved02; + u8 reset_info; + u8 reserved03; union mpi3_man11_bkplane_spec_format backplane_mgmt_specific; }; #define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00) -#define MPI3_MAN11_BKPLANE_MGMT_TYPE_VPP (0x01) +#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01) +#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0) +#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4) +#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f) +#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0) struct mpi3_man11_gas_gauge_device_format { u8 type; u8 reserved01[3]; @@ -527,6 +560,11 @@ struct mpi3_man11_gas_gauge_device_format { }; #define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00) +struct mpi3_man11_mgmt_ctrlr_device_format { + __le32 reserved00; + __le32 reserved04; +}; + union mpi3_man11_device_specific_format { struct mpi3_man11_mux_device_format mux; struct mpi3_man11_temp_sensor_device_format temp_sensor; @@ -535,6 +573,7 @@ union mpi3_man11_device_specific_format { struct mpi3_man11_cable_mgmt_device_format cable_mgmt; struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt; struct mpi3_man11_gas_gauge_device_format gas_gauge; + struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller; __le32 words[2]; }; @@ -556,10 +595,8 @@ struct mpi3_man11_istwi_device_format { #define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04) #define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05) #define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06) +#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07) #define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01) -#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_MASK (0x06) -#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_100KHZ (0x00) -#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_400KHZ (0x02) #ifndef MPI3_MAN11_ISTWI_DEVICE_MAX #define MPI3_MAN11_ISTWI_DEVICE_MAX (1) #endif @@ -692,8 +729,8 @@ struct mpi3_man_page14 { #define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01) #define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e) #define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00) -#define MPI3_MAN14_FLAGS_AUTH_API_CEREBUS (0x02) -#define MPI3_MAN14_FLAGS_AUTH_API_DMTF_PMCI (0x04) +#define MPI3_MAN14_FLAGS_AUTH_API_CERBERUS (0x02) +#define MPI3_MAN14_FLAGS_AUTH_API_SPDM (0x04) #ifndef MPI3_MAN15_VERSION_RECORD_MAX #define MPI3_MAN15_VERSION_RECORD_MAX 1 #endif @@ -808,7 +845,7 @@ struct mpi3_io_unit_page1 { struct mpi3_config_page_header header; __le32 flags; u8 dmd_io_delay; - u8 dmd_report_pc_ie; + u8 dmd_report_pcie; u8 dmd_report_sata; u8 dmd_report_sas; }; @@ -844,26 +881,30 @@ struct mpi3_io_unit_page2 { #define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001) struct mpi3_io_unit3_sensor { __le16 flags; - __le16 reserved02; - __le16 threshold[4]; + u8 threshold_margin; + u8 reserved03; + __le16 threshold[3]; + __le16 reserved0a; __le32 reserved0c; __le32 reserved10; __le32 reserved14; }; -#define MPI3_IOUNIT3_SENSOR_FLAGS_T3_ENABLE (0x0008) -#define MPI3_IOUNIT3_SENSOR_FLAGS_T2_ENABLE (0x0004) -#define MPI3_IOUNIT3_SENSOR_FLAGS_T1_ENABLE (0x0002) -#define MPI3_IOUNIT3_SENSOR_FLAGS_T0_ENABLE (0x0001) +#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010) +#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008) +#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004) +#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002) 
+#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001) #ifndef MPI3_IO_UNIT3_SENSOR_MAX -#define MPI3_IO_UNIT3_SENSOR_MAX (1) +#define MPI3_IO_UNIT3_SENSOR_MAX (1) #endif struct mpi3_io_unit_page3 { struct mpi3_config_page_header header; __le32 reserved08; u8 num_sensors; - u8 polling_interval; - __le16 reserved0e; + u8 nominal_poll_interval; + u8 warning_poll_interval; + u8 reserved0f; struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX]; }; @@ -873,13 +914,19 @@ struct mpi3_io_unit4_sensor { __le16 reserved02; u8 flags; u8 reserved05[3]; - __le32 reserved08; + __le16 istwi_index; + u8 channel; + u8 reserved0b; __le32 reserved0c; }; +#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0) +#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5) #define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01) +#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff) +#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff) #ifndef MPI3_IO_UNIT4_SENSOR_MAX -#define MPI3_IO_UNIT4_SENSOR_MAX (1) +#define MPI3_IO_UNIT4_SENSOR_MAX (1) #endif struct mpi3_io_unit_page4 { struct mpi3_config_page_header header; @@ -906,8 +953,9 @@ struct mpi3_io_unit_page5 { struct mpi3_io_unit5_spinup_group spinup_group_parameters[4]; __le32 reserved18; __le32 reserved1c; - __le32 reserved20; - u8 reserved24; + __le16 device_shutdown; + __le16 reserved22; + u8 pcie_device_wait_time; u8 sata_device_wait_time; u8 spinup_encl_drive_count; u8 spinup_encl_delay; @@ -919,6 +967,22 @@ struct mpi3_io_unit_page5 { }; #define MPI3_IOUNIT5_PAGEVERSION (0x00) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAA_SSD_SHIFT (0) #define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02) #define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01) #define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03) @@ -1012,7 +1076,52 @@ struct mpi3_ioc_page2 { }; #define MPI3_IOC2_PAGEVERSION (0x00) -struct mpi3_uefibsd_page0 { +#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010) +#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008) +#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004) +#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002) +#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001) +struct mpi3_allowed_cmd_scsi { + __le16 service_action; + u8 operation_code; + u8 command_flags; +}; + +struct mpi3_allowed_cmd_ata { + u8 subcommand; + u8 reserved01; + u8 command; + u8 command_flags; +}; + +struct mpi3_allowed_cmd_nvme { + u8 reserved00; + u8 nvme_cmd_flags; + u8 op_code; + u8 command_flags; +}; + +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN 
(0x80) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00) +union mpi3_allowed_cmd { + struct mpi3_allowed_cmd_scsi scsi; + struct mpi3_allowed_cmd_ata ata; + struct mpi3_allowed_cmd_nvme nvme; +}; + +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01) +#ifndef MPI3_ALLOWED_CMDS_MAX +#define MPI3_ALLOWED_CMDS_MAX (1) +#endif +struct mpi3_driver_page0 { struct mpi3_config_page_header header; __le32 bsd_options; u8 ssu_timeout; @@ -1026,13 +1135,122 @@ struct mpi3_uefibsd_page0 { __le32 reserved18; }; -#define MPI3_UEFIBSD_PAGEVERSION (0x00) -#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_MASK (0x00000003) -#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000) -#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001) -#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_NONE (0x00000002) -#define MPI3_UEFIBSD_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004) -#define MPI3_UEFIBSD_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008) +#define MPI3_DRIVER0_PAGEVERSION (0x00) +#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003) +#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000) +#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001) +#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004) +#define MPI3_DRIVER0_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008) +struct mpi3_driver_page1 { + struct mpi3_config_page_header header; + __le32 flags; + __le32 reserved0c; + __le16 host_diag_trace_max_size; + __le16 host_diag_trace_min_size; + __le16 host_diag_trace_decrement_size; + __le16 reserved16; + __le16 host_diag_fw_max_size; + __le16 host_diag_fw_min_size; + __le16 host_diag_fw_decrement_size; + __le16 reserved1e; + __le16 host_diag_driver_max_size; + __le16 host_diag_driver_min_size; + __le16 host_diag_driver_decrement_size; + __le16 reserved26; +}; + +#define MPI3_DRIVER1_PAGEVERSION (0x00) +#ifndef MPI3_DRIVER2_TRIGGER_MAX +#define MPI3_DRIVER2_TRIGGER_MAX (1) +#endif +struct mpi3_driver2_trigger_event { + u8 type; + u8 flags; + u8 reserved02; + u8 event; + __le32 reserved04[3]; +}; + +struct mpi3_driver2_trigger_scsi_sense { + u8 type; + u8 flags; + __le16 reserved02; + u8 ascq; + u8 asc; + u8 sense_key; + u8 reserved07; + __le32 reserved08[2]; +}; + +#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff) +#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff) +#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff) +struct mpi3_driver2_trigger_reply { + u8 type; + u8 flags; + __le16 ioc_status; + __le32 ioc_log_info; + __le32 ioc_log_info_mask; + __le32 reserved0c; +}; + +#define MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff) +union mpi3_driver2_trigger_element { + struct mpi3_driver2_trigger_event event; + struct mpi3_driver2_trigger_scsi_sense scsi_sense; + struct mpi3_driver2_trigger_reply reply; +}; + +#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00) +#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01) +#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02) +#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02) +#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01) +struct mpi3_driver_page2 { + struct 
mpi3_config_page_header header; + __le64 master_trigger; + __le32 reserved10[3]; + u8 num_triggers; + u8 reserved1d[3]; + union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX]; +}; + +#define MPI3_DRIVER2_PAGEVERSION (0x00) +#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL) +#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL) +#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL) +#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL) +#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL) +struct mpi3_driver_page10 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_allowed_commands; + u8 reserved0d[3]; + union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX]; +}; + +#define MPI3_DRIVER10_PAGEVERSION (0x00) +struct mpi3_driver_page20 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_allowed_commands; + u8 reserved0d[3]; + union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX]; +}; + +#define MPI3_DRIVER20_PAGEVERSION (0x00) +struct mpi3_driver_page30 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_allowed_commands; + u8 reserved0d[3]; + union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX]; +}; + +#define MPI3_DRIVER30_PAGEVERSION (0x00) union mpi3_security_mac { __le32 dword[16]; __le16 word[32]; @@ -1102,7 +1320,7 @@ struct mpi3_security1_key_record { #define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00) #define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01) #define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02) -#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_AUTH_DEV_KEY (0x03) +#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03) #define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04) struct mpi3_security_page1 { struct mpi3_config_page_header header; @@ -1137,16 +1355,30 @@ struct mpi3_sas_io_unit_page0 { struct mpi3_config_page_header header; __le32 reserved08; u8 num_phys; - u8 reserved0d[3]; + u8 init_status; + __le16 reserved0e; struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX]; }; -#define MPI3_SASIOUNIT0_PAGEVERSION (0x00) -#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08) -#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) -#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) -#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) -#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) +#define MPI3_SASIOUNIT0_PAGEVERSION (0x00) +#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00) +#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01) +#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02) +#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04) +#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05) +#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06) +#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0) +#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff) +#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08) +#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03) +#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00) +#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01) +#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02) +#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define 
MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) +#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02) +#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01) struct mpi3_sas_io_unit1_phy_data { u8 io_unit_port; u8 port_flags; @@ -1343,6 +1575,26 @@ struct mpi3_sas_expander_page1 { #define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) #define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) #define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) +#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS +#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1) +#endif +struct mpi3_sasexpander2_phy_element { + u8 link_change_count; + u8 reserved01; + __le16 rate_change_count; + __le32 reserved04; +}; + +struct mpi3_sas_expander_page2 { + struct mpi3_config_page_header header; + u8 num_phys; + u8 reserved09; + __le16 dev_handle; + __le32 reserved0c; + struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS]; +}; + +#define MPI3_SASEXPANDER2_PAGEVERSION (0x00) struct mpi3_sas_port_page0 { struct mpi3_config_page_header header; u8 port_number; @@ -1510,6 +1762,14 @@ struct mpi3_sas_phy_page4 { #define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04) #define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05) #define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06) +#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0) +#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1) +#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2) +#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3) +#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0) +#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1) +#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2) +#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3) struct mpi3_pcie_io_unit0_phy_data { u8 link; u8 link_flags; @@ -1540,7 +1800,8 @@ struct mpi3_pcie_io_unit_page0 { __le32 reserved08; u8 num_phys; u8 init_status; - __le16 reserved0e; + u8 aspm; + u8 reserved0f; struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX]; }; @@ -1556,6 +1817,14 @@ struct mpi3_pcie_io_unit_page0 { #define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08) #define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0) #define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0) struct mpi3_pcie_io_unit1_phy_data { u8 link; u8 link_flags; @@ -1569,16 +1838,16 @@ struct mpi3_pcie_io_unit1_phy_data { #define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00) #define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01) #define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02) -#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) -#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0) -#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4) -#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20) -#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30) -#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40) -#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50) -#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60) +#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20) +#define 
MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60) #ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX -#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1) +#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1) #endif struct mpi3_pcie_io_unit_page1 { struct mpi3_config_page_header header; @@ -1586,21 +1855,66 @@ struct mpi3_pcie_io_unit_page1 { __le32 reserved0c; u8 num_phys; u8 reserved11; - __le16 reserved12; + u8 aspm; + u8 reserved13; struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX]; }; -#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x80) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x40) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x30) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x10) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x20) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0f) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x02) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x03) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x04) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x05) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x06) +#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c) +#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2) +#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03) +#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0) struct mpi3_pcie_io_unit_page2 { struct mpi3_config_page_header header; - __le16 nv_me_max_queue_depth; - __le16 reserved0a; - u8 nv_me_abort_to; + __le16 nvme_max_q_dx1; + __le16 nvme_max_q_dx2; + u8 nvme_abort_to; u8 reserved0d; - __le16 reserved0e; + __le16 nvme_max_q_dx4; }; #define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0) +#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1) +#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2) +#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3) +#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4) +#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5) +struct mpi3_pcie_io_unit3_error { + __le16 threshold_count; + __le16 reserved02; +}; + +struct mpi3_pcie_io_unit_page3 { + struct mpi3_config_page_header header; + u8 threshold_window; + u8 threshold_action; + u8 escalation_count; + u8 escalation_action; + u8 num_errors; + u8 reserved0d[3]; + struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX]; +}; + +#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00) +#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01) +#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02) +#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03) struct mpi3_pcie_switch_page0 { struct mpi3_config_page_header header; u8 io_unit_port; @@ -1609,7 +1923,7 @@ struct mpi3_pcie_switch_page0 { __le16 dev_handle; __le16 parent_dev_handle; u8 num_ports; - u8 pc_ie_level; + u8 pcie_level; __le16 reserved12; __le32 reserved14; __le32 reserved18; @@ -1623,7 +1937,8 @@ struct mpi3_pcie_switch_page0 { struct mpi3_pcie_switch_page1 { struct mpi3_config_page_header header; 
u8 io_unit_port; - u8 reserved09[3]; + u8 flags; + __le16 reserved0a; u8 num_ports; u8 port_num; __le16 attached_dev_handle; @@ -1636,15 +1951,43 @@ struct mpi3_pcie_switch_page1 { }; #define MPI3_PCIESWITCH1_PAGEVERSION (0x00) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0) +#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS +#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1) +#endif +struct mpi3_pcieswitch2_port_element { + __le16 link_change_count; + __le16 rate_change_count; + __le32 reserved04; +}; + +struct mpi3_pcie_switch_page2 { + struct mpi3_config_page_header header; + u8 num_ports; + u8 reserved09; + __le16 dev_handle; + __le32 reserved0c; + struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS]; +}; + +#define MPI3_PCIESWITCH2_PAGEVERSION (0x00) struct mpi3_pcie_link_page0 { struct mpi3_config_page_header header; u8 link; u8 reserved09[3]; - __le32 correctable_error_count; - __le16 n_fatal_error_count; - __le16 reserved12; - __le16 fatal_error_count; - __le16 reserved16; + __le32 reserved0c; + __le32 receiver_error_count; + __le32 recovery_count; + __le32 corr_error_msg_count; + __le32 non_fatal_error_msg_count; + __le32 fatal_error_msg_count; + __le32 non_fatal_error_count; + __le32 fatal_error_count; + __le32 bad_dllp_count; + __le32 bad_tlp_count; }; #define MPI3_PCIELINK0_PAGEVERSION (0x00) @@ -1654,11 +1997,12 @@ struct mpi3_enclosure_page0 { __le16 flags; __le16 enclosure_handle; __le16 num_slots; - __le16 start_slot; + __le16 reserved16; u8 io_unit_port; u8 enclosure_level; __le16 sep_dev_handle; - __le32 reserved1c; + u8 chassis_slot; + u8 reserved1d[3]; }; #define MPI3_ENCLOSURE0_PAGEVERSION (0x00) @@ -1666,6 +2010,7 @@ struct mpi3_enclosure_page0 { #define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000) #define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000) #define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000) +#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) #define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010) #define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000) #define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010) @@ -1686,6 +2031,7 @@ struct mpi3_device0_sas_sata_format { u8 zone_group; }; +#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400) #define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200) #define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100) #define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080) @@ -1707,10 +2053,11 @@ struct mpi3_device0_pcie_format { __le32 maximum_data_transfer_size; __le32 capabilities; __le16 noiob; - u8 nv_me_abort_to; + u8 nvme_abort_to; u8 page_size; __le16 shutdown_latency; - __le16 reserved16; + u8 recovery_info; + u8 reserved17; }; #define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10) @@ -1718,16 +2065,38 @@ struct mpi3_device0_pcie_format { #define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04) #define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02) #define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01) -#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0003) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007) #define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000) #define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001) #define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002) #define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030) +#define 
MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0) +#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020) #define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010) #define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008) -#define MPI3_DEVICE0_PCIE_CAP_NVME_SGL_ENABLED (0x00000004) +#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004) +#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000) #define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002) #define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001) +#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0) +#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6) +#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0) +#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00) +#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05) struct mpi3_device0_vd_format { u8 vd_state; u8 raid_level; @@ -1783,6 +2152,8 @@ struct mpi3_device_page0 { }; #define MPI3_DEVICE0_PAGEVERSION (0x00) +#define MPI3_DEVICE0_PARENT_INVALID (0xffff) +#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000) #define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff) #define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff) #define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff) @@ -1792,9 +2163,13 @@ struct mpi3_device_page0 { #define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03) #define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04) #define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05) +#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06) +#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07) +#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f) #define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10) #define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11) #define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12) +#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f) #define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20) #define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21) #define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22) @@ -1810,6 +2185,8 @@ struct mpi3_device_page0 { #define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31) #define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32) #define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33) +#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34) +#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f) #define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40) #define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41) #define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42) @@ -1820,7 +2197,17 @@ struct mpi3_device_page0 { #define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47) #define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48) #define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49) -#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x50) +#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a) +#define 
MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b) +#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c) +#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d) +#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e) +#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f) +#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50) +#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51) +#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f) +#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80) +#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f) #define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080) #define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008) #define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006) @@ -1870,11 +2257,17 @@ struct mpi3_device_page1 { struct mpi3_config_page_header header; __le16 dev_handle; __le16 reserved0a; - __le32 reserved0c[12]; + __le16 link_change_count; + __le16 rate_change_count; + __le16 tm_count; + __le16 reserved12; + __le32 reserved14[10]; u8 reserved3c[3]; u8 device_form; union mpi3_device1_dev_spec_format device_specific; }; #define MPI3_DEVICE1_PAGEVERSION (0x00) +#define MPI3_DEVICE1_COUNTER_MAX (0xfffe) +#define MPI3_DEVICE1_COUNTER_INVALID (0xffff) #endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h index 169e4f9b7b7c..c29b87de8e18 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h @@ -61,6 +61,8 @@ struct mpi3_component_image_header { #define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053) #define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147) #define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250) +#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d) +#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f) #define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) #define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030) #define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000) @@ -94,6 +96,61 @@ struct mpi3_component_image_header { #define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c) #define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c) #define MPI3_IMAGE_HEADER_SIZE (0x100) +#ifndef MPI3_CI_MANIFEST_MPI_MAX +#define MPI3_CI_MANIFEST_MPI_MAX (1) +#endif +struct mpi3_ci_manifest_mpi_comp_image_ref { + __le32 signature1; + __le32 reserved04[3]; + struct mpi3_comp_image_version component_image_version; + __le32 component_image_version_string_offset; + __le32 crc; +}; + +struct mpi3_ci_manifest_mpi { + u8 manifest_type; + u8 reserved01[3]; + __le32 reserved04[3]; + u8 num_image_references; + u8 release_level; + __le16 reserved12; + __le16 reserved14; + __le16 flags; + __le32 reserved18[2]; + __le16 vendor_id; + __le16 device_id; + __le16 subsystem_vendor_id; + __le16 subsystem_id; + __le32 reserved28[2]; + union mpi3_version_union package_security_version; + __le32 reserved34; + struct mpi3_comp_image_version package_version; + __le32 package_version_string_offset; + __le32 package_build_date_string_offset; + __le32 package_build_time_string_offset; + __le32 reserved4c; + __le32 diag_authorization_identifier[16]; + struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX]; +}; + +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50) +#define 
MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60) +#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01) +#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff) +#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000) +#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000) +#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000) +union mpi3_ci_manifest { + struct mpi3_ci_manifest_mpi mpi; + __le32 dword[1]; +}; + +#define MPI3_CI_MANIFEST_TYPE_MPI (0x00) struct mpi3_extended_image_header { u8 image_type; u8 reserved01[3]; @@ -161,6 +218,7 @@ struct mpi3_encrypted_hash_entry { #define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00) #define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01) #define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02) +#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03) #define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00) #define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01) #define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02) @@ -178,7 +236,6 @@ struct mpi3_encrypted_key_with_hash_entry { u8 reserved03; __le32 reserved04; __le32 public_key[MPI3_PUBLIC_KEY_MAX]; - __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX]; }; #ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h index e02b6d3cfba2..7a208dc81d49 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h @@ -13,7 +13,7 @@ struct mpi3_scsi_io_cdb_eedp32 { __le32 transfer_length; }; -union mpi3_scso_io_cdb_union { +union mpi3_scsi_io_cdb_union { u8 cdb32[32]; struct mpi3_scsi_io_cdb_eedp32 eedp32; struct mpi3_sge_common sge; @@ -32,11 +32,12 @@ struct mpi3_scsi_io_request { __le32 skip_count; __le32 data_length; u8 lun[8]; - union mpi3_scso_io_cdb_union cdb; + union mpi3_scsi_io_cdb_union cdb; union mpi3_sge_union sgl[4]; }; #define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80) +#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40) #define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000) #define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000) #define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000) @@ -155,5 +156,13 @@ struct mpi3_scsi_task_mgmt_reply { __le32 reserved18; }; -#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00) +#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a) +#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81) #endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h index 1af99a5382d5..bc56273778d3 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h @@ -29,10 +29,15 @@ struct mpi3_ioc_init_request { __le64 driver_information_address; }; -#define MPI3_WHOINIT_NOT_INITIALIZED (0x00) -#define MPI3_WHOINIT_ROM_BIOS (0x02) -#define MPI3_WHOINIT_HOST_DRIVER (0x03) -#define MPI3_WHOINIT_MANUFACTURER (0x04) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03) +#define 
MPI3_WHOINIT_NOT_INITIALIZED (0x00) +#define MPI3_WHOINIT_ROM_BIOS (0x02) +#define MPI3_WHOINIT_HOST_DRIVER (0x03) +#define MPI3_WHOINIT_MANUFACTURER (0x04) struct mpi3_driver_info_layout { __le32 information_length; u8 driver_signature[12]; @@ -77,17 +82,17 @@ struct mpi3_ioc_facts_data { u8 sge_modifier_shift; u8 protocol_flags; __le16 max_sas_initiators; - __le16 max_sas_targets; + __le16 reserved2a; __le16 max_sas_expanders; __le16 max_enclosures; __le16 min_dev_handle; __le16 max_dev_handle; - __le16 max_pc_ie_switches; + __le16 max_pcie_switches; __le16 max_nvme; - __le16 max_pds; + __le16 reserved38; __le16 max_vds; __le16 max_host_pds; - __le16 max_advanced_host_pds; + __le16 max_adv_host_pds; __le16 max_raid_pds; __le16 max_posted_cmd_buffers; __le32 flags; @@ -97,26 +102,41 @@ struct mpi3_ioc_facts_data { __le16 reserved4e; __le32 diag_trace_size; __le32 diag_fw_size; + __le32 diag_driver_size; + u8 max_host_pd_ns_count; + u8 max_adv_host_pd_ns_count; + u8 max_raidpd_ns_count; + u8 reserved5f; }; -#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD (0x00000010) +#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000) +#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000) +#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x10000000) +#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020) +#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010) #define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008) -#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_GRAN_MASK (0x00000001) -#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_IOC_GRAN (0x00000000) -#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_REPLY_Q_GRAN (0x00000001) +#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002) +#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001) #define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000) #define MPI3_IOCFACTS_PID_TYPE_SHIFT (12) #define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00) #define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8) #define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff) #define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000) +#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000) #define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800) #define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700) #define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000) -#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_RAID (0x0100) -#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0200) -#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_RAID (0x0300) -#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0400) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600) #define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080) #define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040) #define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020) @@ -175,6 +195,7 @@ struct mpi3_create_request_queue_request { #define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80) #define 
MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80) #define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00) +#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2) struct mpi3_delete_request_queue_request { __le16 host_tag; u8 ioc_use_only02; @@ -210,6 +231,7 @@ struct mpi3_create_reply_queue_request { #define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01) +#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2) struct mpi3_delete_reply_queue_request { __le16 host_tag; u8 ioc_use_only02; @@ -255,7 +277,9 @@ struct mpi3_port_enable_request { #define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19) #define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20) #define MPI3_EVENT_PCIE_ENUMERATION (0x22) +#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23) #define MPI3_EVENT_HARD_RESET_RECEIVED (0x40) +#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50) #define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60) #define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f) #define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4) @@ -311,10 +335,9 @@ struct mpi3_event_data_temp_threshold { __le32 reserved0c; }; -#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD3_EXCEEDED (0x0008) -#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD2_EXCEEDED (0x0004) -#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD1_EXCEEDED (0x0002) -#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD0_EXCEEDED (0x0001) +#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_FATAL_THRESHOLD_EXCEEDED (0x0004) +#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_CRITICAL_THRESHOLD_EXCEEDED (0x0002) +#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_WARNING_THRESHOLD_EXCEEDED (0x0001) struct mpi3_event_data_cable_management { __le32 active_cable_power_requirement; u8 status; @@ -398,8 +421,10 @@ struct mpi3_event_data_sas_discovery { #define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000) #define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000) #define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000) +#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000) #define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000) -#define MPI3_SAS_DISC_STATUS_SLOT_COUNT_MISMATCH (0x00002000) +#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000) #define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000) #define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800) #define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400) @@ -581,6 +606,20 @@ struct mpi3_event_data_pcie_topology_change_list { #define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02) #define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03) #define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04) +struct mpi3_event_data_pcie_error_threshold { + __le64 timestamp; + u8 reason_code; + u8 port; + __le16 switch_dev_handle; + u8 error; + u8 action; + __le16 threshold_count; + __le16 attached_dev_handle; + __le16 reserved12; +}; + +#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00) +#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01) struct mpi3_event_data_sas_init_dev_status_change { u8 reason_code; u8 io_unit_port; @@ -604,6 +643,16 @@ struct mpi3_event_data_hard_reset_received { __le16 reserved02; }; +struct mpi3_event_data_diag_buffer_status_change { + u8 type; + u8 reason_code; + __le16 reserved02; + __le32 reserved04; +}; + +#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01) +#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED 
(0x02) +#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03) #define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200) #define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100) #define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080) @@ -645,21 +694,23 @@ struct mpi3_pel_seq { }; struct mpi3_pel_entry { + __le64 time_stamp; __le32 sequence_number; - __le32 time_stamp[2]; __le16 log_code; __le16 arg_type; __le16 locale; u8 class; - u8 reserved13; + u8 flags; u8 ext_num; u8 num_exts; u8 arg_data_size; - u8 fixed_format_size; + u8 fixed_format_strings_size; __le32 reserved18[2]; __le32 pel_info[24]; }; +#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02) +#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01) struct mpi3_pel_list { __le32 log_count; __le32 reserved04; @@ -837,7 +888,10 @@ struct mpi3_pel_req_action_acknowledge { __le32 reserved10; }; -#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT (0x01) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02) struct mpi3_pel_reply { __le16 host_tag; u8 ioc_use_only02; @@ -885,6 +939,7 @@ struct mpi3_ci_download_request { #define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02) #define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03) #define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04) +#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05) struct mpi3_ci_download_reply { __le16 host_tag; u8 ioc_use_only02; @@ -902,6 +957,7 @@ struct mpi3_ci_download_reply { }; #define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80) +#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20) #define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10) #define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e) #define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00) @@ -939,19 +995,28 @@ struct mpi3_ci_upload_request { #define MPI3_CTRL_OP_REMOVE_DEVICE (0x10) #define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11) #define MPI3_CTRL_OP_HIDDEN_ACK (0x12) +#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13) #define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20) -#define MPI3_CTRL_OP_SAS_CLEAR_ERROR_LOG (0x21) -#define MPI3_CTRL_OP_PCIE_CLEAR_ERROR_LOG (0x22) +#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21) +#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23) +#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24) +#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30) #define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00) #define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00) #define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00) #define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00) #define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00) +#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00) #define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00) #define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01) #define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00) -#define MPI3_CTRL_OP_SAS_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00) -#define MPI3_CTRL_OP_PCIE_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00) +#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00) +#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01) +#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00) +#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00) 
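/*
 * A minimal usage sketch (illustrative, not part of the MPI headers): the
 * PARAM8/PARAM16/PARAM32/PARAM64 *_INDEX defines above name slots in the
 * parameter arrays of struct mpi3_iounit_control_request. The param64/param32
 * field names and the MPI3_FUNCTION_IO_UNIT_CONTROL opcode are assumed here
 * from the MPI 3.0 specification layout.
 *
 *	struct mpi3_iounit_control_request req = { 0 };
 *
 *	req.function  = MPI3_FUNCTION_IO_UNIT_CONTROL;	// assumed opcode name
 *	req.operation = MPI3_CTRL_OP_WRITE_INTERNAL_BUS;
 *	req.param64[MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX] =
 *		cpu_to_le64(addr);	// internal bus address to write
 *	req.param32[MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX] =
 *		cpu_to_le32(value);	// value to write at that address
 */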
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00) +#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00) +#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01) #define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01) #define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02) #define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) @@ -966,9 +1031,14 @@ struct mpi3_ci_upload_request { #define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1) #define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0) #define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0) +#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0) #define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01) #define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03) #define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06) +#define MPI3_CTRL_ACTION_NOP (0x00) +#define MPI3_CTRL_ACTION_LINK_RESET (0x01) +#define MPI3_CTRL_ACTION_HARD_RESET (0x02) +#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05) struct mpi3_iounit_control_request { __le16 host_tag; u8 ioc_use_only02; diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h new file mode 100644 index 000000000000..dbfaf4137560 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016-2021 Broadcom Inc. All rights reserved. + * + */ +#ifndef MPI30_PCI_H +#define MPI30_PCI_H 1 +#ifndef MPI3_NVME_ENCAP_CMD_MAX +#define MPI3_NVME_ENCAP_CMD_MAX (1) +#endif +struct mpi3_nvme_encapsulated_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 dev_handle; + __le16 encapsulated_command_length; + __le16 flags; + __le32 reserved10[4]; + __le32 command[MPI3_NVME_ENCAP_CMD_MAX]; +}; + +#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002) +#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000) +#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002) +#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001) +#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) +#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001) +struct mpi3_nvme_encapsulated_error_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + __le32 nvme_completion_entry[4]; +}; +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h index ba5018702960..298d895e374b 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h @@ -30,4 +30,18 @@ struct mpi3_smp_passthrough_request { struct mpi3_sge_common request_sge; struct mpi3_sge_common response_sge; }; + +struct mpi3_smp_passthrough_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + __le16 response_data_length; + __le16 reserved12; +}; #endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h index 63e4e81d5397..6d550117ec2e 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h @@ -19,8 +19,8 @@ union mpi3_version_union { #define MPI3_VERSION_MAJOR (3) #define MPI3_VERSION_MINOR (0) -#define MPI3_VERSION_UNIT (0) -#define MPI3_VERSION_DEV (18) +#define MPI3_VERSION_UNIT (22) +#define MPI3_VERSION_DEV (0) struct 
mpi3_sysif_oper_queue_indexes { __le16 producer_index; __le16 reserved02; @@ -74,6 +74,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004) #define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000) #define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24) +#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001) #define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014) #define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000) #define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20) @@ -82,12 +83,13 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000) #define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000) #define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000) -#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN (0x00002000) +#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000) #define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010) #define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001) #define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c) #define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010) #define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c) +#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002) #define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000) #define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004) #define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008) @@ -107,9 +109,9 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000) #define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000) #define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000) -#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x30000000) -#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_MASK (0x00ff0000) -#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_SHIFT (16) +#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000) +#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000) +#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16) #define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00) #define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8) #define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff) @@ -117,9 +119,9 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000) #define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004) #define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008) -#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(n) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((n) - 1) * 8)) +#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8)) #define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c) -#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(n) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((n) - 1) * 8)) +#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8)) #define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04) #define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f) #define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0) @@ -133,7 +135,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100) -#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_FLASH_RCVRY_RESET (0x00000200) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700) #define 
MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080) @@ -153,8 +155,9 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001) #define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002) #define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003) -#define MPI3_SYSIF_FAULT_CODE_SAFE_MODE_EXIT (0x0000f004) -#define MPI3_SYSIF_FAULT_CODE_FACTORY_RESET (0x0000f005) +#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004) +#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005) +#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006) #define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14) #define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18) #define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c) @@ -409,6 +412,8 @@ struct mpi3_default_reply { #define MPI3_IOCSTATUS_INVALID_STATE (0x0008) #define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a) #define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b) +#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c) +#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d) #define MPI3_IOCSTATUS_FAILURE (0x001f) #define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) #define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) @@ -448,8 +453,10 @@ struct mpi3_default_reply { #define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0) #define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1) #define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2) -#define MPI3_IOCSTATUS_CI_UPDATE_PENDING (0x00b3) +#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3) +#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4) #define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0) +#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1) #define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00) #define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01) #define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02) diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 9787b53a2b59..fc4eaf6d1e47 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -45,6 +45,7 @@ #include "mpi/mpi30_init.h" #include "mpi/mpi30_ioc.h" #include "mpi/mpi30_sas.h" +#include "mpi/mpi30_pci.h" #include "mpi3mr_debug.h" /* Global list and lock for storing multiple adapters managed by the driver */ @@ -52,8 +53,8 @@ extern spinlock_t mrioc_list_lock; extern struct list_head mrioc_list; extern int prot_mask; -#define MPI3MR_DRIVER_VERSION "00.255.45.01" -#define MPI3MR_DRIVER_RELDATE "12-December-2020" +#define MPI3MR_DRIVER_VERSION "8.0.0.61.0" +#define MPI3MR_DRIVER_RELDATE "20-December-2021" #define MPI3MR_DRIVER_NAME "mpi3mr" #define MPI3MR_DRIVER_LICENSE "GPL" @@ -79,7 +80,8 @@ extern int prot_mask; /* Operational queue management definitions */ #define MPI3MR_OP_REQ_Q_QD 512 -#define MPI3MR_OP_REP_Q_QD 4096 +#define MPI3MR_OP_REP_Q_QD 1024 +#define MPI3MR_OP_REP_Q_QD4K 4096 #define MPI3MR_OP_REQ_Q_SEG_SIZE 4096 #define MPI3MR_OP_REP_Q_SEG_SIZE 4096 #define MPI3MR_MAX_SEG_LIST_SIZE 4096 @@ -90,25 +92,31 @@ extern int prot_mask; #define MPI3MR_HOSTTAG_IOCTLCMDS 2 #define MPI3MR_HOSTTAG_BLK_TMS 5 -#define MPI3MR_NUM_DEVRMCMD 1 +#define MPI3MR_NUM_DEVRMCMD 16 #define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1) #define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \ MPI3MR_NUM_DEVRMCMD - 1) -#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX +#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX +#define MPI3MR_NUM_EVTACKCMD 4 +#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1) +#define MPI3MR_HOSTTAG_EVTACKCMD_MAX 
(MPI3MR_HOSTTAG_EVTACKCMD_MIN + \ + MPI3MR_NUM_EVTACKCMD - 1) /* Reduced resource count definition for crash kernel */ #define MPI3MR_HOST_IOS_KDUMP 128 /* command/controller interaction timeout definitions in seconds */ -#define MPI3MR_INTADMCMD_TIMEOUT 10 +#define MPI3MR_INTADMCMD_TIMEOUT 60 #define MPI3MR_PORTENABLE_TIMEOUT 300 -#define MPI3MR_ABORTTM_TIMEOUT 30 -#define MPI3MR_RESETTM_TIMEOUT 30 +#define MPI3MR_ABORTTM_TIMEOUT 60 +#define MPI3MR_RESETTM_TIMEOUT 60 #define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5 #define MPI3MR_TSUPDATE_INTERVAL 900 #define MPI3MR_DEFAULT_SHUTDOWN_TIME 120 #define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180 +#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180 +#define MPI3MR_RESET_ACK_TIMEOUT 30 #define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milliseconds */ @@ -121,7 +129,7 @@ extern int prot_mask; /* Definitions for Event replies and sense buffer allocated per controller */ #define MPI3MR_NUM_EVT_REPLIES 64 -#define MPI3MR_SENSEBUF_SZ 256 +#define MPI3MR_SENSE_BUF_SZ 256 #define MPI3MR_SENSEBUF_FACTOR 3 #define MPI3MR_CHAINBUF_FACTOR 3 #define MPI3MR_CHAINBUFDIX_FACTOR 2 @@ -135,17 +143,11 @@ extern int prot_mask; /* ResponseCode definitions */ #define MPI3MR_RI_MASK_RESPCODE (0x000000FF) -#define MPI3MR_RSP_TM_COMPLETE 0x00 -#define MPI3MR_RSP_INVALID_FRAME 0x02 -#define MPI3MR_RSP_TM_NOT_SUPPORTED 0x04 -#define MPI3MR_RSP_TM_FAILED 0x05 -#define MPI3MR_RSP_TM_SUCCEEDED 0x08 -#define MPI3MR_RSP_TM_INVALID_LUN 0x09 -#define MPI3MR_RSP_TM_OVERLAPPED_TAG 0x0A #define MPI3MR_RSP_IO_QUEUED_ON_IOC \ MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC #define MPI3MR_DEFAULT_MDTS (128 * 1024) +#define MPI3MR_DEFAULT_PGSZEXP (12) /* Command retry count definitions */ #define MPI3MR_DEV_RMHS_RETRY_COUNT 3 @@ -183,20 +185,6 @@ enum mpi3mr_iocstate { MRIOC_STATE_UNRECOVERABLE, }; -/* Init type definitions */ -enum mpi3mr_init_type { - MPI3MR_IT_INIT = 0, - MPI3MR_IT_RESET, - MPI3MR_IT_RESUME, -}; - -/* Cleanup reason definitions */ -enum mpi3mr_cleanup_reason { - MPI3MR_COMPLETE_CLEANUP = 0, - MPI3MR_REINIT_FAILURE, - MPI3MR_SUSPEND, -}; - /* Reset reason code definitions */ enum mpi3mr_reset_reason { MPI3MR_RESET_FROM_BRINGUP = 1, @@ -222,7 +210,14 @@ enum mpi3mr_reset_reason { MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21, MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22, MPI3MR_RESET_FROM_SYSFS = 23, - MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24 + MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24, + MPI3MR_RESET_FROM_FIRMWARE = 27, +}; + +/* Queue type definitions */ +enum queue_type { + MPI3MR_DEFAULT_QUEUE = 0, + MPI3MR_POLL_QUEUE, }; /** @@ -263,7 +258,7 @@ struct mpi3mr_ioc_facts { u16 max_vds; u16 max_hpds; u16 max_advhpds; - u16 max_raidpds; + u16 max_raid_pds; u16 min_devhandle; u16 max_devhandle; u16 max_op_req_q; @@ -336,6 +331,7 @@ struct op_req_qinfo { * @pend_ios: Number of IOs pending in HW for this queue * @enable_irq_poll: Flag to indicate polling is enabled * @in_use: Queue is handled by poll/ISR + * @qtype: Type of queue (types defined in enum queue_type) */ struct op_reply_qinfo { u16 ci; @@ -350,6 +346,7 @@ struct op_reply_qinfo { atomic_t pend_ios; bool enable_irq_poll; atomic_t in_use; + enum queue_type qtype; }; /** @@ -388,6 +385,7 @@ struct tgt_dev_sas_sata { * @pgsz: Device page size * @abort_to: Timeout for abort TM * @reset_to: Timeout for Target/LUN reset TM + * @dev_info: Device information bits */ struct tgt_dev_pcie { u32 mdts; @@ -395,6 +393,7 @@ struct tgt_dev_pcie { u8 pgsz; u8 abort_to; u8 reset_to; + u16 dev_info; }; /** @@ -499,6 +498,8 @@ static inline void mpi3mr_tgtdev_put(struct 
mpi3mr_tgt_dev *s) * @dev_removedelay: Device is waiting to be removed in FW * @dev_type: Device type * @tgt_dev: Internal target device pointer + * @pend_count: Counter to track pending I/Os during error + * handling */ struct mpi3mr_stgt_priv_data { struct scsi_target *starget; @@ -510,6 +511,7 @@ struct mpi3mr_stgt_priv_data { u8 dev_removedelay; u8 dev_type; struct mpi3mr_tgt_dev *tgt_dev; + u32 pend_count; }; /** @@ -518,11 +520,14 @@ struct mpi3mr_stgt_priv_data { * @tgt_priv_data: Scsi_target private data pointer * @lun_id: LUN ID of the device * @ncq_prio_enable: NCQ priority enable for SATA device + * @pend_count: Counter to track pending I/Os during error + * handling */ struct mpi3mr_sdev_priv_data { struct mpi3mr_stgt_priv_data *tgt_priv_data; u32 lun_id; u8 ncq_prio_enable; + u32 pend_count; }; /** @@ -630,6 +635,7 @@ struct scmd_priv { * @ready_timeout: Controller ready timeout * @intr_info: Interrupt cookie pointer * @intr_info_count: Number of interrupt cookies + * @is_intr_info_set: Flag to indicate intr info is setup * @num_queues: Number of operational queues * @num_op_req_q: Number of operational request queues * @req_qinfo: Operational request queue info pointer @@ -681,17 +687,23 @@ struct scmd_priv { * @chain_buf_lock: Chain buffer list lock * @host_tm_cmds: Command tracker for task management commands * @dev_rmhs_cmds: Command tracker for device removal commands + * @evtack_cmds: Command tracker for event ack commands * @devrem_bitmap_sz: Device removal bitmap size * @devrem_bitmap: Device removal bitmap * @dev_handle_bitmap_sz: Device handle bitmap size * @removepend_bitmap: Remove pending bitmap * @delayed_rmhs_list: Delayed device removal list + * @evtack_cmds_bitmap_sz: Event Ack bitmap size + * @evtack_cmds_bitmap: Event Ack bitmap + * @delayed_evtack_cmds_list: Delayed event acknowledgment list * @ts_update_counter: Timestamp update counter - * @fault_dbg: Fault debug flag * @reset_in_progress: Reset in progress flag * @unrecoverable: Controller unrecoverable flag + * @prev_reset_result: Result of previous reset * @reset_mutex: Controller reset mutex * @reset_waitq: Controller reset wait queue + * @prepare_for_reset: Prepare for reset event received + * @prepare_for_reset_timeout_counter: Prepare for reset timeout * @diagsave_timeout: Diagnostic information save timeout * @logging_level: Controller debug logging level * @flush_io_count: I/O count to flush after reset @@ -699,6 +711,9 @@ struct scmd_priv { * @driver_info: Driver, Kernel, OS information to firmware * @change_count: Topology change count * @op_reply_q_offset: Operational reply queue offset with MSIx + * @default_qcount: Total Default queues + * @active_poll_qcount: Currently active poll queue count + * @requested_poll_qcount: User requested poll queue count */ struct mpi3mr_ioc { struct list_head list; @@ -739,6 +754,7 @@ struct mpi3mr_ioc { struct mpi3mr_intr_info *intr_info; u16 intr_info_count; + bool is_intr_info_set; u16 num_queues; u16 num_op_req_q; @@ -758,6 +774,7 @@ struct mpi3mr_ioc { dma_addr_t reply_buf_dma_max_address; u16 reply_free_qsz; + u16 reply_sz; struct dma_pool *reply_free_q_pool; __le64 *reply_free_q; dma_addr_t reply_free_q_dma; @@ -805,19 +822,26 @@ struct mpi3mr_ioc { struct mpi3mr_drv_cmd host_tm_cmds; struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD]; + struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD]; u16 devrem_bitmap_sz; void *devrem_bitmap; u16 dev_handle_bitmap_sz; void *removepend_bitmap; struct list_head delayed_rmhs_list; + u16 
evtack_cmds_bitmap_sz; + void *evtack_cmds_bitmap; + struct list_head delayed_evtack_cmds_list; u32 ts_update_counter; - u8 fault_dbg; u8 reset_in_progress; u8 unrecoverable; + int prev_reset_result; struct mutex reset_mutex; wait_queue_head_t reset_waitq; + u8 prepare_for_reset; + u16 prepare_for_reset_timeout_counter; + u16 diagsave_timeout; int logging_level; u16 flush_io_count; @@ -826,6 +850,10 @@ struct mpi3mr_ioc { struct mpi3_driver_info_layout driver_info; u16 change_count; u16 op_reply_q_offset; + + u16 default_qcount; + u16 active_poll_qcount; + u16 requested_poll_qcount; }; /** @@ -867,10 +895,23 @@ struct delayed_dev_rmhs_node { u8 iou_rc; }; +/** + * struct delayed_evt_ack_node - Delayed event ack node + * @list: list head + * @event: MPI3 event ID + * @event_ctx: event context + */ +struct delayed_evt_ack_node { + struct list_head list; + u8 event; + u32 event_ctx; +}; + int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc); void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc); -int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type); -void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason); +int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc); +int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume); +void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc); int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async); int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, u16 admin_req_sz, u8 ignore_reset); @@ -887,6 +928,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, u64 sense_buf_dma); void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc); +void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc); void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, struct mpi3_event_notification_reply *event_reply); void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, @@ -897,13 +939,11 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc); int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, u32 reset_reason, u8 snapdump); -int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc, - u32 reset_reason); void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc); void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc); enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc); -int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, +int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, u32 event_ctx); void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout); @@ -911,6 +951,12 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc); void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc); void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc); void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc); -void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc); +void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc); +void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code); +void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc); +void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code); +int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, + struct op_reply_qinfo *op_reply_q); +int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); #endif /*MPI3MR_H_INCLUDED*/ diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h index c085bb048d41..cef61c5d59d3 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_debug.h +++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h @@ -14,27 +14,20 @@ /* * debug levels */ -#define MPI3_DEBUG 0x00000001 -#define 
MPI3_DEBUG_MSG_FRAME 0x00000002 -#define MPI3_DEBUG_SG 0x00000004 -#define MPI3_DEBUG_EVENTS 0x00000008 -#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000010 -#define MPI3_DEBUG_INIT 0x00000020 -#define MPI3_DEBUG_EXIT 0x00000040 -#define MPI3_DEBUG_FAIL 0x00000080 -#define MPI3_DEBUG_TM 0x00000100 -#define MPI3_DEBUG_REPLY 0x00000200 -#define MPI3_DEBUG_HANDSHAKE 0x00000400 -#define MPI3_DEBUG_CONFIG 0x00000800 -#define MPI3_DEBUG_DL 0x00001000 -#define MPI3_DEBUG_RESET 0x00002000 -#define MPI3_DEBUG_SCSI 0x00004000 -#define MPI3_DEBUG_IOCTL 0x00008000 -#define MPI3_DEBUG_CSMISAS 0x00010000 -#define MPI3_DEBUG_SAS 0x00020000 -#define MPI3_DEBUG_TRANSPORT 0x00040000 -#define MPI3_DEBUG_TASK_SET_FULL 0x00080000 -#define MPI3_DEBUG_TRIGGER_DIAG 0x00200000 + +#define MPI3_DEBUG_EVENT 0x00000001 +#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002 +#define MPI3_DEBUG_INIT 0x00000004 +#define MPI3_DEBUG_EXIT 0x00000008 +#define MPI3_DEBUG_TM 0x00000010 +#define MPI3_DEBUG_RESET 0x00000020 +#define MPI3_DEBUG_SCSI_ERROR 0x00000040 +#define MPI3_DEBUG_REPLY 0x00000080 +#define MPI3_DEBUG_IOCTL_ERROR 0x00008000 +#define MPI3_DEBUG_IOCTL_INFO 0x00010000 +#define MPI3_DEBUG_SCSI_INFO 0x00020000 +#define MPI3_DEBUG 0x01000000 +#define MPI3_DEBUG_SG 0x02000000 /* @@ -50,11 +43,103 @@ #define ioc_info(ioc, fmt, ...) \ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define dprint(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_event_th(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_EVENT) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_event_bh(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_init(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_INIT) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_exit(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_EXIT) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_tm(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_TM) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_reply(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_REPLY) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_reset(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_RESET) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_scsi_info(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_scsi_err(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \ + do { \ + if (ioc->logging_level & LOG_LEVEL) \ + scsi_print_command(SCMD); \ + } while (0) + + +#define dprint_ioctl_info(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_IOCTL_INFO) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) -#define dbgprint(IOC, FMT, ...) \ +#define dprint_ioctl_err(ioc, fmt, ...) 
\ do { \ - if (IOC->logging_level & MPI3_DEBUG) \ - pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \ + if (ioc->logging_level & MPI3_DEBUG_IOCTL_ERROR) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ } while (0) #endif /* MPT3SAS_DEBUG_H_INCLUDED */ + +/** + * dprint_dump_req - print message frame contents + * @req: pointer to message frame + * @sz: number of dwords + */ +static inline void +dprint_dump_req(void *req, int sz) +{ + int i; + __le32 *mfp = (__le32 *)req; + + pr_info("request:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index aa5d877df6f8..c39dd4978c9d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -10,6 +10,16 @@ #include "mpi3mr.h" #include <linux/io-64-nonatomic-lo-hi.h> +static int +mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason); +static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc); +static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, + struct mpi3_ioc_facts_data *facts_data); + +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)"); + #if defined(writeq) && defined(CONFIG_64BIT) static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) { @@ -78,6 +88,7 @@ static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc) kfree(mrioc->intr_info); mrioc->intr_info = NULL; mrioc->intr_info_count = 0; + mrioc->is_intr_info_set = false; pci_free_irq_vectors(mrioc->pdev); } @@ -124,8 +135,9 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc, u64 reply_dma) { u32 old_idx = 0; + unsigned long flags; - spin_lock(&mrioc->reply_free_queue_lock); + spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags); old_idx = mrioc->reply_free_queue_host_index; mrioc->reply_free_queue_host_index = ( (mrioc->reply_free_queue_host_index == @@ -134,15 +146,16 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc, mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma); writel(mrioc->reply_free_queue_host_index, &mrioc->sysif_regs->reply_free_host_index); - spin_unlock(&mrioc->reply_free_queue_lock); + spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags); } void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, u64 sense_buf_dma) { u32 old_idx = 0; + unsigned long flags; - spin_lock(&mrioc->sbq_lock); + spin_lock_irqsave(&mrioc->sbq_lock, flags); old_idx = mrioc->sbq_host_index; mrioc->sbq_host_index = ((mrioc->sbq_host_index == (mrioc->sense_buf_q_sz - 1)) ? 
0 : @@ -150,7 +163,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma); writel(mrioc->sbq_host_index, &mrioc->sysif_regs->sense_buffer_free_host_index); - spin_unlock(&mrioc->sbq_lock); + spin_unlock_irqrestore(&mrioc->sbq_lock, flags); } static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, @@ -303,6 +316,12 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag, return &mrioc->dev_rmhs_cmds[idx]; } + if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN && + host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) { + idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; + return &mrioc->evtack_cmds[idx]; + } + return NULL; } @@ -369,7 +388,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, if (def_reply) { cmdptr->state |= MPI3MR_CMD_REPLY_VALID; memcpy((u8 *)cmdptr->reply, (u8 *)def_reply, - mrioc->facts.reply_sz); + mrioc->reply_sz); } if (cmdptr->is_waiting) { complete(&cmdptr->done); @@ -446,10 +465,21 @@ mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci) return reply_desc; } -static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, - struct mpi3mr_intr_info *intr_info) +/** + * mpi3mr_process_op_reply_q - Operational reply queue handler + * @mrioc: Adapter instance reference + * @op_reply_q: Operational reply queue info + * + * Checks the specific operational reply queue and drains the + * reply queue entries until the queue is empty and processes the + * individual reply descriptors. + * + * Return: 0 if the queue is already processed, or number of reply + * descriptors processed. + */ +int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, + struct op_reply_qinfo *op_reply_q) { - struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q; struct op_req_qinfo *op_req_q; u32 exp_phase; u32 reply_ci; @@ -500,7 +530,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, * Ensure remaining completion happens from threaded ISR. */ if (num_op_reply > mrioc->max_host_ios) { - intr_info->op_reply_q->enable_irq_poll = true; + op_reply_q->enable_irq_poll = true; break; } @@ -515,6 +545,34 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, return num_op_reply; } +/** + * mpi3mr_blk_mq_poll - Operational reply queue handler + * @shost: SCSI Host reference + * @queue_num: Request queue number (w.r.t. the OS it is the hardware context number) + * + * Checks the specific operational reply queue and drains the + * reply queue entries until the queue is empty and processes the + * individual reply descriptors. + * + * Return: 0 if the queue is already processed, or number of reply + * descriptors processed. 
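+ *
+ * Wiring sketch (illustrative; assumes the blk-mq poll hook of the SCSI
+ * host template and the driver's template name):
+ *
+ *	static struct scsi_host_template mpi3mr_driver_template = {
+ *		...
+ *		.mq_poll	= mpi3mr_blk_mq_poll,
+ *	};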
+ */ +int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + int num_entries = 0; + struct mpi3mr_ioc *mrioc; + + mrioc = (struct mpi3mr_ioc *)shost->hostdata; + + if ((mrioc->reset_in_progress || mrioc->prepare_for_reset)) + return 0; + + num_entries = mpi3mr_process_op_reply_q(mrioc, + &mrioc->op_reply_qinfo[queue_num]); + + return num_entries; +} + static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) { struct mpi3mr_intr_info *intr_info = privdata; @@ -535,7 +593,8 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) if (!midx) num_admin_replies = mpi3mr_process_admin_reply_q(mrioc); if (intr_info->op_reply_q) - num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info); + num_op_reply = mpi3mr_process_op_reply_q(mrioc, + intr_info->op_reply_q); if (num_admin_replies || num_op_reply) return IRQ_HANDLED; @@ -606,9 +665,10 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) mpi3mr_process_admin_reply_q(mrioc); if (intr_info->op_reply_q) num_op_reply += - mpi3mr_process_op_reply_q(mrioc, intr_info); + mpi3mr_process_op_reply_q(mrioc, + intr_info->op_reply_q); - usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep); + usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP); } while (atomic_read(&intr_info->op_reply_q->pend_ios) && (num_op_reply < mrioc->max_host_ios)); @@ -652,6 +712,25 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) return retval; } +static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors) +{ + if (!mrioc->requested_poll_qcount) + return; + + /* Reserved for Admin and Default Queue */ + if (max_vectors > 2 && + (mrioc->requested_poll_qcount < max_vectors - 2)) { + ioc_info(mrioc, + "enabled polled queues (%d) msix (%d)\n", + mrioc->requested_poll_qcount, max_vectors); + } else { + ioc_info(mrioc, + "disabled polled queues (%d) msix (%d) because of no resources for default queue\n", + mrioc->requested_poll_qcount, max_vectors); + mrioc->requested_poll_qcount = 0; + } +} + /** * mpi3mr_setup_isr - Setup ISR for the controller * @mrioc: Adapter instance reference @@ -664,48 +743,72 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) { unsigned int irq_flags = PCI_IRQ_MSIX; - int max_vectors; + int max_vectors, min_vec; int retval; int i; - struct irq_affinity desc = { .pre_vectors = 1}; + struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 }; + + if (mrioc->is_intr_info_set) + return 0; mpi3mr_cleanup_isr(mrioc); - if (setup_one || reset_devices) + if (setup_one || reset_devices) { max_vectors = 1; - else { + retval = pci_alloc_irq_vectors(mrioc->pdev, + 1, max_vectors, irq_flags); + if (retval < 0) { + ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", + retval); + goto out_failed; + } + } else { max_vectors = - min_t(int, mrioc->cpu_count + 1, mrioc->msix_count); + min_t(int, mrioc->cpu_count + 1 + + mrioc->requested_poll_qcount, mrioc->msix_count); + + mpi3mr_calc_poll_queues(mrioc, max_vectors); ioc_info(mrioc, "MSI-X vectors supported: %d, no of cores: %d,", mrioc->msix_count, mrioc->cpu_count); ioc_info(mrioc, - "MSI-x vectors requested: %d\n", max_vectors); - } + "MSI-x vectors requested: %d poll_queues %d\n", + max_vectors, mrioc->requested_poll_qcount); + + desc.post_vectors = mrioc->requested_poll_qcount; + min_vec = desc.pre_vectors + desc.post_vectors; + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + + retval = 
pci_alloc_irq_vectors_affinity(mrioc->pdev, + min_vec, max_vectors, irq_flags, &desc); + + if (retval < 0) { + ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", + retval); + goto out_failed; + } - irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; - mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0; - retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, - 1, max_vectors, irq_flags, &desc); - if (retval < 0) { - ioc_err(mrioc, "Cannot alloc irq vectors\n"); - goto out_failed; - } - if (retval != max_vectors) { - ioc_info(mrioc, - "allocated vectors (%d) are less than configured (%d)\n", - retval, max_vectors); /* * If only one MSI-x is allocated, then MSI-x 0 will be shared * between Admin queue and operational queue */ - if (retval == 1) + if (retval == min_vec) mrioc->op_reply_q_offset = 0; + else if (retval != (max_vectors)) { + ioc_info(mrioc, + "allocated vectors (%d) are less than configured (%d)\n", + retval, max_vectors); + } max_vectors = retval; + mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0; + + mpi3mr_calc_poll_queues(mrioc, max_vectors); + } + mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, GFP_KERNEL); if (!mrioc->intr_info) { @@ -720,6 +823,8 @@ static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) goto out_failed; } } + if (reset_devices || !setup_one) + mrioc->is_intr_info_set = true; mrioc->intr_info_count = max_vectors; mpi3mr_ioc_enable_intr(mrioc); return 0; @@ -796,6 +901,7 @@ static const struct { }, { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, + { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, }; /** @@ -860,7 +966,7 @@ static const char *mpi3mr_reset_type_name(u16 reset_type) * * Return: Nothing. */ -static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) +void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) { u32 ioc_status, code, code1, code2, code3; @@ -958,25 +1064,25 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); - timeout = mrioc->ready_timeout * 10; + timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; do { ioc_status = readl(&mrioc->sysif_regs->ioc_status); if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) { mpi3mr_clear_reset_history(mrioc); - ioc_config = - readl(&mrioc->sysif_regs->ioc_configuration); - if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || - (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || - (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) { - retval = 0; - break; - } + break; + } + if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { + mpi3mr_print_fault_info(mrioc); + break; } msleep(100); } while (--timeout); - ioc_status = readl(&mrioc->sysif_regs->ioc_status); ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || + (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) + retval = 0; ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n", (!retval) ?
"successful" : "failed", ioc_status, ioc_config); @@ -984,32 +1090,171 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, } /** + * mpi3mr_revalidate_factsdata - validate IOCFacts parameters + * during reset/resume + * @mrioc: Adapter instance reference + * + * Return zero if the new IOCFacts parameters value is compatible with + * older values else return -EPERM + */ +static int +mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) +{ + u16 dev_handle_bitmap_sz; + void *removepend_bitmap; + + if (mrioc->facts.reply_sz > mrioc->reply_sz) { + ioc_err(mrioc, + "cannot increase reply size from %d to %d\n", + mrioc->reply_sz, mrioc->facts.reply_sz); + return -EPERM; + } + + if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) { + ioc_err(mrioc, + "cannot reduce number of operational reply queues from %d to %d\n", + mrioc->num_op_reply_q, + mrioc->facts.max_op_reply_q); + return -EPERM; + } + + if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) { + ioc_err(mrioc, + "cannot reduce number of operational request queues from %d to %d\n", + mrioc->num_op_req_q, mrioc->facts.max_op_req_q); + return -EPERM; + } + + dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8; + if (mrioc->facts.max_devhandle % 8) + dev_handle_bitmap_sz++; + if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) { + removepend_bitmap = krealloc(mrioc->removepend_bitmap, + dev_handle_bitmap_sz, GFP_KERNEL); + if (!removepend_bitmap) { + ioc_err(mrioc, + "failed to increase removepend_bitmap sz from: %d to %d\n", + mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz); + return -EPERM; + } + memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0, + dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz); + mrioc->removepend_bitmap = removepend_bitmap; + ioc_info(mrioc, + "increased dev_handle_bitmap_sz from %d to %d\n", + mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz); + mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz; + } + + return 0; +} + +/** * mpi3mr_bring_ioc_ready - Bring controller to ready state * @mrioc: Adapter instance reference * * Set Enable IOC bit in IOC configuration register and wait for * the controller to become ready. * - * Return: 0 on success, -1 on failure. + * Return: 0 on success, appropriate error on failure. 
*/ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) { - u32 ioc_config, timeout; - enum mpi3mr_iocstate current_state; + u32 ioc_config, ioc_status, timeout; + int retval = 0; + enum mpi3mr_iocstate ioc_state; + u64 base_info; + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); + ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", + ioc_status, ioc_config, base_info); + + /* The timeout value is in 2-second units; convert it to seconds */ + mrioc->ready_timeout = + ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> + MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; + + ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout); + + ioc_state = mpi3mr_get_iocstate(mrioc); + ioc_info(mrioc, "controller is in %s state during detection\n", + mpi3mr_iocstate_name(ioc_state)); + + if (ioc_state == MRIOC_STATE_BECOMING_READY || + ioc_state == MRIOC_STATE_RESET_REQUESTED) { + timeout = mrioc->ready_timeout * 10; + do { + msleep(100); + } while (--timeout); + + ioc_state = mpi3mr_get_iocstate(mrioc); + ioc_info(mrioc, + "controller is in %s state after waiting to reset\n", + mpi3mr_iocstate_name(ioc_state)); + } + + if (ioc_state == MRIOC_STATE_READY) { + ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); + retval = mpi3mr_issue_and_process_mur(mrioc, + MPI3MR_RESET_FROM_BRINGUP); + ioc_state = mpi3mr_get_iocstate(mrioc); + if (retval) + ioc_err(mrioc, + "message unit reset failed with error %d current state %s\n", + retval, mpi3mr_iocstate_name(ioc_state)); + } + if (ioc_state != MRIOC_STATE_RESET) { + mpi3mr_print_fault_info(mrioc); + ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); + retval = mpi3mr_issue_reset(mrioc, + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, + MPI3MR_RESET_FROM_BRINGUP); + if (retval) { + ioc_err(mrioc, + "soft reset failed with error %d\n", retval); + goto out_failed; + } + } + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state != MRIOC_STATE_RESET) { + ioc_err(mrioc, + "cannot bring controller to reset state, current state: %s\n", + mpi3mr_iocstate_name(ioc_state)); + goto out_failed; + } + mpi3mr_clear_reset_history(mrioc); + retval = mpi3mr_setup_admin_qpair(mrioc); + if (retval) { + ioc_err(mrioc, "failed to setup admin queues: error %d\n", + retval); + goto out_failed; + } + + ioc_info(mrioc, "bringing controller to ready state\n"); ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); timeout = mrioc->ready_timeout * 10; do { - current_state = mpi3mr_get_iocstate(mrioc); - if (current_state == MRIOC_STATE_READY) + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state == MRIOC_STATE_READY) { + ioc_info(mrioc, + "successfully transitioned to %s state\n", + mpi3mr_iocstate_name(ioc_state)); return 0; + } msleep(100); } while (--timeout); - return -1; +out_failed: + ioc_state = mpi3mr_get_iocstate(mrioc); + ioc_err(mrioc, + "failed to bring to ready state, current state: %s\n", + mpi3mr_iocstate_name(ioc_state)); + return retval; } /** @@ -1026,7 +1271,6 @@ static inline bool mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) { if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || - (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) return true; return false; } @@ -1049,8
+1293,10 @@ static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) return false; fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; - if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) + if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { + mpi3mr_print_fault_info(mrioc); return true; + } return false; } @@ -1089,26 +1335,36 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason) { int retval = -1; - u8 unlock_retry_count, reset_retry_count = 0; - u32 host_diagnostic, timeout, ioc_status, ioc_config; + u8 unlock_retry_count = 0; + u32 host_diagnostic, ioc_status, ioc_config; + u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; - pci_cfg_access_lock(mrioc->pdev); if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) - goto out; + return retval; if (mrioc->unrecoverable) - goto out; -retry_reset: - unlock_retry_count = 0; + return retval; + if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { + retval = 0; + return retval; + } + + ioc_info(mrioc, "%s reset due to %s(0x%x)\n", + mpi3mr_reset_type_name(reset_type), + mpi3mr_reset_rc_name(reset_reason), reset_reason); + mpi3mr_clear_reset_history(mrioc); do { ioc_info(mrioc, "Write magic sequence to unlock host diag register (retry=%d)\n", ++unlock_retry_count); if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { - writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); + ioc_err(mrioc, + "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", + mpi3mr_reset_type_name(reset_type), + host_diagnostic); mrioc->unrecoverable = 1; - goto out; + return retval; } writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, @@ -1133,31 +1389,26 @@ retry_reset: } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); - ioc_info(mrioc, "%s reset due to %s(0x%x)\n", - mpi3mr_reset_type_name(reset_type), - mpi3mr_reset_rc_name(reset_reason), reset_reason); writel(host_diagnostic | reset_type, &mrioc->sysif_regs->host_diagnostic); - timeout = mrioc->ready_timeout * 10; - if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) { + switch (reset_type) { + case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: do { ioc_status = readl(&mrioc->sysif_regs->ioc_status); - if (ioc_status & - MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { + ioc_config = + readl(&mrioc->sysif_regs->ioc_configuration); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) + && mpi3mr_soft_reset_success(ioc_status, ioc_config) + ) { mpi3mr_clear_reset_history(mrioc); - ioc_config = - readl(&mrioc->sysif_regs->ioc_configuration); - if (mpi3mr_soft_reset_success(ioc_status, - ioc_config)) { - retval = 0; - break; - } + retval = 0; + break; } msleep(100); } while (--timeout); - writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, - &mrioc->sysif_regs->write_sequence); - } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) { + mpi3mr_print_fault_info(mrioc); + break; + case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: do { ioc_status = readl(&mrioc->sysif_regs->ioc_status); if (mpi3mr_diagfault_success(mrioc, ioc_status)) { @@ -1166,28 +1417,22 @@ retry_reset: } msleep(100); } while (--timeout); - mpi3mr_clear_reset_history(mrioc); - writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, - &mrioc->sysif_regs->write_sequence); - } - if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) { - 
ioc_status = readl(&mrioc->sysif_regs->ioc_status); - ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); - ioc_info(mrioc, - "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n", - reset_retry_count, ioc_status, ioc_config); - goto retry_reset; + break; + default: + break; } -out: - pci_cfg_access_unlock(mrioc->pdev); - ioc_status = readl(&mrioc->sysif_regs->ioc_status); - ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, + &mrioc->sysif_regs->write_sequence); + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + ioc_status = readl(&mrioc->sysif_regs->ioc_status); ioc_info(mrioc, - "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n", - (!retval) ? "successful" : "failed", ioc_status, + "ioc_status/ioc_config after %s reset is (0x%x)/(0x%x)\n", + (!retval) ? "successful" : "failed", ioc_status, ioc_config); + if (retval) + mrioc->unrecoverable = 1; return retval; } @@ -1278,7 +1523,7 @@ static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; } } else - size = mrioc->req_qinfo[q_idx].num_requests * + size = mrioc->req_qinfo[q_idx].segment_qd * mrioc->facts.op_req_sz; for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { @@ -1351,10 +1596,11 @@ static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) { struct mpi3_delete_reply_queue_request delq_req; + struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; int retval = 0; u16 reply_qid = 0, midx; - reply_qid = mrioc->op_reply_qinfo[qidx].qid; + reply_qid = op_reply_q->qid; midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); @@ -1364,6 +1610,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) goto out; } + (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ?
mrioc->default_qcount-- : + mrioc->active_poll_qcount--; + memset(&delq_req, 0, sizeof(delq_req)); mutex_lock(&mrioc->init_cmds.mutex); if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { @@ -1389,13 +1638,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) wait_for_completion_timeout(&mrioc->init_cmds.done, (MPI3MR_INTADMCMD_TIMEOUT * HZ)); if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { - ioc_err(mrioc, "Issue DelRepQ: command timed out\n"); - mpi3mr_set_diagsave(mrioc); - mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + ioc_err(mrioc, "delete reply queue timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); - mrioc->unrecoverable = 1; - retval = -1; goto out_unlock; } @@ -1565,6 +1810,8 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) reply_qid = qidx + 1; op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; + if (!mrioc->pdev->revision) + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; op_reply_q->ci = 0; op_reply_q->ephase = 1; atomic_set(&op_reply_q->pend_ios, 0); @@ -1592,8 +1839,26 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; create_req.queue_id = cpu_to_le16(reply_qid); - create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; - create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index); + + if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount)) + op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE; + else + op_reply_q->qtype = MPI3MR_POLL_QUEUE; + + if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) { + create_req.flags = + MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; + create_req.msix_index = + cpu_to_le16(mrioc->intr_info[midx].msix_index); + } else { + create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1); + ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n", + reply_qid, midx); + if (!mrioc->active_poll_qcount) + disable_irq_nosync(pci_irq_vector(mrioc->pdev, + mrioc->intr_info_count - 1)); + } + if (mrioc->enable_segqueue) { create_req.flags |= MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; @@ -1615,12 +1880,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) wait_for_completion_timeout(&mrioc->init_cmds.done, (MPI3MR_INTADMCMD_TIMEOUT * HZ)); if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { - ioc_err(mrioc, "CreateRepQ: command timed out\n"); - mpi3mr_set_diagsave(mrioc); - mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + ioc_err(mrioc, "create reply queue timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); - mrioc->unrecoverable = 1; retval = -1; goto out_unlock; } @@ -1634,7 +1896,11 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) goto out_unlock; } op_reply_q->qid = reply_qid; - mrioc->intr_info[midx].op_reply_q = op_reply_q; + if (midx < mrioc->intr_info_count) + mrioc->intr_info[midx].op_reply_q = op_reply_q; + + (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? 
mrioc->default_qcount++ : + mrioc->active_poll_qcount++; out_unlock: mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; @@ -1722,12 +1988,9 @@ static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, wait_for_completion_timeout(&mrioc->init_cmds.done, (MPI3MR_INTADMCMD_TIMEOUT * HZ)); if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { - ioc_err(mrioc, "CreateReqQ: command timed out\n"); - mpi3mr_set_diagsave(mrioc); - if (mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, - MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT)) - mrioc->unrecoverable = 1; + ioc_err(mrioc, "create request queue timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT); retval = -1; goto out_unlock; } @@ -1771,8 +2034,13 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) mrioc->intr_info_count - mrioc->op_reply_q_offset; if (!mrioc->num_queues) mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); - num_queues = mrioc->num_queues; - ioc_info(mrioc, "Trying to create %d Operational Q pairs\n", + /* + * During reset set the num_queues to the number of queues + * that was set before the reset. + */ + num_queues = mrioc->num_op_reply_q ? + mrioc->num_op_reply_q : mrioc->num_queues; + ioc_info(mrioc, "trying to create %d operational queue pairs\n", num_queues); if (!mrioc->req_qinfo) { @@ -1814,8 +2082,10 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) goto out_failed; } mrioc->num_op_reply_q = mrioc->num_op_req_q = i; - ioc_info(mrioc, "Successfully created %d Operational Q pairs\n", - mrioc->num_op_reply_q); + ioc_info(mrioc, + "successfully created %d operational queue pairs (default/polled) = (%d/%d)\n", + mrioc->num_op_reply_q, mrioc->default_qcount, + mrioc->active_poll_qcount); return retval; out_failed: @@ -1863,7 +2133,7 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, if (mpi3mr_check_req_qfull(op_req_q)) { midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( reply_qidx, mrioc->op_reply_q_offset); - mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]); + mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); if (mpi3mr_check_req_qfull(op_req_q)) { retval = -EAGAIN; @@ -1901,6 +2171,42 @@ out: } /** + * mpi3mr_check_rh_fault_ioc - check reset history and fault + * controller + * @mrioc: Adapter instance reference + * @reason_code: reason code for the fault. + * + * This routine will save a snapdump and fault the controller with + * the given reason code if it is not already in the fault state or + * asynchronously reset. This will be used to handle initialization-time + * faults/resets/timeouts, as in those cases immediate soft reset + * invocation is not required. + * + * Return: None.
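+ *
+ * Typical usage, as in the initialization-time admin command
+ * timeouts later in this patch (ioc_facts shown here):
+ *
+ *	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ *		ioc_err(mrioc, "ioc_facts timed out\n");
+ *		mpi3mr_check_rh_fault_ioc(mrioc,
+ *		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
+ *	}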
+ */ +void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) +{ + u32 ioc_status, host_diagnostic, timeout; + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + mpi3mr_print_fault_info(mrioc); + return; + } + mpi3mr_set_diagsave(mrioc); + mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + reason_code); + timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; + do { + host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); + if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) + break; + msleep(100); + } while (--timeout); +} + +/** * mpi3mr_sync_timestamp - Issue time stamp sync request * @mrioc: Adapter reference * @@ -1945,8 +2251,9 @@ static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc) if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n"); mrioc->init_cmds.is_waiting = 0; - mpi3mr_soft_reset_handler(mrioc, - MPI3MR_RESET_FROM_TSU_TIMEOUT, 1); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_TSU_TIMEOUT, 1); retval = -1; goto out_unlock; } @@ -1969,6 +2276,91 @@ out: } /** + * mpi3mr_print_pkg_ver - display controller fw package version + * @mrioc: Adapter reference + * + * Retrieve firmware package version from the component image + * header of the controller flash and display it. + * + * Return: 0 on success and non-zero on failure. + */ +static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_ci_upload_request ci_upload; + int retval = -1; + void *data = NULL; + dma_addr_t data_dma; + struct mpi3_ci_manifest_mpi *manifest; + u32 data_len = sizeof(struct mpi3_ci_manifest_mpi); + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + + data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, + GFP_KERNEL); + if (!data) + return -ENOMEM; + + memset(&ci_upload, 0, sizeof(ci_upload)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + ioc_err(mrioc, "sending get package version failed due to command in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + ci_upload.function = MPI3_FUNCTION_CI_UPLOAD; + ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY; + ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST); + ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE); + ci_upload.segment_size = cpu_to_le32(data_len); + + mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len, + data_dma); + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &ci_upload, + sizeof(ci_upload), 1); + if (retval) { + ioc_err(mrioc, "posting get package version failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "get package version timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + == MPI3_IOCSTATUS_SUCCESS) { + manifest = (struct mpi3_ci_manifest_mpi *) data; + if (manifest->manifest_type == 
MPI3_CI_MANIFEST_TYPE_MPI) { + ioc_info(mrioc, + "firmware package version(%d.%d.%d.%d.%05d-%05d)\n", + manifest->package_version.gen_major, + manifest->package_version.gen_minor, + manifest->package_version.phase_major, + manifest->package_version.phase_minor, + manifest->package_version.customer_id, + manifest->package_version.build_num); + } + } + retval = 0; +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); + +out: + if (data) + dma_free_coherent(&mrioc->pdev->dev, data_len, data, + data_dma); + return retval; +} + +/** * mpi3mr_watchdog_work - watchdog thread to monitor faults * @work: work struct * @@ -1984,50 +2376,66 @@ static void mpi3mr_watchdog_work(struct work_struct *work) container_of(work, struct mpi3mr_ioc, watchdog_work.work); unsigned long flags; enum mpi3mr_iocstate ioc_state; - u32 fault, host_diagnostic; + u32 fault, host_diagnostic, ioc_status; + u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; + + if (mrioc->reset_in_progress || mrioc->unrecoverable) + return; if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { mrioc->ts_update_counter = 0; mpi3mr_sync_timestamp(mrioc); } + if ((mrioc->prepare_for_reset) && + ((mrioc->prepare_for_reset_timeout_counter++) >= + MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) { + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1); + return; + } + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { + mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); + return; + } + /*Check for fault state every one second and issue Soft reset*/ ioc_state = mpi3mr_get_iocstate(mrioc); - if (ioc_state == MRIOC_STATE_FAULT) { - fault = readl(&mrioc->sysif_regs->fault) & - MPI3_SYSIF_FAULT_CODE_MASK; - host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); - if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { - if (!mrioc->diagsave_timeout) { - mpi3mr_print_fault_info(mrioc); - ioc_warn(mrioc, "Diag save in progress\n"); - } - if ((mrioc->diagsave_timeout++) <= - MPI3_SYSIF_DIAG_SAVE_TIMEOUT) - goto schedule_work; - } else - mpi3mr_print_fault_info(mrioc); - mrioc->diagsave_timeout = 0; + if (ioc_state != MRIOC_STATE_FAULT) + goto schedule_work; - if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) { - ioc_info(mrioc, - "Factory Reset fault occurred marking controller as unrecoverable" - ); - mrioc->unrecoverable = 1; - goto out; + fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; + host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); + if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { + if (!mrioc->diagsave_timeout) { + mpi3mr_print_fault_info(mrioc); + ioc_warn(mrioc, "diag save in progress\n"); } + if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT) + goto schedule_work; + } - if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) || - (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) || - (mrioc->reset_in_progress)) - goto out; - if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET) - mpi3mr_soft_reset_handler(mrioc, - MPI3MR_RESET_FROM_CIACTIV_FAULT, 0); - else - mpi3mr_soft_reset_handler(mrioc, - MPI3MR_RESET_FROM_FAULT_WATCH, 0); + mpi3mr_print_fault_info(mrioc); + mrioc->diagsave_timeout = 0; + + switch (fault) { + case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: + ioc_info(mrioc, + "controller requires system power cycle, marking controller as unrecoverable\n"); + mrioc->unrecoverable = 1; + return; + case 
MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS: + return; + case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET: + reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT; + break; + default: + break; } + mpi3mr_soft_reset_handler(mrioc, reset_reason, 0); + return; schedule_work: spin_lock_irqsave(&mrioc->watchdog_lock, flags); @@ -2036,7 +2444,6 @@ schedule_work: &mrioc->watchdog_work, msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); -out: return; } @@ -2097,41 +2504,6 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc) } /** - * mpi3mr_kill_ioc - Kill the controller - * @mrioc: Adapter instance reference - * @reason: reason for the failure. - * - * If fault debug is enabled, display the fault info else issue - * diag fault and freeze the system for controller debug - * purpose. - * - * Return: Nothing. - */ -static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason) -{ - enum mpi3mr_iocstate ioc_state; - - if (!mrioc->fault_dbg) - return; - - dump_stack(); - - ioc_state = mpi3mr_get_iocstate(mrioc); - if (ioc_state == MRIOC_STATE_FAULT) - mpi3mr_print_fault_info(mrioc); - else { - ioc_err(mrioc, "Firmware is halted due to the reason %d\n", - reason); - mpi3mr_diagfault_reset_handler(mrioc, reason); - } - if (mrioc->fault_dbg == 2) - for (;;) - ; - else - panic("panic in %s\n", __func__); -} - -/** * mpi3mr_setup_admin_qpair - Setup admin queue pair * @mrioc: Adapter instance reference * @@ -2258,12 +2630,9 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc, wait_for_completion_timeout(&mrioc->init_cmds.done, (MPI3MR_INTADMCMD_TIMEOUT * HZ)); if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { - ioc_err(mrioc, "Issue IOCFacts: command timed out\n"); - mpi3mr_set_diagsave(mrioc); - mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + ioc_err(mrioc, "ioc_facts timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT); - mrioc->unrecoverable = 1; retval = -1; goto out_unlock; } @@ -2277,6 +2646,7 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc, goto out_unlock; } memcpy(facts_data, (u8 *)data, data_len); + mpi3mr_process_factsdata(mrioc, facts_data); out_unlock: mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; mutex_unlock(&mrioc->init_cmds.mutex); @@ -2374,14 +2744,13 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4; mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions); mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id); - mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds); mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds); mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds); - mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds); - mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds); + mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds); + mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds); mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme); mrioc->facts.max_pcie_switches = - le16_to_cpu(facts_data->max_pc_ie_switches); + le16_to_cpu(facts_data->max_pcie_switches); mrioc->facts.max_sasexpanders = le16_to_cpu(facts_data->max_sas_expanders); mrioc->facts.max_sasinitiators = @@ -2415,22 +2784,15 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, mrioc->facts.ioc_num, mrioc->facts.max_op_req_q, mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle); 
ioc_info(mrioc, - "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n", + "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n", mrioc->facts.max_reqs, mrioc->facts.min_devhandle, - mrioc->facts.max_pds, mrioc->facts.max_msix_vectors, - mrioc->facts.max_perids); + mrioc->facts.max_msix_vectors, mrioc->facts.max_perids); ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ", mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value, mrioc->facts.sge_mod_shift); ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n", mrioc->facts.dma_mask, (facts_flags & MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK)); - - mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; - - if (reset_devices) - mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, - MPI3MR_HOST_IOS_KDUMP); } /** @@ -2446,23 +2808,29 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) { int retval = 0; u32 sz, i; - dma_addr_t phy_addr; if (mrioc->init_cmds.reply) - goto post_reply_sbuf; + return retval; - mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL); + mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); if (!mrioc->init_cmds.reply) goto out_failed; for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { - mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz, + mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); if (!mrioc->dev_rmhs_cmds[i].reply) goto out_failed; } - mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL); + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { + mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz, + GFP_KERNEL); + if (!mrioc->evtack_cmds[i].reply) + goto out_failed; + } + + mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); if (!mrioc->host_tm_cmds.reply) goto out_failed; @@ -2482,13 +2850,21 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) if (!mrioc->devrem_bitmap) goto out_failed; + mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8; + if (MPI3MR_NUM_EVTACKCMD % 8) + mrioc->evtack_cmds_bitmap_sz++; + mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz, + GFP_KERNEL); + if (!mrioc->evtack_cmds_bitmap) + goto out_failed; + mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES; mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1; mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR; mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1; /* reply buffer pool, 16 byte align */ - sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz; + sz = mrioc->num_reply_bufs * mrioc->reply_sz; mrioc->reply_buf_pool = dma_pool_create("reply_buf pool", &mrioc->pdev->dev, sz, 16, 0); if (!mrioc->reply_buf_pool) { @@ -2517,7 +2893,7 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) goto out_failed; /* sense buffer pool, 4 byte align */ - sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ; + sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; mrioc->sense_buf_pool = dma_pool_create("sense_buf pool", &mrioc->pdev->dev, sz, 4, 0); if (!mrioc->sense_buf_pool) { @@ -2542,21 +2918,42 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) if (!mrioc->sense_buf_q) goto out_failed; -post_reply_sbuf: - sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz; + return retval; + +out_failed: + retval = -1; + return retval; +} + +/** + * mpimr_initialize_reply_sbuf_queues - initialize reply sense + * buffers + * @mrioc: Adapter instance reference + * + * Helper function to initialize reply and sense buffers along + 
* with some debug prints. + * + * Return: None. + */ +static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc) +{ + u32 sz, i; + dma_addr_t phy_addr; + + sz = mrioc->num_reply_bufs * mrioc->reply_sz; ioc_info(mrioc, "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", - mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz, + mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz, (sz / 1024), (unsigned long long)mrioc->reply_buf_dma); sz = mrioc->reply_free_qsz * 8; ioc_info(mrioc, "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024), (unsigned long long)mrioc->reply_free_q_dma); - sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ; + sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; ioc_info(mrioc, "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", - mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ, + mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ, (sz / 1024), (unsigned long long)mrioc->sense_buf_dma); sz = mrioc->sense_buf_q_sz * 8; ioc_info(mrioc, @@ -2566,20 +2963,15 @@ post_reply_sbuf: /* initialize Reply buffer Queue */ for (i = 0, phy_addr = mrioc->reply_buf_dma; - i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz) + i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz) mrioc->reply_free_q[i] = cpu_to_le64(phy_addr); mrioc->reply_free_q[i] = cpu_to_le64(0); /* initialize Sense Buffer Queue */ for (i = 0, phy_addr = mrioc->sense_buf_dma; - i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ) + i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ) mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr); mrioc->sense_buf_q[i] = cpu_to_le64(0); - return retval; - -out_failed: - retval = -1; - return retval; } /** @@ -2606,6 +2998,8 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) retval = -1; goto out; } + mpimr_initialize_reply_sbuf_queues(mrioc); + drv_info->information_length = cpu_to_le32(data_len); strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); @@ -2639,7 +3033,7 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); iocinit_req.reply_free_queue_address = cpu_to_le64(mrioc->reply_free_q_dma); - iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ); + iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ); iocinit_req.sense_buffer_free_queue_depth = cpu_to_le16(mrioc->sense_buf_q_sz); iocinit_req.sense_buffer_free_queue_address = @@ -2659,12 +3053,9 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) wait_for_completion_timeout(&mrioc->init_cmds.done, (MPI3MR_INTADMCMD_TIMEOUT * HZ)); if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { - mpi3mr_set_diagsave(mrioc); - mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); - mrioc->unrecoverable = 1; - ioc_err(mrioc, "Issue IOCInit: command timed out\n"); + ioc_err(mrioc, "ioc_init timed out\n"); retval = -1; goto out_unlock; } @@ -2678,6 +3069,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) goto out_unlock; } + mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; + writel(mrioc->reply_free_queue_host_index, + 
&mrioc->sysif_regs->reply_free_host_index); + + mrioc->sbq_host_index = mrioc->num_sense_bufs; + writel(mrioc->sbq_host_index, + &mrioc->sysif_regs->sense_buffer_free_host_index); out_unlock: mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; mutex_unlock(&mrioc->init_cmds.mutex); @@ -2755,12 +3153,9 @@ static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) wait_for_completion_timeout(&mrioc->init_cmds.done, (MPI3MR_INTADMCMD_TIMEOUT * HZ)); if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { - ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); - mpi3mr_set_diagsave(mrioc); - mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + ioc_err(mrioc, "event notification timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); - mrioc->unrecoverable = 1; retval = -1; goto out_unlock; } @@ -2782,17 +3177,17 @@ out: } /** - * mpi3mr_send_event_ack - Send event acknowledgment + * mpi3mr_process_event_ack - Process event acknowledgment * @mrioc: Adapter instance reference * @event: MPI3 event ID - * @event_ctx: Event context + * @event_ctx: event context * * Send event acknowledgment through admin queue and wait for * it to complete. * * Return: 0 on success, non-zero on failures. */ -int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, +int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, u32 event_ctx) { struct mpi3_event_ack_request evtack_req; @@ -2825,8 +3220,9 @@ int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, (MPI3MR_INTADMCMD_TIMEOUT * HZ)); if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); - mpi3mr_soft_reset_handler(mrioc, - MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); retval = -1; goto out_unlock; } @@ -2863,6 +3259,9 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) u32 sz, i; u16 num_chains; + if (mrioc->chain_sgl_list) + return retval; + num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION @@ -2966,29 +3365,28 @@ int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); goto out_unlock; } - if (!async) { - wait_for_completion_timeout(&mrioc->init_cmds.done, - (pe_timeout * HZ)); - if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { - ioc_err(mrioc, "Issue PortEnable: command timed out\n"); - retval = -1; - mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; - mpi3mr_set_diagsave(mrioc); - mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, - MPI3MR_RESET_FROM_PE_TIMEOUT); - mrioc->unrecoverable = 1; - goto out_unlock; - } - mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); + if (async) { + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; } + + wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "port enable timed out\n"); + retval = -1; + mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT); + goto out_unlock; + } + mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); + out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; mutex_unlock(&mrioc->init_cmds.mutex); out: return retval; } -/* Protocol type to name mapper structure*/ +/* Protocol type to name mapper structure */ static const struct { u8 protocol; char *name; @@ -3181,6 +3579,10 @@ 
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) mrioc->sysif_regs, memap_sz); ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", mrioc->msix_count); + + if (!reset_devices && poll_queues > 0) + mrioc->requested_poll_qcount = min_t(int, poll_queues, + mrioc->msix_count - 2); return retval; out_failed: @@ -3189,6 +3591,46 @@ out_failed: } /** + * mpi3mr_enable_events - Enable required events + * @mrioc: Adapter instance reference + * + * This routine unmasks the events required by the driver by + * sending the appropriate event mask bitmap through an event + * notification request. + * + * Return: 0 on success and non-zero on failure. + */ +static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc) +{ + int retval = 0; + u32 i; + + for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mrioc->event_masks[i] = -1; + + mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_TEMP_THRESHOLD); + + retval = mpi3mr_issue_event_notification(mrioc); + if (retval) + ioc_err(mrioc, "failed to issue event notification %d\n", + retval); + return retval; +} + +/** * mpi3mr_init_ioc - Initialize the controller * @mrioc: Adapter instance reference * @init_type: Flag to indicate is the init_type @@ -3204,227 +3646,240 @@ out_failed: * * Return: 0 on success and non-zero on failure.
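 *
 * In sketch form, the reworked body below retries the whole bring-up
 * twice before marking the controller unrecoverable:
 *
 *	retry_init:
 *		bring_ioc_ready -> setup_isr -> ioc_facts -> buffers ->
 *		ioc_init -> package version -> re-setup isr ->
 *		create op queues -> enable events
 *	out_failed:
 *		if (retry++ < 2) { memset buffers; goto retry_init; }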
*/ -int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type) +int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) { int retval = 0; - enum mpi3mr_iocstate ioc_state; - u64 base_info; - u32 timeout; - u32 ioc_status, ioc_config, i; + u8 retry = 0; struct mpi3_ioc_facts_data facts_data; - mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP; - mrioc->change_count = 0; - if (init_type == MPI3MR_IT_INIT) { - mrioc->cpu_count = num_online_cpus(); - retval = mpi3mr_setup_resources(mrioc); - if (retval) { - ioc_err(mrioc, "Failed to setup resources:error %d\n", - retval); - goto out_nocleanup; - } +retry_init: + retval = mpi3mr_bring_ioc_ready(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", + retval); + goto out_failed_noretry; } - ioc_status = readl(&mrioc->sysif_regs->ioc_status); - ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + retval = mpi3mr_setup_isr(mrioc, 1); + if (retval) { + ioc_err(mrioc, "Failed to setup ISR error %d\n", + retval); + goto out_failed_noretry; + } - ioc_info(mrioc, "SOD status %x configuration %x\n", - ioc_status, ioc_config); + retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); + if (retval) { + ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", + retval); + goto out_failed; + } - base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); - ioc_info(mrioc, "SOD base_info %llx\n", base_info); + mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; - /*The timeout value is in 2sec unit, changing it to seconds*/ - mrioc->ready_timeout = - ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> - MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; + if (reset_devices) + mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, + MPI3MR_HOST_IOS_KDUMP); - ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout); + mrioc->reply_sz = mrioc->facts.reply_sz; - ioc_state = mpi3mr_get_iocstate(mrioc); - ioc_info(mrioc, "IOC in %s state during detection\n", - mpi3mr_iocstate_name(ioc_state)); + retval = mpi3mr_check_reset_dma_mask(mrioc); + if (retval) { + ioc_err(mrioc, "Resetting dma mask failed %d\n", + retval); + goto out_failed_noretry; + } - if (ioc_state == MRIOC_STATE_BECOMING_READY || - ioc_state == MRIOC_STATE_RESET_REQUESTED) { - timeout = mrioc->ready_timeout * 10; - do { - msleep(100); - } while (--timeout); + mpi3mr_print_ioc_info(mrioc); - ioc_state = mpi3mr_get_iocstate(mrioc); - ioc_info(mrioc, - "IOC in %s state after waiting for reset time\n", - mpi3mr_iocstate_name(ioc_state)); + retval = mpi3mr_alloc_reply_sense_bufs(mrioc); + if (retval) { + ioc_err(mrioc, + "%s :Failed to allocate reply sense buffers %d\n", + __func__, retval); + goto out_failed_noretry; } - if (ioc_state == MRIOC_STATE_READY) { - retval = mpi3mr_issue_and_process_mur(mrioc, - MPI3MR_RESET_FROM_BRINGUP); - if (retval) { - ioc_err(mrioc, "Failed to MU reset IOC error %d\n", - retval); - } - ioc_state = mpi3mr_get_iocstate(mrioc); + retval = mpi3mr_alloc_chain_bufs(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to allocate chain buffers %d\n", + retval); + goto out_failed_noretry; } - if (ioc_state != MRIOC_STATE_RESET) { - mpi3mr_print_fault_info(mrioc); - retval = mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, - MPI3MR_RESET_FROM_BRINGUP); - if (retval) { - ioc_err(mrioc, - "%s :Failed to soft reset IOC error %d\n", - __func__, retval); - goto out_failed; - } + + retval = mpi3mr_issue_iocinit(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to Issue IOC Init %d\n", + retval); + goto out_failed; } - ioc_state
= mpi3mr_get_iocstate(mrioc); - if (ioc_state != MRIOC_STATE_RESET) { - retval = -1; - ioc_err(mrioc, "Cannot bring IOC to reset state\n"); + + retval = mpi3mr_print_pkg_ver(mrioc); + if (retval) { + ioc_err(mrioc, "failed to get package version\n"); goto out_failed; } - retval = mpi3mr_setup_admin_qpair(mrioc); + retval = mpi3mr_setup_isr(mrioc, 0); if (retval) { - ioc_err(mrioc, "Failed to setup admin Qs: error %d\n", + ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", + retval); + goto out_failed_noretry; + } + + retval = mpi3mr_create_op_queues(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to create OpQueues error %d\n", retval); goto out_failed; } - retval = mpi3mr_bring_ioc_ready(mrioc); + retval = mpi3mr_enable_events(mrioc); if (retval) { - ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", + ioc_err(mrioc, "failed to enable events %d\n", retval); goto out_failed; } - if (init_type != MPI3MR_IT_RESET) { + ioc_info(mrioc, "controller initialization completed successfully\n"); + return retval; +out_failed: + if (retry < 2) { + retry++; + ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n", + retry); + mpi3mr_memset_buffers(mrioc); + goto retry_init; + } +out_failed_noretry: + ioc_err(mrioc, "controller initialization failed\n"); + mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + MPI3MR_RESET_FROM_CTLR_CLEANUP); + mrioc->unrecoverable = 1; + return retval; +} + +/** + * mpi3mr_reinit_ioc - Re-Initialize the controller + * @mrioc: Adapter instance reference + * @is_resume: Called from resume or reset path + * + * This is the controller re-initialization routine, executed from + * the soft reset handler or resume callback. Creates + * operational reply queue pairs, allocates required memory for + * the reply pool and sense buffer pool, issues the IOC init request + * to the firmware, unmasks the events and issues port enable to + * discover SAS/SATA/NVMe devices and RAID volumes. + * + * Return: 0 on success and non-zero on failure.
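+ *
+ * Illustrative call sites (a sketch; the is_resume argument is the
+ * only difference between the two paths):
+ *
+ *	mpi3mr_reinit_ioc(mrioc, 1);	from the PCI resume callback
+ *	mpi3mr_reinit_ioc(mrioc, 0);	from the soft reset handler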
+ */ +int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) +{ + int retval = 0; + u8 retry = 0; + struct mpi3_ioc_facts_data facts_data; + +retry_init: + dprint_reset(mrioc, "bringing up the controller to ready state\n"); + retval = mpi3mr_bring_ioc_ready(mrioc); + if (retval) { + ioc_err(mrioc, "failed to bring to ready state\n"); + goto out_failed_noretry; + } + + if (is_resume) { + dprint_reset(mrioc, "setting up single ISR\n"); retval = mpi3mr_setup_isr(mrioc, 1); if (retval) { - ioc_err(mrioc, "Failed to setup ISR error %d\n", - retval); - goto out_failed; + ioc_err(mrioc, "failed to setup ISR\n"); + goto out_failed_noretry; } } else mpi3mr_ioc_enable_intr(mrioc); + dprint_reset(mrioc, "getting ioc_facts\n"); retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); if (retval) { - ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", - retval); + ioc_err(mrioc, "failed to get ioc_facts\n"); goto out_failed; } - mpi3mr_process_factsdata(mrioc, &facts_data); - if (init_type == MPI3MR_IT_INIT) { - retval = mpi3mr_check_reset_dma_mask(mrioc); - if (retval) { - ioc_err(mrioc, "Resetting dma mask failed %d\n", - retval); - goto out_failed; - } + dprint_reset(mrioc, "validating ioc_facts\n"); + retval = mpi3mr_revalidate_factsdata(mrioc); + if (retval) { + ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); + goto out_failed_noretry; } mpi3mr_print_ioc_info(mrioc); - retval = mpi3mr_alloc_reply_sense_bufs(mrioc); + dprint_reset(mrioc, "sending ioc_init\n"); + retval = mpi3mr_issue_iocinit(mrioc); if (retval) { - ioc_err(mrioc, - "%s :Failed to allocated reply sense buffers %d\n", - __func__, retval); + ioc_err(mrioc, "failed to send ioc_init\n"); goto out_failed; } - if (init_type == MPI3MR_IT_INIT) { - retval = mpi3mr_alloc_chain_bufs(mrioc); - if (retval) { - ioc_err(mrioc, "Failed to allocated chain buffers %d\n", - retval); - goto out_failed; - } - } - - retval = mpi3mr_issue_iocinit(mrioc); + dprint_reset(mrioc, "getting package version\n"); + retval = mpi3mr_print_pkg_ver(mrioc); if (retval) { - ioc_err(mrioc, "Failed to Issue IOC Init %d\n", - retval); + ioc_err(mrioc, "failed to get package version\n"); goto out_failed; } - mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; - writel(mrioc->reply_free_queue_host_index, - &mrioc->sysif_regs->reply_free_host_index); - - mrioc->sbq_host_index = mrioc->num_sense_bufs; - writel(mrioc->sbq_host_index, - &mrioc->sysif_regs->sense_buffer_free_host_index); - if (init_type != MPI3MR_IT_RESET) { + if (is_resume) { + dprint_reset(mrioc, "setting up multiple ISR\n"); retval = mpi3mr_setup_isr(mrioc, 0); if (retval) { - ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", - retval); - goto out_failed; + ioc_err(mrioc, "failed to re-setup ISR\n"); + goto out_failed_noretry; } } + dprint_reset(mrioc, "creating operational queue pairs\n"); retval = mpi3mr_create_op_queues(mrioc); if (retval) { - ioc_err(mrioc, "Failed to create OpQueues error %d\n", - retval); + ioc_err(mrioc, "failed to create operational queue pairs\n"); goto out_failed; } - if ((init_type != MPI3MR_IT_INIT) && - (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) { - retval = -1; + if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { ioc_err(mrioc, - "Cannot create minimum number of OpQueues expected:%d created:%d\n", + "cannot create minimum number of operational queues expected:%d created:%d\n", mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); - goto out_failed; + goto out_failed_noretry; } - for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) -
mrioc->event_masks[i] = -1; - - mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); - mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); - - retval = mpi3mr_issue_event_notification(mrioc); + dprint_reset(mrioc, "enabling events\n"); + retval = mpi3mr_enable_events(mrioc); if (retval) { - ioc_err(mrioc, "Failed to issue event notification %d\n", - retval); + ioc_err(mrioc, "failed to enable events\n"); goto out_failed; } - if (init_type != MPI3MR_IT_INIT) { - ioc_info(mrioc, "Issuing Port Enable\n"); - retval = mpi3mr_issue_port_enable(mrioc, 0); - if (retval) { - ioc_err(mrioc, "Failed to issue port enable %d\n", - retval); - goto out_failed; - } + ioc_info(mrioc, "sending port enable\n"); + retval = mpi3mr_issue_port_enable(mrioc, 0); + if (retval) { + ioc_err(mrioc, "failed to issue port enable\n"); + goto out_failed; } - return retval; + ioc_info(mrioc, "controller %s completed successfully\n", + (is_resume) ? "resume" : "re-initialization"); + return retval; out_failed: - if (init_type == MPI3MR_IT_INIT) - mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP); - else - mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE); -out_nocleanup: + if (retry < 2) { + retry++; + ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", + (is_resume) ? "resume" : "re-initialization", retry); + mpi3mr_memset_buffers(mrioc); + goto retry_init; + } +out_failed_noretry: + ioc_err(mrioc, "controller %s failed\n", + (is_resume) ? "resume" : "re-initialization"); + mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + MPI3MR_RESET_FROM_CTLR_CLEANUP); + mrioc->unrecoverable = 1; return retval; } @@ -3488,17 +3943,29 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) { u16 i; - memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz); - memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); - - memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); - memset(mrioc->host_tm_cmds.reply, 0, - sizeof(*mrioc->host_tm_cmds.reply)); - for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) - memset(mrioc->dev_rmhs_cmds[i].reply, 0, - sizeof(*mrioc->dev_rmhs_cmds[i].reply)); - memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz); - memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz); + mrioc->change_count = 0; + mrioc->active_poll_qcount = 0; + mrioc->default_qcount = 0; + if (mrioc->admin_req_base) + memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz); + if (mrioc->admin_reply_base) + memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); + + if (mrioc->init_cmds.reply) { + memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); + memset(mrioc->host_tm_cmds.reply, 0, + sizeof(*mrioc->host_tm_cmds.reply)); + for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) + memset(mrioc->dev_rmhs_cmds[i].reply, 0, + sizeof(*mrioc->dev_rmhs_cmds[i].reply)); + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) + memset(mrioc->evtack_cmds[i].reply, 0, +
sizeof(*mrioc->evtack_cmds[i].reply)); + memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz); + memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz); + memset(mrioc->evtack_cmds_bitmap, 0, + mrioc->evtack_cmds_bitmap_sz); + } for (i = 0; i < mrioc->num_queues; i++) { mrioc->op_reply_qinfo[i].qid = 0; @@ -3527,7 +3994,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) * * Return: Nothing. */ -static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) +void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) { u16 i; struct mpi3mr_intr_info *intr_info; @@ -3591,12 +4058,20 @@ static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) kfree(mrioc->host_tm_cmds.reply); mrioc->host_tm_cmds.reply = NULL; + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { + kfree(mrioc->evtack_cmds[i].reply); + mrioc->evtack_cmds[i].reply = NULL; + } + kfree(mrioc->removepend_bitmap); mrioc->removepend_bitmap = NULL; kfree(mrioc->devrem_bitmap); mrioc->devrem_bitmap = NULL; + kfree(mrioc->evtack_cmds_bitmap); + mrioc->evtack_cmds_bitmap = NULL; + kfree(mrioc->chain_bitmap); mrioc->chain_bitmap = NULL; @@ -3663,7 +4138,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL; - ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN; + ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ; writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); @@ -3699,21 +4174,17 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) /** * mpi3mr_cleanup_ioc - Cleanup controller * @mrioc: Adapter instance reference - * @reason: Cleanup reason - * + * controller cleanup handler, Message unit reset or soft reset - * and shutdown notification is issued to the controller and the - * associated memory resources are freed. + * and shutdown notification is issued to the controller. * * Return: Nothing. */ -void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason) +void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc) { enum mpi3mr_iocstate ioc_state; - if (reason == MPI3MR_COMPLETE_CLEANUP) - mpi3mr_stop_watchdog(mrioc); - + dprint_exit(mrioc, "cleaning up the controller\n"); mpi3mr_ioc_disable_intr(mrioc); ioc_state = mpi3mr_get_iocstate(mrioc); @@ -3725,15 +4196,9 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason) mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, MPI3MR_RESET_FROM_MUR_FAILURE); - - if (reason != MPI3MR_REINIT_FAILURE) - mpi3mr_issue_ioc_shutdown(mrioc); - } - - if (reason == MPI3MR_COMPLETE_CLEANUP) { - mpi3mr_free_mem(mrioc); - mpi3mr_cleanup_resources(mrioc); + mpi3mr_issue_ioc_shutdown(mrioc); } + dprint_exit(mrioc, "controller cleanup completed\n"); } /** @@ -3782,41 +4247,11 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) cmdptr = &mrioc->dev_rmhs_cmds[i]; mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); } -} -/** - * mpi3mr_diagfault_reset_handler - Diag fault reset handler - * @mrioc: Adapter instance reference - * @reset_reason: Reset reason code - * - * This is an handler for issuing diag fault reset from the - * applications through IOCTL path to stop the execution of the - * controller - * - * Return: 0 on success, non-zero on failure. 
- */ -int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc, - u32 reset_reason) -{ - int retval = 0; - - ioc_info(mrioc, "Entry: reason code: %s\n", - mpi3mr_reset_rc_name(reset_reason)); - mrioc->reset_in_progress = 1; - - mpi3mr_ioc_disable_intr(mrioc); - - retval = mpi3mr_issue_reset(mrioc, - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); - - if (retval) { - ioc_err(mrioc, "The diag fault reset failed: reason %d\n", - reset_reason); - mpi3mr_ioc_enable_intr(mrioc); + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { + cmdptr = &mrioc->evtack_cmds[i]; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); } - ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED")); - mrioc->reset_in_progress = 0; - return retval; } /** @@ -3847,34 +4282,44 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, unsigned long flags; u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; - if (mrioc->fault_dbg) { - if (snapdump) - mpi3mr_set_diagsave(mrioc); - mpi3mr_kill_ioc(mrioc, reset_reason); - } - + /* Block the reset handler while a diag save is in progress */ + dprint_reset(mrioc, + "soft_reset_handler: check and block on diagsave_timeout(%d)\n", + mrioc->diagsave_timeout); + while (mrioc->diagsave_timeout) + ssleep(1); /* * Block new resets until the currently executing one is finished and * return the status of the existing reset for all blocked resets */ + dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n"); if (!mutex_trylock(&mrioc->reset_mutex)) { - ioc_info(mrioc, "Another reset in progress\n"); - return -1; + ioc_info(mrioc, + "controller reset triggered by %s is blocked due to another reset in progress\n", + mpi3mr_reset_rc_name(reset_reason)); + do { + ssleep(1); + } while (mrioc->reset_in_progress == 1); + ioc_info(mrioc, + "returning previous reset result(%d) for the reset triggered by %s\n", + mrioc->prev_reset_result, + mpi3mr_reset_rc_name(reset_reason)); + return mrioc->prev_reset_result; } + ioc_info(mrioc, "controller reset is triggered by %s\n", + mpi3mr_reset_rc_name(reset_reason)); + mrioc->reset_in_progress = 1; + mrioc->prev_reset_result = -1; if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) && + (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) && (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) { for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) mrioc->event_masks[i] = -1; - retval = mpi3mr_issue_event_notification(mrioc); - - if (retval) { - ioc_err(mrioc, - "Failed to turn off events prior to reset %d\n", - retval); - } + dprint_reset(mrioc, "soft_reset_handler: masking events\n"); + mpi3mr_issue_event_notification(mrioc); } mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT); @@ -3904,15 +4349,20 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, goto out; } - mpi3mr_flush_delayed_rmhs_list(mrioc); + mpi3mr_flush_delayed_cmd_lists(mrioc); mpi3mr_flush_drv_cmds(mrioc); memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz); memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz); + memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz); mpi3mr_cleanup_fwevt_list(mrioc); mpi3mr_flush_host_io(mrioc); mpi3mr_invalidate_devhandles(mrioc); + if (mrioc->prepare_for_reset) { + mrioc->prepare_for_reset = 0; + mrioc->prepare_for_reset_timeout_counter = 0; + } mpi3mr_memset_buffers(mrioc); - retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET); + retval = mpi3mr_reinit_ioc(mrioc, 0); if (retval) { pr_err(IOCNAME "reinit after soft reset failed: reason %d\n", mrioc->name, reset_reason);
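The reworked soft reset path above serializes concurrent reset requests: the caller that wins mutex_trylock() performs the reset and records its outcome in prev_reset_result, while every blocked caller sleeps until reset_in_progress clears and then returns that stored result instead of triggering a second reset. A minimal userspace sketch of the same pattern, with hypothetical names and a pthread mutex standing in for the kernel's reset_mutex:

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t reset_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile bool reset_in_progress;
static volatile int prev_reset_result = -1;

/* Placeholder for the actual controller reset work. */
static int do_controller_reset(void) { return 0; }

int serialized_reset(void)
{
	int ret;

	if (pthread_mutex_trylock(&reset_mutex)) {
		/* Another reset owns the mutex: wait and adopt its result. */
		while (reset_in_progress)
			sleep(1);
		return prev_reset_result;
	}

	reset_in_progress = true;
	ret = do_controller_reset();
	prev_reset_result = ret;
	reset_in_progress = false;
	pthread_mutex_unlock(&reset_mutex);
	return ret;
}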
@@ -3922,8 +4372,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, out: if (!retval) { + mrioc->diagsave_timeout = 0; mrioc->reset_in_progress = 0; - scsi_unblock_requests(mrioc->shost); mpi3mr_rfresh_tgtdevs(mrioc); mrioc->ts_update_counter = 0; spin_lock_irqsave(&mrioc->watchdog_lock, flags); @@ -3939,8 +4389,9 @@ out: mrioc->reset_in_progress = 0; retval = -1; } - + mrioc->prev_reset_result = retval; mutex_unlock(&mrioc->reset_mutex); - ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED")); + ioc_info(mrioc, "controller reset is %s\n", + ((retval == 0) ? "successful" : "failed")); return retval; } diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index fe10f257b5a4..284117da9086 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -34,6 +34,9 @@ MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info (default=0)"); /* Forward declarations*/ +static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, + struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx); + /** * mpi3mr_host_tag_for_scmd - Get host tag for a scmd * @mrioc: Adapter instance reference @@ -418,6 +421,74 @@ out: } /** + * mpi3mr_count_dev_pending - Count commands pending for a lun + * @rq: Block request + * @data: SCSI device reference + * @reserved: Unused + * + * This is an iterator function called for each SCSI command in + * a host and if the command is pending in the LLD for the + * specific device(lun) then device specific pending I/O counter + * is updated in the device structure. + * + * Return: true always. + */ + +static bool mpi3mr_count_dev_pending(struct request *rq, + void *data, bool reserved) +{ + struct scsi_device *sdev = (struct scsi_device *)data; + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct scmd_priv *priv; + + if (scmd) { + priv = scsi_cmd_priv(scmd); + if (!priv->in_lld_scope) + goto out; + if (scmd->device == sdev) + sdev_priv_data->pend_count++; + } + +out: + return true; +} + +/** + * mpi3mr_count_tgt_pending - Count commands pending for target + * @rq: Block request + * @data: SCSI target reference + * @reserved: Unused + * + * This is an iterator function called for each SCSI command in + * a host and if the command is pending in the LLD for the + * specific target then target specific pending I/O counter is + * updated in the target structure. + * + * Return: true always. 
+ */ + +static bool mpi3mr_count_tgt_pending(struct request *rq, + void *data, bool reserved) +{ + struct scsi_target *starget = (struct scsi_target *)data; + struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct scmd_priv *priv; + + if (scmd) { + priv = scsi_cmd_priv(scmd); + if (!priv->in_lld_scope) + goto out; + if (scmd->device && (scsi_target(scmd->device) == starget)) + stgt_priv_data->pend_count++; + } + +out: + return true; +} + +/** * mpi3mr_flush_host_io - Flush host I/Os * @mrioc: Adapter instance reference * @@ -742,11 +813,18 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data) switch (tgtdev->dev_type) { case MPI3_DEVICE_DEVFORM_PCIE: /*The block layer hw sector size = 512*/ - blk_queue_max_hw_sectors(sdev->request_queue, - tgtdev->dev_spec.pcie_inf.mdts / 512); - blk_queue_virt_boundary(sdev->request_queue, - ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1)); - + if ((tgtdev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { + blk_queue_max_hw_sectors(sdev->request_queue, + tgtdev->dev_spec.pcie_inf.mdts / 512); + if (tgtdev->dev_spec.pcie_inf.pgsz == 0) + blk_queue_virt_boundary(sdev->request_queue, + ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); + else + blk_queue_virt_boundary(sdev->request_queue, + ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1)); + } break; default: break; @@ -824,6 +902,17 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, scsi_tgt_priv_data->dev_type = tgtdev->dev_type; } + switch (dev_pg0->access_status) { + case MPI3_DEVICE0_ASTATUS_NO_ERRORS: + case MPI3_DEVICE0_ASTATUS_PREPARE: + case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION: + case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY: + break; + default: + tgtdev->is_hidden = 1; + break; + } + switch (tgtdev->dev_type) { case MPI3_DEVICE_DEVFORM_SAS_SATA: { @@ -848,6 +937,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, &dev_pg0->device_specific.pcie_format; u16 dev_info = le16_to_cpu(pcieinf->device_info); + tgtdev->dev_spec.pcie_inf.dev_info = dev_info; tgtdev->dev_spec.pcie_inf.capb = le32_to_cpu(pcieinf->capabilities); tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS; @@ -858,14 +948,18 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, le32_to_cpu(pcieinf->maximum_data_transfer_size); tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size; tgtdev->dev_spec.pcie_inf.reset_to = - pcieinf->controller_reset_to; + max_t(u8, pcieinf->controller_reset_to, + MPI3MR_INTADMCMD_TIMEOUT); tgtdev->dev_spec.pcie_inf.abort_to = - pcieinf->nv_me_abort_to; + max_t(u8, pcieinf->nvme_abort_to, + MPI3MR_INTADMCMD_TIMEOUT); } if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024)) tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024); - if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) + if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && + ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE)) tgtdev->is_hidden = 1; if (!mrioc->shost) break; @@ -1313,7 +1407,7 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, evt_ack: if (fwevt->send_ack) - mpi3mr_send_event_ack(mrioc, fwevt->event_id, + mpi3mr_process_event_ack(mrioc, fwevt->event_id, fwevt->evt_ctx); out: /* Put fwevt reference count to neutralize kref_init increment */ @@ -1377,24 +1471,33 @@ static int 
mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, } /** - * mpi3mr_flush_delayed_rmhs_list - Flush pending commands + * mpi3mr_flush_delayed_cmd_lists - Flush pending commands * @mrioc: Adapter instance reference * - * Flush pending commands in the delayed removal handshake list - * due to a controller reset or driver removal as a cleanup. + * Flush pending commands in the delayed lists due to a + * controller reset or driver removal as a cleanup. * * Return: Nothing */ -void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc) +void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) { struct delayed_dev_rmhs_node *_rmhs_node; + struct delayed_evt_ack_node *_evtack_node; + dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); while (!list_empty(&mrioc->delayed_rmhs_list)) { _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, struct delayed_dev_rmhs_node, list); list_del(&_rmhs_node->list); kfree(_rmhs_node); } + dprint_reset(mrioc, "flushing delayed event ack commands\n"); + while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { + _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, + struct delayed_evt_ack_node, list); + list_del(&_evtack_node->list); + kfree(_evtack_node); + } } /** @@ -1611,6 +1714,141 @@ out_failed: } /** + * mpi3mr_complete_evt_ack - event ack request completion + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * This is the completion handler for non-blocking event + * acknowledgment sent to the firmware and this will issue any + * pending event acknowledgment request. + * + * Return: Nothing + */ +static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; + struct delayed_evt_ack_node *delayed_evtack = NULL; + + if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { + dprint_event_th(mrioc, + "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", + (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + drv_cmd->ioc_loginfo); + } + + if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { + delayed_evtack = + list_entry(mrioc->delayed_evtack_cmds_list.next, + struct delayed_evt_ack_node, list); + mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, + delayed_evtack->event_ctx); + list_del(&delayed_evtack->list); + kfree(delayed_evtack); + return; + } + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); +} + +/** + * mpi3mr_send_event_ack - Issue event acknowledgment request + * @mrioc: Adapter instance reference + * @event: MPI3 event id + * @cmdparam: Internal command tracker + * @event_ctx: event context + * + * Issues an event acknowledgment request to the firmware if there + * is a free command to send the event ack; otherwise it is added to + * a pend list so that it will be processed on completion of a prior + * event acknowledgment.
+ * + * Return: Nothing + */ +static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, + struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) +{ + struct mpi3_event_ack_request evtack_req; + int retval = 0; + u8 retrycount = 5; + u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; + struct mpi3mr_drv_cmd *drv_cmd = cmdparam; + struct delayed_evt_ack_node *delayed_evtack = NULL; + + if (drv_cmd) { + dprint_event_th(mrioc, + "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", + event, event_ctx); + goto issue_cmd; + } + dprint_event_th(mrioc, + "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", + event, event_ctx); + do { + cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, + MPI3MR_NUM_EVTACKCMD); + if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { + if (!test_and_set_bit(cmd_idx, + mrioc->evtack_cmds_bitmap)) + break; + cmd_idx = MPI3MR_NUM_EVTACKCMD; + } + } while (retrycount--); + + if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { + delayed_evtack = kzalloc(sizeof(*delayed_evtack), + GFP_ATOMIC); + if (!delayed_evtack) + return; + INIT_LIST_HEAD(&delayed_evtack->list); + delayed_evtack->event = event; + delayed_evtack->event_ctx = event_ctx; + list_add_tail(&delayed_evtack->list, + &mrioc->delayed_evtack_cmds_list); + dprint_event_th(mrioc, + "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", + event, event_ctx); + return; + } + drv_cmd = &mrioc->evtack_cmds[cmd_idx]; + +issue_cmd: + cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; + + memset(&evtack_req, 0, sizeof(evtack_req)); + if (drv_cmd->state & MPI3MR_CMD_PENDING) { + dprint_event_th(mrioc, + "sending event ack failed due to command in use\n"); + goto out; + } + drv_cmd->state = MPI3MR_CMD_PENDING; + drv_cmd->is_waiting = 0; + drv_cmd->callback = mpi3mr_complete_evt_ack; + evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); + evtack_req.function = MPI3_FUNCTION_EVENT_ACK; + evtack_req.event = event; + evtack_req.event_context = cpu_to_le32(event_ctx); + retval = mpi3mr_admin_request_post(mrioc, &evtack_req, + sizeof(evtack_req), 1); + if (retval) { + dprint_event_th(mrioc, + "posting event ack request failed\n"); + goto out_failed; + } + + dprint_event_th(mrioc, + "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", + event, event_ctx); +out: + return; +out_failed: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); +} + +/** + * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data @@ -1819,6 +2057,40 @@ out: } /** + * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Blocks and unblocks host level I/O based on the reason code. + * + * Return: Nothing + */ +static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_prepare_for_reset *evtdata = + (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; + + if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { + dprint_event_th(mrioc, + "prepare for reset event top half with rc=start\n"); + if (mrioc->prepare_for_reset) + return; + mrioc->prepare_for_reset = 1; + mrioc->prepare_for_reset_timeout_counter = 0; + } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { + dprint_event_th(mrioc, + "prepare for reset event top half with rc=abort\n"); +
mrioc->prepare_for_reset = 0; + mrioc->prepare_for_reset_timeout_counter = 0; + } + if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) + == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) + mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, + le32_to_cpu(event_reply->event_context)); +} + +/** * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf * @mrioc: Adapter instance reference * @event_reply: event data @@ -1848,6 +2120,66 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, } /** + * mpi3mr_tempthreshold_evt_th - Temp threshold event tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Displays temperature threshold event details and fault code + * if any is hit due to temperature exceeding threshold. + * + * Return: Nothing + */ +static void mpi3mr_tempthreshold_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_temp_threshold *evtdata = + (struct mpi3_event_data_temp_threshold *)event_reply->event_data; + + ioc_err(mrioc, "Temperature threshold levels %s%s%s exceeded for sensor: %d !!! Current temperature in Celsius: %d\n", + (le16_to_cpu(evtdata->status) & 0x1) ? "Warning " : " ", + (le16_to_cpu(evtdata->status) & 0x2) ? "Critical " : " ", + (le16_to_cpu(evtdata->status) & 0x4) ? "Fatal " : " ", evtdata->sensor_num, + le16_to_cpu(evtdata->current_temperature)); + mpi3mr_print_fault_info(mrioc); +} + +/** + * mpi3mr_cablemgmt_evt_th - Cable management event tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Displays cable management event details. + * + * Return: Nothing + */ +static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_cable_management *evtdata = + (struct mpi3_event_data_cable_management *)event_reply->event_data; + + switch (evtdata->status) { + case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: + { + ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" + "Devices connected to this cable are not detected.\n" + "This cable requires %d mW of power.\n", + evtdata->receptacle_id, + le32_to_cpu(evtdata->active_cable_power_requirement)); + break; + } + case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: + { + ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", + evtdata->receptacle_id); + break; + } + default: + break; + } +} + +/** * mpi3mr_os_handle_events - Firmware event handler * @mrioc: Adapter instance reference * @event_reply: event data @@ -1905,6 +2237,12 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, mpi3mr_pcietopochg_evt_th(mrioc, event_reply); break; } + case MPI3_EVENT_PREPARE_FOR_RESET: + { + mpi3mr_preparereset_evt_th(mrioc, event_reply); + ack_req = 0; + break; + } case MPI3_EVENT_DEVICE_INFO_CHANGED: { process_evt_bh = 1; @@ -1915,9 +2253,18 @@ mpi3mr_energypackchg_evt_th(mrioc, event_reply); break; } + case MPI3_EVENT_TEMP_THRESHOLD: + { + mpi3mr_tempthreshold_evt_th(mrioc, event_reply); + break; + } + case MPI3_EVENT_CABLE_MGMT: + { + mpi3mr_cablemgmt_evt_th(mrioc, event_reply); + break; + } case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: case MPI3_EVENT_SAS_DISCOVERY: - case MPI3_EVENT_CABLE_MGMT: case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: case MPI3_EVENT_PCIE_ENUMERATION: @@ -2520,49 +2867,63 @@ static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, }
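The event-ack machinery added above reserves one slot out of a fixed pool of command trackers via a bitmap (find_first_zero_bit() plus test_and_set_bit()); when the pool is exhausted, the acknowledgment is parked on delayed_evtack_cmds_list and replayed from the completion handler using the slot that just freed up. A self-contained sketch of that reserve-or-park pattern, with illustrative names and plain bit operations in place of the kernel bitmap and list helpers:

#include <stdlib.h>

#define NUM_ACK_SLOTS 4

struct delayed_ack {
	unsigned char event;
	unsigned int event_ctx;
	struct delayed_ack *next;
};

static unsigned long ack_bitmap;         /* one bit per tracker slot */
static struct delayed_ack *delayed_head; /* parked acknowledgments */

/* Placeholder: build and post the firmware request for this slot. */
static void post_ack(int slot, unsigned char event, unsigned int ctx) { }

void send_event_ack(unsigned char event, unsigned int ctx)
{
	int i;

	for (i = 0; i < NUM_ACK_SLOTS; i++) {
		if (!(ack_bitmap & (1UL << i))) {
			ack_bitmap |= 1UL << i; /* reserve the slot */
			post_ack(i, event, ctx);
			return;
		}
	}
	/* No free tracker: park the ack until a prior one completes. */
	struct delayed_ack *d = calloc(1, sizeof(*d));
	if (!d)
		return;
	d->event = event;
	d->event_ctx = ctx;
	d->next = delayed_head;
	delayed_head = d;
}

void complete_event_ack(int slot)
{
	struct delayed_ack *d = delayed_head;

	if (d) {
		/* Reuse the completing slot for a parked ack. */
		delayed_head = d->next;
		post_ack(slot, d->event, d->event_ctx);
		free(d);
		return;
	}
	ack_bitmap &= ~(1UL << slot); /* release the slot */
}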
/** - * mpi3mr_print_response_code - print TM response as a string - * @mrioc: Adapter instance reference + * mpi3mr_tm_response_name - get TM response as a string * @resp_code: TM response code * - * Print TM response code as a readable string. + * Convert known task management response code as a readable + * string. * - * Return: Nothing. + * Return: response code string. */ -static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code) +static const char *mpi3mr_tm_response_name(u8 resp_code) { char *desc; switch (resp_code) { - case MPI3MR_RSP_TM_COMPLETE: + case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: desc = "task management request completed"; break; - case MPI3MR_RSP_INVALID_FRAME: + case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: desc = "invalid frame"; break; - case MPI3MR_RSP_TM_NOT_SUPPORTED: + case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: desc = "task management request not supported"; break; - case MPI3MR_RSP_TM_FAILED: + case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: desc = "task management request failed"; break; - case MPI3MR_RSP_TM_SUCCEEDED: + case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: desc = "task management request succeeded"; break; - case MPI3MR_RSP_TM_INVALID_LUN: - desc = "invalid lun"; + case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: + desc = "invalid LUN"; break; - case MPI3MR_RSP_TM_OVERLAPPED_TAG: + case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: desc = "overlapped tag attempted"; break; - case MPI3MR_RSP_IO_QUEUED_ON_IOC: + case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: desc = "task queued, however not sent to target"; break; + case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: + desc = "task management request denied by NVMe device"; + break; default: desc = "unknown"; break; } - ioc_info(mrioc, "%s :response_code(0x%01x): %s\n", __func__, - resp_code, desc); + + return desc; +} + +inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) +{ + int i; + int num_of_reply_queues = + mrioc->num_op_reply_q + mrioc->op_reply_q_offset; + + for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) + mpi3mr_process_op_reply_q(mrioc, + mrioc->intr_info[i].op_reply_q); } /** @@ -2572,9 +2933,10 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code) * @handle: Device handle * @lun: lun ID * @htag: Host tag of the TM request + * @timeout: TM timeout value * @drv_cmd: Internal command tracker * @resp_code: Response code place holder - * @cmd_priv: SCSI command private data + * @scmd: SCSI command * * Issues a Task Management Request to the controller for a * specified target, lun and command and wait for its completion @@ -2586,14 +2948,16 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code) static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, u16 handle, uint lun, u16 htag, ulong timeout, struct mpi3mr_drv_cmd *drv_cmd, - u8 *resp_code, struct scmd_priv *cmd_priv) + u8 *resp_code, struct scsi_cmnd *scmd) { struct mpi3_scsi_task_mgmt_request tm_req; struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; int retval = 0; struct mpi3mr_tgt_dev *tgtdev = NULL; struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; - struct op_req_qinfo *op_req_q = NULL; + struct scmd_priv *cmd_priv = NULL; + struct scsi_device *sdev = NULL; + struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", __func__, tm_type, handle); @@ -2630,16 +2994,21 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 
tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); - if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) { - scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) - tgtdev->starget->hostdata; - atomic_inc(&scsi_tgt_priv_data->block_io); - } - if (cmd_priv) { - op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx]; - tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag); - tm_req.task_request_queue_id = cpu_to_le16(op_req_q->qid); + + if (scmd) { + sdev = scmd->device; + sdev_priv_data = sdev->hostdata; + scsi_tgt_priv_data = ((sdev_priv_data) ? + sdev_priv_data->tgt_priv_data : NULL); + } else { + if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) + scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; } + + if (scsi_tgt_priv_data) + atomic_inc(&scsi_tgt_priv_data->block_io); + if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) timeout = tgtdev->dev_spec.pcie_inf.abort_to; @@ -2656,39 +3025,49 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { - ioc_err(mrioc, "%s :Issue TM: command timed out\n", __func__); drv_cmd->is_waiting = 0; retval = -1; - mpi3mr_soft_reset_handler(mrioc, - MPI3MR_RESET_FROM_TM_TIMEOUT, 1); + if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { + dprint_tm(mrioc, + "task management request timed out after %ld seconds\n", + timeout); + if (mrioc->logging_level & MPI3_DEBUG_TM) + dprint_dump_req(&tm_req, sizeof(tm_req)/4); + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_TM_TIMEOUT, 1); + } goto out_unlock; } - if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) - tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; - - if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { - ioc_err(mrioc, - "%s :Issue TM: handle(0x%04x) Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", - __func__, handle, drv_cmd->ioc_status, - drv_cmd->ioc_loginfo); + if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { + dprint_tm(mrioc, "invalid task management reply message\n"); retval = -1; goto out_unlock; } - if (!tm_reply) { - ioc_err(mrioc, "%s :Issue TM: No TM Reply message\n", __func__); + tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; + + switch (drv_cmd->ioc_status) { + case MPI3_IOCSTATUS_SUCCESS: + *resp_code = le32_to_cpu(tm_reply->response_data) & + MPI3MR_RI_MASK_RESPCODE; + break; + case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: + *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; + break; + default: + dprint_tm(mrioc, + "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", + handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); retval = -1; goto out_unlock; } - *resp_code = le32_to_cpu(tm_reply->response_data) & - MPI3MR_RI_MASK_RESPCODE; switch (*resp_code) { - case MPI3MR_RSP_TM_SUCCEEDED: - case MPI3MR_RSP_TM_COMPLETE: + case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: + case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: break; - case MPI3MR_RSP_IO_QUEUED_ON_IOC: + case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) retval = -1; break; @@ -2697,14 +3076,37 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, break; } - ioc_info(mrioc, - "%s :Issue TM: Completed TM type (0x%x) handle(0x%04x) ", - __func__, tm_type, handle); - ioc_info(mrioc, - "with ioc_status(0x%04x), loginfo(0x%08x), 
term_count(0x%08x)\n", - drv_cmd->ioc_status, drv_cmd->ioc_loginfo, - le32_to_cpu(tm_reply->termination_count)); - mpi3mr_print_response_code(mrioc, *resp_code); + dprint_tm(mrioc, + "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", + tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, + le32_to_cpu(tm_reply->termination_count), + mpi3mr_tm_response_name(*resp_code), *resp_code); + + if (!retval) { + mpi3mr_ioc_disable_intr(mrioc); + mpi3mr_poll_pend_io_completions(mrioc); + mpi3mr_ioc_enable_intr(mrioc); + mpi3mr_poll_pend_io_completions(mrioc); + } + switch (tm_type) { + case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (!scsi_tgt_priv_data) + break; + scsi_tgt_priv_data->pend_count = 0; + blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, + mpi3mr_count_tgt_pending, + (void *)scsi_tgt_priv_data->starget); + break; + case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (!sdev_priv_data) + break; + sdev_priv_data->pend_count = 0; + blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, + mpi3mr_count_dev_pending, (void *)sdev); + break; + default: + break; + } out_unlock: drv_cmd->state = MPI3MR_CMD_NOTUSED; @@ -2713,14 +3115,6 @@ out_unlock: atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); if (tgtdev) mpi3mr_tgtdev_put(tgtdev); - if (!retval) { - /* - * Flush all IRQ handlers by calling synchronize_irq(). - * mpi3mr_ioc_disable_intr() takes care of it. - */ - mpi3mr_ioc_disable_intr(mrioc); - mpi3mr_ioc_enable_intr(mrioc); - } out: return retval; } @@ -2769,17 +3163,49 @@ static int mpi3mr_bios_param(struct scsi_device *sdev, * mpi3mr_map_queues - Map queues callback handler * @shost: SCSI host reference * - * Call the blk_mq_pci_map_queues with from which operational - * queue the mapping has to be done + * Maps default and poll queues. * - * Return: return of blk_mq_pci_map_queues + * Return: return zero. 
*/ static int mpi3mr_map_queues(struct Scsi_Host *shost) { struct mpi3mr_ioc *mrioc = shost_priv(shost); + int i, qoff, offset; + struct blk_mq_queue_map *map = NULL; + + offset = mrioc->op_reply_q_offset; + + for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) { + map = &shost->tag_set.map[i]; + + map->nr_queues = 0; + + if (i == HCTX_TYPE_DEFAULT) + map->nr_queues = mrioc->default_qcount; + else if (i == HCTX_TYPE_POLL) + map->nr_queues = mrioc->active_poll_qcount; + + if (!map->nr_queues) { + BUG_ON(i == HCTX_TYPE_DEFAULT); + continue; + } + + /* + * The poll queue(s) don't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, mrioc->pdev, offset); + else + blk_mq_map_queues(map); + + qoff += map->nr_queues; + offset += map->nr_queues; + } + + return 0; - return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - mrioc->pdev, mrioc->op_reply_q_offset); } /** @@ -2938,6 +3364,13 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) stgt_priv_data = sdev_priv_data->tgt_priv_data; dev_handle = stgt_priv_data->dev_handle; + if (stgt_priv_data->dev_removed) { + sdev_printk(KERN_INFO, scmd->device, + "%s: target(handle = 0x%04x) is removed, target reset is not issued\n", + mrioc->name, dev_handle); + retval = FAILED; + goto out; + } sdev_printk(KERN_INFO, scmd->device, "Target Reset is issued to handle(0x%04x)\n", dev_handle); @@ -2945,15 +3378,22 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, - MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL); + MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); if (ret) goto out; + if (stgt_priv_data->pend_count) { + sdev_printk(KERN_INFO, scmd->device, + "%s: target has %d pending commands, target reset failed\n", + mrioc->name, stgt_priv_data->pend_count); + goto out; + } + retval = SUCCESS; out: sdev_printk(KERN_INFO, scmd->device, - "Target reset is %s for scmd(%p)\n", + "%s: target reset is %s for scmd(%p)\n", mrioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return retval; @@ -2992,21 +3432,34 @@ static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) stgt_priv_data = sdev_priv_data->tgt_priv_data; dev_handle = stgt_priv_data->dev_handle; + if (stgt_priv_data->dev_removed) { + sdev_printk(KERN_INFO, scmd->device, + "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", + mrioc->name, dev_handle); + retval = FAILED; + goto out; + } sdev_printk(KERN_INFO, scmd->device, "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, - MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL); + MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); if (ret) goto out; + if (sdev_priv_data->pend_count) { + sdev_printk(KERN_INFO, scmd->device, + "%s: device has %d pending commands, device(LUN) reset failed\n", + mrioc->name, sdev_priv_data->pend_count); + goto out; + } retval = SUCCESS; out: sdev_printk(KERN_INFO, scmd->device, - "Device(lun) reset is %s for scmd(%p)\n", + "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, ((retval == SUCCESS) ?
"SUCCESS" : "FAILED"), scmd); return retval; @@ -3049,32 +3502,42 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost, { struct mpi3mr_ioc *mrioc = shost_priv(shost); u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; + u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); - if (time >= (pe_timeout * HZ)) { + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + ioc_err(mrioc, "port enable failed due to fault or reset\n"); + mpi3mr_print_fault_info(mrioc); + mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; + mrioc->scan_started = 0; mrioc->init_cmds.is_waiting = 0; mrioc->init_cmds.callback = NULL; mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; - ioc_err(mrioc, "%s :port enable request timed out\n", __func__); - mrioc->is_driver_loading = 0; - mpi3mr_soft_reset_handler(mrioc, - MPI3MR_RESET_FROM_PE_TIMEOUT, 1); } - if (mrioc->scan_failed) { - ioc_err(mrioc, - "%s :port enable failed with (ioc_status=0x%08x)\n", - __func__, mrioc->scan_failed); - mrioc->is_driver_loading = 0; - mrioc->stop_drv_processing = 1; - return 1; + if (time >= (pe_timeout * HZ)) { + ioc_err(mrioc, "port enable failed due to time out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_PE_TIMEOUT); + mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; + mrioc->scan_started = 0; + mrioc->init_cmds.is_waiting = 0; + mrioc->init_cmds.callback = NULL; + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; } if (mrioc->scan_started) return 0; - ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__); + + if (mrioc->scan_failed) { + ioc_err(mrioc, + "port enable failed with status=0x%04x\n", + mrioc->scan_failed); + } else + ioc_info(mrioc, "port enable is successfully completed\n"); + mpi3mr_start_watchdog(mrioc); mrioc->is_driver_loading = 0; - return 1; } @@ -3189,10 +3652,18 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev) switch (tgt_dev->dev_type) { case MPI3_DEVICE_DEVFORM_PCIE: /*The block layer hw sector size = 512*/ - blk_queue_max_hw_sectors(sdev->request_queue, - tgt_dev->dev_spec.pcie_inf.mdts / 512); - blk_queue_virt_boundary(sdev->request_queue, - ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); + if ((tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { + blk_queue_max_hw_sectors(sdev->request_queue, + tgt_dev->dev_spec.pcie_inf.mdts / 512); + if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) + blk_queue_virt_boundary(sdev->request_queue, + ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); + else + blk_queue_virt_boundary(sdev->request_queue, + ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); + } break; default: break; @@ -3312,9 +3783,22 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, struct scsi_cmnd *scmd) { unsigned char *buf; - u16 param_len, desc_len; - - param_len = get_unaligned_be16(scmd->cmnd + 7); + u16 param_len, desc_len, trunc_param_len; + + trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); + + if (mrioc->pdev->revision) { + if ((param_len > 24) && ((param_len - 8) & 0xF)) { + trunc_param_len -= (param_len - 8) & 0xF; + dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); + dprint_scsi_err(mrioc, + "truncating param_len from (%d) to (%d)\n", + param_len, trunc_param_len); + put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); + dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); + } + return false; + } if (!param_len) { ioc_warn(mrioc, @@ -3374,12 +3858,12 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, } 
if (param_len > (desc_len + 8)) { + trunc_param_len = desc_len + 8; scsi_print_command(scmd); - ioc_warn(mrioc, - "%s: Truncating param_len(%d) to desc_len+8(%d)\n", - __func__, param_len, (desc_len + 8)); - param_len = desc_len + 8; - put_unaligned_be16(param_len, scmd->cmnd + 7); + dprint_scsi_err(mrioc, + "truncating param_len(%d) to desc_len+8(%d)\n", + param_len, trunc_param_len); + put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); scsi_print_command(scmd); } @@ -3434,6 +3918,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, u32 scsiio_flags = 0; struct request *rq = scsi_cmd_to_rq(scmd); int iprio_class; + u8 is_pcie_dev = 0; sdev_priv_data = scmd->device->hostdata; if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { @@ -3478,8 +3963,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, goto out; } - if ((scmd->cmnd[0] == UNMAP) && - (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) && + if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) + is_pcie_dev = 1; + if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && + (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && mpi3mr_check_return_unmap(mrioc, scmd)) goto out; @@ -3559,6 +4046,7 @@ static struct scsi_host_template mpi3mr_driver_template = { .eh_host_reset_handler = mpi3mr_eh_host_reset, .bios_param = mpi3mr_bios_param, .map_queues = mpi3mr_map_queues, + .mq_poll = mpi3mr_blk_mq_poll, .no_write_same = 1, .can_queue = 1, .this_id = -1, @@ -3567,6 +4055,7 @@ static struct scsi_host_template mpi3mr_driver_template = { */ .max_sectors = 2048, .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, + .max_segment_size = 0xffffffff, .track_queue_depth = 1, .cmd_size = sizeof(struct scmd_priv), }; @@ -3714,6 +4203,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&mrioc->fwevt_list); INIT_LIST_HEAD(&mrioc->tgtdev_list); INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); + INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); mutex_init(&mrioc->reset_mutex); mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); @@ -3772,21 +4262,29 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc_err(mrioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); retval = -ENODEV; - goto out_fwevtthread_failed; + goto fwevtthread_failed; } mrioc->is_driver_loading = 1; - if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) { - ioc_err(mrioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); + mrioc->cpu_count = num_online_cpus(); + if (mpi3mr_setup_resources(mrioc)) { + ioc_err(mrioc, "setup resources failed\n"); retval = -ENODEV; - goto out_iocinit_failed; + goto resource_alloc_failed; + } + if (mpi3mr_init_ioc(mrioc)) { + ioc_err(mrioc, "initializing IOC failed\n"); + retval = -ENODEV; + goto init_ioc_failed; } shost->nr_hw_queues = mrioc->num_op_reply_q; + if (mrioc->active_poll_qcount) + shost->nr_maps = 3; + shost->can_queue = mrioc->max_host_ios; shost->sg_tablesize = MPI3MR_SG_DEPTH; - shost->max_id = mrioc->facts.max_perids; + shost->max_id = mrioc->facts.max_perids + 1; retval = scsi_add_host(shost, &pdev->dev); if (retval) { @@ -3799,10 +4297,14 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) return retval; addhost_failed: - mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP); -out_iocinit_failed: + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_ioc(mrioc); +init_ioc_failed: + mpi3mr_free_mem(mrioc); + mpi3mr_cleanup_resources(mrioc); +resource_alloc_failed: destroy_workqueue(mrioc->fwevt_worker_thread); -out_fwevtthread_failed: +fwevtthread_failed: 
spin_lock(&mrioc_list_lock); list_del(&mrioc->list); spin_unlock(&mrioc_list_lock); @@ -3815,6 +4317,7 @@ shost_failed: * mpi3mr_remove - PCI remove callback * @pdev: PCI device instance * + * Cleanup the IOC by issuing MUR and shutdown notification. * Free up all memory and resources associated with the * controllerand target devices, unregister the shost. * @@ -3851,7 +4354,10 @@ static void mpi3mr_remove(struct pci_dev *pdev) mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); mpi3mr_tgtdev_put(tgtdev); } - mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP); + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_ioc(mrioc); + mpi3mr_free_mem(mrioc); + mpi3mr_cleanup_resources(mrioc); spin_lock(&mrioc_list_lock); list_del(&mrioc->list); @@ -3891,7 +4397,10 @@ static void mpi3mr_shutdown(struct pci_dev *pdev) spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); if (wq) destroy_workqueue(wq); - mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP); + + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_ioc(mrioc); + mpi3mr_cleanup_resources(mrioc); } #ifdef CONFIG_PM @@ -3921,7 +4430,7 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state) mpi3mr_cleanup_fwevt_list(mrioc); scsi_block_requests(shost); mpi3mr_stop_watchdog(mrioc); - mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND); + mpi3mr_cleanup_ioc(mrioc); device_state = pci_choose_state(pdev, state); ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n", @@ -3970,7 +4479,11 @@ static int mpi3mr_resume(struct pci_dev *pdev) mrioc->stop_drv_processing = 0; mpi3mr_memset_buffers(mrioc); - mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME); + r = mpi3mr_reinit_ioc(mrioc, 1); + if (r) { + ioc_err(mrioc, "resuming controller failed[%d]\n", r); + return r; + } scsi_unblock_requests(shost); mpi3mr_start_watchdog(mrioc); @@ -3980,8 +4493,8 @@ static int mpi3mr_resume(struct pci_dev *pdev) static const struct pci_device_id mpi3mr_pci_id_table[] = { { - PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5, - PCI_ANY_ID, PCI_ANY_ID) + PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, + MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) }, { 0 } }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 81dab9b82f79..511726f92d9a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3086,6 +3086,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) { + unsigned int irq; struct adapter_reply_queue *reply_q, *next; if (list_empty(&ioc->reply_queue_list)) @@ -3098,9 +3099,10 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) continue; } - if (ioc->smp_affinity_enable) - irq_set_affinity_hint(pci_irq_vector(ioc->pdev, - reply_q->msix_index), NULL); + if (ioc->smp_affinity_enable) { + irq = pci_irq_vector(ioc->pdev, reply_q->msix_index); + irq_update_affinity_hint(irq, NULL); + } free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), reply_q); kfree(reply_q); @@ -3167,18 +3169,15 @@ out: * @ioc: per adapter object * * The enduser would need to set the affinity via /proc/irq/#/smp_affinity - * - * It would nice if we could call irq_set_affinity, however it is not - * an exported symbol */ static void _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) { - unsigned int cpu, nr_cpus, nr_msix, index = 0; + unsigned int cpu, nr_cpus, nr_msix, index = 0, irq; struct adapter_reply_queue *reply_q; - int local_numa_node; int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + const struct cpumask *mask; if 
(!_base_is_controller_msix_enabled(ioc)) return; @@ -3201,11 +3200,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) * corresponding to high iops queues. */ if (ioc->high_iops_queues) { - local_numa_node = dev_to_node(&ioc->pdev->dev); + mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev)); for (index = 0; index < ioc->high_iops_queues; index++) { - irq_set_affinity_hint(pci_irq_vector(ioc->pdev, - index), cpumask_of_node(local_numa_node)); + irq = pci_irq_vector(ioc->pdev, index); + irq_set_affinity_and_hint(irq, mask); } } diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 31d1ea5a5dd2..1e52bc7febfa 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -67,8 +67,10 @@ static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) while (sha->sas_port[i]) { if (sha->sas_port[i] == dev->port) { + spin_lock(&sha->sas_port[i]->phy_list_lock); phy = container_of(sha->sas_port[i]->phy_list.next, struct asd_sas_phy, port_phy_el); + spin_unlock(&sha->sas_port[i]->phy_list_lock); j = 0; while (sha->sas_phy[j]) { if (sha->sas_phy[j] == phy) @@ -96,6 +98,8 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) while (sha->sas_port[i]) { if (sha->sas_port[i] == dev->port) { struct asd_sas_phy *phy; + + spin_lock(&sha->sas_port[i]->phy_list_lock); list_for_each_entry(phy, &sha->sas_port[i]->phy_list, port_phy_el) { j = 0; @@ -109,6 +113,7 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) num++; n++; } + spin_unlock(&sha->sas_port[i]->phy_list_lock); break; } i++; diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index 2a4506a5083e..71585528e8db 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -1674,7 +1674,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev) if (sdev->id > MYRB_MAX_TARGETS) return -ENXIO; - pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA); + pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL); if (!pdev_info) return -ENOMEM; diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 6ea323e9a2e3..253ceca54a84 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -538,13 +538,11 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, cs->fwstat_buf = NULL; goto out_free; } - cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), - GFP_KERNEL | GFP_DMA); + cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL); if (!cs->ctlr_info) goto out_free; - cs->event_buf = kzalloc(sizeof(struct myrs_event), - GFP_KERNEL | GFP_DMA); + cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL); if (!cs->event_buf) goto out_free; @@ -1805,7 +1803,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev) ldev_num = myrs_translate_ldev(cs, sdev); - ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA); + ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL); if (!ldev_info) return -ENOMEM; @@ -1867,7 +1865,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev) } else { struct myrs_pdev_info *pdev_info; - pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA); + pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL); if (!pdev_info) return -ENOMEM; diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile index 02b7338999cc..bbb51b7312f1 100644 --- a/drivers/scsi/pm8001/Makefile +++ b/drivers/scsi/pm8001/Makefile @@ -6,9 +6,12 @@ obj-$(CONFIG_SCSI_PM8001) += pm80xx.o + +CFLAGS_pm80xx_tracepoints.o := -I$(src) + pm80xx-y += pm8001_init.o \ pm8001_sas.o \ pm8001_ctl.o \ pm8001_hwi.o \ - pm80xx_hwi.o - + 
pm80xx_hwi.o \ + pm80xx_tracepoints.o diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 397eb9f6a1dd..41a63c9b719b 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -889,14 +889,6 @@ static ssize_t pm8001_show_update_fw(struct device *cdev, static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP, pm8001_show_update_fw, pm8001_store_update_fw); -/** - * ctl_mpi_state_show - controller MPI state check - * @cdev: pointer to embedded class device - * @buf: the buffer returned - * - * A sysfs 'read-only' shost attribute. - */ - static const char *const mpiStateText[] = { "MPI is not initialized", "MPI is successfully initialized", @@ -904,6 +896,14 @@ static const char *const mpiStateText[] = { "MPI initialization failed with error in [31:16]" }; +/** + * ctl_mpi_state_show - controller MPI state check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ static ssize_t ctl_mpi_state_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -920,11 +920,11 @@ static DEVICE_ATTR_RO(ctl_mpi_state); /** * ctl_hmi_error_show - controller MPI initialization fails * @cdev: pointer to embedded class device + * @attr: device attribute (unused) * @buf: the buffer returned * * A sysfs 'read-only' shost attribute. */ - static ssize_t ctl_hmi_error_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -941,11 +941,11 @@ static DEVICE_ATTR_RO(ctl_hmi_error); /** * ctl_raae_count_show - controller raae count check * @cdev: pointer to embedded class device + * @attr: device attribute (unused) * @buf: the buffer returned * * A sysfs 'read-only' shost attribute. */ - static ssize_t ctl_raae_count_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -962,11 +962,11 @@ static DEVICE_ATTR_RO(ctl_raae_count); /** * ctl_iop0_count_show - controller iop0 count check * @cdev: pointer to embedded class device + * @attr: device attribute (unused) * @buf: the buffer returned * * A sysfs 'read-only' shost attribute. */ - static ssize_t ctl_iop0_count_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -983,11 +983,11 @@ static DEVICE_ATTR_RO(ctl_iop0_count); /** * ctl_iop1_count_show - controller iop1 count check * @cdev: pointer to embedded class device + * @attr: device attribute (unused) * @buf: the buffer returned * * A sysfs 'read-only' shost attribute. */ - static ssize_t ctl_iop1_count_show(struct device *cdev, struct device_attribute *attr, char *buf) { diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 124cb69740c6..c814e5071712 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -42,6 +42,7 @@ #include "pm8001_hwi.h" #include "pm8001_chips.h" #include "pm8001_ctl.h" + #include "pm80xx_tracepoints.h" /** * read_main_config_table - read the configure table and save it. 
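The tracepoint plumbing wired into the pm8001 Makefile and sources above registers three events (the TRACE_EVENT definitions appear in pm80xx_tracepoints.h further below) under the pm80xx group in tracefs. A hypothetical userspace snippet that enables them, assuming tracefs is mounted at the standard /sys/kernel/tracing path:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a short string to a tracefs control file. */
static int tracefs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* TRACE_SYSTEM is "pm80xx", so all three events share one group. */
	if (tracefs_write("/sys/kernel/tracing/events/pm80xx/enable", "1"))
		perror("enable pm80xx events");
	if (tracefs_write("/sys/kernel/tracing/tracing_on", "1"))
		perror("tracing_on");
	/* Records can then be read from /sys/kernel/tracing/trace_pipe. */
	return 0;
}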
@@ -1324,8 +1325,14 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, unsigned long flags; int q_index = circularQ - pm8001_ha->inbnd_q_tbl; int rv; + u32 htag = le32_to_cpu(*(__le32 *)payload); + + trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index, + circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index)); + + if (WARN_ON(q_index >= pm8001_ha->max_q_num)) + return -EINVAL; - WARN_ON(q_index >= PM8001_MAX_INB_NUM); spin_lock_irqsave(&circularQ->iq_lock, flags); rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, &pMessage); @@ -2304,21 +2311,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) psataPayload = (struct sata_completion_resp *)(piomb + 4); status = le32_to_cpu(psataPayload->status); + param = le32_to_cpu(psataPayload->param); tag = le32_to_cpu(psataPayload->tag); if (!tag) { pm8001_dbg(pm8001_ha, FAIL, "tag null\n"); return; } + ccb = &pm8001_ha->ccb_info[tag]; - param = le32_to_cpu(psataPayload->param); - if (ccb) { - t = ccb->task; - pm8001_dev = ccb->device; - } else { - pm8001_dbg(pm8001_ha, FAIL, "ccb null\n"); - return; - } + t = ccb->task; + pm8001_dev = ccb->device; if (t) { if (t->dev && (t->dev->lldd_dev)) @@ -2335,10 +2338,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } ts = &t->task_status; - if (!ts) { - pm8001_dbg(pm8001_ha, FAIL, "ts null\n"); - return; - } if (status) pm8001_dbg(pm8001_ha, IOERR, @@ -2695,14 +2694,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 dev_id = le32_to_cpu(psataPayload->device_id); unsigned long flags; - ccb = &pm8001_ha->ccb_info[tag]; - - if (ccb) { - t = ccb->task; - pm8001_dev = ccb->device; - } else { - pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n"); - } if (event) pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index fbfeb0b046dd..d8a2121cb8d9 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -179,7 +179,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); flush_workqueue(pm8001_wq); - kfree(pm8001_ha->tags); + bitmap_free(pm8001_ha->tags); kfree(pm8001_ha); } @@ -1192,7 +1192,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost, can_queue = ccb_count - PM8001_RESERVE_SLOT; shost->can_queue = can_queue; - pm8001_ha->tags = kzalloc(ccb_count, GFP_KERNEL); + pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL); if (!pm8001_ha->tags) goto err_out; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 83e73009db5c..c9a16eef38c1 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -40,6 +40,7 @@ #include <linux/slab.h> #include "pm8001_sas.h" +#include "pm80xx_tracepoints.h" /** * pm8001_find_tag - from sas task to find out tag that belongs to this task @@ -527,6 +528,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags) void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) { + struct ata_queued_cmd *qc; + struct pm8001_device *pm8001_dev; + if (!ccb->task) return; if (!sas_protocol_ata(task->task_proto)) @@ -549,6 +553,18 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, /* do nothing */ break; } + + if (sas_protocol_ata(task->task_proto)) { + // For SCSI/ATA commands uldd_task points to ata_queued_cmd + qc = 
task->uldd_task; + pm8001_dev = ccb->device; + trace_pm80xx_request_complete(pm8001_ha->id, + pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS, + ccb_idx, 0 /* ctlr_opcode not known */, + qc ? qc->tf.command : 0, // ata opcode + pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1); + } + task->lldd_task = NULL; ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 2101fc5761c3..ad3c6da12715 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -42,6 +42,7 @@ #include "pm80xx_hwi.h" #include "pm8001_chips.h" #include "pm8001_ctl.h" +#include "pm80xx_tracepoints.h" #define SMP_DIRECT 1 #define SMP_INDIRECT 2 @@ -2400,21 +2401,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, psataPayload = (struct sata_completion_resp *)(piomb + 4); status = le32_to_cpu(psataPayload->status); + param = le32_to_cpu(psataPayload->param); tag = le32_to_cpu(psataPayload->tag); if (!tag) { pm8001_dbg(pm8001_ha, FAIL, "tag null\n"); return; } + ccb = &pm8001_ha->ccb_info[tag]; - param = le32_to_cpu(psataPayload->param); - if (ccb) { - t = ccb->task; - pm8001_dev = ccb->device; - } else { - pm8001_dbg(pm8001_ha, FAIL, "ccb null\n"); - return; - } + t = ccb->task; + pm8001_dev = ccb->device; if (t) { if (t->dev && (t->dev->lldd_dev)) @@ -2431,10 +2428,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, } ts = &t->task_status; - if (!ts) { - pm8001_dbg(pm8001_ha, FAIL, "ts null\n"); - return; - } if (status != IO_SUCCESS) { pm8001_dbg(pm8001_ha, FAIL, @@ -2830,15 +2823,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, u32 dev_id = le32_to_cpu(psataPayload->device_id); unsigned long flags; - ccb = &pm8001_ha->ccb_info[tag]; - - if (ccb) { - t = ccb->task; - pm8001_dev = ccb->device; - } else { - pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n"); - return; - } if (event) pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); @@ -2852,6 +2836,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, return; } + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (unlikely(!t || !t->lldd_task || !t->dev)) { pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); return; @@ -3522,7 +3510,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 status = le32_to_cpu(pPayload->status); u32 phy_id = - le32_to_cpu(pPayload->phyid); + le32_to_cpu(pPayload->phyid) & 0xFF; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; pm8001_dbg(pm8001_ha, INIT, @@ -4547,6 +4535,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, struct sas_task *task = ccb->task; struct domain_device *dev = task->dev; struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + struct ata_queued_cmd *qc = task->uldd_task; u32 tag = ccb->ccb_tag; int ret; u32 q_index, cpu_id; @@ -4766,6 +4755,11 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, } } } + trace_pm80xx_request_issue(pm8001_ha->id, + ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS, + ccb->ccb_tag, opc, + qc ? qc->tf.command : 0, // ata opcode + ccb->device ? 
atomic_read(&ccb->device->running_req) : 0); ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, sizeof(sata_cmd), q_index); return ret; diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.c b/drivers/scsi/pm8001/pm80xx_tracepoints.c new file mode 100644 index 000000000000..344aface9cdb --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_tracepoints.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Trace events in pm8001 driver. + * + * Copyright 2020 Google LLC + * Author: Akshat Jain <akshatzen@google.com> + */ + +#define CREATE_TRACE_POINTS +#include "pm80xx_tracepoints.h" diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.h b/drivers/scsi/pm8001/pm80xx_tracepoints.h new file mode 100644 index 000000000000..5e669a8a9344 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_tracepoints.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace events in pm8001 driver. + * + * Copyright 2020 Google LLC + * Author: Akshat Jain <akshatzen@google.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pm80xx + +#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PM80XX_H + +#include <linux/tracepoint.h> +#include "pm8001_sas.h" + +TRACE_EVENT(pm80xx_request_issue, + TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode, + u16 ata_opcode, int running_req), + + TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, phy_id) + __field(u32, htag) + __field(u32, ctlr_opcode) + __field(u16, ata_opcode) + __field(int, running_req) + ), + + TP_fast_assign( + __entry->id = id; + __entry->phy_id = phy_id; + __entry->htag = htag; + __entry->ctlr_opcode = ctlr_opcode; + __entry->ata_opcode = ata_opcode; + __entry->running_req = running_req; + ), + + TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d", + __entry->id, __entry->phy_id, __entry->htag, + __entry->ctlr_opcode, __entry->ata_opcode, + __entry->running_req) +); + +TRACE_EVENT(pm80xx_request_complete, + TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode, + u16 ata_opcode, int running_req), + + TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, phy_id) + __field(u32, htag) + __field(u32, ctlr_opcode) + __field(u16, ata_opcode) + __field(int, running_req) + ), + + TP_fast_assign( + __entry->id = id; + __entry->phy_id = phy_id; + __entry->htag = htag; + __entry->ctlr_opcode = ctlr_opcode; + __entry->ata_opcode = ata_opcode; + __entry->running_req = running_req; + ), + + TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d", + __entry->id, __entry->phy_id, __entry->htag, + __entry->ctlr_opcode, __entry->ata_opcode, + __entry->running_req) +); + +TRACE_EVENT(pm80xx_mpi_build_cmd, + TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci), + + TP_ARGS(id, opc, htag, qi, pi, ci), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, opc) + __field(u32, htag) + __field(u32, qi) + __field(u32, pi) + __field(u32, ci) + ), + + TP_fast_assign( + __entry->id = id; + __entry->opc = opc; + __entry->htag = htag; + __entry->qi = qi; + __entry->pi = pi; + __entry->ci = ci; + ), + + TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = %u PI = %u CI = %u", + __entry->id, __entry->opc, __entry->htag, __entry->qi, + __entry->pi, __entry->ci) +); + +#endif /* _TRACE_PM80XX_H_ */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . 
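+/* Make define_trace.h pick up pm80xx_tracepoints.h from this directory. */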
+#define TRACE_INCLUDE_FILE pm80xx_tracepoints + +#include <trace/define_trace.h> diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 88046a793767..928532180d32 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -3221,8 +3221,8 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) return NULL; sglist->order = order; - sgl_alloc_order(buflen, order, false, - GFP_KERNEL | GFP_DMA | __GFP_ZERO, &sglist->num_sg); + sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO, + &sglist->num_sg); return sglist; } @@ -3302,7 +3302,6 @@ static int pmcraid_copy_sglist( /** * pmcraid_queuecommand_lck - Queue a mid-layer request * @scsi_cmd: scsi command struct - * @done: done function * * This function queues a request generated by the mid-layer. Midlayer calls * this routine within host->lock. Some of the functions called by queuecommand diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 1dec814d8788..832a856dd367 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1538,7 +1538,6 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi) int i; struct scsi_bd *pbl; u64 *list; - dma_addr_t page; /* Alloc dma memory for BDQ buffers */ for (i = 0; i < QEDI_BDQ_NUM; i++) { @@ -1608,11 +1607,9 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi) qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / QEDI_PAGE_SIZE; list = (u64 *)qedi->bdq_pbl_list; - page = qedi->bdq_pbl_list_dma; for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) { *list = qedi->bdq_pbl_dma; list++; - page += QEDI_PAGE_SIZE; } return 0; @@ -2089,8 +2086,7 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) rc = snprintf(buf, ip_len, fmt, gw); break; case ISCSI_BOOT_ETH_FLAGS: - rc = snprintf(buf, 3, "%hhd\n", - SYSFS_FLAG_FW_SEL_BOOT); + rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_ETH_INDEX: rc = snprintf(buf, 3, "0\n"); @@ -2257,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, mchap_secret); break; case ISCSI_BOOT_TGT_FLAGS: - rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); + rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_TGT_NIC_ASSOC: rc = snprintf(buf, 3, "0\n"); diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 032efb294ee5..db55737000ab 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2700,7 +2700,13 @@ qla2x00_get_starget_port_id(struct scsi_target *starget) static inline void qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + rport->dev_loss_tmo = timeout ? 
timeout : 1; + + if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port) + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, + rport->dev_loss_tmo); } static void diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 070b636802d0..1fe4966fc2f6 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -5828,13 +5828,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_dfs_create_rport(vha, fcport); - if (NVME_TARGET(vha->hw, fcport)) { - qla_nvme_register_remote(vha, fcport); - qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - return; - } - qla24xx_update_fcport_fcp_prio(vha, fcport); switch (vha->host->active_mode) { @@ -5856,6 +5849,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) break; } + if (NVME_TARGET(vha->hw, fcport)) + qla_nvme_register_remote(vha, fcport); + qla2x00_set_fcport_state(fcport, FCS_ONLINE); if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 138ffdb5c92c..e22ec7cb65db 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -43,7 +43,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) req.port_name = wwn_to_u64(fcport->port_name); req.node_name = wwn_to_u64(fcport->node_name); req.port_role = 0; - req.dev_loss_tmo = 0; + req.dev_loss_tmo = fcport->dev_loss_tmo; if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) req.port_role = FC_PORT_ROLE_NVME_INITIATOR; @@ -70,6 +70,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) return ret; } + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, + fcport->dev_loss_tmo); + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER) ql_log(ql_log_info, vha, 0x212a, "PortID:%06x Supports SLER\n", req.port_id); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 8987acc24dac..0ae936d839f1 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -5734,7 +5734,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) switch (type) { case ISCSI_BOOT_ETH_FLAGS: - rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); + rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_ETH_INDEX: rc = sprintf(str, "0\n"); @@ -5843,7 +5843,7 @@ qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, (char *)&boot_conn->chap.intr_secret); break; case ISCSI_BOOT_TGT_FLAGS: - rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); + rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_TGT_NIC_ASSOC: rc = sprintf(str, "0\n"); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index f6af1562cba4..211aace69c22 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -55,7 +55,6 @@ #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> -#include <linux/async.h> #include <asm/unaligned.h> #include <scsi/scsi.h> @@ -201,11 +200,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd) /* - * 1024 is big enough for saturating the fast scsi LUN now + * 1024 is big enough for saturating fast SCSI LUNs. 
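+ * The returned value acts as an upper bound: the host's own can_queue
+ * still applies whenever it is smaller than 1024.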
*/ int scsi_device_max_queue_depth(struct scsi_device *sdev) { - return max_t(int, sdev->host->can_queue, 1024); + return min_t(int, sdev->host->can_queue, 1024); } /** diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index d9109771f274..db8517f1a485 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -9,6 +9,7 @@ static const char *const scsi_cmd_flags[] = { SCSI_CMD_FLAG_NAME(TAGGED), SCSI_CMD_FLAG_NAME(INITIALIZED), + SCSI_CMD_FLAG_NAME(LAST), }; #undef SCSI_CMD_FLAG_NAME diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 3eae2392ef15..60a6ae9d1219 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -133,23 +133,6 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd) return true; } -static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost) -{ - unsigned long flags; - - spin_lock_irqsave(shost->host_lock, flags); - list_del_init(&scmd->eh_entry); - /* - * If the abort succeeds, and there is no further - * EH action, clear the ->last_reset time. - */ - if (list_empty(&shost->eh_abort_list) && - list_empty(&shost->eh_cmd_q)) - if (shost->eh_deadline != -1) - shost->last_reset = 0; - spin_unlock_irqrestore(shost->host_lock, flags); -} - /** * scmd_eh_abort_handler - Handle command aborts * @work: command to be aborted. @@ -166,54 +149,72 @@ scmd_eh_abort_handler(struct work_struct *work) struct scsi_cmnd *scmd = container_of(work, struct scsi_cmnd, abort_work.work); struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; enum scsi_disposition rtn; unsigned long flags; - if (scsi_host_eh_past_deadline(sdev->host)) { + if (scsi_host_eh_past_deadline(shost)) { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "eh timeout, not aborting\n")); - } else { - SCSI_LOG_ERROR_RECOVERY(3, + goto out; + } + + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "aborting command\n")); - rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); - if (rtn == SUCCESS) { - set_host_byte(scmd, DID_TIME_OUT); - if (scsi_host_eh_past_deadline(sdev->host)) { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "eh timeout, not retrying " - "aborted command\n")); - } else if (!scsi_noretry_cmd(scmd) && - scsi_cmd_retry_allowed(scmd) && - scsi_eh_should_retry_cmd(scmd)) { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_WARNING, scmd, - "retry aborted command\n")); - scsi_eh_complete_abort(scmd, sdev->host); - scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); - return; - } else { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_WARNING, scmd, - "finish aborted command\n")); - scsi_eh_complete_abort(scmd, sdev->host); - scsi_finish_command(scmd); - return; - } - } else { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "cmd abort %s\n", - (rtn == FAST_IO_FAIL) ? - "not send" : "failed")); - } + rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); + if (rtn != SUCCESS) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "cmd abort %s\n", + (rtn == FAST_IO_FAIL) ? 
+ "not send" : "failed")); + goto out; + } + set_host_byte(scmd, DID_TIME_OUT); + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "eh timeout, not retrying " + "aborted command\n")); + goto out; } - spin_lock_irqsave(sdev->host->host_lock, flags); + spin_lock_irqsave(shost->host_lock, flags); list_del_init(&scmd->eh_entry); - spin_unlock_irqrestore(sdev->host->host_lock, flags); + + /* + * If the abort succeeds, and there is no further + * EH action, clear the ->last_reset time. + */ + if (list_empty(&shost->eh_abort_list) && + list_empty(&shost->eh_cmd_q)) + if (shost->eh_deadline != -1) + shost->last_reset = 0; + + spin_unlock_irqrestore(shost->host_lock, flags); + + if (!scsi_noretry_cmd(scmd) && + scsi_cmd_retry_allowed(scmd) && + scsi_eh_should_retry_cmd(scmd)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "retry aborted command\n")); + scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); + } else { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "finish aborted command\n")); + scsi_finish_command(scmd); + } + return; + +out: + spin_lock_irqsave(shost->host_lock, flags); + list_del_init(&scmd->eh_entry); + spin_unlock_irqrestore(shost->host_lock, flags); + scsi_eh_scmd_add(scmd); } @@ -1429,7 +1430,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) enum scsi_disposition rtn = NEEDS_RETRY; for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) - rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0); + rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, + scmd->device->eh_timeout, 0); if (rtn == SUCCESS) return 0; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index b5a858c29488..d581613d87c7 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -8,7 +8,6 @@ #include <linux/pm_runtime.h> #include <linux/export.h> -#include <linux/async.h> #include <linux/blk-pm.h> #include <scsi/scsi.h> @@ -181,7 +180,7 @@ static int sdev_runtime_resume(struct device *dev) blk_pre_runtime_resume(sdev->request_queue); if (pm && pm->runtime_resume) err = pm->runtime_resume(dev); - blk_post_runtime_resume(sdev->request_queue, err); + blk_post_runtime_resume(sdev->request_queue); return err; } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index a278fc8948f4..5c4786310a31 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -3,7 +3,6 @@ #define _SCSI_PRIV_H #include <linux/device.h> -#include <linux/async.h> #include <scsi/scsi_device.h> #include <linux/sbitmap.h> @@ -144,7 +143,7 @@ extern struct scsi_transport_template blank_transport_template; extern void __scsi_remove_device(struct scsi_device *); extern struct bus_type scsi_bus_type; -extern const struct attribute_group scsi_shost_attr_group; +extern const struct attribute_group *scsi_shost_groups[]; /* scsi_netlink.c */ #ifdef CONFIG_SCSI_NETLINK diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 23e1c0acdeae..3520b9384428 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns, #define SCSI_SCAN_TYPE_DEFAULT "sync" #endif -char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; +static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO|S_IWUSR); @@ -122,7 +122,7 @@ struct async_scan_data { struct completion prev_finished; }; -/** +/* * scsi_enable_async_suspend - Enable async suspend and resume */ void scsi_enable_async_suspend(struct 
device *dev) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index d4edce930a4a..f1e0c131b77c 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -424,10 +424,15 @@ static struct attribute *scsi_sysfs_shost_attrs[] = { NULL }; -const struct attribute_group scsi_shost_attr_group = { +static const struct attribute_group scsi_shost_attr_group = { .attrs = scsi_sysfs_shost_attrs, }; +const struct attribute_group *scsi_shost_groups[] = { + &scsi_shost_attr_group, + NULL +}; + static void scsi_device_cls_release(struct device *class_dev) { struct scsi_device *sdev; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 5ddb8e053a8e..0e73c3f2f381 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -51,7 +51,6 @@ #include <linux/major.h> #include <linux/mutex.h> #include <linux/string_helpers.h> -#include <linux/async.h> #include <linux/slab.h> #include <linux/sed-opal.h> #include <linux/pm_runtime.h> diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 65bfd1e170da..378d071e47cb 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -61,10 +61,10 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf, zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); zone.capacity = zone.len; zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16])); - zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); - if (zone.type != ZBC_ZONE_TYPE_CONV && - zone.cond == ZBC_ZONE_COND_FULL) + if (zone.cond == ZBC_ZONE_COND_FULL) zone.wp = zone.start + zone.len; + else + zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); ret = cb(&zone, idx, data); if (ret) diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index e9ccfb97773f..27e98df83b31 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -100,7 +100,7 @@ snic_queue_report_tgt_req(struct snic *snic) SNIC_BUG_ON(ntgts == 0); buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN; - buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA); + buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) { snic_req_free(snic, rqi); SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n"); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 14c122839c40..f925b1f1f9ad 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -855,7 +855,7 @@ static void get_capabilities(struct scsi_cd *cd) /* allocate transfer buffer */ - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) { sr_printk(KERN_ERR, cd, "out of memory.\n"); return; diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index 1f988a1b9166..a61635326ae0 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -131,7 +131,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) if (cd->vendor == VENDOR_TOSHIBA) density = (blocklength > 2048) ? 
0x81 : 0x83; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -179,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi) if (cd->cdi.mask & CDC_MULTI_SESSION) return 0; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 20595c0ba0ae..9a0bba5a51a7 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -21,6 +21,8 @@ #include <linux/device.h> #include <linux/hyperv.h> #include <linux/blkdev.h> +#include <linux/dma-mapping.h> + #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> @@ -1336,6 +1338,7 @@ static void storvsc_on_channel_callback(void *context) continue; } request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd); + scsi_dma_unmap(scmnd); } storvsc_on_receive(stor_device, packet, request); @@ -1749,9 +1752,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) struct hv_host_device *host_dev = shost_priv(host); struct hv_device *dev = host_dev->dev; struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd); - int i; struct scatterlist *sgl; - unsigned int sg_count; struct vmscsi_request *vm_srb; struct vmbus_packet_mpb_array *payload; u32 payload_sz; @@ -1824,17 +1825,17 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); sgl = (struct scatterlist *)scsi_sglist(scmnd); - sg_count = scsi_sg_count(scmnd); length = scsi_bufflen(scmnd); payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; payload_sz = sizeof(cmd_request->mpb); - if (sg_count) { - unsigned int hvpgoff, hvpfns_to_add; + if (scsi_sg_count(scmnd)) { unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset); unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length); - u64 hvpfn; + struct scatterlist *sg; + unsigned long hvpfn, hvpfns_to_add; + int j, i = 0, sg_count; if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { @@ -1848,21 +1849,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) payload->range.len = length; payload->range.offset = offset_in_hvpg; + sg_count = scsi_dma_map(scmnd); + if (sg_count < 0) { + ret = SCSI_MLQUEUE_DEVICE_BUSY; + goto err_free_payload; + } - for (i = 0; sgl != NULL; sgl = sg_next(sgl)) { + for_each_sg(sgl, sg, sg_count, j) { /* - * Init values for the current sgl entry. hvpgoff - * and hvpfns_to_add are in units of Hyper-V size - * pages. Handling the PAGE_SIZE != HV_HYP_PAGE_SIZE - * case also handles values of sgl->offset that are - * larger than PAGE_SIZE. Such offsets are handled - * even on other than the first sgl entry, provided - * they are a multiple of PAGE_SIZE. + * Init values for the current sgl entry. hvpfns_to_add + * is in units of Hyper-V size pages. Handling the + * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles + * values of sgl->offset that are larger than PAGE_SIZE. + * Such offsets are handled even on other than the first + * sgl entry, provided they are a multiple of PAGE_SIZE. 
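+ * Since the scatterlist is now DMA-mapped via scsi_dma_map(), the PFN
+ * range is derived from sg_dma_address()/sg_dma_len() rather than from
+ * the page/offset pair of the unmapped entry.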
*/ - hvpgoff = HVPFN_DOWN(sgl->offset); - hvpfn = page_to_hvpfn(sg_page(sgl)) + hvpgoff; - hvpfns_to_add = HVPFN_UP(sgl->offset + sgl->length) - - hvpgoff; + hvpfn = HVPFN_DOWN(sg_dma_address(sg)); + hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) + + sg_dma_len(sg)) - hvpfn; /* * Fill the next portion of the PFN array with @@ -1872,7 +1876,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) * the PFN array is filled. */ while (hvpfns_to_add--) - payload->range.pfn_array[i++] = hvpfn++; + payload->range.pfn_array[i++] = hvpfn++; } } @@ -1884,13 +1888,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) put_cpu(); if (ret == -EAGAIN) { - if (payload_sz > sizeof(cmd_request->mpb)) - kfree(payload); /* no more space */ - return SCSI_MLQUEUE_DEVICE_BUSY; + ret = SCSI_MLQUEUE_DEVICE_BUSY; + goto err_free_payload; } return 0; + +err_free_payload: + if (payload_sz > sizeof(cmd_request->mpb)) + kfree(payload); + + return ret; } static struct scsi_host_template scsi_driver = { @@ -2016,6 +2025,7 @@ static int storvsc_probe(struct hv_device *device, stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); spin_lock_init(&stor_device->lock); hv_set_drvdata(device, stor_device); + dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1); stor_device->port_number = host->host_no; ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc); diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index b2521b830be7..9fe27b01904e 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -50,9 +50,11 @@ config SCSI_UFSHCD However, do not compile this as a module if your root file system (the one containing the directory /) is located on a UFS device. +if SCSI_UFSHCD + config SCSI_UFSHCD_PCI tristate "PCI bus based UFS Controller support" - depends on SCSI_UFSHCD && PCI + depends on PCI help This selects the PCI UFS Host Controller Interface. Select this if you have UFS Host Controller with PCI Interface. @@ -71,7 +73,6 @@ config SCSI_UFS_DWC_TC_PCI config SCSI_UFSHCD_PLATFORM tristate "Platform bus based UFS Controller support" - depends on SCSI_UFSHCD depends on HAS_IOMEM help This selects the UFS host controller support. Select this if @@ -147,7 +148,6 @@ config SCSI_UFS_TI_J721E config SCSI_UFS_BSG bool "Universal Flash Storage BSG device node" - depends on SCSI_UFSHCD select BLK_DEV_BSGLIB help Universal Flash Storage (UFS) is SCSI transport specification for @@ -177,7 +177,7 @@ config SCSI_UFS_EXYNOS config SCSI_UFS_CRYPTO bool "UFS Crypto Engine Support" - depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION + depends on BLK_INLINE_ENCRYPTION help Enable Crypto Engine Support in UFS. Enabling this makes it possible for the kernel to use the crypto @@ -186,7 +186,6 @@ config SCSI_UFS_CRYPTO config SCSI_UFS_HPB bool "Support UFS Host Performance Booster" - depends on SCSI_UFSHCD help The UFS HPB feature improves random read performance. It caches L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB @@ -195,16 +194,18 @@ config SCSI_UFS_HPB config SCSI_UFS_FAULT_INJECTION bool "UFS Fault Injection Support" - depends on SCSI_UFSHCD && FAULT_INJECTION + depends on FAULT_INJECTION help Enable fault injection support in the UFS driver. This makes it easier to test the UFS error handler and abort handler. 
config SCSI_UFS_HWMON - bool "UFS Temperature Notification" + bool "UFS Temperature Notification" depends on SCSI_UFSHCD=HWMON || HWMON=y help This provides support for UFS hardware monitoring. If enabled, a hardware monitoring device will be created for the UFS device. If unsure, say N. + +endif diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c index 679289e1a78e..7b08e2e07cc5 100644 --- a/drivers/scsi/ufs/tc-dwc-g210-pci.c +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -110,7 +110,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - pci_set_drvdata(pdev, hba); pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c index cd26bc82462e..474a4a064a68 100644 --- a/drivers/scsi/ufs/ufs-exynos.c +++ b/drivers/scsi/ufs/ufs-exynos.c @@ -853,14 +853,14 @@ static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba, } static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba, - int tag, bool op) + int tag, bool is_scsi_cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); u32 type; type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE); - if (op) + if (is_scsi_cmd) hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE); else hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE); diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index 8c7e8d321746..ab1a7ebd89b1 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -396,6 +396,12 @@ out: return ret; } +static int ufs_hisi_suspend_prepare(struct device *dev) +{ + /* RPM and SPM are different. Refer ufs_hisi_suspend() */ + return __ufshcd_suspend_prepare(dev, false); +} + static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, enum ufs_notify_change_status status) { @@ -578,7 +584,7 @@ static int ufs_hisi_remove(struct platform_device *pdev) static const struct dev_pm_ops ufs_hisi_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) - .prepare = ufshcd_suspend_prepare, + .prepare = ufs_hisi_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index f725248ba57f..f76692053ca1 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -538,8 +538,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - pci_set_drvdata(pdev, hba); - hba->vops = (struct ufs_hba_variant_ops *)id->driver_data; err = ufshcd_init(hba, mmio_base, pdev->irq); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index eaeae83b999f..8b16bbbcb806 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -361,8 +361,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, goto dealloc_host; } - platform_set_drvdata(pdev, hba); - pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 13c09dbd99b9..1049e41abd5b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -128,8 +128,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs); enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, - UFSHCD_CMD_PER_LUN = 32, - UFSHCD_CAN_QUEUE = 32, + UFSHCD_NUM_RESERVED = 1, + UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, + UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, }; static const char *const 
ufshcd_state_name[] = { @@ -1069,13 +1070,32 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, return false; } +/* + * Determine the number of pending commands by counting the bits in the SCSI + * device budget maps. This approach has been selected because a bit is set in + * the budget map before scsi_host_queue_ready() checks the host_self_blocked + * flag. The host_self_blocked flag can be modified by calling + * scsi_block_requests() or scsi_unblock_requests(). + */ +static u32 ufshcd_pending_cmds(struct ufs_hba *hba) +{ + struct scsi_device *sdev; + u32 pending = 0; + + lockdep_assert_held(hba->host->host_lock); + __shost_for_each_device(sdev, hba->host) + pending += sbitmap_weight(&sdev->budget_map); + + return pending; +} + static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us) { unsigned long flags; int ret = 0; u32 tm_doorbell; - u32 tr_doorbell; + u32 tr_pending; bool timeout = false, do_last_check = false; ktime_t start; @@ -1093,8 +1113,8 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, } tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - if (!tm_doorbell && !tr_doorbell) { + tr_pending = ufshcd_pending_cmds(hba); + if (!tm_doorbell && !tr_pending) { timeout = false; break; } else if (do_last_check) { @@ -1114,12 +1134,12 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, do_last_check = true; } spin_lock_irqsave(hba->host->host_lock, flags); - } while (tm_doorbell || tr_doorbell); + } while (tm_doorbell || tr_pending); if (timeout) { dev_err(hba->dev, "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n", - __func__, tm_doorbell, tr_doorbell); + __func__, tm_doorbell, tr_pending); ret = -EBUSY; } out: @@ -1352,25 +1372,6 @@ out: return ret; } -static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved) -{ - int *busy = priv; - - WARN_ON_ONCE(reserved); - (*busy)++; - return false; -} - -/* Whether or not any tag is in use by a request that is in progress. 
*/ -static bool ufshcd_any_tag_in_use(struct ufs_hba *hba) -{ - struct request_queue *q = hba->cmd_queue; - int busy = 0; - - blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy); - return busy; -} - static int ufshcd_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) { @@ -1666,7 +1667,8 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) bool flush_result; unsigned long flags; - if (!ufshcd_is_clkgating_allowed(hba)) + if (!ufshcd_is_clkgating_allowed(hba) || + !hba->clk_gating.is_initialized) goto out; spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_gating.active_reqs++; @@ -1769,7 +1771,7 @@ static void ufshcd_gate_work(struct work_struct *work) if (hba->clk_gating.active_reqs || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks + || hba->outstanding_reqs || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done) goto rel_lock; @@ -1826,7 +1828,7 @@ static void __ufshcd_release(struct ufs_hba *hba) if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || - hba->outstanding_tasks || + hba->outstanding_tasks || !hba->clk_gating.is_initialized || hba->active_uic_cmd || hba->uic_async_done || hba->clk_gating.state == CLKS_OFF) return; @@ -1961,11 +1963,15 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) { if (!hba->clk_gating.is_initialized) return; + ufshcd_remove_clk_gating_sysfs(hba); - cancel_work_sync(&hba->clk_gating.ungate_work); - cancel_delayed_work_sync(&hba->clk_gating.gate_work); - destroy_workqueue(hba->clk_gating.clk_gating_workq); + + /* Ungate the clock if necessary. */ + ufshcd_hold(hba, false); hba->clk_gating.is_initialized = false; + ufshcd_release(hba); + + destroy_workqueue(hba->clk_gating.clk_gating_workq); } /* Must be called with host lock acquired */ @@ -2189,6 +2195,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; hba->nutmrs = ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; + hba->reserved_slot = hba->nutrs - 1; /* Read crypto capabilities */ err = ufshcd_hba_init_crypto_capabilities(hba); @@ -2650,17 +2657,42 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; } -static inline bool is_rpmb_wlun(struct scsi_device *sdev) -{ - return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN); -} - static inline bool is_device_wlun(struct scsi_device *sdev) { return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN); } +/* + * Associate the UFS controller queue with the default and poll HCTX types. + * Initialize the mq_map[] arrays. 
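+ * Only a single hardware queue is exposed: the DEFAULT and POLL maps
+ * each get one queue at offset 0, while the READ map is left empty.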
+ */ +static int ufshcd_map_queues(struct Scsi_Host *shost) +{ + int i, ret; + + for (i = 0; i < shost->nr_maps; i++) { + struct blk_mq_queue_map *map = &shost->tag_set.map[i]; + + switch (i) { + case HCTX_TYPE_DEFAULT: + case HCTX_TYPE_POLL: + map->nr_queues = 1; + break; + case HCTX_TYPE_READ: + map->nr_queues = 0; + break; + default: + WARN_ON_ONCE(true); + } + map->queue_offset = 0; + ret = blk_mq_map_queues(map); + WARN_ON_ONCE(ret); + } + + return 0; +} + static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) { struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr; @@ -2696,10 +2728,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) struct ufshcd_lrb *lrbp; int err = 0; - WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); + WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); - if (!down_read_trylock(&hba->clk_scaling_lock)) - return SCSI_MLQUEUE_HOST_BUSY; + /* + * Allows the UFS error handler to wait for prior ufshcd_queuecommand() + * calls. + */ + rcu_read_lock(); switch (hba->ufshcd_state) { case UFSHCD_STATE_OPERATIONAL: @@ -2779,8 +2814,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) } ufshcd_send_command(hba, tag); + out: - up_read(&hba->clk_scaling_lock); + rcu_read_unlock(); if (ufs_trigger_eh()) { unsigned long flags; @@ -2936,30 +2972,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { - struct request_queue *q = hba->cmd_queue; DECLARE_COMPLETION_ONSTACK(wait); - struct request *req; + const u32 tag = hba->reserved_slot; struct ufshcd_lrb *lrbp; int err; - int tag; - down_read(&hba->clk_scaling_lock); + /* Protects use of hba->reserved_slot. */ + lockdep_assert_held(&hba->dev_cmd.lock); - /* - * Get free slot, sleep if slots are unavailable. - * Even though we use wait_event() which sleeps indefinitely, - * the maximum wait time is bounded by SCSI request timeout. - */ - req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); - if (IS_ERR(req)) { - err = PTR_ERR(req); - goto out_unlock; - } - tag = req->tag; - WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); - /* Set the timeout such that the SCSI error handler is not activated. */ - req->timeout = msecs_to_jiffies(2 * timeout); - blk_mq_start_request(req); + down_read(&hba->clk_scaling_lock); lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); @@ -2977,8 +2998,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); out: - blk_mq_free_request(req); -out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -4960,11 +4979,7 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) */ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) { - struct ufs_hba *hba = shost_priv(sdev->host); - - if (depth > hba->nutrs) - depth = hba->nutrs; - return scsi_change_queue_depth(sdev, depth); + return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue)); } static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) @@ -5256,6 +5271,18 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) return retval; } +/* Release the resources allocated for processing a SCSI command. */ +static void ufshcd_release_scsi_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + struct scsi_cmnd *cmd = lrbp->cmd; + + scsi_dma_unmap(cmd); + lrbp->cmd = NULL; /* Mark the command as completed. 
*/ + ufshcd_release(hba); + ufshcd_clk_scaling_update_busy(hba); +} + /** * __ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance @@ -5266,9 +5293,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, { struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; - int result; int index; - bool update_scaling = false; for_each_set_bit(index, &completed_reqs, hba->nutrs) { lrbp = &hba->lrb[index]; @@ -5278,29 +5303,47 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_update_monitor(hba, lrbp); ufshcd_add_command_trace(hba, index, UFS_CMD_COMP); - result = ufshcd_transfer_rsp_status(hba, lrbp); - scsi_dma_unmap(cmd); - cmd->result = result; - /* Mark completed command as NULL in LRB */ - lrbp->cmd = NULL; + cmd->result = ufshcd_transfer_rsp_status(hba, lrbp); + ufshcd_release_scsi_cmd(hba, lrbp); /* Do not touch lrbp after scsi done */ scsi_done(cmd); - ufshcd_release(hba); - update_scaling = true; } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { if (hba->dev_cmd.complete) { ufshcd_add_command_trace(hba, index, UFS_DEV_COMP); complete(hba->dev_cmd.complete); - update_scaling = true; + ufshcd_clk_scaling_update_busy(hba); } } - if (update_scaling) - ufshcd_clk_scaling_update_busy(hba); } } +/* + * Returns > 0 if one or more commands have been completed or 0 if no + * requests have been completed. + */ +static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct ufs_hba *hba = shost_priv(shost); + unsigned long completed_reqs, flags; + u32 tr_doorbell; + + spin_lock_irqsave(&hba->outstanding_lock, flags); + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = ~tr_doorbell & hba->outstanding_reqs; + WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, + "completed: %#lx; outstanding: %#lx\n", completed_reqs, + hba->outstanding_reqs); + hba->outstanding_reqs &= ~completed_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (completed_reqs) + __ufshcd_transfer_req_compl(hba, completed_reqs); + + return completed_reqs; +} + /** * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance @@ -5311,9 +5354,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, */ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) { - unsigned long completed_reqs, flags; - u32 tr_doorbell; - /* Resetting interrupt aggregation counters first and reading the * DOOR_BELL afterward allows us to handle all the completed requests. * In order to prevent other interrupts starvation the DB is read once @@ -5328,21 +5368,13 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) if (ufs_fail_completion()) return IRQ_HANDLED; - spin_lock_irqsave(&hba->outstanding_lock, flags); - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - completed_reqs = ~tr_doorbell & hba->outstanding_reqs; - WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, - "completed: %#lx; outstanding: %#lx\n", completed_reqs, - hba->outstanding_reqs); - hba->outstanding_reqs &= ~completed_reqs; - spin_unlock_irqrestore(&hba->outstanding_lock, flags); + /* + * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we + * do not want polling to trigger spurious interrupt complaints. 
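+ * A poll from another context may already have reaped the completions,
+ * in which case this interrupt legitimately finds no work left to do.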
+ */ + ufshcd_poll(hba->host, 0); - if (completed_reqs) { - __ufshcd_transfer_req_compl(hba, completed_reqs); - return IRQ_HANDLED; - } else { - return IRQ_NONE; - } + return IRQ_HANDLED; } int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) @@ -5986,8 +6018,7 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) } ufshcd_scsi_block_requests(hba); /* Drain ufshcd_queuecommand() */ - down_write(&hba->clk_scaling_lock); - up_write(&hba->clk_scaling_lock); + synchronize_rcu(); cancel_work_sync(&hba->eeh_work); } @@ -6594,6 +6625,8 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, spin_lock_irqsave(host->host_lock, flags); task_tag = req->tag; + WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n", + task_tag); hba->tmf_rqs[req->tag] = req; treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag); @@ -6711,28 +6744,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, enum query_opcode desc_op) { - struct request_queue *q = hba->cmd_queue; DECLARE_COMPLETION_ONSTACK(wait); - struct request *req; + const u32 tag = hba->reserved_slot; struct ufshcd_lrb *lrbp; int err = 0; - int tag; u8 upiu_flags; - down_read(&hba->clk_scaling_lock); - - req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); - if (IS_ERR(req)) { - err = PTR_ERR(req); - goto out_unlock; - } - tag = req->tag; - WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); + /* Protects use of hba->reserved_slot. */ + lockdep_assert_held(&hba->dev_cmd.lock); - if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { - err = -EBUSY; - goto out; - } + down_read(&hba->clk_scaling_lock); lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); @@ -6801,9 +6822,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); -out: - blk_mq_free_request(req); -out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -7033,6 +7051,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) struct ufshcd_lrb *lrbp = &hba->lrb[tag]; unsigned long flags; int err = FAILED; + bool outstanding; u32 reg; WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); @@ -7110,7 +7129,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto release; } - lrbp->cmd = NULL; + /* + * Clear the corresponding bit from outstanding_reqs since the command + * has been aborted successfully. 
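+ * __test_and_clear_bit() tells us whether this path won the race with
+ * the completion handler; the command is released here only if it did.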
+ */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (outstanding) + ufshcd_release_scsi_cmd(hba, lrbp); + err = SUCCESS; release: @@ -7412,7 +7441,7 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev) static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) { int ret = 0; - struct scsi_device *sdev_boot; + struct scsi_device *sdev_boot, *sdev_rpmb; hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); @@ -7423,14 +7452,14 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) } scsi_device_put(hba->sdev_ufs_device); - hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, + sdev_rpmb = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); - if (IS_ERR(hba->sdev_rpmb)) { - ret = PTR_ERR(hba->sdev_rpmb); + if (IS_ERR(sdev_rpmb)) { + ret = PTR_ERR(sdev_rpmb); goto remove_sdev_ufs_device; } - ufshcd_blk_pm_runtime_init(hba->sdev_rpmb); - scsi_device_put(hba->sdev_rpmb); + ufshcd_blk_pm_runtime_init(sdev_rpmb); + scsi_device_put(sdev_rpmb); sdev_boot = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); @@ -8156,7 +8185,9 @@ static struct scsi_host_template ufshcd_driver_template = { .module = THIS_MODULE, .name = UFSHCD, .proc_name = UFSHCD, + .map_queues = ufshcd_map_queues, .queuecommand = ufshcd_queuecommand, + .mq_poll = ufshcd_poll, .slave_alloc = ufshcd_slave_alloc, .slave_configure = ufshcd_slave_configure, .slave_destroy = ufshcd_slave_destroy, @@ -9384,7 +9415,6 @@ void ufshcd_remove(struct ufs_hba *hba) ufs_sysfs_remove_nodes(hba->dev); blk_cleanup_queue(hba->tmf_queue); blk_mq_free_tag_set(&hba->tmf_tag_set); - blk_cleanup_queue(hba->cmd_queue); scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); @@ -9445,6 +9475,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) err = -ENOMEM; goto out_error; } + host->nr_maps = HCTX_TYPE_POLL + 1; hba = shost_priv(host); hba->host = host; hba->dev = dev; @@ -9486,6 +9517,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) struct device *dev = hba->dev; char eh_wq_name[sizeof("ufs_eh_wq_00")]; + /* + * dev_set_drvdata() must be called before any callbacks are registered + * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, + * sysfs). 
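+ * (The PCI and platform glue no longer set drvdata themselves; see the
+ * ufshcd-pci.c and ufshcd-pltfrm.c hunks above.)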
+ */ + dev_set_drvdata(dev, hba); + if (!mmio_base) { dev_err(hba->dev, "Invalid memory reference for mmio_base is NULL\n"); @@ -9528,8 +9566,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Configure LRB */ ufshcd_host_memory_configure(hba); - host->can_queue = hba->nutrs; - host->cmd_per_lun = hba->nutrs; + host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; + host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; host->max_id = UFSHCD_MAX_ID; host->max_lun = UFS_MAX_LUNS; host->max_channel = UFSHCD_MAX_CHANNEL; @@ -9597,12 +9635,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto out_disable; } - hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); - if (IS_ERR(hba->cmd_queue)) { - err = PTR_ERR(hba->cmd_queue); - goto out_remove_scsi_host; - } - hba->tmf_tag_set = (struct blk_mq_tag_set) { .nr_hw_queues = 1, .queue_depth = hba->nutmrs, @@ -9611,7 +9643,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) }; err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); if (err < 0) - goto free_cmd_queue; + goto out_remove_scsi_host; hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); if (IS_ERR(hba->tmf_queue)) { err = PTR_ERR(hba->tmf_queue); @@ -9680,8 +9712,6 @@ free_tmf_queue: blk_cleanup_queue(hba->tmf_queue); free_tmf_tag_set: blk_mq_free_tag_set(&hba->tmf_tag_set); -free_cmd_queue: - blk_cleanup_queue(hba->cmd_queue); out_remove_scsi_host: scsi_remove_host(hba->host); out_disable: @@ -9703,7 +9733,27 @@ void ufshcd_resume_complete(struct device *dev) } EXPORT_SYMBOL_GPL(ufshcd_resume_complete); -int ufshcd_suspend_prepare(struct device *dev) +static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) +{ + struct device *dev = &hba->sdev_ufs_device->sdev_gendev; + enum ufs_dev_pwr_mode dev_pwr_mode; + enum uic_link_state link_state; + unsigned long flags; + bool res; + + spin_lock_irqsave(&dev->power.lock, flags); + dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); + link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); + res = pm_runtime_suspended(dev) && + hba->curr_dev_pwr_mode == dev_pwr_mode && + hba->uic_link_state == link_state && + !hba->dev_info.b_rpm_dev_flush_capable; + spin_unlock_irqrestore(&dev->power.lock, flags); + + return res; +} + +int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm) { struct ufs_hba *hba = dev_get_drvdata(dev); int ret; @@ -9715,15 +9765,30 @@ int ufshcd_suspend_prepare(struct device *dev) * Refer ufshcd_resume_complete() */ if (hba->sdev_ufs_device) { - ret = ufshcd_rpm_get_sync(hba); - if (ret < 0 && ret != -EACCES) { - ufshcd_rpm_put(hba); - return ret; + /* Prevent runtime suspend */ + ufshcd_rpm_get_noresume(hba); + /* + * Check if already runtime suspended in same state as system + * suspend would be. 
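+ * If it is, the runtime resume below can be skipped and the device left
+ * in its current low-power state across the system suspend.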
+ */ + if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { + /* RPM state is not ok for SPM, so runtime resume */ + ret = ufshcd_rpm_resume(hba); + if (ret < 0 && ret != -EACCES) { + ufshcd_rpm_put(hba); + return ret; + } } hba->complete_put = true; } return 0; } +EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare); + +int ufshcd_suspend_prepare(struct device *dev) +{ + return __ufshcd_suspend_prepare(dev, true); +} EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare); #ifdef CONFIG_PM_SLEEP diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 54750d72c8fb..88c20f3608c2 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -338,7 +338,8 @@ struct ufs_hba_variant_ops { enum ufs_notify_change_status status, struct ufs_pa_layer_attr *, struct ufs_pa_layer_attr *); - void (*setup_xfer_req)(struct ufs_hba *, int, bool); + void (*setup_xfer_req)(struct ufs_hba *hba, int tag, + bool is_scsi_cmd); void (*setup_task_mgmt)(struct ufs_hba *, int, u8); void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, enum ufs_notify_change_status); @@ -737,13 +738,13 @@ struct ufs_hba_monitor { * @host: Scsi_Host instance of the driver * @dev: device handle * @lrb: local reference block - * @cmd_queue: Used to allocate command tags from hba->host->tag_set. * @outstanding_tasks: Bits representing outstanding task requests * @outstanding_lock: Protects @outstanding_reqs. * @outstanding_reqs: Bits representing outstanding transfer requests * @capabilities: UFS Controller Capabilities * @nutrs: Transfer Request Queue depth supported by controller * @nutmrs: Task Management Queue depth supported by controller + * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock. * @ufs_version: UFS Version to which controller complies * @vops: pointer to variant specific operations * @priv: pointer to variant specific private data @@ -777,6 +778,7 @@ struct ufs_hba_monitor { * @clk_list_head: UFS host controller clocks list node head * @pwr_info: holds current power mode * @max_pwr_info: keeps the device max valid pwm + * @clk_scaling_lock: used to serialize device commands and clock scaling * @desc_size: descriptor sizes reported by device * @urgent_bkops_lvl: keeps track of urgent bkops level for device * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for @@ -802,13 +804,11 @@ struct ufs_hba { struct Scsi_Host *host; struct device *dev; - struct request_queue *cmd_queue; /* * This field is to keep a reference to "scsi_device" corresponding to * "UFS device" W-LU. 
*/ struct scsi_device *sdev_ufs_device; - struct scsi_device *sdev_rpmb; #ifdef CONFIG_SCSI_UFS_HWMON struct device *hwmon_device; @@ -836,6 +836,7 @@ struct ufs_hba { u32 capabilities; int nutrs; int nutmrs; + u32 reserved_slot; u32 ufs_version; const struct ufs_hba_variant_ops *vops; struct ufs_hba_variant_params *vps; @@ -1211,6 +1212,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); int ufshcd_suspend_prepare(struct device *dev); +int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm); void ufshcd_resume_complete(struct device *dev); /* Wrapper functions for safely calling variant operations */ @@ -1420,6 +1422,16 @@ static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev); } +static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba) +{ + pm_runtime_get_noresume(&hba->sdev_ufs_device->sdev_gendev); +} + +static inline int ufshcd_rpm_resume(struct ufs_hba *hba) +{ + return pm_runtime_resume(&hba->sdev_ufs_device->sdev_gendev); +} + static inline int ufshcd_rpm_put(struct ufs_hba *hba) { return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev); diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 13cd21204bf9..2d36a0715fca 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -10,7 +10,6 @@ */ #include <asm/unaligned.h> -#include <linux/async.h> #include "ufshcd.h" #include "ufshpb.h" diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 65c642b24ecf..0e6110da69e7 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -778,7 +778,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, static void virtscsi_remove_vqs(struct virtio_device *vdev) { /* Stop all the virtqueues. */ - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); } diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig index 853096b7e84c..2527cf5757ec 100644 --- a/drivers/soc/canaan/Kconfig +++ b/drivers/soc/canaan/Kconfig @@ -5,7 +5,6 @@ config SOC_K210_SYSCTL depends on RISCV && SOC_CANAAN && OF default SOC_CANAAN select PM - select SYSCON select MFD_SYSCON help Canaan Kendryte K210 SoC system controller driver. 
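For context, the virtio_scsi hunk above switches virtscsi_remove_vqs() from calling the transport reset config op directly to the virtio_reset_device() helper. A minimal sketch of such a wrapper, assuming the helper simply delegates to the same config op (the authoritative body lives in drivers/virtio/virtio.c):

void virtio_reset_device(struct virtio_device *dev)
{
	/* Delegate to the transport-specific reset implementation. */
	dev->config->reset(dev);
}

Callers such as virtio_scsi then stay independent of the config-ops layout.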
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c index dd948889eeab..5a2edc48dd79 100644 --- a/drivers/soc/fsl/dpio/dpio-driver.c +++ b/drivers/soc/fsl/dpio/dpio-driver.c @@ -88,7 +88,7 @@ static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev) irq = dpio_dev->irqs[0]; /* clear the affinity hint */ - irq_set_affinity_hint(irq->msi_desc->irq, NULL); + irq_set_affinity_hint(irq->virq, NULL); } static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu) @@ -98,7 +98,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu) irq = dpio_dev->irqs[0]; error = devm_request_irq(&dpio_dev->dev, - irq->msi_desc->irq, + irq->virq, dpio_irq_handler, 0, dev_name(&dpio_dev->dev), @@ -111,10 +111,10 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu) } /* set the affinity hint */ - if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu))) + if (irq_set_affinity_hint(irq->virq, cpumask_of(cpu))) dev_err(&dpio_dev->dev, "irq_set_affinity failed irq %d cpu %d\n", - irq->msi_desc->irq, cpu); + irq->virq, cpu); return 0; } diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index ca75b14931ec..670cc82d17dc 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -411,12 +411,17 @@ out: return ret; } -static void init_clks(struct platform_device *pdev, struct clk **clk) +static int init_clks(struct platform_device *pdev, struct clk **clk) { int i; - for (i = CLK_NONE + 1; i < CLK_MAX; i++) + for (i = CLK_NONE + 1; i < CLK_MAX; i++) { clk[i] = devm_clk_get(&pdev->dev, clk_names[i]); + if (IS_ERR(clk[i])) + return PTR_ERR(clk[i]); + } + + return 0; } static struct scp *init_scp(struct platform_device *pdev, @@ -426,7 +431,7 @@ static struct scp *init_scp(struct platform_device *pdev, { struct genpd_onecell_data *pd_data; struct resource *res; - int i, j; + int i, j, ret; struct scp *scp; struct clk *clk[CLK_MAX]; @@ -481,7 +486,9 @@ static struct scp *init_scp(struct platform_device *pdev, pd_data->num_domains = num; - init_clks(pdev, clk); + ret = init_clks(pdev, clk); + if (ret) + return ERR_PTR(ret); for (i = 0; i < num; i++) { struct scp_domain *scpd = &scp->domains[i]; diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c index 312ba0f98ad7..56be39161489 100644 --- a/drivers/soc/ti/k3-ringacc.c +++ b/drivers/soc/ti/k3-ringacc.c @@ -647,7 +647,7 @@ int k3_ringacc_get_ring_irq_num(struct k3_ring *ring) if (!ring) return -EINVAL; - irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id); + irq_num = msi_get_virq(ring->parent->dev, ring->ring_id); if (irq_num <= 0) irq_num = -EINVAL; return irq_num; @@ -1356,9 +1356,9 @@ static int k3_ringacc_init(struct platform_device *pdev, struct resource *res; int ret, i; - dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_TI_SCI_INTA_MSI); - if (!dev->msi_domain) { + if (!dev->msi.domain) { dev_err(dev, "Failed to get MSI domain\n"); return -EPROBE_DEFER; } diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c index a1d9c027022a..991c78b34745 100644 --- a/drivers/soc/ti/ti_sci_inta_msi.c +++ b/drivers/soc/ti/ti_sci_inta_msi.c @@ -51,6 +51,7 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod struct irq_domain *domain; ti_sci_inta_msi_update_chip_ops(info); + info->flags |= MSI_FLAG_FREE_MSI_DESCS; domain = 
msi_create_irq_domain(fwnode, info, parent); if (domain) @@ -60,50 +61,32 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod } EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain); -static void ti_sci_inta_msi_free_descs(struct device *dev) -{ - struct msi_desc *desc, *tmp; - - list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { - list_del(&desc->list); - free_msi_entry(desc); - } -} - static int ti_sci_inta_msi_alloc_descs(struct device *dev, struct ti_sci_resource *res) { - struct msi_desc *msi_desc; + struct msi_desc msi_desc; int set, i, count = 0; + memset(&msi_desc, 0, sizeof(msi_desc)); + msi_desc.nvec_used = 1; + for (set = 0; set < res->sets; set++) { - for (i = 0; i < res->desc[set].num; i++) { - msi_desc = alloc_msi_entry(dev, 1, NULL); - if (!msi_desc) { - ti_sci_inta_msi_free_descs(dev); - return -ENOMEM; - } - - msi_desc->inta.dev_index = res->desc[set].start + i; - INIT_LIST_HEAD(&msi_desc->list); - list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); - count++; + for (i = 0; i < res->desc[set].num; i++, count++) { + msi_desc.msi_index = res->desc[set].start + i; + if (msi_add_msi_desc(dev, &msi_desc)) + goto fail; } - for (i = 0; i < res->desc[set].num_sec; i++) { - msi_desc = alloc_msi_entry(dev, 1, NULL); - if (!msi_desc) { - ti_sci_inta_msi_free_descs(dev); - return -ENOMEM; - } - - msi_desc->inta.dev_index = res->desc[set].start_sec + i; - INIT_LIST_HEAD(&msi_desc->list); - list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); - count++; + + for (i = 0; i < res->desc[set].num_sec; i++, count++) { + msi_desc.msi_index = res->desc[set].start_sec + i; + if (msi_add_msi_desc(dev, &msi_desc)) + goto fail; } } - return count; +fail: + msi_free_msi_descs(dev); + return -ENOMEM; } int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev, @@ -120,39 +103,22 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev, if (pdev->id < 0) return -ENODEV; - nvec = ti_sci_inta_msi_alloc_descs(dev, res); - if (nvec <= 0) - return nvec; + ret = msi_setup_device_data(dev); + if (ret) + return ret; - ret = msi_domain_alloc_irqs(msi_domain, dev, nvec); - if (ret) { - dev_err(dev, "Failed to allocate IRQs %d\n", ret); - goto cleanup; + msi_lock_descs(dev); + nvec = ti_sci_inta_msi_alloc_descs(dev, res); + if (nvec <= 0) { + ret = nvec; + goto unlock; } - return 0; - -cleanup: - ti_sci_inta_msi_free_descs(&pdev->dev); + ret = msi_domain_alloc_irqs_descs_locked(msi_domain, dev, nvec); + if (ret) + dev_err(dev, "Failed to allocate IRQs %d\n", ret); +unlock: + msi_unlock_descs(dev); return ret; } EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs); - -void ti_sci_inta_msi_domain_free_irqs(struct device *dev) -{ - msi_domain_free_irqs(dev->msi_domain, dev); - ti_sci_inta_msi_free_descs(dev); -} -EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_free_irqs); - -unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 dev_index) -{ - struct msi_desc *desc; - - for_each_msi_entry(desc, dev) - if (desc->inta.dev_index == dev_index) - return desc->irq; - - return -ENODEV; -} -EXPORT_SYMBOL_GPL(ti_sci_inta_msi_get_virq); diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig index 53af9115dc31..8a755a5c8836 100644 --- a/drivers/soc/xilinx/Kconfig +++ b/drivers/soc/xilinx/Kconfig @@ -25,4 +25,14 @@ config ZYNQMP_PM_DOMAINS Say yes to enable device power management through PM domains If in doubt, say N. 
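The ti_sci_inta_msi rework above stops keeping a driver-private list of MSI descriptors and hands the bookkeeping to the MSI core: a zeroed struct msi_desc on the stack serves as a template, msi_add_msi_desc() copies it for each index, and msi_lock_descs()/msi_unlock_descs() bracket the allocation. A condensed sketch of that pattern, assuming the msi_add_msi_desc() API exactly as used in the hunk (the helper name is hypothetical):

static int example_alloc_msi_descs(struct device *dev, u32 first, u32 count)
{
	struct msi_desc desc;
	u32 i;

	memset(&desc, 0, sizeof(desc));
	desc.nvec_used = 1;		/* one vector per descriptor */

	for (i = 0; i < count; i++) {
		desc.msi_index = first + i;
		/* The core copies the template, so it can be reused */
		if (msi_add_msi_desc(dev, &desc))
			return -ENOMEM;
	}

	return count;
}

Because the domain is now created with MSI_FLAG_FREE_MSI_DESCS, the core also frees the descriptors, which is what lets the old ti_sci_inta_msi_free_descs() and ti_sci_inta_msi_get_virq() helpers disappear in favor of msi_free_msi_descs() and msi_get_virq().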
+config XLNX_EVENT_MANAGER + bool "Enable Xilinx Event Management Driver" + depends on ZYNQMP_FIRMWARE + default ZYNQMP_FIRMWARE + help + Say yes to enable event management support for Xilinx. + This driver uses the firmware driver as an interface for event/power + management requests to the firmware. + + If in doubt, say N. endmenu diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile index 9854e6f6086b..41e585bc9c67 100644 --- a/drivers/soc/xilinx/Makefile +++ b/drivers/soc/xilinx/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o +obj-$(CONFIG_XLNX_EVENT_MANAGER) += xlnx_event_manager.o diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c new file mode 100644 index 000000000000..b27f8853508e --- /dev/null +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xilinx Event Management Driver + * + * Copyright (C) 2021 Xilinx, Inc. + * + * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com> + */ + +#include <linux/cpuhotplug.h> +#include <linux/firmware/xlnx-event-manager.h> +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/hashtable.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1); + +static int virq_sgi; +static int event_manager_availability = -EACCES; + +/* SGI number used for the event management driver */ +#define XLNX_EVENT_SGI_NUM (15) + +/* Max number of drivers that can register for the same event */ +#define MAX_DRIVER_PER_EVENT (10U) + +/* Max hashmap order for the PM API feature check (1<<7 = 128) */ +#define REGISTERED_DRIVER_MAX_ORDER (7) + +#define MAX_BITS (32U) /* Number of bits available for the error mask */ + +#define FIRMWARE_VERSION_MASK (0xFFFFU) +#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U) + +static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER); +static int sgi_num = XLNX_EVENT_SGI_NUM; + +/** + * struct registered_event_data - Registered Event Data. + * @key: Combined id (Node-Id | Event-Id) of type u64, with the upper + * u32 holding the Node-Id and the lower u32 the Event-Id; + * used as the key to index into the hashmap. + * @agent_data: Data passed back to the handler function. + * @cb_type: Type of API callback, like PM_NOTIFY_CB, etc. + * @eve_cb: Function pointer to store the callback function. + * @wake: If this flag is set, the firmware will wake the processor + * if it is in a sleep or power-down state. + * @hentry: hlist_node that hooks this entry into the hashtable.
+ */ +struct registered_event_data { + u64 key; + enum pm_api_cb_id cb_type; + void *agent_data; + + event_cb_func_t eve_cb; + bool wake; + struct hlist_node hentry; +}; + +static bool xlnx_is_error_event(const u32 node_id) +{ + if (node_id == EVENT_ERROR_PMC_ERR1 || + node_id == EVENT_ERROR_PMC_ERR2 || + node_id == EVENT_ERROR_PSM_ERR1 || + node_id == EVENT_ERROR_PSM_ERR2) + return true; + + return false; +} + +static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake, + event_cb_func_t cb_fun, void *data) +{ + u64 key = 0; + struct registered_event_data *eve_data; + + key = ((u64)node_id << 32U) | (u64)event; + /* Check for existing entry in hash table for given key id */ + hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { + if (eve_data->key == key) { + pr_err("Found as already registered\n"); + return -EINVAL; + } + } + + /* Add new entry if not present */ + eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL); + if (!eve_data) + return -ENOMEM; + + eve_data->key = key; + eve_data->cb_type = PM_NOTIFY_CB; + eve_data->eve_cb = cb_fun; + eve_data->wake = wake; + eve_data->agent_data = data; + + hash_add(reg_driver_map, &eve_data->hentry, key); + + return 0; +} + +static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data) +{ + struct registered_event_data *eve_data; + + /* Check for existing entry in hash table for given cb_type */ + hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) { + if (eve_data->cb_type == PM_INIT_SUSPEND_CB) { + pr_err("Found as already registered\n"); + return -EINVAL; + } + } + + /* Add new entry if not present */ + eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL); + if (!eve_data) + return -ENOMEM; + + eve_data->key = 0; + eve_data->cb_type = PM_INIT_SUSPEND_CB; + eve_data->eve_cb = cb_fun; + eve_data->agent_data = data; + + hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB); + + return 0; +} + +static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) +{ + bool is_callback_found = false; + struct registered_event_data *eve_data; + + /* Check for existing entry in hash table for given cb_type */ + hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) { + if (eve_data->cb_type == PM_INIT_SUSPEND_CB && + eve_data->eve_cb == cb_fun) { + is_callback_found = true; + /* remove an object from a hashtable */ + hash_del(&eve_data->hentry); + kfree(eve_data); + } + } + if (!is_callback_found) { + pr_warn("Didn't find any registered callback for suspend event\n"); + return -EINVAL; + } + + return 0; +} + +static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event, + event_cb_func_t cb_fun) +{ + bool is_callback_found = false; + struct registered_event_data *eve_data; + u64 key = ((u64)node_id << 32U) | (u64)event; + + /* Check for existing entry in hash table for given key id */ + hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { + if (eve_data->key == key && + eve_data->eve_cb == cb_fun) { + is_callback_found = true; + /* remove an object from a hashtable */ + hash_del(&eve_data->hentry); + kfree(eve_data); + } + } + if (!is_callback_found) { + pr_warn("Didn't find any registered callback for 0x%x 0x%x\n", + node_id, event); + return -EINVAL; + } + + return 0; +} + +/** + * xlnx_register_event() - Register for the event. + * @cb_type: Type of callback from pm_api_cb_id, + * PM_NOTIFY_CB - for Error Events, + * PM_INIT_SUSPEND_CB - for suspend callback. + * @node_id: Node-Id related to event. 
+ * @event: Event Mask for the Error Event. + * @wake: Flag specifying whether the subsystem should be woken upon + * event notification. + * @cb_fun: Function pointer to store the callback function. + * @data: Pointer for the driver instance. + * + * Return: 0 on successful registration, else an error code. + */ +int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, + const bool wake, event_cb_func_t cb_fun, void *data) +{ + int ret = 0; + u32 eve; + int pos; + + if (event_manager_availability) + return event_manager_availability; + + if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) { + pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type); + return -EINVAL; + } + + if (!cb_fun) + return -EFAULT; + + if (cb_type == PM_INIT_SUSPEND_CB) { + ret = xlnx_add_cb_for_suspend(cb_fun, data); + } else { + if (!xlnx_is_error_event(node_id)) { + /* Add entry for Node-Id/Event in hash table */ + ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data); + } else { + /* Add into hash table */ + for (pos = 0; pos < MAX_BITS; pos++) { + eve = event & (1 << pos); + if (!eve) + continue; + + /* Add entry for the Node-Id/Event bit in the hash table */ + ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun, + data); + /* Break the loop on error */ + if (ret) + break; + } + if (ret) { + /* Skip the event that returned the error */ + pos--; + /* Remove the events registered during this call from the hash table */ + for ( ; pos >= 0; pos--) { + eve = event & (1 << pos); + if (!eve) + continue; + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + } + } + } + + if (ret) { + pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id, + event, ret); + return ret; + } + + /* Register for the Node-Id/Event combination in the firmware */ + ret = zynqmp_pm_register_notifier(node_id, event, wake, true); + if (ret) { + pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id, + event, ret); + /* Remove the already registered event from the hash table */ + if (xlnx_is_error_event(node_id)) { + for (pos = 0; pos < MAX_BITS; pos++) { + eve = event & (1 << pos); + if (!eve) + continue; + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + } + } else { + xlnx_remove_cb_for_notify_event(node_id, event, cb_fun); + } + return ret; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(xlnx_register_event); + +/** + * xlnx_unregister_event() - Unregister for the event. + * @cb_type: Type of callback from pm_api_cb_id, + * PM_NOTIFY_CB - for Error Events, + * PM_INIT_SUSPEND_CB - for suspend callback. + * @node_id: Node-Id related to event. + * @event: Event Mask for the Error Event. + * @cb_fun: Function pointer of the callback function. + * + * Return: 0 on successful unregistration, else an error code.
+ */ +int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, + event_cb_func_t cb_fun) +{ + int ret; + u32 eve, pos; + + if (event_manager_availability) + return event_manager_availability; + + if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) { + pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type); + return -EINVAL; + } + + if (!cb_fun) + return -EFAULT; + + if (cb_type == PM_INIT_SUSPEND_CB) { + ret = xlnx_remove_cb_for_suspend(cb_fun); + } else { + /* Remove the Node-Id/Event from the hash table */ + if (!xlnx_is_error_event(node_id)) { + xlnx_remove_cb_for_notify_event(node_id, event, cb_fun); + } else { + for (pos = 0; pos < MAX_BITS; pos++) { + eve = event & (1 << pos); + if (!eve) + continue; + + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + } + } + + /* Unregister for the Node-Id/Event combination */ + ret = zynqmp_pm_register_notifier(node_id, event, false, false); + if (ret) { + pr_err("%s() failed for 0x%x and 0x%x: %d\n", + __func__, node_id, event, ret); + return ret; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(xlnx_unregister_event); + +static void xlnx_call_suspend_cb_handler(const u32 *payload) +{ + bool is_callback_found = false; + struct registered_event_data *eve_data; + u32 cb_type = payload[0]; + + /* Check for existing entry in hash table for given cb_type */ + hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) { + if (eve_data->cb_type == cb_type) { + eve_data->eve_cb(&payload[0], eve_data->agent_data); + is_callback_found = true; + } + } + if (!is_callback_found) + pr_warn("Didn't find any registered callback for suspend event\n"); +} + +static void xlnx_call_notify_cb_handler(const u32 *payload) +{ + bool is_callback_found = false; + struct registered_event_data *eve_data; + u64 key = ((u64)payload[1] << 32U) | (u64)payload[2]; + int ret; + + /* Check for existing entry in hash table for given key id */ + hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { + if (eve_data->key == key) { + eve_data->eve_cb(&payload[0], eve_data->agent_data); + is_callback_found = true; + + /* Re-register with the firmware to get future events */ + ret = zynqmp_pm_register_notifier(payload[1], payload[2], + eve_data->wake, true); + if (ret) { + pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, + payload[1], payload[2], ret); + /* Remove the already registered event from the hash table */ + xlnx_remove_cb_for_notify_event(payload[1], payload[2], + eve_data->eve_cb); + } + } + } + if (!is_callback_found) + pr_warn("Didn't find any registered callback for 0x%x 0x%x\n", + payload[1], payload[2]); +} + +static void xlnx_get_event_callback_data(u32 *buf) +{ + zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); +} + +static irqreturn_t xlnx_event_handler(int irq, void *dev_id) +{ + u32 cb_type, node_id, event, pos; + u32 payload[CB_MAX_PAYLOAD_SIZE] = {0}; + u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0}; + + /* Get event data */ + xlnx_get_event_callback_data(payload); + + /* First element is callback type, others are callback arguments */ + cb_type = payload[0]; + + if (cb_type == PM_NOTIFY_CB) { + node_id = payload[1]; + event = payload[2]; + if (!xlnx_is_error_event(node_id)) { + xlnx_call_notify_cb_handler(payload); + } else { + /* + * Each callback function expects the payload as an input argument. + * A single callback can convey multiple error events through the + * error mask, so payload[2] may contain multiple error events. + * In the reg_driver_map database, entries are stored per single + * node_id/error combination. + * So copy the payload message into event_data, update + * event_data[2] with the error mask of the single error event, and + * use event_data as the input argument for the registered callback + * function. + * + */ + memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE)); + /* Support multiple error events */ + for (pos = 0; pos < MAX_BITS; pos++) { + if ((0 == (event & (1 << pos)))) + continue; + event_data[2] = (event & (1 << pos)); + xlnx_call_notify_cb_handler(event_data); + } + } + } else if (cb_type == PM_INIT_SUSPEND_CB) { + xlnx_call_suspend_cb_handler(payload); + } else { + pr_err("%s() Unsupported Callback %d\n", __func__, cb_type); + } + + return IRQ_HANDLED; +} + +static int xlnx_event_cpuhp_start(unsigned int cpu) +{ + enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE); + + return 0; +} + +static int xlnx_event_cpuhp_down(unsigned int cpu) +{ + disable_percpu_irq(virq_sgi); + + return 0; +} + +static void xlnx_disable_percpu_irq(void *data) +{ + disable_percpu_irq(virq_sgi); +} + +static int xlnx_event_init_sgi(struct platform_device *pdev) +{ + int ret = 0; + int cpu = smp_processor_id(); + /* + * IRQ related structures are used for the following: + * for each SGI interrupt ensure it is mapped by the GIC IRQ domain + * and that each corresponding linux IRQ for the HW IRQ has + * a handler for when receiving an interrupt from the remote + * processor. + */ + struct irq_domain *domain; + struct irq_fwspec sgi_fwspec; + struct device_node *interrupt_parent = NULL; + struct device *parent = pdev->dev.parent; + + /* Find the GIC controller to map SGIs. */ + interrupt_parent = of_irq_find_parent(parent->of_node); + if (!interrupt_parent) { + dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n"); + return -EINVAL; + } + + /* Each SGI needs to be associated with GIC's IRQ domain. */ + domain = irq_find_host(interrupt_parent); + of_node_put(interrupt_parent); + + /* Each mapping needs the GIC domain when finding the IRQ mapping. */ + sgi_fwspec.fwnode = domain->fwnode; + + /* + * When the irq domain looks at the mapping, each arg is as follows: + * 3 args for: interrupt type (SGI), interrupt # (set later), type + */ + sgi_fwspec.param_count = 1; + + /* Set SGI's hwirq */ + sgi_fwspec.param[0] = sgi_num; + virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec); + + per_cpu(cpu_number1, cpu) = cpu; + ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt", + &cpu_number1); + WARN_ON(ret); + if (ret) { + irq_dispose_mapping(virq_sgi); + return ret; + } + + irq_to_desc(virq_sgi); + irq_set_status_flags(virq_sgi, IRQ_PER_CPU); + + return ret; +} + +static void xlnx_event_cleanup_sgi(struct platform_device *pdev) +{ + int cpu = smp_processor_id(); + + per_cpu(cpu_number1, cpu) = cpu; + + cpuhp_remove_state(CPUHP_AP_ONLINE_DYN); + + on_each_cpu(xlnx_disable_percpu_irq, NULL, 1); + + irq_clear_status_flags(virq_sgi, IRQ_PER_CPU); + free_percpu_irq(virq_sgi, &cpu_number1); + irq_dispose_mapping(virq_sgi); +} + +static int xlnx_event_manager_probe(struct platform_device *pdev) +{ + int ret; + + ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER); + if (ret < 0) { + dev_err(&pdev->dev, "Feature check failed with %d\n", ret); + return ret; + } + + if ((ret & FIRMWARE_VERSION_MASK) < + REGISTER_NOTIFIER_FIRMWARE_VERSION) { + dev_err(&pdev->dev, "Register notifier version error. 
Expected Firmware: v%d - Found: v%d\n", + REGISTER_NOTIFIER_FIRMWARE_VERSION, + ret & FIRMWARE_VERSION_MASK); + return -EOPNOTSUPP; + } + + /* Initialize the SGI */ + ret = xlnx_event_init_sgi(pdev); + if (ret) { + dev_err(&pdev->dev, "SGI init failed with %d\n", ret); + return ret; + } + + /* Set up handlers for the CPU hot-plug cases */ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting", + xlnx_event_cpuhp_start, xlnx_event_cpuhp_down); + + ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num, + 0, NULL); + if (ret) { + dev_err(&pdev->dev, "SGI %d registration over TF-A failed with %d\n", sgi_num, ret); + xlnx_event_cleanup_sgi(pdev); + return ret; + } + + event_manager_availability = 0; + + dev_info(&pdev->dev, "SGI %d registered over TF-A\n", sgi_num); + dev_info(&pdev->dev, "Xilinx Event Management driver probed\n"); + + return ret; +} + +static int xlnx_event_manager_remove(struct platform_device *pdev) +{ + int i; + struct registered_event_data *eve_data; + struct hlist_node *tmp; + int ret; + + hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) { + hash_del(&eve_data->hentry); + kfree(eve_data); + } + + ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, 0, 1, NULL); + if (ret) + dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret); + + xlnx_event_cleanup_sgi(pdev); + + event_manager_availability = -EACCES; + + return ret; +} + +static struct platform_driver xlnx_event_manager_driver = { + .probe = xlnx_event_manager_probe, + .remove = xlnx_event_manager_remove, + .driver = { + .name = "xlnx_event_manager", + }, +}; +module_param(sgi_num, uint, 0); +module_platform_driver(xlnx_event_manager_driver); diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index f8c301984d4f..859dd31b6eff 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -16,6 +16,7 @@ #include <linux/suspend.h> #include <linux/firmware/xlnx-zynqmp.h> +#include <linux/firmware/xlnx-event-manager.h> #include <linux/mailbox/zynqmp-ipi-message.h> /** @@ -30,6 +31,7 @@ struct zynqmp_pm_work_struct { static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work; static struct mbox_chan *rx_chan; +static bool event_registered; enum pm_suspend_mode { PM_SUSPEND_MODE_FIRST = 0, @@ -46,17 +48,24 @@ static const char *const suspend_modes[] = { static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD; -enum pm_api_cb_id { - PM_INIT_SUSPEND_CB = 30, - PM_ACKNOWLEDGE_CB, - PM_NOTIFY_CB, -}; - static void zynqmp_pm_get_callback_data(u32 *buf) { zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); } +static void suspend_event_callback(const u32 *payload, void *data) +{ + /* First element is callback API ID, others are callback arguments */ + if (work_pending(&zynqmp_pm_init_suspend_work->callback_work)) + return; + + /* Copy callback arguments into the work structure */ + memcpy(zynqmp_pm_init_suspend_work->args, &payload[1], + sizeof(zynqmp_pm_init_suspend_work->args)); + + queue_work(system_unbound_wq, &zynqmp_pm_init_suspend_work->callback_work); +} + static irqreturn_t zynqmp_pm_isr(int irq, void *data) { u32 payload[CB_PAYLOAD_SIZE]; @@ -184,7 +193,32 @@ static int zynqmp_pm_probe(struct platform_device *pdev) if (pm_api_version < ZYNQMP_PM_VERSION) return -ENODEV; - if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) { + /* + * First, try to use the Xilinx Event Manager by registering suspend_event_callback + * for the suspend/shutdown event. 
+ * If xlnx_register_event() returns -EACCES (the Xilinx Event Manager + * is not available to use) or -ENODEV (the Xilinx Event Manager was not + * compiled in), fall back to the ipi-mailbox or interrupt method. + */ + ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false, + suspend_event_callback, NULL); + if (!ret) { + zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev, + sizeof(struct zynqmp_pm_work_struct), + GFP_KERNEL); + if (!zynqmp_pm_init_suspend_work) { + xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, + suspend_event_callback); + return -ENOMEM; + } + event_registered = true; + + INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work, + zynqmp_pm_init_suspend_work_fn); + } else if (ret != -EACCES && ret != -ENODEV) { + dev_err(&pdev->dev, "Failed to register with the Xilinx Event Manager: %d\n", ret); + return ret; + } else if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) { zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev, sizeof(struct zynqmp_pm_work_struct), @@ -228,6 +262,10 @@ static int zynqmp_pm_probe(struct platform_device *pdev) ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); if (ret) { + if (event_registered) { + xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback); + event_registered = false; + } dev_err(&pdev->dev, "unable to create sysfs interface\n"); return ret; } @@ -238,6 +276,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev) static int zynqmp_pm_remove(struct platform_device *pdev) { sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); + if (event_registered) + xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback); if (!rx_chan) mbox_free_channel(rx_chan); diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 4fcc3ba93004..558390af44b6 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -1178,9 +1178,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns, cdns->pcm.num_bd = config.pcm_bd; cdns->pcm.num_in = config.pcm_in; cdns->pcm.num_out = config.pcm_out; - cdns->pdm.num_bd = config.pdm_bd; - cdns->pdm.num_in = config.pdm_in; - cdns->pdm.num_out = config.pdm_out; /* Allocate PDIs for PCMs */ stream = &cdns->pcm; @@ -1211,32 +1208,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns, stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out; cdns->num_ports = stream->num_pdi; - /* Allocate PDIs for PDMs */ - stream = &cdns->pdm; - ret = cdns_allocate_pdi(cdns, &stream->bd, - stream->num_bd, offset); - if (ret) - return ret; - - offset += stream->num_bd; - - ret = cdns_allocate_pdi(cdns, &stream->in, - stream->num_in, offset); - if (ret) - return ret; - - offset += stream->num_in; - - ret = cdns_allocate_pdi(cdns, &stream->out, - stream->num_out, offset); - - if (ret) - return ret; - - /* Update total number of PDM PDIs */ - stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out; - cdns->num_ports += stream->num_pdi; - return 0; } EXPORT_SYMBOL(sdw_cdns_pdi_init); @@ -1681,7 +1652,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns) EXPORT_SYMBOL(sdw_cdns_probe); int cdns_set_sdw_stream(struct snd_soc_dai *dai, - void *stream, bool pcm, int direction) + void *stream, int direction) { struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); struct sdw_cdns_dma_data *dma; @@ -1705,10 +1676,7 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai, if (!dma) return -ENOMEM; - if (pcm) - dma->stream_type = SDW_STREAM_PCM; - else - dma->stream_type = SDW_STREAM_PDM; + dma->stream_type = SDW_STREAM_PCM; dma->bus =
&cdns->bus; dma->link_id = cdns->instance; diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index e587aede63bf..595d72c15d97 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -17,7 +17,7 @@ * @h_ch_num: high channel for PDI * @ch_count: total channel count for PDI * @dir: data direction - * @type: stream type, PDM or PCM + * @type: stream type, (only PCM supported) */ struct sdw_cdns_pdi { int num; @@ -62,17 +62,11 @@ struct sdw_cdns_streams { * @pcm_bd: number of bidirectional PCM streams supported * @pcm_in: number of input PCM streams supported * @pcm_out: number of output PCM streams supported - * @pdm_bd: number of bidirectional PDM streams supported - * @pdm_in: number of input PDM streams supported - * @pdm_out: number of output PDM streams supported */ struct sdw_cdns_stream_config { unsigned int pcm_bd; unsigned int pcm_in; unsigned int pcm_out; - unsigned int pdm_bd; - unsigned int pdm_in; - unsigned int pdm_out; }; /** @@ -86,6 +80,7 @@ struct sdw_cdns_stream_config { * @link_id: Master link id * @hw_params: hw_params to be applied in .prepare step * @suspended: status set when suspended, to be used in .prepare + * @paused: status set in .trigger, to be used in suspend */ struct sdw_cdns_dma_data { char *name; @@ -96,6 +91,7 @@ struct sdw_cdns_dma_data { int link_id; struct snd_pcm_hw_params *hw_params; bool suspended; + bool paused; }; /** @@ -109,7 +105,6 @@ struct sdw_cdns_dma_data { * @ports: Data ports * @num_ports: Total number of data ports * @pcm: PCM streams - * @pdm: PDM streams * @registers: Cadence registers * @link_up: Link status * @msg_count: Messages sent on bus @@ -127,7 +122,6 @@ struct sdw_cdns { int num_ports; struct sdw_cdns_streams pcm; - struct sdw_cdns_streams pdm; int pdi_loopback_source; int pdi_loopback_target; @@ -186,7 +180,7 @@ cdns_xfer_msg_defer(struct sdw_bus *bus, int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params); int cdns_set_sdw_stream(struct snd_soc_dai *dai, - void *stream, bool pcm, int direction); + void *stream, int direction); void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string, bool initial_delay, int reset_iterations); diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index d082d18e41a9..122f7a29d8ca 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -564,7 +564,7 @@ static void intel_pdi_init(struct sdw_intel *sdw, { void __iomem *shim = sdw->link_res->shim; unsigned int link_id = sdw->instance; - int pcm_cap, pdm_cap; + int pcm_cap; /* PCM Stream Capability */ pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id)); @@ -575,41 +575,25 @@ static void intel_pdi_init(struct sdw_intel *sdw, dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n", config->pcm_bd, config->pcm_in, config->pcm_out); - - /* PDM Stream Capability */ - pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); - - config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap); - config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap); - config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap); - - dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n", - config->pdm_bd, config->pdm_in, config->pdm_out); } static int -intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm) +intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num) { void __iomem *shim = sdw->link_res->shim; unsigned int link_id = sdw->instance; int count; - if (pcm) { - count = intel_readw(shim, 
SDW_SHIM_PCMSYCHC(link_id, pdi_num)); + count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num)); - /* - * WORKAROUND: on all existing Intel controllers, pdi - * number 2 reports channel count as 1 even though it - * supports 8 channels. Performing hardcoding for pdi - * number 2. - */ - if (pdi_num == 2) - count = 7; - - } else { - count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); - count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count); - } + /* + * WORKAROUND: on all existing Intel controllers, pdi + * number 2 reports channel count as 1 even though it + * supports 8 channels. Performing hardcoding for pdi + * number 2. + */ + if (pdi_num == 2) + count = 7; /* zero based values for channel count in register */ count++; @@ -620,12 +604,12 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm) static int intel_pdi_get_ch_update(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi, unsigned int num_pdi, - unsigned int *num_ch, bool pcm) + unsigned int *num_ch) { int i, ch_count = 0; for (i = 0; i < num_pdi; i++) { - pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm); + pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num); ch_count += pdi->ch_count; pdi++; } @@ -635,25 +619,23 @@ static int intel_pdi_get_ch_update(struct sdw_intel *sdw, } static int intel_pdi_stream_ch_update(struct sdw_intel *sdw, - struct sdw_cdns_streams *stream, bool pcm) + struct sdw_cdns_streams *stream) { intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd, - &stream->num_ch_bd, pcm); + &stream->num_ch_bd); intel_pdi_get_ch_update(sdw, stream->in, stream->num_in, - &stream->num_ch_in, pcm); + &stream->num_ch_in); intel_pdi_get_ch_update(sdw, stream->out, stream->num_out, - &stream->num_ch_out, pcm); + &stream->num_ch_out); return 0; } static int intel_pdi_ch_update(struct sdw_intel *sdw) { - /* First update PCM streams followed by PDM streams */ - intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true); - intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false); + intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm); return 0; } @@ -711,7 +693,7 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) } static int intel_params_stream(struct sdw_intel *sdw, - struct snd_pcm_substream *substream, + int stream, struct snd_soc_dai *dai, struct snd_pcm_hw_params *hw_params, int link_id, int alh_stream_id) @@ -719,7 +701,7 @@ static int intel_params_stream(struct sdw_intel *sdw, struct sdw_intel_link_res *res = sdw->link_res; struct sdw_intel_stream_params_data params_data; - params_data.substream = substream; + params_data.stream = stream; /* direction */ params_data.dai = dai; params_data.hw_params = hw_params; params_data.link_id = link_id; @@ -732,14 +714,14 @@ static int intel_params_stream(struct sdw_intel *sdw, } static int intel_free_stream(struct sdw_intel *sdw, - struct snd_pcm_substream *substream, + int stream, struct snd_soc_dai *dai, int link_id) { struct sdw_intel_link_res *res = sdw->link_res; struct sdw_intel_stream_free_data free_data; - free_data.substream = substream; + free_data.stream = stream; /* direction */ free_data.dai = dai; free_data.link_id = link_id; @@ -840,7 +822,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream, struct sdw_port_config *pconfig; int ch, dir; int ret; - bool pcm = true; dma = snd_soc_dai_get_dma_data(dai, substream); if (!dma) @@ -852,13 +833,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream, else dir = SDW_DATA_DIR_TX; - if (dma->stream_type == SDW_STREAM_PDM) - pcm = false; - - if (pcm) - pdi = 
sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id); - else - pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id); + pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id); if (!pdi) { ret = -EINVAL; @@ -871,12 +846,13 @@ static int intel_hw_params(struct snd_pcm_substream *substream, sdw_cdns_config_stream(cdns, ch, dir, pdi); /* store pdi and hw_params, may be needed in prepare step */ + dma->paused = false; dma->suspended = false; dma->pdi = pdi; dma->hw_params = params; /* Inform DSP about PDI stream number */ - ret = intel_params_stream(sdw, substream, dai, params, + ret = intel_params_stream(sdw, substream->stream, dai, params, sdw->instance, pdi->intel_alh_id); if (ret) @@ -887,12 +863,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream, sconfig.frame_rate = params_rate(params); sconfig.type = dma->stream_type; - if (dma->stream_type == SDW_STREAM_PDM) { - sconfig.frame_rate *= 50; - sconfig.bps = 1; - } else { - sconfig.bps = snd_pcm_format_width(params_format(params)); - } + sconfig.bps = snd_pcm_format_width(params_format(params)); /* Port configuration */ pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL); @@ -953,7 +924,7 @@ static int intel_prepare(struct snd_pcm_substream *substream, sdw_cdns_config_stream(cdns, ch, dir, dma->pdi); /* Inform DSP about PDI stream number */ - ret = intel_params_stream(sdw, substream, dai, + ret = intel_params_stream(sdw, substream->stream, dai, dma->hw_params, sdw->instance, dma->pdi->intel_alh_id); @@ -987,7 +958,7 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) return ret; } - ret = intel_free_stream(sdw, substream, dai, sdw->instance); + ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance); if (ret < 0) { dev_err(dai->dev, "intel_free_stream: failed %d\n", ret); return ret; @@ -1008,39 +979,10 @@ static void intel_shutdown(struct snd_pcm_substream *substream, pm_runtime_put_autosuspend(cdns->dev); } -static int intel_component_dais_suspend(struct snd_soc_component *component) -{ - struct sdw_cdns_dma_data *dma; - struct snd_soc_dai *dai; - - for_each_component_dais(component, dai) { - /* - * we don't have a .suspend dai_ops, and we don't have access - * to the substream, so let's mark both capture and playback - * DMA contexts as suspended - */ - dma = dai->playback_dma_data; - if (dma) - dma->suspended = true; - - dma = dai->capture_dma_data; - if (dma) - dma->suspended = true; - } - - return 0; -} - static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction) { - return cdns_set_sdw_stream(dai, stream, true, direction); -} - -static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai, - void *stream, int direction) -{ - return cdns_set_sdw_stream(dai, stream, false, direction); + return cdns_set_sdw_stream(dai, stream, direction); } static void *intel_get_sdw_stream(struct snd_soc_dai *dai, @@ -1059,24 +1001,100 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai, return dma->stream; } -static const struct snd_soc_dai_ops intel_pcm_dai_ops = { - .startup = intel_startup, - .hw_params = intel_hw_params, - .prepare = intel_prepare, - .hw_free = intel_hw_free, - .shutdown = intel_shutdown, - .set_sdw_stream = intel_pcm_set_sdw_stream, - .get_sdw_stream = intel_get_sdw_stream, -}; +static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) +{ + struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_cdns_dma_data *dma; + int ret = 0; 
-static const struct snd_soc_dai_ops intel_pdm_dai_ops = { + dma = snd_soc_dai_get_dma_data(dai, substream); + if (!dma) { + dev_err(dai->dev, "failed to get dma data in %s\n", + __func__); + return -EIO; + } + + switch (cmd) { + case SNDRV_PCM_TRIGGER_SUSPEND: + + /* + * The .prepare callback is used to deal with xruns and resume operations. + * In the case of xruns, the DMAs and SHIM registers cannot be touched, + * but for resume operations the DMAs and SHIM registers need to be initialized. + * the .trigger callback is used to track the suspend case only. + */ + + dma->suspended = true; + + ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance); + break; + + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dma->paused = true; + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dma->paused = false; + break; + default: + break; + } + + return ret; +} + +static int intel_component_dais_suspend(struct snd_soc_component *component) +{ + struct snd_soc_dai *dai; + + /* + * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core + * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state. + * Since the component suspend is called last, we can trap this corner case + * and force the DAIs to release their resources. + */ + for_each_component_dais(component, dai) { + struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_cdns_dma_data *dma; + int stream; + int ret; + + dma = dai->playback_dma_data; + stream = SNDRV_PCM_STREAM_PLAYBACK; + if (!dma) { + dma = dai->capture_dma_data; + stream = SNDRV_PCM_STREAM_CAPTURE; + } + + if (!dma) + continue; + + if (dma->suspended) + continue; + + if (dma->paused) { + dma->suspended = true; + + ret = intel_free_stream(sdw, stream, dai, sdw->instance); + if (ret < 0) + return ret; + } + } + + return 0; +} + +static const struct snd_soc_dai_ops intel_pcm_dai_ops = { .startup = intel_startup, .hw_params = intel_hw_params, .prepare = intel_prepare, .hw_free = intel_hw_free, + .trigger = intel_trigger, .shutdown = intel_shutdown, - .set_sdw_stream = intel_pdm_set_sdw_stream, - .get_sdw_stream = intel_get_sdw_stream, + .set_stream = intel_pcm_set_sdw_stream, + .get_stream = intel_get_sdw_stream, }; static const struct snd_soc_component_driver dai_component = { @@ -1087,7 +1105,7 @@ static const struct snd_soc_component_driver dai_component = { static int intel_create_dai(struct sdw_cdns *cdns, struct snd_soc_dai_driver *dais, enum intel_pdi_type type, - u32 num, u32 off, u32 max_ch, bool pcm) + u32 num, u32 off, u32 max_ch) { int i; @@ -1116,10 +1134,7 @@ static int intel_create_dai(struct sdw_cdns *cdns, dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE; } - if (pcm) - dais[i].ops = &intel_pcm_dai_ops; - else - dais[i].ops = &intel_pdm_dai_ops; + dais[i].ops = &intel_pcm_dai_ops; } return 0; @@ -1133,7 +1148,7 @@ static int intel_register_dai(struct sdw_intel *sdw) int num_dai, ret, off = 0; /* DAIs are created based on total number of PDIs supported */ - num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi; + num_dai = cdns->pcm.num_pdi; dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL); if (!dais) @@ -1143,39 +1158,19 @@ static int intel_register_dai(struct sdw_intel *sdw) stream = &cdns->pcm; ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in, - off, stream->num_ch_in, true); + off, stream->num_ch_in); if (ret) return ret; off += cdns->pcm.num_in; ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, 
cdns->pcm.num_out, - off, stream->num_ch_out, true); + off, stream->num_ch_out); if (ret) return ret; off += cdns->pcm.num_out; ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd, - off, stream->num_ch_bd, true); - if (ret) - return ret; - - /* Create PDM DAIs */ - stream = &cdns->pdm; - off += cdns->pcm.num_bd; - ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in, - off, stream->num_ch_in, false); - if (ret) - return ret; - - off += cdns->pdm.num_in; - ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out, - off, stream->num_ch_out, false); - if (ret) - return ret; - - off += cdns->pdm.num_out; - ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd, - off, stream->num_ch_bd, false); + off, stream->num_ch_bd); if (ret) return ret; @@ -1549,7 +1544,7 @@ static int __maybe_unused intel_pm_prepare(struct device *dev) struct sdw_intel *sdw = cdns_to_intel(cdns); struct sdw_bus *bus = &cdns->bus; u32 clock_stop_quirks; - int ret = 0; + int ret; if (bus->prop.hw_disabled || !sdw->startup_done) { dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index a317bea2d42d..54813417ef8e 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -1024,8 +1024,8 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream, ctrl->sruntime[dai->id] = sruntime; for_each_rtd_codec_dais(rtd, i, codec_dai) { - ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime, - substream->stream); + ret = snd_soc_dai_set_stream(codec_dai, sruntime, + substream->stream); if (ret < 0 && ret != -ENOTSUPP) { dev_err(dai->dev, "Failed to set sdw stream on %s\n", codec_dai->name); @@ -1051,8 +1051,8 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = { .hw_free = qcom_swrm_hw_free, .startup = qcom_swrm_startup, .shutdown = qcom_swrm_shutdown, - .set_sdw_stream = qcom_swrm_set_sdw_stream, - .get_sdw_stream = qcom_swrm_get_sdw_stream, + .set_stream = qcom_swrm_set_sdw_stream, + .get_stream = qcom_swrm_get_sdw_stream, }; static const struct snd_soc_component_driver qcom_swrm_dai_component = { @@ -1156,11 +1156,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl) ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode", bp_mode, nports); if (ret) { - u32 version; - - ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version); - - if (version <= 0x01030000) + if (ctrl->version <= 0x01030000) memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); else return ret; diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 5d4f6b308ef7..980f26d49b66 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -1863,7 +1863,7 @@ static int set_stream(struct snd_pcm_substream *substream, /* Set stream pointer on all DAIs */ for_each_rtd_dais(rtd, i, dai) { - ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream); + ret = snd_soc_dai_set_stream(dai, sdw_stream, substream->stream); if (ret < 0) { dev_err(rtd->dev, "failed to set stream pointer on dai %s\n", dai->name); break; @@ -1934,7 +1934,7 @@ void sdw_shutdown_stream(void *sdw_substream) /* Find stream from first CPU DAI */ dai = asoc_rtd_to_cpu(rtd, 0); - sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + sdw_stream = snd_soc_dai_get_stream(dai, substream->stream); if (IS_ERR(sdw_stream)) { dev_err(rtd->dev, "no stream found for DAI %s\n", dai->name); diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c index 
5eb7b61bbb4d..f86433b29260 100644 --- a/drivers/spi/spi-pic32.c +++ b/drivers/spi/spi-pic32.c @@ -370,7 +370,6 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width) cfg.src_addr_width = dma_width; cfg.dst_addr_width = dma_width; /* tx channel */ - cfg.slave_id = pic32s->tx_irq; cfg.direction = DMA_MEM_TO_DEV; ret = dmaengine_slave_config(master->dma_tx, &cfg); if (ret) { @@ -378,7 +377,6 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width) return ret; } /* rx channel */ - cfg.slave_id = pic32s->rx_irq; cfg.direction = DMA_DEV_TO_MEM; ret = dmaengine_slave_config(master->dma_rx, &cfg); if (ret) diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig index 2874b6c26028..737802046314 100644 --- a/drivers/spmi/Kconfig +++ b/drivers/spmi/Kconfig @@ -34,4 +34,15 @@ config SPMI_MSM_PMIC_ARB This is required for communicating with Qualcomm PMICs and other devices that have the SPMI interface. +config SPMI_MTK_PMIF + tristate "Mediatek SPMI Controller (PMIC Arbiter)" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + If you say yes to this option, support will be included for the + built-in SPMI PMIC Arbiter interface on Mediatek family + processors. + + This is required for communicating with Mediatek PMICs and + other devices that have the SPMI interface. + endif diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile index 6e092e6f290c..9d974424c8c1 100644 --- a/drivers/spmi/Makefile +++ b/drivers/spmi/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_SPMI) += spmi.o obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o +obj-$(CONFIG_SPMI_MTK_PMIF) += spmi-mtk-pmif.o diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c new file mode 100644 index 000000000000..ad511f2c3324 --- /dev/null +++ b/drivers/spmi/spmi-mtk-pmif.c @@ -0,0 +1,542 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2021 MediaTek Inc. 
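The MediaTek PMIF driver introduced below drives the PMIC Arbiter through a software interface (SWINF) whose FSM state must be polled around every transfer. A brief usage sketch of the polling primitive it relies on, using the GET_SWINF()/SWINF_IDLE and PMIF_DELAY_US/PMIF_TIMEOUT_US definitions that follow (the helper name and the sta_reg parameter are hypothetical):

/* Busy-wait, atomic-context safe, until the SWINF FSM reports IDLE */
static int example_pmif_wait_idle(struct pmif *arb, u32 sta_reg)
{
	u32 sta;

	return readl_poll_timeout_atomic(arb->base + sta_reg, sta,
					 GET_SWINF(sta) == SWINF_IDLE,
					 PMIF_DELAY_US, PMIF_TIMEOUT_US);
}

On timeout the poll returns -ETIMEDOUT, which is why the read and write paths below check ret < 0 and, if the FSM is instead sitting in WFVLDCLR, release the channel by writing the valid-clear register before bailing out.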
+ +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/spmi.h> + +#define SWINF_IDLE 0x00 +#define SWINF_WFVLDCLR 0x06 + +#define GET_SWINF(x) (((x) >> 1) & 0x7) + +#define PMIF_CMD_REG_0 0 +#define PMIF_CMD_REG 1 +#define PMIF_CMD_EXT_REG 2 +#define PMIF_CMD_EXT_REG_LONG 3 + +#define PMIF_DELAY_US 10 +#define PMIF_TIMEOUT_US (10 * 1000) + +#define PMIF_CHAN_OFFSET 0x5 + +#define PMIF_MAX_CLKS 3 + +#define SPMI_OP_ST_BUSY 1 + +struct ch_reg { + u32 ch_sta; + u32 wdata; + u32 rdata; + u32 ch_send; + u32 ch_rdy; +}; + +struct pmif_data { + const u32 *regs; + const u32 *spmimst_regs; + u32 soc_chan; +}; + +struct pmif { + void __iomem *base; + void __iomem *spmimst_base; + struct ch_reg chan; + struct clk_bulk_data clks[PMIF_MAX_CLKS]; + size_t nclks; + const struct pmif_data *data; +}; + +static const char * const pmif_clock_names[] = { + "pmif_sys_ck", "pmif_tmr_ck", "spmimst_clk_mux", +}; + +enum pmif_regs { + PMIF_INIT_DONE, + PMIF_INF_EN, + PMIF_ARB_EN, + PMIF_CMDISSUE_EN, + PMIF_TIMER_CTRL, + PMIF_SPI_MODE_CTRL, + PMIF_IRQ_EVENT_EN_0, + PMIF_IRQ_FLAG_0, + PMIF_IRQ_CLR_0, + PMIF_IRQ_EVENT_EN_1, + PMIF_IRQ_FLAG_1, + PMIF_IRQ_CLR_1, + PMIF_IRQ_EVENT_EN_2, + PMIF_IRQ_FLAG_2, + PMIF_IRQ_CLR_2, + PMIF_IRQ_EVENT_EN_3, + PMIF_IRQ_FLAG_3, + PMIF_IRQ_CLR_3, + PMIF_IRQ_EVENT_EN_4, + PMIF_IRQ_FLAG_4, + PMIF_IRQ_CLR_4, + PMIF_WDT_EVENT_EN_0, + PMIF_WDT_FLAG_0, + PMIF_WDT_EVENT_EN_1, + PMIF_WDT_FLAG_1, + PMIF_SWINF_0_STA, + PMIF_SWINF_0_WDATA_31_0, + PMIF_SWINF_0_RDATA_31_0, + PMIF_SWINF_0_ACC, + PMIF_SWINF_0_VLD_CLR, + PMIF_SWINF_1_STA, + PMIF_SWINF_1_WDATA_31_0, + PMIF_SWINF_1_RDATA_31_0, + PMIF_SWINF_1_ACC, + PMIF_SWINF_1_VLD_CLR, + PMIF_SWINF_2_STA, + PMIF_SWINF_2_WDATA_31_0, + PMIF_SWINF_2_RDATA_31_0, + PMIF_SWINF_2_ACC, + PMIF_SWINF_2_VLD_CLR, + PMIF_SWINF_3_STA, + PMIF_SWINF_3_WDATA_31_0, + PMIF_SWINF_3_RDATA_31_0, + PMIF_SWINF_3_ACC, + PMIF_SWINF_3_VLD_CLR, +}; + +static const u32 mt6873_regs[] = { + [PMIF_INIT_DONE] = 0x0000, + [PMIF_INF_EN] = 0x0024, + [PMIF_ARB_EN] = 0x0150, + [PMIF_CMDISSUE_EN] = 0x03B4, + [PMIF_TIMER_CTRL] = 0x03E0, + [PMIF_SPI_MODE_CTRL] = 0x0400, + [PMIF_IRQ_EVENT_EN_0] = 0x0418, + [PMIF_IRQ_FLAG_0] = 0x0420, + [PMIF_IRQ_CLR_0] = 0x0424, + [PMIF_IRQ_EVENT_EN_1] = 0x0428, + [PMIF_IRQ_FLAG_1] = 0x0430, + [PMIF_IRQ_CLR_1] = 0x0434, + [PMIF_IRQ_EVENT_EN_2] = 0x0438, + [PMIF_IRQ_FLAG_2] = 0x0440, + [PMIF_IRQ_CLR_2] = 0x0444, + [PMIF_IRQ_EVENT_EN_3] = 0x0448, + [PMIF_IRQ_FLAG_3] = 0x0450, + [PMIF_IRQ_CLR_3] = 0x0454, + [PMIF_IRQ_EVENT_EN_4] = 0x0458, + [PMIF_IRQ_FLAG_4] = 0x0460, + [PMIF_IRQ_CLR_4] = 0x0464, + [PMIF_WDT_EVENT_EN_0] = 0x046C, + [PMIF_WDT_FLAG_0] = 0x0470, + [PMIF_WDT_EVENT_EN_1] = 0x0474, + [PMIF_WDT_FLAG_1] = 0x0478, + [PMIF_SWINF_0_ACC] = 0x0C00, + [PMIF_SWINF_0_WDATA_31_0] = 0x0C04, + [PMIF_SWINF_0_RDATA_31_0] = 0x0C14, + [PMIF_SWINF_0_VLD_CLR] = 0x0C24, + [PMIF_SWINF_0_STA] = 0x0C28, + [PMIF_SWINF_1_ACC] = 0x0C40, + [PMIF_SWINF_1_WDATA_31_0] = 0x0C44, + [PMIF_SWINF_1_RDATA_31_0] = 0x0C54, + [PMIF_SWINF_1_VLD_CLR] = 0x0C64, + [PMIF_SWINF_1_STA] = 0x0C68, + [PMIF_SWINF_2_ACC] = 0x0C80, + [PMIF_SWINF_2_WDATA_31_0] = 0x0C84, + [PMIF_SWINF_2_RDATA_31_0] = 0x0C94, + [PMIF_SWINF_2_VLD_CLR] = 0x0CA4, + [PMIF_SWINF_2_STA] = 0x0CA8, + [PMIF_SWINF_3_ACC] = 0x0CC0, + [PMIF_SWINF_3_WDATA_31_0] = 0x0CC4, + [PMIF_SWINF_3_RDATA_31_0] = 0x0CD4, + [PMIF_SWINF_3_VLD_CLR] = 0x0CE4, + [PMIF_SWINF_3_STA] = 0x0CE8, +}; + +static const u32 
mt8195_regs[] = { + [PMIF_INIT_DONE] = 0x0000, + [PMIF_INF_EN] = 0x0024, + [PMIF_ARB_EN] = 0x0150, + [PMIF_CMDISSUE_EN] = 0x03B8, + [PMIF_TIMER_CTRL] = 0x03E4, + [PMIF_SPI_MODE_CTRL] = 0x0408, + [PMIF_IRQ_EVENT_EN_0] = 0x0420, + [PMIF_IRQ_FLAG_0] = 0x0428, + [PMIF_IRQ_CLR_0] = 0x042C, + [PMIF_IRQ_EVENT_EN_1] = 0x0430, + [PMIF_IRQ_FLAG_1] = 0x0438, + [PMIF_IRQ_CLR_1] = 0x043C, + [PMIF_IRQ_EVENT_EN_2] = 0x0440, + [PMIF_IRQ_FLAG_2] = 0x0448, + [PMIF_IRQ_CLR_2] = 0x044C, + [PMIF_IRQ_EVENT_EN_3] = 0x0450, + [PMIF_IRQ_FLAG_3] = 0x0458, + [PMIF_IRQ_CLR_3] = 0x045C, + [PMIF_IRQ_EVENT_EN_4] = 0x0460, + [PMIF_IRQ_FLAG_4] = 0x0468, + [PMIF_IRQ_CLR_4] = 0x046C, + [PMIF_WDT_EVENT_EN_0] = 0x0474, + [PMIF_WDT_FLAG_0] = 0x0478, + [PMIF_WDT_EVENT_EN_1] = 0x047C, + [PMIF_WDT_FLAG_1] = 0x0480, + [PMIF_SWINF_0_ACC] = 0x0800, + [PMIF_SWINF_0_WDATA_31_0] = 0x0804, + [PMIF_SWINF_0_RDATA_31_0] = 0x0814, + [PMIF_SWINF_0_VLD_CLR] = 0x0824, + [PMIF_SWINF_0_STA] = 0x0828, + [PMIF_SWINF_1_ACC] = 0x0840, + [PMIF_SWINF_1_WDATA_31_0] = 0x0844, + [PMIF_SWINF_1_RDATA_31_0] = 0x0854, + [PMIF_SWINF_1_VLD_CLR] = 0x0864, + [PMIF_SWINF_1_STA] = 0x0868, + [PMIF_SWINF_2_ACC] = 0x0880, + [PMIF_SWINF_2_WDATA_31_0] = 0x0884, + [PMIF_SWINF_2_RDATA_31_0] = 0x0894, + [PMIF_SWINF_2_VLD_CLR] = 0x08A4, + [PMIF_SWINF_2_STA] = 0x08A8, + [PMIF_SWINF_3_ACC] = 0x08C0, + [PMIF_SWINF_3_WDATA_31_0] = 0x08C4, + [PMIF_SWINF_3_RDATA_31_0] = 0x08D4, + [PMIF_SWINF_3_VLD_CLR] = 0x08E4, + [PMIF_SWINF_3_STA] = 0x08E8, +}; + +enum spmi_regs { + SPMI_OP_ST_CTRL, + SPMI_GRP_ID_EN, + SPMI_OP_ST_STA, + SPMI_MST_SAMPL, + SPMI_MST_REQ_EN, + SPMI_REC_CTRL, + SPMI_REC0, + SPMI_REC1, + SPMI_REC2, + SPMI_REC3, + SPMI_REC4, + SPMI_MST_DBG, + + /* MT8195 spmi regs */ + SPMI_MST_RCS_CTRL, + SPMI_SLV_3_0_EINT, + SPMI_SLV_7_4_EINT, + SPMI_SLV_B_8_EINT, + SPMI_SLV_F_C_EINT, + SPMI_REC_CMD_DEC, + SPMI_DEC_DBG, +}; + +static const u32 mt6873_spmi_regs[] = { + [SPMI_OP_ST_CTRL] = 0x0000, + [SPMI_GRP_ID_EN] = 0x0004, + [SPMI_OP_ST_STA] = 0x0008, + [SPMI_MST_SAMPL] = 0x000c, + [SPMI_MST_REQ_EN] = 0x0010, + [SPMI_REC_CTRL] = 0x0040, + [SPMI_REC0] = 0x0044, + [SPMI_REC1] = 0x0048, + [SPMI_REC2] = 0x004c, + [SPMI_REC3] = 0x0050, + [SPMI_REC4] = 0x0054, + [SPMI_MST_DBG] = 0x00fc, +}; + +static const u32 mt8195_spmi_regs[] = { + [SPMI_OP_ST_CTRL] = 0x0000, + [SPMI_GRP_ID_EN] = 0x0004, + [SPMI_OP_ST_STA] = 0x0008, + [SPMI_MST_SAMPL] = 0x000C, + [SPMI_MST_REQ_EN] = 0x0010, + [SPMI_MST_RCS_CTRL] = 0x0014, + [SPMI_SLV_3_0_EINT] = 0x0020, + [SPMI_SLV_7_4_EINT] = 0x0024, + [SPMI_SLV_B_8_EINT] = 0x0028, + [SPMI_SLV_F_C_EINT] = 0x002C, + [SPMI_REC_CTRL] = 0x0040, + [SPMI_REC0] = 0x0044, + [SPMI_REC1] = 0x0048, + [SPMI_REC2] = 0x004C, + [SPMI_REC3] = 0x0050, + [SPMI_REC4] = 0x0054, + [SPMI_REC_CMD_DEC] = 0x005C, + [SPMI_DEC_DBG] = 0x00F8, + [SPMI_MST_DBG] = 0x00FC, +}; + +static u32 pmif_readl(struct pmif *arb, enum pmif_regs reg) +{ + return readl(arb->base + arb->data->regs[reg]); +} + +static void pmif_writel(struct pmif *arb, u32 val, enum pmif_regs reg) +{ + writel(val, arb->base + arb->data->regs[reg]); +} + +static void mtk_spmi_writel(struct pmif *arb, u32 val, enum spmi_regs reg) +{ + writel(val, arb->spmimst_base + arb->data->spmimst_regs[reg]); +} + +static bool pmif_is_fsm_vldclr(struct pmif *arb) +{ + u32 reg_rdata; + + reg_rdata = pmif_readl(arb, arb->chan.ch_sta); + + return GET_SWINF(reg_rdata) == SWINF_WFVLDCLR; +} + +static int pmif_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid) +{ + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + u32 rdata, cmd; + int ret; 
+ + /* Check the opcode */ + if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP) + return -EINVAL; + + cmd = opc - SPMI_CMD_RESET; + + mtk_spmi_writel(arb, (cmd << 0x4) | sid, SPMI_OP_ST_CTRL); + ret = readl_poll_timeout_atomic(arb->spmimst_base + arb->data->spmimst_regs[SPMI_OP_ST_STA], + rdata, (rdata & SPMI_OP_ST_BUSY) == SPMI_OP_ST_BUSY, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) + dev_err(&ctrl->dev, "timeout, err = %d\n", ret); + + return ret; +} + +static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, u8 *buf, size_t len) +{ + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + struct ch_reg *inf_reg; + int ret; + u32 data, cmd; + + /* Check for argument validation. */ + if (sid & ~0xf) { + dev_err(&ctrl->dev, "exceed the max slv id\n"); + return -EINVAL; + } + + if (len > 4) { + dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len); + + return -EINVAL; + } + + if (opc >= 0x60 && opc <= 0x7f) + opc = PMIF_CMD_REG; + else if ((opc >= 0x20 && opc <= 0x2f) || (opc >= 0x38 && opc <= 0x3f)) + opc = PMIF_CMD_EXT_REG_LONG; + else + return -EINVAL; + + /* Wait for Software Interface FSM state to be IDLE. */ + inf_reg = &arb->chan; + ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], + data, GET_SWINF(data) == SWINF_IDLE, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) { + /* set channel ready if the data has transferred */ + if (pmif_is_fsm_vldclr(arb)) + pmif_writel(arb, 1, inf_reg->ch_rdy); + dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); + return ret; + } + + /* Send the command. */ + cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr; + pmif_writel(arb, cmd, inf_reg->ch_send); + + /* + * Wait for Software Interface FSM state to be WFVLDCLR, + * read the data and clear the valid flag. + */ + ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], + data, GET_SWINF(data) == SWINF_WFVLDCLR, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) { + dev_err(&ctrl->dev, "failed to wait for SWINF_WFVLDCLR\n"); + return ret; + } + + data = pmif_readl(arb, inf_reg->rdata); + memcpy(buf, &data, len); + pmif_writel(arb, 1, inf_reg->ch_rdy); + + return 0; +} + +static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, const u8 *buf, size_t len) +{ + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + struct ch_reg *inf_reg; + int ret; + u32 data, cmd; + + if (len > 4) { + dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len); + + return -EINVAL; + } + + /* Check the opcode */ + if (opc >= 0x40 && opc <= 0x5F) + opc = PMIF_CMD_REG; + else if ((opc <= 0xF) || (opc >= 0x30 && opc <= 0x37)) + opc = PMIF_CMD_EXT_REG_LONG; + else if (opc >= 0x80) + opc = PMIF_CMD_REG_0; + else + return -EINVAL; + + /* Wait for Software Interface FSM state to be IDLE. */ + inf_reg = &arb->chan; + ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], + data, GET_SWINF(data) == SWINF_IDLE, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) { + /* set channel ready if the data has transferred */ + if (pmif_is_fsm_vldclr(arb)) + pmif_writel(arb, 1, inf_reg->ch_rdy); + dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); + return ret; + } + + /* Set the write data. */ + memcpy(&data, buf, len); + pmif_writel(arb, data, inf_reg->wdata); + + /* Send the command. 
*/ + cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr; + pmif_writel(arb, cmd, inf_reg->ch_send); + + return 0; +} + +static const struct pmif_data mt6873_pmif_arb = { + .regs = mt6873_regs, + .spmimst_regs = mt6873_spmi_regs, + .soc_chan = 2, +}; + +static const struct pmif_data mt8195_pmif_arb = { + .regs = mt8195_regs, + .spmimst_regs = mt8195_spmi_regs, + .soc_chan = 2, +}; + +static int mtk_spmi_probe(struct platform_device *pdev) +{ + struct pmif *arb; + struct spmi_controller *ctrl; + int err, i; + u32 chan_offset; + + ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb)); + if (!ctrl) + return -ENOMEM; + + arb = spmi_controller_get_drvdata(ctrl); + arb->data = device_get_match_data(&pdev->dev); + if (!arb->data) { + err = -EINVAL; + dev_err(&pdev->dev, "Cannot get drv_data\n"); + goto err_put_ctrl; + } + + arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif"); + if (IS_ERR(arb->base)) { + err = PTR_ERR(arb->base); + goto err_put_ctrl; + } + + arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst"); + if (IS_ERR(arb->spmimst_base)) { + err = PTR_ERR(arb->spmimst_base); + goto err_put_ctrl; + } + + arb->nclks = ARRAY_SIZE(pmif_clock_names); + for (i = 0; i < arb->nclks; i++) + arb->clks[i].id = pmif_clock_names[i]; + + err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks); + if (err) { + dev_err(&pdev->dev, "Failed to get clocks: %d\n", err); + goto err_put_ctrl; + } + + err = clk_bulk_prepare_enable(arb->nclks, arb->clks); + if (err) { + dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err); + goto err_put_ctrl; + } + + ctrl->cmd = pmif_arb_cmd; + ctrl->read_cmd = pmif_spmi_read_cmd; + ctrl->write_cmd = pmif_spmi_write_cmd; + + chan_offset = PMIF_CHAN_OFFSET * arb->data->soc_chan; + arb->chan.ch_sta = PMIF_SWINF_0_STA + chan_offset; + arb->chan.wdata = PMIF_SWINF_0_WDATA_31_0 + chan_offset; + arb->chan.rdata = PMIF_SWINF_0_RDATA_31_0 + chan_offset; + arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset; + arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset; + + platform_set_drvdata(pdev, ctrl); + + err = spmi_controller_add(ctrl); + if (err) + goto err_domain_remove; + + return 0; + +err_domain_remove: + clk_bulk_disable_unprepare(arb->nclks, arb->clks); +err_put_ctrl: + spmi_controller_put(ctrl); + return err; +} + +static int mtk_spmi_remove(struct platform_device *pdev) +{ + struct spmi_controller *ctrl = platform_get_drvdata(pdev); + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + + clk_bulk_disable_unprepare(arb->nclks, arb->clks); + spmi_controller_remove(ctrl); + spmi_controller_put(ctrl); + return 0; +} + +static const struct of_device_id mtk_spmi_match_table[] = { + { + .compatible = "mediatek,mt6873-spmi", + .data = &mt6873_pmif_arb, + }, { + .compatible = "mediatek,mt8195-spmi", + .data = &mt8195_pmif_arb, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, mtk_spmi_match_table); + +static struct platform_driver mtk_spmi_driver = { + .driver = { + .name = "spmi-mtk", + .of_match_table = of_match_ptr(mtk_spmi_match_table), + }, + .probe = mtk_spmi_probe, + .remove = mtk_spmi_remove, +}; +module_platform_driver(mtk_spmi_driver); + +MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek SPMI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index bbbd311eda03..2113be40b5a9 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -261,20 +261,21 @@ static int 
pmic_arb_wait_for_done(struct spmi_controller *ctrl, if (status & PMIC_ARB_STATUS_DONE) { if (status & PMIC_ARB_STATUS_DENIED) { - dev_err(&ctrl->dev, "%s: transaction denied (0x%x)\n", - __func__, status); + dev_err(&ctrl->dev, "%s: %#x %#x: transaction denied (%#x)\n", + __func__, sid, addr, status); return -EPERM; } if (status & PMIC_ARB_STATUS_FAILURE) { - dev_err(&ctrl->dev, "%s: transaction failed (0x%x)\n", - __func__, status); + dev_err(&ctrl->dev, "%s: %#x %#x: transaction failed (%#x)\n", + __func__, sid, addr, status); + WARN_ON(1); return -EIO; } if (status & PMIC_ARB_STATUS_DROPPED) { - dev_err(&ctrl->dev, "%s: transaction dropped (0x%x)\n", - __func__, status); + dev_err(&ctrl->dev, "%s: %#x %#x: transaction dropped (%#x)\n", + __func__, sid, addr, status); return -EIO; } @@ -283,8 +284,8 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl, udelay(1); } - dev_err(&ctrl->dev, "%s: timeout, status 0x%x\n", - __func__, status); + dev_err(&ctrl->dev, "%s: %#x %#x: timeout, status %#x\n", + __func__, sid, addr, status); return -ETIMEDOUT; } @@ -333,24 +334,20 @@ static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid) return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid); } -static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, - u16 addr, u8 *buf, size_t len) +static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc, u8 sid, + u16 addr, size_t len, u32 *cmd, u32 *offset) { - struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); - unsigned long flags; u8 bc = len - 1; - u32 cmd; int rc; - u32 offset; rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, PMIC_ARB_CHANNEL_OBS); if (rc < 0) return rc; - offset = rc; + *offset = rc; if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { - dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", PMIC_ARB_MAX_TRANS_BYTES, len); return -EINVAL; } @@ -365,14 +362,24 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, else return -EINVAL; - cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc); + *cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc); + + return 0; +} + +static int pmic_arb_read_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd, + u32 offset, u8 sid, u16 addr, u8 *buf, + size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + u8 bc = len - 1; + int rc; - raw_spin_lock_irqsave(&pmic_arb->lock, flags); pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd); rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr, PMIC_ARB_CHANNEL_OBS); if (rc) - goto done; + return rc; pmic_arb_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0, min_t(u8, bc, 3)); @@ -380,30 +387,44 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, if (bc > 3) pmic_arb_read_data(pmic_arb, buf + 4, offset + PMIC_ARB_RDATA1, bc - 4); + return 0; +} -done: +static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, u8 *buf, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + unsigned long flags; + u32 cmd, offset; + int rc; + + rc = pmic_arb_fmt_read_cmd(pmic_arb, opc, sid, addr, len, &cmd, + &offset); + if (rc) + return rc; + + raw_spin_lock_irqsave(&pmic_arb->lock, flags); + rc = pmic_arb_read_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf, len); raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); + return rc; } -static int 
pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, - u16 addr, const u8 *buf, size_t len) +static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc, + u8 sid, u16 addr, size_t len, u32 *cmd, + u32 *offset) { - struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); - unsigned long flags; u8 bc = len - 1; - u32 cmd; int rc; - u32 offset; rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, PMIC_ARB_CHANNEL_RW); if (rc < 0) return rc; - offset = rc; + *offset = rc; if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { - dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", PMIC_ARB_MAX_TRANS_BYTES, len); return -EINVAL; } @@ -420,10 +441,19 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, else return -EINVAL; - cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc); + *cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc); + + return 0; +} + +static int pmic_arb_write_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd, + u32 offset, u8 sid, u16 addr, + const u8 *buf, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + u8 bc = len - 1; /* Write data to FIFOs */ - raw_spin_lock_irqsave(&pmic_arb->lock, flags); pmic_arb_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0, min_t(u8, bc, 3)); if (bc > 3) @@ -432,8 +462,62 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, /* Start the transaction */ pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd); - rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr, - PMIC_ARB_CHANNEL_RW); + return pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr, + PMIC_ARB_CHANNEL_RW); +} + +static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, const u8 *buf, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + unsigned long flags; + u32 cmd, offset; + int rc; + + rc = pmic_arb_fmt_write_cmd(pmic_arb, opc, sid, addr, len, &cmd, + &offset); + if (rc) + return rc; + + raw_spin_lock_irqsave(&pmic_arb->lock, flags); + rc = pmic_arb_write_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf, + len); + raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); + + return rc; +} + +static int pmic_arb_masked_write(struct spmi_controller *ctrl, u8 sid, u16 addr, + const u8 *buf, const u8 *mask, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + u32 read_cmd, read_offset, write_cmd, write_offset; + u8 temp[PMIC_ARB_MAX_TRANS_BYTES]; + unsigned long flags; + int rc, i; + + rc = pmic_arb_fmt_read_cmd(pmic_arb, SPMI_CMD_EXT_READL, sid, addr, len, + &read_cmd, &read_offset); + if (rc) + return rc; + + rc = pmic_arb_fmt_write_cmd(pmic_arb, SPMI_CMD_EXT_WRITEL, sid, addr, + len, &write_cmd, &write_offset); + if (rc) + return rc; + + raw_spin_lock_irqsave(&pmic_arb->lock, flags); + rc = pmic_arb_read_cmd_unlocked(ctrl, read_cmd, read_offset, sid, addr, + temp, len); + if (rc) + goto done; + + for (i = 0; i < len; i++) + temp[i] = (temp[i] & ~mask[i]) | (buf[i] & mask[i]); + + rc = pmic_arb_write_cmd_unlocked(ctrl, write_cmd, write_offset, sid, + addr, temp, len); +done: raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); return rc; @@ -482,6 +566,23 @@ static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len) d->irq); } +static int qpnpint_spmi_masked_write(struct irq_data *d, u8 reg, + const void *buf, const void 
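
The reason pmic_arb_read_cmd() and pmic_arb_write_cmd() are split into *_fmt_* and *_unlocked halves is visible in pmic_arb_masked_write(): the read, the per-byte merge, and the write-back all happen inside one raw_spin_lock_irqsave() section, so no other transaction can observe or clobber the intermediate state. The merge itself is plain read-modify-write; a standalone sketch of the rule applied in the loop:

    /*
     * Hypothetical standalone version of the merge in
     * pmic_arb_masked_write(): bits set in mask come from buf,
     * all other bits keep their current value.
     */
    static void spmi_masked_merge(u8 *tmp, const u8 *buf,
                                  const u8 *mask, size_t len)
    {
        size_t i;

        for (i = 0; i < len; i++)
            tmp[i] = (tmp[i] & ~mask[i]) | (buf[i] & mask[i]);
    }

With mask[i] == 0xff a byte degenerates to a plain write; with mask[i] == 0 it is left untouched.
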
*mask, + size_t len) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + u8 sid = hwirq_to_sid(d->hwirq); + u8 per = hwirq_to_per(d->hwirq); + int rc; + + rc = pmic_arb_masked_write(pmic_arb->spmic, sid, (per << 8) + reg, buf, + mask, len); + if (rc) + dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x rc=%d\n", + d->irq, rc); + return rc; +} + static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id) { u16 ppid = pmic_arb->apid_data[apid].ppid; @@ -600,18 +701,18 @@ static void qpnpint_irq_unmask(struct irq_data *d) static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type) { - struct spmi_pmic_arb_qpnpint_type type; + struct spmi_pmic_arb_qpnpint_type type = {0}; + struct spmi_pmic_arb_qpnpint_type mask; irq_flow_handler_t flow_handler; - u8 irq = hwirq_to_irq(d->hwirq); - - qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type)); + u8 irq_bit = BIT(hwirq_to_irq(d->hwirq)); + int rc; if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { - type.type |= BIT(irq); + type.type = irq_bit; if (flow_type & IRQF_TRIGGER_RISING) - type.polarity_high |= BIT(irq); + type.polarity_high = irq_bit; if (flow_type & IRQF_TRIGGER_FALLING) - type.polarity_low |= BIT(irq); + type.polarity_low = irq_bit; flow_handler = handle_edge_irq; } else { @@ -619,19 +720,23 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type) (flow_type & (IRQF_TRIGGER_LOW))) return -EINVAL; - type.type &= ~BIT(irq); /* level trig */ if (flow_type & IRQF_TRIGGER_HIGH) - type.polarity_high |= BIT(irq); + type.polarity_high = irq_bit; else - type.polarity_low |= BIT(irq); + type.polarity_low = irq_bit; flow_handler = handle_level_irq; } - qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type)); + mask.type = irq_bit; + mask.polarity_high = irq_bit; + mask.polarity_low = irq_bit; + + rc = qpnpint_spmi_masked_write(d, QPNPINT_REG_SET_TYPE, &type, &mask, + sizeof(type)); irq_set_handler_locked(d, flow_handler); - return 0; + return rc; } static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on) diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index 91fcf85e150a..5a58dac76c88 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -450,13 +450,13 @@ skip_for_abort: * after the down() -- that's necessary for the thread-shutdown * case. * - * complete_and_exit() goes even further than this -- it is safe in - * the case that the thread of the caller is going away (not just - * the structure) -- this is necessary for the module-remove case. - * This is important in preemption kernels, which transfer the flow - * of execution immediately upon a complete(). + * kthread_complete_and_exit() goes even further than this -- + * it is safe in the case that the thread of the caller is going away + * (not just the structure) -- this is necessary for the module-remove + * case. This is important in preemption kernels, which transfer the + * flow of execution immediately upon a complete(). */ - complete_and_exit(&dev->control_exit, 0); + kthread_complete_and_exit(&dev->control_exit, 0); } static int rtsx_polling_thread(void *__dev) @@ -501,7 +501,7 @@ static int rtsx_polling_thread(void *__dev) mutex_unlock(&dev->dev_mutex); } - complete_and_exit(&dev->polling_exit, 0); + kthread_complete_and_exit(&dev->polling_exit, 0); } /* @@ -682,7 +682,7 @@ static int rtsx_scan_thread(void *__dev) /* Should we unbind if no devices were detected? 
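
qpnpint_irq_set_type() previously read QPNPINT_REG_SET_TYPE over SPMI, OR-ed in this interrupt's bits, and wrote the whole block back, so a concurrent reconfiguration of a sibling interrupt in the same peripheral could be lost between the read and the write. The rewrite builds only this IRQ's bit in type/polarity_high/polarity_low, sets the same bit in all three mask bytes, and lets the arbiter perform the read-modify-write atomically under its lock. For a rising-edge request on bit 3 the values work out as (illustrative numbers only):

    /* flow_type = IRQF_TRIGGER_RISING, hwirq_to_irq(d->hwirq) == 3 */
    u8 irq_bit = BIT(3);                    /* 0x08 */

    type.type          = irq_bit;           /* edge triggered   */
    type.polarity_high = irq_bit;           /* rising edge      */
    type.polarity_low  = 0;                 /* zero-initialised */

    mask.type          = irq_bit;           /* touch only bit 3 */
    mask.polarity_high = irq_bit;           /* ... in each of   */
    mask.polarity_low  = irq_bit;           /* the three bytes  */

Note the level-trigger branch no longer needs the old `type.type &= ~BIT(irq)`: type is zero-initialised, and because mask.type covers the bit, the masked write clears it on the device.
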
*/ } - complete_and_exit(&dev->scanning_done, 0); + kthread_complete_and_exit(&dev->scanning_done, 0); } static void rtsx_init_options(struct rtsx_chip *chip) diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 489d19274f9a..23c94b927776 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/atomic.h> +#include <linux/dma/qcom_adm.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/module.h> @@ -290,6 +291,7 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base) { struct device *dev = msm_port->uart.dev; struct dma_slave_config conf; + struct qcom_adm_peripheral_config periph_conf = {}; struct msm_dma *dma; u32 crci = 0; int ret; @@ -308,7 +310,11 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base) conf.device_fc = true; conf.dst_addr = base + UARTDM_TF; conf.dst_maxburst = UARTDM_BURST_SIZE; - conf.slave_id = crci; + if (crci) { + conf.peripheral_config = &periph_conf; + conf.peripheral_size = sizeof(periph_conf); + periph_conf.crci = crci; + } ret = dmaengine_slave_config(dma->chan, &conf); if (ret) @@ -333,6 +339,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base) { struct device *dev = msm_port->uart.dev; struct dma_slave_config conf; + struct qcom_adm_peripheral_config periph_conf = {}; struct msm_dma *dma; u32 crci = 0; int ret; @@ -355,7 +362,11 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base) conf.device_fc = true; conf.src_addr = base + UARTDM_RF; conf.src_maxburst = UARTDM_BURST_SIZE; - conf.slave_id = crci; + if (crci) { + conf.peripheral_config = &periph_conf; + conf.peripheral_size = sizeof(periph_conf); + periph_conf.crci = crci; + } ret = dmaengine_slave_config(dma->chan, &conf); if (ret) diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index ea96e319c8a0..43afbb7c5ab9 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -83,13 +83,14 @@ static struct map_sysfs_entry size_attribute = static struct map_sysfs_entry offset_attribute = __ATTR(offset, S_IRUGO, map_offset_show, NULL); -static struct attribute *attrs[] = { +static struct attribute *map_attrs[] = { &name_attribute.attr, &addr_attribute.attr, &size_attribute.attr, &offset_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; +ATTRIBUTE_GROUPS(map); static void map_release(struct kobject *kobj) { @@ -119,7 +120,7 @@ static const struct sysfs_ops map_sysfs_ops = { static struct kobj_type map_attr_type = { .release = map_release, .sysfs_ops = &map_sysfs_ops, - .default_attrs = attrs, + .default_groups = map_groups, }; struct uio_portio { @@ -178,6 +179,7 @@ static struct attribute *portio_attrs[] = { &portio_porttype_attribute.attr, NULL, }; +ATTRIBUTE_GROUPS(portio); static void portio_release(struct kobject *kobj) { @@ -207,7 +209,7 @@ static const struct sysfs_ops portio_sysfs_ops = { static struct kobj_type portio_attr_type = { .release = portio_release, .sysfs_ops = &portio_sysfs_ops, - .default_attrs = portio_attrs, + .default_groups = portio_groups, }; static ssize_t name_show(struct device *dev, diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index 6b5cfa5b0673..1106f3376404 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -188,7 +188,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) return -ENOMEM; } - dma_set_coherent_mask(&pdev->dev, 
DMA_BIT_MASK(32)); + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "DMA enable failed\n"); + return ret; + } priv->uioinfo = uioinfo; spin_lock_init(&priv->lock); diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index da17be1ef64e..e3a49d837609 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -969,7 +969,7 @@ static int usbatm_do_heavy_init(void *arg) instance->thread = NULL; mutex_unlock(&instance->serialize); - complete_and_exit(&instance->thread_exited, ret); + kthread_complete_and_exit(&instance->thread_exited, ret); } static int usbatm_heavy_init(struct usbatm_data *instance) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 752439690fda..46dd11dcb3a8 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2547,7 +2547,7 @@ static int fsg_main_thread(void *common_) up_write(&common->filesem); /* Let fsg_unbind() know the thread has exited */ - complete_and_exit(&common->thread_notifier, 0); + kthread_complete_and_exit(&common->thread_notifier, 0); } diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c index 3f788794571a..f480d54f308c 100644 --- a/drivers/vdpa/alibaba/eni_vdpa.c +++ b/drivers/vdpa/alibaba/eni_vdpa.c @@ -58,7 +58,7 @@ static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa) return &eni_vdpa->ldev; } -static u64 eni_vdpa_get_features(struct vdpa_device *vdpa) +static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa) { struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa); u64 features = vp_legacy_get_features(ldev); @@ -69,7 +69,7 @@ static u64 eni_vdpa_get_features(struct vdpa_device *vdpa) return features; } -static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features) +static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features) { struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa); @@ -84,6 +84,13 @@ static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features) return 0; } +static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa) +{ + struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa); + + return vp_legacy_get_driver_features(ldev); +} + static u8 eni_vdpa_get_status(struct vdpa_device *vdpa) { struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa); @@ -401,8 +408,9 @@ static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa, } static const struct vdpa_config_ops eni_vdpa_ops = { - .get_features = eni_vdpa_get_features, - .set_features = eni_vdpa_set_features, + .get_device_features = eni_vdpa_get_device_features, + .set_driver_features = eni_vdpa_set_driver_features, + .get_driver_features = eni_vdpa_get_driver_features, .get_status = eni_vdpa_get_status, .set_status = eni_vdpa_set_status, .reset = eni_vdpa_reset, @@ -450,11 +458,6 @@ static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa) return num; } -static void eni_vdpa_free_irq_vectors(void *data) -{ - pci_free_irq_vectors(data); -} - static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; @@ -488,13 +491,6 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) eni_vdpa->vdpa.dma_dev = &pdev->dev; eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa); - ret = devm_add_action_or_reset(dev, eni_vdpa_free_irq_vectors, pdev); - if (ret) { - ENI_ERR(pdev, - "failed for adding devres for 
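
eni_vdpa is the first of several vDPA parents in this series converted from the old get_features/set_features pair to a three-way split: get_device_features reports what the device can offer, set_driver_features records what the guest negotiated, and the new get_driver_features lets the core read the negotiated set back (the vdpa.c hunk further down uses it to fill VDPA_ATTR_DEV_NEGOTIATED_FEATURES). Every converted driver (ifcvf, mlx5, vdpa_sim, vduse and vp_vdpa follow below) ends up with the same shape in its ops table:

    /* Excerpt only; ops unrelated to the conversion are unchanged. */
    static const struct vdpa_config_ops example_vdpa_ops = {
        .get_device_features = eni_vdpa_get_device_features,
        .set_driver_features = eni_vdpa_set_driver_features,
        .get_driver_features = eni_vdpa_get_driver_features,
    };
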
freeing irq vectors\n"); - goto err; - } - eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues, sizeof(*eni_vdpa->vring), GFP_KERNEL); diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 2808f1ba9f7b..7d41dfe48ade 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -143,8 +143,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev) IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr); break; case VIRTIO_PCI_CAP_DEVICE_CFG: - hw->net_cfg = get_cap_addr(hw, &cap); - IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg); + hw->dev_cfg = get_cap_addr(hw, &cap); + IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg); break; } @@ -153,7 +153,7 @@ next: } if (hw->common_cfg == NULL || hw->notify_base == NULL || - hw->isr == NULL || hw->net_cfg == NULL) { + hw->isr == NULL || hw->dev_cfg == NULL) { IFCVF_ERR(pdev, "Incomplete PCI capabilities\n"); return -EIO; } @@ -174,7 +174,7 @@ next: IFCVF_DBG(pdev, "PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n", hw->common_cfg, hw->notify_base, hw->isr, - hw->net_cfg, hw->notify_off_multiplier); + hw->dev_cfg, hw->notify_off_multiplier); return 0; } @@ -242,33 +242,54 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features) return 0; } -void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset, +u32 ifcvf_get_config_size(struct ifcvf_hw *hw) +{ + struct ifcvf_adapter *adapter; + u32 config_size; + + adapter = vf_to_adapter(hw); + switch (hw->dev_type) { + case VIRTIO_ID_NET: + config_size = sizeof(struct virtio_net_config); + break; + case VIRTIO_ID_BLOCK: + config_size = sizeof(struct virtio_blk_config); + break; + default: + config_size = 0; + IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type); + } + + return config_size; +} + +void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset, void *dst, int length) { u8 old_gen, new_gen, *p; int i; - WARN_ON(offset + length > sizeof(struct virtio_net_config)); + WARN_ON(offset + length > hw->config_size); do { old_gen = ifc_ioread8(&hw->common_cfg->config_generation); p = dst; for (i = 0; i < length; i++) - *p++ = ifc_ioread8(hw->net_cfg + offset + i); + *p++ = ifc_ioread8(hw->dev_cfg + offset + i); new_gen = ifc_ioread8(&hw->common_cfg->config_generation); } while (old_gen != new_gen); } -void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset, +void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset, const void *src, int length) { const u8 *p; int i; p = src; - WARN_ON(offset + length > sizeof(struct virtio_net_config)); + WARN_ON(offset + length > hw->config_size); for (i = 0; i < length; i++) - ifc_iowrite8(*p++, hw->net_cfg + offset + i); + ifc_iowrite8(*p++, hw->dev_cfg + offset + i); } static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features) diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index 09918af3ecf8..c486873f370a 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -71,12 +71,14 @@ struct ifcvf_hw { u64 hw_features; u32 dev_type; struct virtio_pci_common_cfg __iomem *common_cfg; - void __iomem *net_cfg; + void __iomem *dev_cfg; struct vring_info vring[IFCVF_MAX_QUEUES]; void __iomem * const *base; char config_msix_name[256]; struct vdpa_callback config_cb; unsigned int config_irq; + /* virtio-net or virtio-blk device config size */ + u32 config_size; }; struct ifcvf_adapter { @@ -105,9 +107,9 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev); int 
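
The net_cfg to dev_cfg rename above reflects that an IFC VF can expose virtio-blk as well as virtio-net, so the config window can no longer be bounds-checked against sizeof(struct virtio_net_config); the new hw->config_size, computed per device type by ifcvf_get_config_size(), takes its place. The access path keeps the standard virtio torn-read guard, reduced here to its skeleton:

    u8 old_gen, new_gen;

    do {
        old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
        /* byte-wise copy of 'length' bytes from hw->dev_cfg + offset */
        new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
    } while (old_gen != new_gen);   /* config changed mid-read: retry */

If the device bumps config_generation while the bytes are being copied, the whole read is retried, so callers never see a mix of old and new config fields.
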
ifcvf_start_hw(struct ifcvf_hw *hw); void ifcvf_stop_hw(struct ifcvf_hw *hw); void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid); -void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset, +void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset, void *dst, int length); -void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset, +void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset, const void *src, int length); u8 ifcvf_get_status(struct ifcvf_hw *hw); void ifcvf_set_status(struct ifcvf_hw *hw, u8 status); @@ -120,4 +122,5 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num); struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); int ifcvf_probed_virtio_net(struct ifcvf_hw *hw); +u32 ifcvf_get_config_size(struct ifcvf_hw *hw); #endif /* _IFCVF_H_ */ diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 6dc75ca70b37..d1a6b5ab543c 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -169,7 +169,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev) return &adapter->vf; } -static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev) +static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev) { struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); @@ -187,7 +187,7 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev) return features; } -static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features) +static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); int ret; @@ -201,6 +201,13 @@ static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features) return 0; } +static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return vf->req_features; +} + static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); @@ -366,24 +373,9 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev) static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev) { - struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - struct pci_dev *pdev = adapter->pdev; - size_t size; - - switch (vf->dev_type) { - case VIRTIO_ID_NET: - size = sizeof(struct virtio_net_config); - break; - case VIRTIO_ID_BLOCK: - size = sizeof(struct virtio_blk_config); - break; - default: - size = 0; - IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type); - } - return size; + return vf->config_size; } static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev, @@ -392,8 +384,7 @@ static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev, { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - WARN_ON(offset + len > sizeof(struct virtio_net_config)); - ifcvf_read_net_config(vf, offset, buf, len); + ifcvf_read_dev_config(vf, offset, buf, len); } static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev, @@ -402,8 +393,7 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev, { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - WARN_ON(offset + len > sizeof(struct virtio_net_config)); - ifcvf_write_net_config(vf, offset, buf, len); + ifcvf_write_dev_config(vf, offset, buf, len); } static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, @@ -443,8 +433,9 @@ static struct vdpa_notification_area 
ifcvf_get_vq_notification(struct vdpa_devic * implemented set_map()/dma_map()/dma_unmap() */ static const struct vdpa_config_ops ifc_vdpa_ops = { - .get_features = ifcvf_vdpa_get_features, - .set_features = ifcvf_vdpa_set_features, + .get_device_features = ifcvf_vdpa_get_device_features, + .set_driver_features = ifcvf_vdpa_set_driver_features, + .get_driver_features = ifcvf_vdpa_get_driver_features, .get_status = ifcvf_vdpa_get_status, .set_status = ifcvf_vdpa_set_status, .reset = ifcvf_vdpa_reset, @@ -542,6 +533,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name, vf->vring[i].irq = -EINVAL; vf->hw_features = ifcvf_get_hw_features(vf); + vf->config_size = ifcvf_get_config_size(vf); adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev; ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index cf59f7e17c6d..f648f1c54a0f 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -131,25 +131,24 @@ struct mlx5_vdpa_virtqueue { struct mlx5_vq_restore_info ri; }; -/* We will remove this limitation once mlx5_vdpa_alloc_resources() - * provides for driver space allocation - */ -#define MLX5_MAX_SUPPORTED_VQS 16 - static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx) { - if (unlikely(idx > mvdev->max_idx)) - return false; + if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) { + if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) + return idx < 2; + else + return idx < 3; + } - return true; + return idx <= mvdev->max_idx; } struct mlx5_vdpa_net { struct mlx5_vdpa_dev mvdev; struct mlx5_vdpa_net_resources res; struct virtio_net_config config; - struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS]; - struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1]; + struct mlx5_vdpa_virtqueue *vqs; + struct vdpa_callback *event_cbs; /* Serialize vq resources creation and destruction. 
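
With the fixed MLX5_MAX_SUPPORTED_VQS array gone, the vqs and event_cbs buffers are sized at device-add time, and is_index_valid() must be derived from the negotiated features instead of a constant. The resulting rules, spelled out (N = configured queue pairs):

    features negotiated         valid virtqueue indices
    neither MQ nor CTRL_VQ      0, 1
    CTRL_VQ without MQ          0, 1, 2        (index 2 is the control VQ)
    MQ (N pairs)                0 .. max_idx   (2N data VQs plus control VQ)
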
This is required * since memory map might change and we need to destroy and create @@ -876,8 +875,6 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); - if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type)) - MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); if (err) @@ -1218,7 +1215,7 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev) { int i; - for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++) + for (i = 0; i < ndev->mvdev.max_vqs; i++) suspend_vq(ndev, &ndev->vqs[i]); } @@ -1244,8 +1241,14 @@ static int create_rqt(struct mlx5_vdpa_net *ndev) void *in; int i, j; int err; + int num; - max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2, + if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) + num = 1; + else + num = ndev->cur_num_vqs / 2; + + max_rqt = min_t(int, roundup_pow_of_two(num), 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); if (max_rqt < 1) return -EOPNOTSUPP; @@ -1261,17 +1264,10 @@ static int create_rqt(struct mlx5_vdpa_net *ndev) MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q); MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt); list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]); - for (i = 0, j = 0; j < max_rqt; j++) { - if (!ndev->vqs[j].initialized) - continue; - - if (!vq_is_tx(ndev->vqs[j].index)) { - list[i] = cpu_to_be32(ndev->vqs[j].virtq_id); - i++; - } - } - MLX5_SET(rqtc, rqtc, rqt_actual_size, i); + for (i = 0, j = 0; i < max_rqt; i++, j += 2) + list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id); + MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt); err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); kfree(in); if (err) @@ -1292,7 +1288,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) int i, j; int err; - max_rqt = min_t(int, ndev->cur_num_vqs / 2, + max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2), 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); if (max_rqt < 1) return -EOPNOTSUPP; @@ -1308,16 +1304,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q); list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]); - for (i = 0, j = 0; j < num; j++) { - if (!ndev->vqs[j].initialized) - continue; + for (i = 0, j = 0; i < max_rqt; i++, j += 2) + list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); - if (!vq_is_tx(ndev->vqs[j].index)) { - list[i] = cpu_to_be32(ndev->vqs[j].virtq_id); - i++; - } - } - MLX5_SET(rqtc, rqtc, rqt_actual_size, i); + MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt); err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); kfree(in); if (err) @@ -1554,9 +1544,11 @@ static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps) return 0; clean_added: - for (--i; i >= cur_qps; --i) + for (--i; i >= 2 * cur_qps; --i) teardown_vq(ndev, &ndev->vqs[i]); + ndev->cur_num_vqs = 2 * cur_qps; + return err; } @@ -1581,9 +1573,6 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) break; } - if (newqps & (newqps - 1)) - break; - if (!change_num_qps(mvdev, newqps)) status = VIRTIO_NET_OK; @@ -1880,21 +1869,29 @@ static u64 mlx_to_vritio_features(u16 dev_features) return result; } -static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev) +static u64 get_supported_features(struct 
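
create_rqt() and modify_rqt() now fill the receive queue table arithmetically instead of scanning for initialized non-TX queues: data virtqueues come in (rx, tx) pairs, so the even indices are RX, and stepping j by 2 modulo the active queue count repeats the RX queue ids until the power-of-two-sized table is full. For three active pairs the fill works out to:

    /* num = 3 RX/TX pairs, max_rqt = roundup_pow_of_two(3) = 4;     */
    /* j runs 0, 2, 4, 6 and j % (2 * num) = j % 6 gives 0, 2, 4, 0. */
    list[0] = cpu_to_be32(ndev->vqs[0].virtq_id);   /* RX of pair 0 */
    list[1] = cpu_to_be32(ndev->vqs[2].virtq_id);   /* RX of pair 1 */
    list[2] = cpu_to_be32(ndev->vqs[4].virtq_id);   /* RX of pair 2 */
    list[3] = cpu_to_be32(ndev->vqs[0].virtq_id);   /* wraps around */

Padding the table by wrapping is also why handle_ctrl_mq() can drop the old `newqps & (newqps - 1)` check: queue-pair counts no longer have to be powers of two.
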
mlx5_core_dev *mdev) { - struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); - struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + u64 mlx_vdpa_features = 0; u16 dev_features; - dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask); - ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features); - if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0)) - ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1); - ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); - ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ); - ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR); - ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ); - ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS); + dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mdev, device_features_bits_mask); + mlx_vdpa_features |= mlx_to_vritio_features(dev_features); + if (MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_version_1_0)) + mlx_vdpa_features |= BIT_ULL(VIRTIO_F_VERSION_1); + mlx_vdpa_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); + mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ); + mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR); + mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ); + mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS); + mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU); + + return mlx_vdpa_features; +} + +static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); print_features(mvdev, ndev->mvdev.mlx_features, false); return ndev->mvdev.mlx_features; @@ -1972,7 +1969,7 @@ static void update_cvq_info(struct mlx5_vdpa_dev *mvdev) } } -static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) +static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features) { struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); @@ -1985,6 +1982,11 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) return err; ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; + if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) + ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); + else + ndev->cur_num_vqs = 2; + update_cvq_info(mvdev); return err; } @@ -2235,7 +2237,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev) clear_vqs_ready(ndev); mlx5_vdpa_destroy_mr(&ndev->mvdev); ndev->mvdev.status = 0; - memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs)); + ndev->cur_num_vqs = 0; + memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); ndev->mvdev.actual_features = 0; ++mvdev->generation; if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { @@ -2308,6 +2311,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev) } mlx5_vdpa_free_resources(&ndev->mvdev); mutex_destroy(&ndev->reslock); + kfree(ndev->event_cbs); + kfree(ndev->vqs); } static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx) @@ -2339,6 +2344,13 @@ static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx) return -EOPNOTSUPP; } +static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + + return mvdev->actual_features; +} + static const struct vdpa_config_ops mlx5_vdpa_ops = { .set_vq_address = mlx5_vdpa_set_vq_address, .set_vq_num = mlx5_vdpa_set_vq_num, @@ -2351,8 +2363,9 @@ static const struct vdpa_config_ops 
mlx5_vdpa_ops = { .get_vq_notification = mlx5_get_vq_notification, .get_vq_irq = mlx5_get_vq_irq, .get_vq_align = mlx5_vdpa_get_vq_align, - .get_features = mlx5_vdpa_get_features, - .set_features = mlx5_vdpa_set_features, + .get_device_features = mlx5_vdpa_get_device_features, + .set_driver_features = mlx5_vdpa_set_driver_features, + .get_driver_features = mlx5_vdpa_get_driver_features, .set_config_cb = mlx5_vdpa_set_config_cb, .get_vq_num_max = mlx5_vdpa_get_vq_num_max, .get_device_id = mlx5_vdpa_get_device_id, @@ -2545,18 +2558,39 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, return -EOPNOTSUPP; } - /* we save one virtqueue for control virtqueue should we require it */ max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues); - max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); + if (max_vqs < 2) { + dev_warn(mdev->device, + "%d virtqueues are supported. At least 2 are required\n", + max_vqs); + return -EAGAIN; + } + + if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) { + if (add_config->net.max_vq_pairs > max_vqs / 2) + return -EINVAL; + max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs); + } else { + max_vqs = 2; + } ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, name, false); if (IS_ERR(ndev)) return PTR_ERR(ndev); + ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features; ndev->mvdev.max_vqs = max_vqs; mvdev = &ndev->mvdev; mvdev->mdev = mdev; + + ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); + ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); + if (!ndev->vqs || !ndev->event_cbs) { + err = -ENOMEM; + goto err_alloc; + } + init_mvqs(ndev); mutex_init(&ndev->reslock); config = &ndev->config; @@ -2612,9 +2646,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, ndev->nb.notifier_call = event_handler; mlx5_notifier_register(mdev, &ndev->nb); - ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs); mvdev->vdev.mdev = &mgtdev->mgtdev; - err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1); + err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1); if (err) goto err_reg; @@ -2634,6 +2667,7 @@ err_mpfs: mlx5_mpfs_del_mac(pfmdev, config->mac); err_mtu: mutex_destroy(&ndev->reslock); +err_alloc: put_device(&mvdev->vdev.dev); return err; } @@ -2676,7 +2710,11 @@ static int mlx5v_probe(struct auxiliary_device *adev, mgtdev->mgtdev.ops = &mdev_ops; mgtdev->mgtdev.device = mdev->device; mgtdev->mgtdev.id_table = id_table; - mgtdev->mgtdev.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR); + mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | + BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP); + mgtdev->mgtdev.max_supported_vqs = + MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1; + mgtdev->mgtdev.supported_features = get_supported_features(mdev); mgtdev->madev = madev; err = vdpa_mgmtdev_register(&mgtdev->mgtdev); diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 09bbe53c3ac4..9846c9de4bfa 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -21,6 +21,14 @@ static LIST_HEAD(mdev_head); static DEFINE_MUTEX(vdpa_dev_mutex); static DEFINE_IDA(vdpa_index_ida); +void vdpa_set_status(struct vdpa_device *vdev, u8 status) +{ + mutex_lock(&vdev->cf_mutex); + vdev->config->set_status(vdev, status); + mutex_unlock(&vdev->cf_mutex); +} +EXPORT_SYMBOL(vdpa_set_status); + static struct genl_family vdpa_nl_family; static int 
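
mlx5_vdpa_dev_add() now sizes the device from the VDPA_ATTR_DEV_NET_CFG_MAX_VQP attribute rather than the removed compile-time cap: with the attribute present, max_vqs = min(firmware maximum, 2 * max_vq_pairs); without it the device defaults to a single pair. Worked numbers for a hypothetical device whose firmware reports 256 virtqueues, assuming mlx5_vdpa_max_qps(max_vqs) evaluates to max_vqs / 2:

    user request           max_vqs    vqs registered (2 * max_qps + 1)
    no MAX_VQP attribute   2          3   (1 RX + 1 TX + control VQ)
    max_vq_pairs = 8       16         17
    max_vq_pairs = 200     -EINVAL    (200 > 256 / 2)

A firmware maximum below 2 fails earlier with -EAGAIN, since even a single pair cannot be backed.
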
vdpa_dev_probe(struct device *d) @@ -52,8 +60,81 @@ static void vdpa_dev_remove(struct device *d) drv->remove(vdev); } +static int vdpa_dev_match(struct device *dev, struct device_driver *drv) +{ + struct vdpa_device *vdev = dev_to_vdpa(dev); + + /* Check override first, and if set, only use the named driver */ + if (vdev->driver_override) + return strcmp(vdev->driver_override, drv->name) == 0; + + /* Currently devices must be supported by all vDPA bus drivers */ + return 1; +} + +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct vdpa_device *vdev = dev_to_vdpa(dev); + const char *driver_override, *old; + char *cp; + + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); + if (!driver_override) + return -ENOMEM; + + cp = strchr(driver_override, '\n'); + if (cp) + *cp = '\0'; + + device_lock(dev); + old = vdev->driver_override; + if (strlen(driver_override)) { + vdev->driver_override = driver_override; + } else { + kfree(driver_override); + vdev->driver_override = NULL; + } + device_unlock(dev); + + kfree(old); + + return count; +} + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct vdpa_device *vdev = dev_to_vdpa(dev); + ssize_t len; + + device_lock(dev); + len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override); + device_unlock(dev); + + return len; +} +static DEVICE_ATTR_RW(driver_override); + +static struct attribute *vdpa_dev_attrs[] = { + &dev_attr_driver_override.attr, + NULL, +}; + +static const struct attribute_group vdpa_dev_group = { + .attrs = vdpa_dev_attrs, +}; +__ATTRIBUTE_GROUPS(vdpa_dev); + static struct bus_type vdpa_bus = { .name = "vdpa", + .dev_groups = vdpa_dev_groups, + .match = vdpa_dev_match, .probe = vdpa_dev_probe, .remove = vdpa_dev_remove, }; @@ -68,6 +149,7 @@ static void vdpa_release_dev(struct device *d) ida_simple_remove(&vdpa_index_ida, vdev->index); mutex_destroy(&vdev->cf_mutex); + kfree(vdev->driver_override); kfree(vdev); } @@ -300,6 +382,21 @@ void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev) } EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister); +static void vdpa_get_config_unlocked(struct vdpa_device *vdev, + unsigned int offset, + void *buf, unsigned int len) +{ + const struct vdpa_config_ops *ops = vdev->config; + + /* + * Config accesses aren't supposed to trigger before features are set. + * If it does happen we assume a legacy guest. + */ + if (!vdev->features_valid) + vdpa_set_features(vdev, 0, true); + ops->get_config(vdev, offset, buf, len); +} + /** * vdpa_get_config - Get one or more device configuration fields. * @vdev: vdpa device to operate on @@ -310,16 +407,8 @@ EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister); void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len) { - const struct vdpa_config_ops *ops = vdev->config; - mutex_lock(&vdev->cf_mutex); - /* - * Config accesses aren't supposed to trigger before features are set. - * If it does happen we assume a legacy guest. 
- */ - if (!vdev->features_valid) - vdpa_set_features(vdev, 0); - ops->get_config(vdev, offset, buf, len); + vdpa_get_config_unlocked(vdev, offset, buf, len); mutex_unlock(&vdev->cf_mutex); } EXPORT_SYMBOL_GPL(vdpa_get_config); @@ -414,6 +503,16 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m err = -EMSGSIZE; goto msg_err; } + if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, + mdev->max_supported_vqs)) { + err = -EMSGSIZE; + goto msg_err; + } + if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES, + mdev->supported_features, VDPA_ATTR_PAD)) { + err = -EMSGSIZE; + goto msg_err; + } genlmsg_end(msg, hdr); return 0; @@ -480,8 +579,9 @@ out: return msg->len; } -#define VDPA_DEV_NET_ATTRS_MASK ((1 << VDPA_ATTR_DEV_NET_CFG_MACADDR) | \ - (1 << VDPA_ATTR_DEV_NET_CFG_MTU)) +#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \ + BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \ + BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info) { @@ -500,12 +600,22 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) { macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]); memcpy(config.net.mac, macaddr, sizeof(config.net.mac)); - config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR); + config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR); } if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) { config.net.mtu = nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]); - config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MTU); + config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU); + } + if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) { + config.net.max_vq_pairs = + nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]); + if (!config.net.max_vq_pairs) { + NL_SET_ERR_MSG_MOD(info->extack, + "At least one pair of VQs is required"); + return -EINVAL; + } + config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP); } /* Skip checking capability if user didn't prefer to configure any @@ -707,7 +817,7 @@ static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev, { u16 val_u16; - if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0) + if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0) return 0; val_u16 = le16_to_cpu(config->max_virtqueue_pairs); @@ -720,7 +830,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms u64 features; u16 val_u16; - vdpa_get_config(vdev, 0, &config, sizeof(config)); + vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config)); if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac), config.mac)) @@ -734,7 +844,10 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16)) return -EMSGSIZE; - features = vdev->config->get_features(vdev); + features = vdev->config->get_driver_features(vdev); + if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features, + VDPA_ATTR_PAD)) + return -EMSGSIZE; return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config); } @@ -745,12 +858,23 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, { u32 device_id; void *hdr; + u8 status; int err; + mutex_lock(&vdev->cf_mutex); + status = vdev->config->get_status(vdev); + if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { + NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed"); + err = -EAGAIN; + goto out; + } + hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, 
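
The mask conversions from (1 << attr) to BIT_ULL(attr) above are more than cosmetic tidying: config_attr_mask and config.mask are 64-bit, and a plain int shift becomes undefined behaviour once an attribute number reaches 32. Today's VDPA_ATTR_* values are still small, so this is future-proofing rather than a live bug:

    u64 bad  = 1 << 33;        /* UB: shifts past the width of int */
    u64 good = BIT_ULL(33);    /* 1ULL << 33, well-defined 64-bit  */
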
VDPA_CMD_DEV_CONFIG_GET); - if (!hdr) - return -EMSGSIZE; + if (!hdr) { + err = -EMSGSIZE; + goto out; + } if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { err = -EMSGSIZE; @@ -774,11 +898,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, if (err) goto msg_err; + mutex_unlock(&vdev->cf_mutex); genlmsg_end(msg, hdr); return 0; msg_err: genlmsg_cancel(msg, hdr); +out: + mutex_unlock(&vdev->cf_mutex); return err; } diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 41b0cd17fcba..ddbe142af09a 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -399,14 +399,14 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa) return VDPASIM_QUEUE_ALIGN; } -static u64 vdpasim_get_features(struct vdpa_device *vdpa) +static u64 vdpasim_get_device_features(struct vdpa_device *vdpa) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); return vdpasim->dev_attr.supported_features; } -static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features) +static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); @@ -419,6 +419,13 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features) return 0; } +static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + return vdpasim->features; +} + static void vdpasim_set_config_cb(struct vdpa_device *vdpa, struct vdpa_callback *cb) { @@ -613,8 +620,9 @@ static const struct vdpa_config_ops vdpasim_config_ops = { .set_vq_state = vdpasim_set_vq_state, .get_vq_state = vdpasim_get_vq_state, .get_vq_align = vdpasim_get_vq_align, - .get_features = vdpasim_get_features, - .set_features = vdpasim_set_features, + .get_device_features = vdpasim_get_device_features, + .set_driver_features = vdpasim_set_driver_features, + .get_driver_features = vdpasim_get_driver_features, .set_config_cb = vdpasim_set_config_cb, .get_vq_num_max = vdpasim_get_vq_num_max, .get_device_id = vdpasim_get_device_id, @@ -642,8 +650,9 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = { .set_vq_state = vdpasim_set_vq_state, .get_vq_state = vdpasim_get_vq_state, .get_vq_align = vdpasim_get_vq_align, - .get_features = vdpasim_get_features, - .set_features = vdpasim_set_features, + .get_device_features = vdpasim_get_device_features, + .set_driver_features = vdpasim_set_driver_features, + .get_driver_features = vdpasim_get_driver_features, .set_config_cb = vdpasim_set_config_cb, .get_vq_num_max = vdpasim_get_vq_num_max, .get_device_id = vdpasim_get_device_id, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c index 76dd24abc791..d5324f6fd8c7 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c @@ -191,6 +191,8 @@ static struct vdpa_mgmt_dev mgmt_dev = { .ops = &vdpasim_net_mgmtdev_ops, .config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR | 1 << VDPA_ATTR_DEV_NET_CFG_MTU), + .max_supported_vqs = VDPASIM_NET_VQ_NUM, + .supported_features = VDPASIM_NET_FEATURES, }; static int __init vdpasim_net_init(void) diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index eddcb64a910a..f85d1a08ed87 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -573,14 +573,14 @@ static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa) return dev->vq_align; } -static u64 vduse_vdpa_get_features(struct 
vdpa_device *vdpa) +static u64 vduse_vdpa_get_device_features(struct vdpa_device *vdpa) { struct vduse_dev *dev = vdpa_to_vduse(vdpa); return dev->device_features; } -static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features) +static int vduse_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features) { struct vduse_dev *dev = vdpa_to_vduse(vdpa); @@ -588,6 +588,13 @@ static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features) return 0; } +static u64 vduse_vdpa_get_driver_features(struct vdpa_device *vdpa) +{ + struct vduse_dev *dev = vdpa_to_vduse(vdpa); + + return dev->driver_features; +} + static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa, struct vdpa_callback *cb) { @@ -721,8 +728,9 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = { .set_vq_state = vduse_vdpa_set_vq_state, .get_vq_state = vduse_vdpa_get_vq_state, .get_vq_align = vduse_vdpa_get_vq_align, - .get_features = vduse_vdpa_get_features, - .set_features = vduse_vdpa_set_features, + .get_device_features = vduse_vdpa_get_device_features, + .set_driver_features = vduse_vdpa_set_driver_features, + .get_driver_features = vduse_vdpa_get_driver_features, .set_config_cb = vduse_vdpa_set_config_cb, .get_vq_num_max = vduse_vdpa_get_vq_num_max, .get_device_id = vduse_vdpa_get_device_id, @@ -1357,7 +1365,6 @@ err_domain: err_str: vduse_dev_destroy(dev); err: - kvfree(config_buf); return ret; } @@ -1408,6 +1415,8 @@ static long vduse_ioctl(struct file *file, unsigned int cmd, } config.name[VDUSE_NAME_MAX - 1] = '\0'; ret = vduse_create_dev(&config, buf, control->api_version); + if (ret) + kvfree(buf); break; } case VDUSE_DESTROY_DEV: { diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index e3ff7875e123..a57e381e830b 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -53,14 +53,14 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa) return &vp_vdpa->mdev; } -static u64 vp_vdpa_get_features(struct vdpa_device *vdpa) +static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa) { struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); return vp_modern_get_features(mdev); } -static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features) +static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features) { struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); @@ -69,6 +69,13 @@ static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features) return 0; } +static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return vp_modern_get_driver_features(mdev); +} + static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) { struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); @@ -415,8 +422,9 @@ vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid) } static const struct vdpa_config_ops vp_vdpa_ops = { - .get_features = vp_vdpa_get_features, - .set_features = vp_vdpa_set_features, + .get_device_features = vp_vdpa_get_device_features, + .set_driver_features = vp_vdpa_set_driver_features, + .get_driver_features = vp_vdpa_get_driver_features, .get_status = vp_vdpa_get_status, .set_status = vp_vdpa_set_status, .reset = vp_vdpa_reset, diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c index 77e584093a23..7b428eac3d3e 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c @@ -67,7 
+67,7 @@ static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev, int hwirq; int ret; - hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq; + hwirq = vdev->mc_dev->irqs[index]->virq; if (irq->trigger) { free_irq(hwirq, irq); kfree(irq->name); @@ -137,7 +137,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, return vfio_set_trigger(vdev, index, fd); } - hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq; + hwirq = vdev->mc_dev->irqs[index]->virq; irq = &vdev->mc_irqs[index]; diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c index 362f91ec8845..352c725ccf18 100644 --- a/drivers/vfio/pci/vfio_pci_igd.c +++ b/drivers/vfio/pci/vfio_pci_igd.c @@ -309,13 +309,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev, if ((pos & 3) && size > 2) { u16 val; + __le16 lval; ret = pci_user_read_config_word(pdev, pos, &val); if (ret) return ret; - val = cpu_to_le16(val); - if (copy_to_user(buf + count - size, &val, 2)) + lval = cpu_to_le16(val); + if (copy_to_user(buf + count - size, &lval, 2)) return -EFAULT; pos += 2; @@ -324,13 +325,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev, while (size > 3) { u32 val; + __le32 lval; ret = pci_user_read_config_dword(pdev, pos, &val); if (ret) return ret; - val = cpu_to_le32(val); - if (copy_to_user(buf + count - size, &val, 4)) + lval = cpu_to_le32(val); + if (copy_to_user(buf + count - size, &lval, 4)) return -EFAULT; pos += 4; @@ -339,13 +341,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev, while (size >= 2) { u16 val; + __le16 lval; ret = pci_user_read_config_word(pdev, pos, &val); if (ret) return ret; - val = cpu_to_le16(val); - if (copy_to_user(buf + count - size, &val, 2)) + lval = cpu_to_le16(val); + if (copy_to_user(buf + count - size, &lval, 2)) return -EFAULT; pos += 2; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index f17490ab238f..9394aa9444c1 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -256,7 +256,7 @@ static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize) static void vfio_dma_bitmap_free(struct vfio_dma *dma) { - kfree(dma->bitmap); + kvfree(dma->bitmap); dma->bitmap = NULL; } diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index a09dedc79f68..05740cba1cd8 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -166,6 +166,7 @@ static int vhost_test_release(struct inode *inode, struct file *f) /* We do an extra flush before freeing memory, * since jobs can re-queue themselves. */ vhost_test_flush(n); + kfree(n->dev.vqs); kfree(n); return 0; } diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index e3c4f059b21a..851539807bc9 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -170,7 +170,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) * Userspace shouldn't remove status bits unless reset the * status to 0. 
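
The vfio_pci_igd hunks fix a sparse-visible type confusion rather than a runtime bug: val is a CPU-endian u16/u32, and assigning cpu_to_le16(val) back into it stores a __le16 value in a u16 variable. The generated code is the same on either endianness; the win is that with a separate lval of the annotated type, sparse can verify that every value copied to user space really went through a CPU-to-LE conversion:

    u16 val;
    __le16 lval;

    ret = pci_user_read_config_word(pdev, pos, &val);
    if (ret)
        return ret;

    lval = cpu_to_le16(val);               /* explicit CPU -> LE step */
    if (copy_to_user(buf + count - size, &lval, 2))
        return -EFAULT;
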
*/ - if (status != 0 && (ops->get_status(vdpa) & ~status) != 0) + if (status != 0 && (status_old & ~status) != 0) return -EINVAL; if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK)) @@ -178,11 +178,11 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) vhost_vdpa_unsetup_vq_irq(v, i); if (status == 0) { - ret = ops->reset(vdpa); + ret = vdpa_reset(vdpa); if (ret) return ret; } else - ops->set_status(vdpa, status); + vdpa_set_status(vdpa, status); if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) for (i = 0; i < nvqs; i++) @@ -195,7 +195,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v, struct vhost_vdpa_config *c) { struct vdpa_device *vdpa = v->vdpa; - long size = vdpa->config->get_config_size(vdpa); + size_t size = vdpa->config->get_config_size(vdpa); if (c->len == 0 || c->off > size) return -EINVAL; @@ -262,7 +262,7 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep) const struct vdpa_config_ops *ops = vdpa->config; u64 features; - features = ops->get_features(vdpa); + features = ops->get_device_features(vdpa); if (copy_to_user(featurep, &features, sizeof(features))) return -EFAULT; @@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) if (copy_from_user(&features, featurep, sizeof(features))) return -EFAULT; - if (vdpa_set_features(vdpa, features)) + if (vdpa_set_features(vdpa, features, false)) return -EINVAL; return 0; diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index e2757ff1c23d..96e312a3eac7 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -184,6 +184,25 @@ static inline void setindex(int index) vga_io_w(VGA_GFX_I, index); } +/* Check if the video mode is supported by the driver */ +static inline int check_mode_supported(void) +{ + /* non-x86 architectures treat orig_video_isVGA as a boolean flag */ +#if defined(CONFIG_X86) + /* only EGA and VGA in 16 color graphic mode are supported */ + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC && + screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC) + return -ENODEV; + + if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */ + screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */ + screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */ + screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */ + return -ENODEV; +#endif + return 0; +} + static void vga16fb_pan_var(struct fb_info *info, struct fb_var_screeninfo *var) { @@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void) vga16fb_setup(option); #endif + + ret = check_mode_supported(); + if (ret) + return ret; + ret = platform_driver_register(&vga16fb_driver); if (!ret) { diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig index f53740b941c0..2d3d98158121 100644 --- a/drivers/virt/nitro_enclaves/Kconfig +++ b/drivers/virt/nitro_enclaves/Kconfig @@ -14,3 +14,12 @@ config NITRO_ENCLAVES To compile this driver as a module, choose M here. The module will be called nitro_enclaves. + +config NITRO_ENCLAVES_MISC_DEV_TEST + bool "Tests for the misc device functionality of the Nitro Enclaves" + depends on NITRO_ENCLAVES && KUNIT=y + help + Enable KUnit tests for the misc device functionality of the Nitro + Enclaves. Select this option only if you will boot the kernel for + the purpose of running unit tests (e.g. under UML or qemu). If + unsure, say N. 
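
Back in the vhost-vdpa hunk above, the set-status path validates against the cached status_old instead of issuing a second get_status() call, and routes the actual update through vdpa_reset()/vdpa_set_status() so it happens under cf_mutex. The `(status_old & ~status) != 0` rule means userspace may only add status bits, never clear individual ones, unless it writes 0 for a full reset. With the virtio status bits (ACKNOWLEDGE=1, DRIVER=2, DRIVER_OK=4, FEATURES_OK=8):

    status_old = 0x0b (ACKNOWLEDGE | DRIVER | FEATURES_OK)
    status = 0x0f  ->  accepted, only adds DRIVER_OK
    status = 0x03  ->  -EINVAL, would clear FEATURES_OK
    status = 0x00  ->  accepted, full device reset via vdpa_reset()
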
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c index 6894ccb868a6..20c881b6a4b6 100644 --- a/drivers/virt/nitro_enclaves/ne_misc_dev.c +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c @@ -24,6 +24,7 @@ #include <linux/nitro_enclaves.h> #include <linux/pci.h> #include <linux/poll.h> +#include <linux/range.h> #include <linux/slab.h> #include <linux/types.h> #include <uapi/linux/vm_sockets.h> @@ -126,6 +127,16 @@ struct ne_cpu_pool { static struct ne_cpu_pool ne_cpu_pool; /** + * struct ne_phys_contig_mem_regions - Contiguous physical memory regions. + * @num: The number of regions that currently has. + * @regions: The array of physical memory regions. + */ +struct ne_phys_contig_mem_regions { + unsigned long num; + struct range *regions; +}; + +/** * ne_check_enclaves_created() - Verify if at least one enclave has been created. * @void: No parameters provided. * @@ -825,6 +836,72 @@ static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave, } /** + * ne_sanity_check_phys_mem_region() - Sanity check the start address and the size + * of a physical memory region. + * @phys_mem_region_paddr : Physical start address of the region to be sanity checked. + * @phys_mem_region_size : Length of the region to be sanity checked. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_sanity_check_phys_mem_region(u64 phys_mem_region_paddr, + u64 phys_mem_region_size) +{ + if (phys_mem_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Physical mem region size is not multiple of 2 MiB\n"); + + return -EINVAL; + } + + if (!IS_ALIGNED(phys_mem_region_paddr, NE_MIN_MEM_REGION_SIZE)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Physical mem region address is not 2 MiB aligned\n"); + + return -EINVAL; + } + + return 0; +} + +/** + * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent + * regions if they are physically contiguous. + * @phys_contig_regions : Private data associated with the contiguous physical memory regions. + * @page_paddr : Physical start address of the region to be added. + * @page_size : Length of the region to be added. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int +ne_merge_phys_contig_memory_regions(struct ne_phys_contig_mem_regions *phys_contig_regions, + u64 page_paddr, u64 page_size) +{ + unsigned long num = phys_contig_regions->num; + int rc = 0; + + rc = ne_sanity_check_phys_mem_region(page_paddr, page_size); + if (rc < 0) + return rc; + + /* Physically contiguous, just merge */ + if (num && (phys_contig_regions->regions[num - 1].end + 1) == page_paddr) { + phys_contig_regions->regions[num - 1].end += page_size; + } else { + phys_contig_regions->regions[num].start = page_paddr; + phys_contig_regions->regions[num].end = page_paddr + page_size - 1; + phys_contig_regions->num++; + } + + return 0; +} + +/** * ne_set_user_memory_region_ioctl() - Add user space memory region to the slot * associated with the current enclave. * @ne_enclave : Private data associated with the current enclave. 
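The merge helper added above only ever coalesces a new region into the most recently recorded one, and only when the new start is exactly one byte past the recorded end; anything else opens a fresh range, so discontiguous or out-of-order pages simply cost one range entry each. A small userspace model of the merge step (the 2 MiB sanity checks are elided, addresses are hypothetical):

#include <stdio.h>

struct range {
	unsigned long long start;
	unsigned long long end;
};

static struct range regions[16];
static unsigned long nregions;

/* Simplified model of ne_merge_phys_contig_memory_regions(). */
static void merge_region(unsigned long long paddr, unsigned long long size)
{
	if (nregions && regions[nregions - 1].end + 1 == paddr) {
		/* physically contiguous with the previous region: extend it */
		regions[nregions - 1].end += size;
	} else {
		regions[nregions].start = paddr;
		regions[nregions].end = paddr + size - 1;
		nregions++;
	}
}

int main(void)
{
	merge_region(0x200000, 0x200000);	/* range 0: [0x200000, 0x3fffff] */
	merge_region(0x400000, 0x200000);	/* contiguous: range 0 grows to 0x5fffff */
	merge_region(0xa00000, 0x200000);	/* gap: opens range 1 */

	printf("%lu ranges, range 0 ends at 0x%llx\n", nregions, regions[0].end);
	return 0;
}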
@@ -843,9 +920,8 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, unsigned long max_nr_pages = 0; unsigned long memory_size = 0; struct ne_mem_region *ne_mem_region = NULL; - unsigned long nr_phys_contig_mem_regions = 0; struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev; - struct page **phys_contig_mem_regions = NULL; + struct ne_phys_contig_mem_regions phys_contig_mem_regions = {}; int rc = -EINVAL; rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region); @@ -866,9 +942,10 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, goto free_mem_region; } - phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions), - GFP_KERNEL); - if (!phys_contig_mem_regions) { + phys_contig_mem_regions.regions = kcalloc(max_nr_pages, + sizeof(*phys_contig_mem_regions.regions), + GFP_KERNEL); + if (!phys_contig_mem_regions.regions) { rc = -ENOMEM; goto free_mem_region; @@ -902,26 +979,18 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, if (rc < 0) goto put_pages; - /* - * TODO: Update once handled non-contiguous memory regions - * received from user space or contiguous physical memory regions - * larger than 2 MiB e.g. 8 MiB. - */ - phys_contig_mem_regions[i] = ne_mem_region->pages[i]; + rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions, + page_to_phys(ne_mem_region->pages[i]), + page_size(ne_mem_region->pages[i])); + if (rc < 0) + goto put_pages; memory_size += page_size(ne_mem_region->pages[i]); ne_mem_region->nr_pages++; } while (memory_size < mem_region.memory_size); - /* - * TODO: Update once handled non-contiguous memory regions received - * from user space or contiguous physical memory regions larger than - * 2 MiB e.g. 8 MiB. - */ - nr_phys_contig_mem_regions = ne_mem_region->nr_pages; - - if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) > + if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) > ne_enclave->max_mem_regions) { dev_err_ratelimited(ne_misc_dev.this_device, "Reached max memory regions %lld\n", @@ -932,27 +1001,13 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, goto put_pages; } - for (i = 0; i < nr_phys_contig_mem_regions; i++) { - u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]); - u64 phys_region_size = page_size(phys_contig_mem_regions[i]); - - if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) { - dev_err_ratelimited(ne_misc_dev.this_device, - "Physical mem region size is not multiple of 2 MiB\n"); - - rc = -EINVAL; - - goto put_pages; - } - - if (!IS_ALIGNED(phys_region_addr, NE_MIN_MEM_REGION_SIZE)) { - dev_err_ratelimited(ne_misc_dev.this_device, - "Physical mem region address is not 2 MiB aligned\n"); - - rc = -EINVAL; + for (i = 0; i < phys_contig_mem_regions.num; i++) { + u64 phys_region_addr = phys_contig_mem_regions.regions[i].start; + u64 phys_region_size = range_len(&phys_contig_mem_regions.regions[i]); + rc = ne_sanity_check_phys_mem_region(phys_region_addr, phys_region_size); + if (rc < 0) goto put_pages; - } } ne_mem_region->memory_size = mem_region.memory_size; @@ -960,13 +1015,13 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list); - for (i = 0; i < nr_phys_contig_mem_regions; i++) { + for (i = 0; i < phys_contig_mem_regions.num; i++) { struct ne_pci_dev_cmd_reply cmd_reply = {}; struct slot_add_mem_req slot_add_mem_req = {}; slot_add_mem_req.slot_uid = ne_enclave->slot_uid; - 
slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]); - slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]); + slot_add_mem_req.paddr = phys_contig_mem_regions.regions[i].start; + slot_add_mem_req.size = range_len(&phys_contig_mem_regions.regions[i]); rc = ne_do_request(pdev, SLOT_ADD_MEM, &slot_add_mem_req, sizeof(slot_add_mem_req), @@ -975,7 +1030,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, dev_err_ratelimited(ne_misc_dev.this_device, "Error in slot add mem [rc=%d]\n", rc); - kfree(phys_contig_mem_regions); + kfree(phys_contig_mem_regions.regions); /* * Exit here without put pages as memory regions may @@ -988,7 +1043,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, ne_enclave->nr_mem_regions++; } - kfree(phys_contig_mem_regions); + kfree(phys_contig_mem_regions.regions); return 0; @@ -996,7 +1051,7 @@ put_pages: for (i = 0; i < ne_mem_region->nr_pages; i++) put_page(ne_mem_region->pages[i]); free_mem_region: - kfree(phys_contig_mem_regions); + kfree(phys_contig_mem_regions.regions); kfree(ne_mem_region->pages); kfree(ne_mem_region); @@ -1702,8 +1757,37 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; } +#if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST) +#include "ne_misc_dev_test.c" + +static inline int ne_misc_dev_test_init(void) +{ + return __kunit_test_suites_init(ne_misc_dev_test_suites); +} + +static inline void ne_misc_dev_test_exit(void) +{ + __kunit_test_suites_exit(ne_misc_dev_test_suites); +} +#else +static inline int ne_misc_dev_test_init(void) +{ + return 0; +} + +static inline void ne_misc_dev_test_exit(void) +{ +} +#endif + static int __init ne_init(void) { + int rc = 0; + + rc = ne_misc_dev_test_init(); + if (rc < 0) + return rc; + mutex_init(&ne_cpu_pool.mutex); return pci_register_driver(&ne_pci_driver); @@ -1714,6 +1798,8 @@ static void __exit ne_exit(void) pci_unregister_driver(&ne_pci_driver); ne_teardown_cpu_pool(); + + ne_misc_dev_test_exit(); } module_init(ne_init); diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c new file mode 100644 index 000000000000..265797bed0ea --- /dev/null +++ b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <kunit/test.h> + +#define MAX_PHYS_REGIONS 16 +#define INVALID_VALUE (~0ull) + +struct ne_phys_regions_test { + u64 paddr; + u64 size; + int expect_rc; + unsigned long expect_num; + u64 expect_last_paddr; + u64 expect_last_size; +} phys_regions_test_cases[] = { + /* + * Add the region from 0x1000 to (0x1000 + 0x200000 - 1): + * Expected result: + * Failed, start address is not 2M-aligned + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 0 + * regions = {} + */ + {0x1000, 0x200000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE}, + + /* + * Add the region from 0x200000 to (0x200000 + 0x1000 - 1): + * Expected result: + * Failed, size is not 2M-aligned + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 0 + * regions = {} + */ + {0x200000, 0x1000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE}, + + /* + * Add the region from 0x200000 to (0x200000 + 0x200000 - 1): + * Expected result: + * Successful + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 1 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * } + */ + {0x200000, 0x200000, 0, 1, 0x200000, 0x200000}, + + /* + * Add the region from 0x0 to (0x0 + 
0x200000 - 1): + * Expected result: + * Successful + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 2 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * } + */ + {0x0, 0x200000, 0, 2, 0x0, 0x200000}, + + /* + * Add the region from 0x600000 to (0x600000 + 0x400000 - 1): + * Expected result: + * Successful + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 3 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * {start=0x600000, end=0x9fffff}, // len=0x400000 + * } + */ + {0x600000, 0x400000, 0, 3, 0x600000, 0x400000}, + + /* + * Add the region from 0xa00000 to (0xa00000 + 0x400000 - 1): + * Expected result: + * Successful, merging case! + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 3 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * {start=0x600000, end=0xdfffff}, // len=0x800000 + * } + */ + {0xa00000, 0x400000, 0, 3, 0x600000, 0x800000}, + + /* + * Add the region from 0x1000 to (0x1000 + 0x200000 - 1): + * Expected result: + * Failed, start address is not 2M-aligned + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 3 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * {start=0x600000, end=0xdfffff}, // len=0x800000 + * } + */ + {0x1000, 0x200000, -EINVAL, 3, 0x600000, 0x800000}, +}; + +static void ne_misc_dev_test_merge_phys_contig_memory_regions(struct kunit *test) +{ + struct ne_phys_contig_mem_regions phys_contig_mem_regions = {}; + int rc = 0; + int i = 0; + + phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS, + sizeof(*phys_contig_mem_regions.regions), + GFP_KERNEL); + KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions); + + for (i = 0; i < ARRAY_SIZE(phys_regions_test_cases); i++) { + struct ne_phys_regions_test *test_case = &phys_regions_test_cases[i]; + unsigned long num = 0; + + rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions, + test_case->paddr, test_case->size); + KUNIT_EXPECT_EQ(test, rc, test_case->expect_rc); + KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.num, test_case->expect_num); + + if (test_case->expect_last_paddr == INVALID_VALUE) + continue; + + num = phys_contig_mem_regions.num; + KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start, + test_case->expect_last_paddr); + KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]), + test_case->expect_last_size); + } + + kunit_kfree(test, phys_contig_mem_regions.regions); +} + +static struct kunit_case ne_misc_dev_test_cases[] = { + KUNIT_CASE(ne_misc_dev_test_merge_phys_contig_memory_regions), + {} +}; + +static struct kunit_suite ne_misc_dev_test_suite = { + .name = "ne_misc_dev_test", + .test_cases = ne_misc_dev_test_cases, +}; + +static struct kunit_suite *ne_misc_dev_test_suites[] = { + &ne_misc_dev_test_suite, + NULL +}; diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c index 40b49ec8e30b..6b81e8f3a5dc 100644 --- a/drivers/virt/nitro_enclaves/ne_pci_dev.c +++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c @@ -376,7 +376,6 @@ static void ne_teardown_msix(struct pci_dev *pdev) free_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_pci_dev); flush_work(&ne_pci_dev->notify_work); - flush_workqueue(ne_pci_dev->event_wq); 
destroy_workqueue(ne_pci_dev->event_wq); free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 236081afe9a2..00ac9db792a4 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -204,6 +204,12 @@ int virtio_finalize_features(struct virtio_device *dev) } EXPORT_SYMBOL_GPL(virtio_finalize_features); +void virtio_reset_device(struct virtio_device *dev) +{ + dev->config->reset(dev); +} +EXPORT_SYMBOL_GPL(virtio_reset_device); + static int virtio_dev_probe(struct device *_d) { int err, i; diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c22ff0117b46..f4c34a2a6b8e 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -1056,7 +1056,7 @@ static void remove_common(struct virtio_balloon *vb) return_free_pages_to_mm(vb, ULONG_MAX); /* Now we reset the device so we can clean up the queues. */ - vb->vdev->config->reset(vb->vdev); + virtio_reset_device(vb->vdev); vb->vdev->config->del_vqs(vb->vdev); } diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index ce51ae165943..3aa46703872d 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -347,7 +347,7 @@ static void virtinput_remove(struct virtio_device *vdev) spin_unlock_irqrestore(&vi->lock, flags); input_unregister_device(vi->idev); - vdev->config->reset(vdev); + virtio_reset_device(vdev); while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL) kfree(buf); vdev->config->del_vqs(vdev); diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 96e5a8782769..38becd8d578c 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -20,6 +20,7 @@ #include <linux/mutex.h> #include <linux/bitmap.h> #include <linux/lockdep.h> +#include <linux/log2.h> #include <acpi/acpi_numa.h> @@ -592,7 +593,7 @@ static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm) return -ENOMEM; mutex_lock(&vm->hotplug_mutex); - if (new_bitmap) + if (vm->sbm.sb_states) memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE); old_bitmap = vm->sbm.sb_states; @@ -1120,15 +1121,18 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn, */ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages) { - const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES; + unsigned long order = MAX_ORDER - 1; unsigned long i; /* - * We are always called at least with MAX_ORDER_NR_PAGES - * granularity/alignment (e.g., the way subblocks work). All pages - * inside such a block are alike. + * We might get called for ranges that don't cover properly aligned + * MAX_ORDER - 1 pages; however, we can only online properly aligned + * pages with an order of MAX_ORDER - 1 at maximum. */ - for (i = 0; i < nr_pages; i += max_nr_pages) { + while (!IS_ALIGNED(pfn | nr_pages, 1 << order)) + order--; + + for (i = 0; i < nr_pages; i += 1 << order) { struct page *page = pfn_to_page(pfn + i); /* @@ -1138,14 +1142,12 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages) * alike. 
*/ if (PageDirty(page)) { - virtio_mem_clear_fake_offline(pfn + i, max_nr_pages, - false); - generic_online_page(page, MAX_ORDER - 1); + virtio_mem_clear_fake_offline(pfn + i, 1 << order, false); + generic_online_page(page, order); } else { - virtio_mem_clear_fake_offline(pfn + i, max_nr_pages, - true); - free_contig_range(pfn + i, max_nr_pages); - adjust_managed_page_count(page, max_nr_pages); + virtio_mem_clear_fake_offline(pfn + i, 1 << order, true); + free_contig_range(pfn + i, 1 << order); + adjust_managed_page_count(page, 1 << order); } } } @@ -1228,28 +1230,46 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn, page_ref_inc(pfn_to_page(pfn + i)); } -static void virtio_mem_online_page_cb(struct page *page, unsigned int order) +static void virtio_mem_online_page(struct virtio_mem *vm, + struct page *page, unsigned int order) { - const unsigned long addr = page_to_phys(page); - unsigned long id, sb_id; - struct virtio_mem *vm; + const unsigned long start = page_to_phys(page); + const unsigned long end = start + PFN_PHYS(1 << order); + unsigned long addr, next, id, sb_id, count; bool do_online; - rcu_read_lock(); - list_for_each_entry_rcu(vm, &virtio_mem_devices, next) { - if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order))) - continue; + /* + * We can get called with any order up to MAX_ORDER - 1. If our + * subblock size is smaller than that and we have a mixture of plugged + * and unplugged subblocks within such a page, we have to process in + * smaller granularity. In that case we'll adjust the order exactly once + * within the loop. + */ + for (addr = start; addr < end; ) { + next = addr + PFN_PHYS(1 << order); if (vm->in_sbm) { - /* - * We exploit here that subblocks have at least - * MAX_ORDER_NR_PAGES size/alignment - so we cannot - * cross subblocks within one call. - */ id = virtio_mem_phys_to_mb_id(addr); sb_id = virtio_mem_phys_to_sb_id(vm, addr); - do_online = virtio_mem_sbm_test_sb_plugged(vm, id, - sb_id, 1); + count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1; + + if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) { + /* Fully plugged. */ + do_online = true; + } else if (count == 1 || + virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) { + /* Fully unplugged. */ + do_online = false; + } else { + /* + * Mixture, process sub-blocks instead. This + * will be at least the size of a pageblock. + * We'll run into this case exactly once. + */ + order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT; + do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1); + continue; + } } else { /* * If the whole block is marked fake offline, keep @@ -1260,18 +1280,38 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order) VIRTIO_MEM_BBM_BB_FAKE_OFFLINE; } + if (do_online) + generic_online_page(pfn_to_page(PFN_DOWN(addr)), order); + else + virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order, + false); + addr = next; + } +} + +static void virtio_mem_online_page_cb(struct page *page, unsigned int order) +{ + const unsigned long addr = page_to_phys(page); + struct virtio_mem *vm; + + rcu_read_lock(); + list_for_each_entry_rcu(vm, &virtio_mem_devices, next) { + /* + * Pages we're onlining will never cross memory blocks and, + * therefore, not virtio-mem devices. + */ + if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order))) + continue; + /* - * virtio_mem_set_fake_offline() might sleep, we don't need - * the device anymore. See virtio_mem_remove() how races + * virtio_mem_set_fake_offline() might sleep. 
We can safely + * drop the RCU lock at this point because the device + * cannot go away. See virtio_mem_remove() how races * between memory onlining and device removal are handled. */ rcu_read_unlock(); - if (do_online) - generic_online_page(page, order); - else - virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order, - false); + virtio_mem_online_page(vm, page, order); return; } rcu_read_unlock(); @@ -2438,8 +2478,6 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm) /* * We want subblocks to span at least MAX_ORDER_NR_PAGES and * pageblock_nr_pages pages. This: - * - Simplifies our page onlining code (virtio_mem_online_page_cb) - * and fake page onlining code (virtio_mem_fake_online). * - Is required for now for alloc_contig_range() to work reliably - * it doesn't properly handle smaller granularity on ZONE_NORMAL. */ @@ -2850,7 +2888,7 @@ static void virtio_mem_remove(struct virtio_device *vdev) virtio_mem_deinit_hotplug(vm); /* reset the device and cleanup the queues */ - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); kfree(vm); diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index b3f8128b7983..34141b9abe27 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -138,7 +138,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; if (q_pfn >> 32) { dev_err(&vp_dev->pci_dev->dev, - "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n", + "platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n", 0x1ULL << (32 + PAGE_SHIFT - 30)); err = -E2BIG; goto out_del_vq; diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c index 9b97680dd02b..677d1f68bc9b 100644 --- a/drivers/virtio/virtio_pci_legacy_dev.c +++ b/drivers/virtio/virtio_pci_legacy_dev.c @@ -45,8 +45,10 @@ int vp_legacy_probe(struct virtio_pci_legacy_device *ldev) return rc; ldev->ioaddr = pci_iomap(pci_dev, 0, 0); - if (!ldev->ioaddr) + if (!ldev->ioaddr) { + rc = -EIO; goto err_iomap; + } ldev->isr = ldev->ioaddr + VIRTIO_PCI_ISR; diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index e11ed748e661..e8b3ff2b9fbc 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -345,7 +345,7 @@ err_map_common: EXPORT_SYMBOL_GPL(vp_modern_probe); /* - * vp_modern_probe: remove and cleanup the modern virtio pci device + * vp_modern_remove: remove and cleanup the modern virtio pci device * @mdev: the modern virtio-pci device */ void vp_modern_remove(struct virtio_pci_modern_device *mdev) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 028b05d44546..962f1477b1fa 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1197,8 +1197,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, if (virtqueue_use_indirect(_vq, total_sg)) { err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); - if (err != -ENOMEM) + if (err != -ENOMEM) { + END_USE(vq); return err; + } /* fall back on direct */ } diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index f85f860bc10b..7767a7f0119b 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -91,9 +91,8 @@ static u8 virtio_vdpa_get_status(struct virtio_device *vdev) static void virtio_vdpa_set_status(struct 
virtio_device *vdev, u8 status) { struct vdpa_device *vdpa = vd_get_vdpa(vdev); - const struct vdpa_config_ops *ops = vdpa->config; - return ops->set_status(vdpa, status); + return vdpa_set_status(vdpa, status); } static void virtio_vdpa_reset(struct virtio_device *vdev) @@ -308,7 +307,7 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev) struct vdpa_device *vdpa = vd_get_vdpa(vdev); const struct vdpa_config_ops *ops = vdpa->config; - return ops->get_features(vdpa); + return ops->get_device_features(vdpa); } static int virtio_vdpa_finalize_features(struct virtio_device *vdev) @@ -318,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - return vdpa_set_features(vdpa, vdev->features); + return vdpa_set_features(vdpa, vdev->features, false); } static const char *virtio_vdpa_bus_name(struct virtio_device *vdev) diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c index e4f336111edc..6cef6e2edb89 100644 --- a/drivers/w1/slaves/w1_ds28e04.c +++ b/drivers/w1/slaves/w1_ds28e04.c @@ -32,7 +32,7 @@ static int w1_strong_pullup = 1; module_param_named(strong_pullup, w1_strong_pullup, int, 0); /* enable/disable CRC checking on DS28E04-100 memory accesses */ -static char w1_enable_crccheck = 1; +static bool w1_enable_crccheck = true; #define W1_EEPROM_SIZE 512 #define W1_PAGE_COUNT 16 @@ -339,32 +339,18 @@ static BIN_ATTR_RW(pio, 1); static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (put_user(w1_enable_crccheck + 0x30, buf)) - return -EFAULT; - - return sizeof(w1_enable_crccheck); + return sysfs_emit(buf, "%d\n", w1_enable_crccheck); } static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - char val; - - if (count != 1 || !buf) - return -EINVAL; + int err = kstrtobool(buf, &w1_enable_crccheck); - if (get_user(val, buf)) - return -EFAULT; + if (err) + return err; - /* convert to decimal */ - val = val - 0x30; - if (val != 0 && val != 1) - return -EINVAL; - - /* set the new value */ - w1_enable_crccheck = val; - - return sizeof(w1_enable_crccheck); + return count; } static DEVICE_ATTR_RW(crccheck); diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index ca70c5f03206..565578002d79 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -1785,7 +1785,7 @@ static ssize_t alarms_store(struct device *device, u8 new_config_register[3]; /* array of data to be written */ int temp, ret; char *token = NULL; - s8 tl, th, tt; /* 1 byte per value + temp ring order */ + s8 tl, th; /* 1 byte per value + temp ring order */ char *p_args, *orig; p_args = orig = kmalloc(size, GFP_KERNEL); @@ -1836,9 +1836,8 @@ static ssize_t alarms_store(struct device *device, th = int_to_short(temp); /* Reorder if required th and tl */ - if (tl > th) { - tt = tl; tl = th; th = tt; - } + if (tl > th) + swap(tl, th); /* * Read the scratchpad to change only the required bits diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1dc86eb1361a..c8fa79da23b3 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -207,6 +207,7 @@ config DA9055_WATCHDOG config DA9063_WATCHDOG tristate "Dialog DA9063 Watchdog" depends on MFD_DA9063 || COMPILE_TEST + depends on I2C select WATCHDOG_CORE help Support for the watchdog in the DA9063 PMIC. 
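The w1_ds28e04 conversion earlier in this section is worth dwelling on: sysfs show/store callbacks receive kernel buffers, so the old get_user()/put_user() calls were treating kernel memory as userspace. The idiomatic replacement parses the buffer with kstrtobool(), which accepts 0/1, y/n and on/off, and formats output with sysfs_emit(), which bounds the write to one page. A minimal sketch of that attribute pattern for a hypothetical driver flag (names invented for illustration):

#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static bool feature_enabled = true;	/* hypothetical module state */

static ssize_t feature_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	/* sysfs_emit() refuses to write past the PAGE_SIZE sysfs buffer */
	return sysfs_emit(buf, "%d\n", feature_enabled);
}

static ssize_t feature_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int err = kstrtobool(buf, &feature_enabled);

	if (err)
		return err;

	/* report the whole write as consumed */
	return count;
}
static DEVICE_ATTR_RW(feature);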
@@ -680,10 +681,10 @@ config MAX77620_WATCHDOG depends on MFD_MAX77620 || COMPILE_TEST select WATCHDOG_CORE help - This is the driver for the Max77620 watchdog timer. - Say 'Y' here to enable the watchdog timer support for - MAX77620 chips. To compile this driver as a module, - choose M here: the module will be called max77620_wdt. + This is the driver for the Max77620 watchdog timer. + Say 'Y' here to enable the watchdog timer support for + MAX77620 chips. To compile this driver as a module, + choose M here: the module will be called max77620_wdt. config IMX2_WDT tristate "IMX2+ Watchdog" @@ -822,6 +823,7 @@ config MESON_WATCHDOG config MEDIATEK_WATCHDOG tristate "Mediatek SoCs watchdog support" depends on ARCH_MEDIATEK || COMPILE_TEST + default ARCH_MEDIATEK select WATCHDOG_CORE select RESET_CONTROLLER help @@ -881,6 +883,14 @@ config RENESAS_RZAWDT This driver adds watchdog support for the integrated watchdogs in the Renesas RZ/A SoCs. These watchdogs can be used to reset a system. +config RENESAS_RZG2LWDT + tristate "Renesas RZ/G2L WDT Watchdog" + depends on ARCH_RENESAS || COMPILE_TEST + select WATCHDOG_CORE + help + This driver adds watchdog support for the integrated watchdogs in the + Renesas RZ/G2L SoCs. These watchdogs can be used to reset a system. + config ASPEED_WATCHDOG tristate "Aspeed BMC watchdog support" depends on ARCH_ASPEED || COMPILE_TEST @@ -940,6 +950,19 @@ config RTD119X_WATCHDOG Say Y here to include support for the watchdog timer in Realtek RTD1295 SoCs. +config REALTEK_OTTO_WDT + tristate "Realtek Otto MIPS watchdog support" + depends on MACH_REALTEK_RTL || COMPILE_TEST + depends on COMMON_CLK + select WATCHDOG_CORE + default MACH_REALTEK_RTL + help + Say Y here to include support for the watchdog timer on Realtek + RTL838x, RTL839x, RTL930x SoCs. This watchdog has pretimeout + notifications and system reset on timeout. + + When built as a module this will be called realtek_otto_wdt. + config SPRD_WATCHDOG tristate "Spreadtrum watchdog support" depends on ARCH_SPRD || COMPILE_TEST @@ -976,6 +999,18 @@ config MSC313E_WATCHDOG To compile this driver as a module, choose M here: the module will be called msc313e_wdt. +config APPLE_WATCHDOG + tristate "Apple SoC watchdog" + depends on ARCH_APPLE || COMPILE_TEST + select WATCHDOG_CORE + help + Say Y here to include support for the Watchdog found in Apple + SoCs such as the M1. Next to the common watchdog features this + driver is also required in order to reboot these SoCs. + + To compile this driver as a module, choose M here: the + module will be called apple_wdt. + # X86 (i386 + ia64 + x86_64) Architecture config ACQUIRE_WDT @@ -1440,26 +1475,26 @@ config TQMX86_WDT depends on X86 select WATCHDOG_CORE help - This is the driver for the hardware watchdog timer in the TQMX86 IO - controller found on some of their ComExpress Modules. + This is the driver for the hardware watchdog timer in the TQMX86 IO + controller found on some of their ComExpress Modules. - To compile this driver as a module, choose M here; the module - will be called tqmx86_wdt. + To compile this driver as a module, choose M here; the module + will be called tqmx86_wdt. - Most people will say N. + Most people will say N. config VIA_WDT tristate "VIA Watchdog Timer" depends on X86 && PCI select WATCHDOG_CORE help - This is the driver for the hardware watchdog timer on VIA - southbridge chipset CX700, VX800/VX820 or VX855/VX875. + This is the driver for the hardware watchdog timer on VIA + southbridge chipset CX700, VX800/VX820 or VX855/VX875. 
- To compile this driver as a module, choose M here; the module - will be called via_wdt. + To compile this driver as a module, choose M here; the module + will be called via_wdt. - Most people will say N. + Most people will say N. config W83627HF_WDT tristate "Watchdog timer for W83627HF/W83627DHG and compatibles" @@ -1707,16 +1742,6 @@ config OCTEON_WDT from the first interrupt, it is then only poked when the device is written. -config BCM63XX_WDT - tristate "Broadcom BCM63xx hardware watchdog" - depends on BCM63XX - help - Watchdog driver for the built in watchdog hardware in Broadcom - BCM63xx SoC. - - To compile this driver as a loadable module, choose M here. - The module will be called bcm63xx_wdt. - config BCM2835_WDT tristate "Broadcom BCM2835 hardware watchdog" depends on ARCH_BCM2835 || (OF && COMPILE_TEST) @@ -1751,15 +1776,16 @@ config BCM_KONA_WDT_DEBUG If in doubt, say 'N'. config BCM7038_WDT - tristate "BCM7038 Watchdog" + tristate "BCM63xx/BCM7038 Watchdog" select WATCHDOG_CORE depends on HAS_IOMEM - depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + depends on ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST help - Watchdog driver for the built-in hardware in Broadcom 7038 and - later SoCs used in set-top boxes. BCM7038 was made public - during the 2004 CES, and since then, many Broadcom chips use this - watchdog block, including some cable modem chips. + Watchdog driver for the built-in hardware in Broadcom 7038 and + later SoCs used in set-top boxes. BCM7038 was made public + during the 2004 CES, and since then, many Broadcom chips use this + watchdog block, including some cable modem chips and DSL (63xx) + chips. config IMGPDC_WDT tristate "Imagination Technologies PDC Watchdog Timer" @@ -2120,12 +2146,12 @@ config KEEMBAY_WATCHDOG depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST) select WATCHDOG_CORE help - This option enable support for an In-secure watchdog timer driver for - Intel Keem Bay SoC. This WDT has a 32 bit timer and decrements in every - count unit. An interrupt will be triggered, when the count crosses - the threshold configured in the register. + This option enable support for an In-secure watchdog timer driver for + Intel Keem Bay SoC. This WDT has a 32 bit timer and decrements in every + count unit. An interrupt will be triggered, when the count crosses + the threshold configured in the register. - To compile this driver as a module, choose M here: the - module will be called keembay_wdt. + To compile this driver as a module, choose M here: the + module will be called keembay_wdt. 
endif # WATCHDOG diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 31b931846e32..f7da867e8782 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -84,6 +84,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o +obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o @@ -93,6 +94,7 @@ obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o +obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o # X86 (i386 + ia64 + x86_64) Architecture obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o @@ -154,7 +156,6 @@ obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o # MIPS Architecture obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o -obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o obj-$(CONFIG_INDYDOG) += indydog.o obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o @@ -171,6 +172,7 @@ obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o obj-$(CONFIG_PIC32_WDT) += pic32-wdt.o obj-$(CONFIG_PIC32_DMT) += pic32-dmt.o +obj-$(CONFIG_REALTEK_OTTO_WDT) += realtek_otto_wdt.o # PARISC Architecture diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c new file mode 100644 index 000000000000..16aca21f13d6 --- /dev/null +++ b/drivers/watchdog/apple_wdt.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SoC Watchdog driver + * + * Copyright (C) The Asahi Linux Contributors + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/limits.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +/* + * Apple Watchdog MMIO registers + * + * This HW block has three separate watchdogs. WD0 resets the machine + * to recovery mode and is not very useful for us. WD1 and WD2 trigger a normal + * machine reset. WD0 additionally supports a configurable interrupt. + * This information can be used to implement pretimeout support at a later time. + * + * APPLE_WDT_WDx_CUR_TIME is a simple counter incremented for each tick of the + * reference clock. It can also be overwritten to any value. + * Whenever APPLE_WDT_CTRL_RESET_EN is set in APPLE_WDT_WDx_CTRL and + * APPLE_WDT_WDx_CUR_TIME >= APPLE_WDT_WDx_BITE_TIME the entire machine is + * reset. + * Whenever APPLE_WDT_CTRL_IRQ_EN is set and APPLE_WDTx_WD1_CUR_TIME >= + * APPLE_WDTx_WD1_BARK_TIME an interrupt is triggered and + * APPLE_WDT_CTRL_IRQ_STATUS is set. The interrupt can be cleared by writing + * 1 to APPLE_WDT_CTRL_IRQ_STATUS. 
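+ *
+ * Worked example (figures are illustrative only; the real rate comes from
+ * the clock feeding this block): with a 24 MHz reference clock, a 30 s
+ * timeout is armed by clearing APPLE_WDT_WD1_CUR_TIME and writing
+ * 24000000 * 30 = 720000000 to APPLE_WDT_WD1_BITE_TIME. Each ping rewinds
+ * CUR_TIME to 0; if the counter is ever allowed to reach the bite value,
+ * the machine resets. The same arithmetic caps max_timeout at
+ * U32_MAX / 24000000, roughly 178 s for that clock.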
+ */ +#define APPLE_WDT_WD0_CUR_TIME 0x00 +#define APPLE_WDT_WD0_BITE_TIME 0x04 +#define APPLE_WDT_WD0_BARK_TIME 0x08 +#define APPLE_WDT_WD0_CTRL 0x0c + +#define APPLE_WDT_WD1_CUR_TIME 0x10 +#define APPLE_WDT_WD1_BITE_TIME 0x14 +#define APPLE_WDT_WD1_CTRL 0x1c + +#define APPLE_WDT_WD2_CUR_TIME 0x20 +#define APPLE_WDT_WD2_BITE_TIME 0x24 +#define APPLE_WDT_WD2_CTRL 0x2c + +#define APPLE_WDT_CTRL_IRQ_EN BIT(0) +#define APPLE_WDT_CTRL_IRQ_STATUS BIT(1) +#define APPLE_WDT_CTRL_RESET_EN BIT(2) + +#define APPLE_WDT_TIMEOUT_DEFAULT 30 + +struct apple_wdt { + struct watchdog_device wdd; + void __iomem *regs; + unsigned long clk_rate; +}; + +static struct apple_wdt *to_apple_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct apple_wdt, wdd); +} + +static int apple_wdt_start(struct watchdog_device *wdd) +{ + struct apple_wdt *wdt = to_apple_wdt(wdd); + + writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME); + writel_relaxed(APPLE_WDT_CTRL_RESET_EN, wdt->regs + APPLE_WDT_WD1_CTRL); + + return 0; +} + +static int apple_wdt_stop(struct watchdog_device *wdd) +{ + struct apple_wdt *wdt = to_apple_wdt(wdd); + + writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CTRL); + + return 0; +} + +static int apple_wdt_ping(struct watchdog_device *wdd) +{ + struct apple_wdt *wdt = to_apple_wdt(wdd); + + writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME); + + return 0; +} + +static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s) +{ + struct apple_wdt *wdt = to_apple_wdt(wdd); + + writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME); + writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME); + + wdd->timeout = s; + + return 0; +} + +static unsigned int apple_wdt_get_timeleft(struct watchdog_device *wdd) +{ + struct apple_wdt *wdt = to_apple_wdt(wdd); + u32 cur_time, reset_time; + + cur_time = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME); + reset_time = readl_relaxed(wdt->regs + APPLE_WDT_WD1_BITE_TIME); + + return (reset_time - cur_time) / wdt->clk_rate; +} + +static int apple_wdt_restart(struct watchdog_device *wdd, unsigned long mode, + void *cmd) +{ + struct apple_wdt *wdt = to_apple_wdt(wdd); + + writel_relaxed(APPLE_WDT_CTRL_RESET_EN, wdt->regs + APPLE_WDT_WD1_CTRL); + writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_BITE_TIME); + writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME); + + /* + * Flush writes and then wait for the SoC to reset. Even though the + * reset is queued almost immediately experiments have shown that it + * can take up to ~20-25ms until the SoC is actually reset. Just wait + * 50ms here to be safe. 
+ */ + (void)readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME); + mdelay(50); + + return 0; +} + +static void apple_wdt_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static struct watchdog_ops apple_wdt_ops = { + .owner = THIS_MODULE, + .start = apple_wdt_start, + .stop = apple_wdt_stop, + .ping = apple_wdt_ping, + .set_timeout = apple_wdt_set_timeout, + .get_timeleft = apple_wdt_get_timeleft, + .restart = apple_wdt_restart, +}; + +static struct watchdog_info apple_wdt_info = { + .identity = "Apple SoC Watchdog", + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, +}; + +static int apple_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_wdt *wdt; + struct clk *clk; + u32 wdt_ctrl; + int ret; + + wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + wdt->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(wdt->regs)) + return PTR_ERR(wdt->regs); + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, apple_wdt_clk_disable_unprepare, + clk); + if (ret) + return ret; + + wdt->clk_rate = clk_get_rate(clk); + if (!wdt->clk_rate) + return -EINVAL; + + wdt->wdd.ops = &apple_wdt_ops; + wdt->wdd.info = &apple_wdt_info; + wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate; + wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT; + + wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL); + if (wdt_ctrl & APPLE_WDT_CTRL_RESET_EN) + set_bit(WDOG_HW_RUNNING, &wdt->wdd.status); + + watchdog_init_timeout(&wdt->wdd, 0, dev); + apple_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout); + watchdog_stop_on_unregister(&wdt->wdd); + watchdog_set_restart_priority(&wdt->wdd, 128); + + return devm_watchdog_register_device(dev, &wdt->wdd); +} + +static const struct of_device_id apple_wdt_of_match[] = { + { .compatible = "apple,wdt" }, + {}, +}; +MODULE_DEVICE_TABLE(of, apple_wdt_of_match); + +static struct platform_driver apple_wdt_driver = { + .driver = { + .name = "apple-watchdog", + .of_match_table = apple_wdt_of_match, + }, + .probe = apple_wdt_probe, +}; +module_platform_driver(apple_wdt_driver); + +MODULE_DESCRIPTION("Apple SoC watchdog driver"); +MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c deleted file mode 100644 index 56cc262571a5..000000000000 --- a/drivers/watchdog/bcm63xx_wdt.c +++ /dev/null @@ -1,317 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Broadcom BCM63xx SoC watchdog driver - * - * Copyright (C) 2007, Miguel Gaio <miguel.gaio@efixo.com> - * Copyright (C) 2008, Florian Fainelli <florian@openwrt.org> - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/bitops.h> -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/miscdevice.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/types.h> -#include <linux/uaccess.h> -#include <linux/watchdog.h> -#include <linux/timer.h> -#include <linux/jiffies.h> -#include <linux/interrupt.h> -#include <linux/ptrace.h> -#include <linux/resource.h> -#include <linux/platform_device.h> - -#include <bcm63xx_cpu.h> -#include <bcm63xx_io.h> -#include <bcm63xx_regs.h> -#include <bcm63xx_timer.h> - -#define PFX KBUILD_MODNAME - -#define WDT_HZ 50000000 /* Fclk */ -#define WDT_DEFAULT_TIME 30 /* seconds */ 
-#define WDT_MAX_TIME 256 /* seconds */ - -static struct { - void __iomem *regs; - struct timer_list timer; - unsigned long inuse; - atomic_t ticks; -} bcm63xx_wdt_device; - -static int expect_close; - -static int wdt_time = WDT_DEFAULT_TIME; -static bool nowayout = WATCHDOG_NOWAYOUT; -module_param(nowayout, bool, 0); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" - __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); - -/* HW functions */ -static void bcm63xx_wdt_hw_start(void) -{ - bcm_writel(0xfffffffe, bcm63xx_wdt_device.regs + WDT_DEFVAL_REG); - bcm_writel(WDT_START_1, bcm63xx_wdt_device.regs + WDT_CTL_REG); - bcm_writel(WDT_START_2, bcm63xx_wdt_device.regs + WDT_CTL_REG); -} - -static void bcm63xx_wdt_hw_stop(void) -{ - bcm_writel(WDT_STOP_1, bcm63xx_wdt_device.regs + WDT_CTL_REG); - bcm_writel(WDT_STOP_2, bcm63xx_wdt_device.regs + WDT_CTL_REG); -} - -static void bcm63xx_wdt_isr(void *data) -{ - struct pt_regs *regs = get_irq_regs(); - - die(PFX " fire", regs); -} - -static void bcm63xx_timer_tick(struct timer_list *unused) -{ - if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) { - bcm63xx_wdt_hw_start(); - mod_timer(&bcm63xx_wdt_device.timer, jiffies + HZ); - } else - pr_crit("watchdog will restart system\n"); -} - -static void bcm63xx_wdt_pet(void) -{ - atomic_set(&bcm63xx_wdt_device.ticks, wdt_time); -} - -static void bcm63xx_wdt_start(void) -{ - bcm63xx_wdt_pet(); - bcm63xx_timer_tick(0); -} - -static void bcm63xx_wdt_pause(void) -{ - del_timer_sync(&bcm63xx_wdt_device.timer); - bcm63xx_wdt_hw_stop(); -} - -static int bcm63xx_wdt_settimeout(int new_time) -{ - if ((new_time <= 0) || (new_time > WDT_MAX_TIME)) - return -EINVAL; - - wdt_time = new_time; - - return 0; -} - -static int bcm63xx_wdt_open(struct inode *inode, struct file *file) -{ - if (test_and_set_bit(0, &bcm63xx_wdt_device.inuse)) - return -EBUSY; - - bcm63xx_wdt_start(); - return stream_open(inode, file); -} - -static int bcm63xx_wdt_release(struct inode *inode, struct file *file) -{ - if (expect_close == 42) - bcm63xx_wdt_pause(); - else { - pr_crit("Unexpected close, not stopping watchdog!\n"); - bcm63xx_wdt_start(); - } - clear_bit(0, &bcm63xx_wdt_device.inuse); - expect_close = 0; - return 0; -} - -static ssize_t bcm63xx_wdt_write(struct file *file, const char *data, - size_t len, loff_t *ppos) -{ - if (len) { - if (!nowayout) { - size_t i; - - /* In case it was set long ago */ - expect_close = 0; - - for (i = 0; i != len; i++) { - char c; - if (get_user(c, data + i)) - return -EFAULT; - if (c == 'V') - expect_close = 42; - } - } - bcm63xx_wdt_pet(); - } - return len; -} - -static struct watchdog_info bcm63xx_wdt_info = { - .identity = PFX, - .options = WDIOF_SETTIMEOUT | - WDIOF_KEEPALIVEPING | - WDIOF_MAGICCLOSE, -}; - - -static long bcm63xx_wdt_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - void __user *argp = (void __user *)arg; - int __user *p = argp; - int new_value, retval = -EINVAL; - - switch (cmd) { - case WDIOC_GETSUPPORT: - return copy_to_user(argp, &bcm63xx_wdt_info, - sizeof(bcm63xx_wdt_info)) ? 
-EFAULT : 0; - - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - return put_user(0, p); - - case WDIOC_SETOPTIONS: - if (get_user(new_value, p)) - return -EFAULT; - - if (new_value & WDIOS_DISABLECARD) { - bcm63xx_wdt_pause(); - retval = 0; - } - if (new_value & WDIOS_ENABLECARD) { - bcm63xx_wdt_start(); - retval = 0; - } - - return retval; - - case WDIOC_KEEPALIVE: - bcm63xx_wdt_pet(); - return 0; - - case WDIOC_SETTIMEOUT: - if (get_user(new_value, p)) - return -EFAULT; - - if (bcm63xx_wdt_settimeout(new_value)) - return -EINVAL; - - bcm63xx_wdt_pet(); - - fallthrough; - - case WDIOC_GETTIMEOUT: - return put_user(wdt_time, p); - - default: - return -ENOTTY; - - } -} - -static const struct file_operations bcm63xx_wdt_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .write = bcm63xx_wdt_write, - .unlocked_ioctl = bcm63xx_wdt_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .open = bcm63xx_wdt_open, - .release = bcm63xx_wdt_release, -}; - -static struct miscdevice bcm63xx_wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &bcm63xx_wdt_fops, -}; - - -static int bcm63xx_wdt_probe(struct platform_device *pdev) -{ - int ret; - struct resource *r; - - timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "failed to get resources\n"); - return -ENODEV; - } - - bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start, - resource_size(r)); - if (!bcm63xx_wdt_device.regs) { - dev_err(&pdev->dev, "failed to remap I/O resources\n"); - return -ENXIO; - } - - ret = bcm63xx_timer_register(TIMER_WDT_ID, bcm63xx_wdt_isr, NULL); - if (ret < 0) { - dev_err(&pdev->dev, "failed to register wdt timer isr\n"); - return ret; - } - - if (bcm63xx_wdt_settimeout(wdt_time)) { - bcm63xx_wdt_settimeout(WDT_DEFAULT_TIME); - dev_info(&pdev->dev, - ": wdt_time value must be 1 <= wdt_time <= 256, using %d\n", - wdt_time); - } - - ret = misc_register(&bcm63xx_wdt_miscdev); - if (ret < 0) { - dev_err(&pdev->dev, "failed to register watchdog device\n"); - goto unregister_timer; - } - - dev_info(&pdev->dev, " started, timer margin: %d sec\n", - WDT_DEFAULT_TIME); - - return 0; - -unregister_timer: - bcm63xx_timer_unregister(TIMER_WDT_ID); - return ret; -} - -static int bcm63xx_wdt_remove(struct platform_device *pdev) -{ - if (!nowayout) - bcm63xx_wdt_pause(); - - misc_deregister(&bcm63xx_wdt_miscdev); - bcm63xx_timer_unregister(TIMER_WDT_ID); - return 0; -} - -static void bcm63xx_wdt_shutdown(struct platform_device *pdev) -{ - bcm63xx_wdt_pause(); -} - -static struct platform_driver bcm63xx_wdt_driver = { - .probe = bcm63xx_wdt_probe, - .remove = bcm63xx_wdt_remove, - .shutdown = bcm63xx_wdt_shutdown, - .driver = { - .name = "bcm63xx-wdt", - } -}; - -module_platform_driver(bcm63xx_wdt_driver); - -MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>"); -MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); -MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:bcm63xx-wdt"); diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c index acaaa0005d5b..8656a137e9a4 100644 --- a/drivers/watchdog/bcm7038_wdt.c +++ b/drivers/watchdog/bcm7038_wdt.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/platform_data/bcm7038_wdt.h> #include <linux/pm.h> #include <linux/watchdog.h> @@ -133,8 +134,10 @@ static void bcm7038_clk_disable_unprepare(void *data) 
static int bcm7038_wdt_probe(struct platform_device *pdev) { + struct bcm7038_wdt_platform_data *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct bcm7038_watchdog *wdt; + const char *clk_name = NULL; int err; wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); @@ -147,7 +150,10 @@ static int bcm7038_wdt_probe(struct platform_device *pdev) if (IS_ERR(wdt->base)) return PTR_ERR(wdt->base); - wdt->clk = devm_clk_get(dev, NULL); + if (pdata && pdata->clk_name) + clk_name = pdata->clk_name; + + wdt->clk = devm_clk_get(dev, clk_name); /* If unable to get clock, use default frequency */ if (!IS_ERR(wdt->clk)) { err = clk_prepare_enable(wdt->clk); @@ -217,8 +223,15 @@ static const struct of_device_id bcm7038_wdt_match[] = { }; MODULE_DEVICE_TABLE(of, bcm7038_wdt_match); +static const struct platform_device_id bcm7038_wdt_devtype[] = { + { .name = "bcm63xx-wdt" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(platform, bcm7038_wdt_devtype); + static struct platform_driver bcm7038_wdt_driver = { .probe = bcm7038_wdt_probe, + .id_table = bcm7038_wdt_devtype, .driver = { .name = "bcm7038-wdt", .of_match_table = bcm7038_wdt_match, diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c index d79ce64e26a9..9adad1862bbd 100644 --- a/drivers/watchdog/da9063_wdt.c +++ b/drivers/watchdog/da9063_wdt.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/slab.h> +#include <linux/i2c.h> #include <linux/delay.h> #include <linux/mfd/da9063/registers.h> #include <linux/mfd/da9063/core.h> @@ -169,14 +170,19 @@ static int da9063_wdt_restart(struct watchdog_device *wdd, unsigned long action, void *data) { struct da9063 *da9063 = watchdog_get_drvdata(wdd); + struct i2c_client *client = to_i2c_client(da9063->dev); int ret; - ret = regmap_write(da9063->regmap, DA9063_REG_CONTROL_F, - DA9063_SHUTDOWN); - if (ret) + /* Don't use regmap because it is not atomic safe */ + ret = i2c_smbus_write_byte_data(client, DA9063_REG_CONTROL_F, + DA9063_SHUTDOWN); + if (ret < 0) dev_alert(da9063->dev, "Failed to shutdown (err = %d)\n", ret); + /* wait for reset to assert... */ + mdelay(500); + return ret; } diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index e6eaba6bae5b..584a56893b81 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -134,7 +134,7 @@ static unsigned int davinci_wdt_get_timeleft(struct watchdog_device *wdd) timer_counter = ioread32(davinci_wdt->base + TIM12); timer_counter |= ((u64)ioread32(davinci_wdt->base + TIM34) << 32); - do_div(timer_counter, freq); + timer_counter = div64_ul(timer_counter, freq); return wdd->timeout - timer_counter; } diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index ee90c5f943f9..7f59c680de25 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -49,6 +49,7 @@ #define SIO_F81803_ID 0x1210 /* Chipset ID */ #define SIO_F81865_ID 0x0704 /* Chipset ID */ #define SIO_F81866_ID 0x1010 /* Chipset ID */ +#define SIO_F81966_ID 0x1502 /* F81804 chipset ID, same for f81966 */ #define F71808FG_REG_WDO_CONF 0xf0 #define F71808FG_REG_WDT_CONF 0xf5 @@ -105,7 +106,7 @@ MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with" " given initial timeout. 
Zero (default) disables this feature."); enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg, - f81803, f81865, f81866}; + f81803, f81865, f81866, f81966}; static const char * const fintek_wdt_names[] = { "f71808fg", @@ -118,6 +119,7 @@ static const char * const fintek_wdt_names[] = { "f81803", "f81865", "f81866", + "f81966" }; /* Super-I/O Function prototypes */ @@ -347,6 +349,7 @@ static int fintek_wdt_start(struct watchdog_device *wdd) break; case f81866: + case f81966: /* * GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0. * The PIN 70(GPIO15/WDTRST) is controlled by 2Ch: @@ -373,7 +376,7 @@ static int fintek_wdt_start(struct watchdog_device *wdd) superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT); superio_set_bit(wd->sioaddr, SIO_REG_ENABLE, 0); - if (wd->type == f81865 || wd->type == f81866) + if (wd->type == f81865 || wd->type == f81866 || wd->type == f81966) superio_set_bit(wd->sioaddr, F81865_REG_WDO_CONF, F81865_FLAG_WDOUT_EN); else @@ -580,6 +583,9 @@ static int __init fintek_wdt_find(int sioaddr) case SIO_F81866_ID: type = f81866; break; + case SIO_F81966_ID: + type = f81966; + break; default: pr_info("Unrecognized Fintek device: %04x\n", (unsigned int)devid); diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c index 945f5e65db57..d3c9e2f6e63b 100644 --- a/drivers/watchdog/meson_gxbb_wdt.c +++ b/drivers/watchdog/meson_gxbb_wdt.c @@ -198,7 +198,6 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev) meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout); - watchdog_stop_on_reboot(&data->wdt_dev); return devm_watchdog_register_device(dev, &data->wdt_dev); } diff --git a/drivers/watchdog/msc313e_wdt.c b/drivers/watchdog/msc313e_wdt.c index 0d497aa0fb7d..90171431fc59 100644 --- a/drivers/watchdog/msc313e_wdt.c +++ b/drivers/watchdog/msc313e_wdt.c @@ -120,6 +120,10 @@ static int msc313e_wdt_probe(struct platform_device *pdev) priv->wdev.max_timeout = U32_MAX / clk_get_rate(priv->clk); priv->wdev.timeout = MSC313E_WDT_DEFAULT_TIMEOUT; + /* If the period is non-zero the WDT is running */ + if (readw(priv->base + REG_WDT_MAX_PRD_L) | (readw(priv->base + REG_WDT_MAX_PRD_H) << 16)) + set_bit(WDOG_HW_RUNNING, &priv->wdev.status); + watchdog_set_drvdata(&priv->wdev, priv); watchdog_init_timeout(&priv->wdev, timeout, dev); diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index 543cf38bd04e..4577a76dd464 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -339,7 +339,7 @@ static int mtk_wdt_probe(struct platform_device *pdev) if (IS_ERR(mtk_wdt->wdt_base)) return PTR_ERR(mtk_wdt->wdt_base); - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { err = devm_request_irq(&pdev->dev, irq, mtk_wdt_isr, 0, "wdt_bark", &mtk_wdt->wdt_dev); diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c new file mode 100644 index 000000000000..60058a0c3ec4 --- /dev/null +++ b/drivers/watchdog/realtek_otto_wdt.c @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Realtek Otto MIPS platform watchdog + * + * Watchdog timer that will reset the system after timeout, using the selected + * reset mode. 
+ * + * Counter scaling and timeouts: + * - Base prescale of (2 << 25), providing tick duration T_0: 168ms @ 200MHz + * - PRESCALE: logarithmic prescaler adding a factor of {1, 2, 4, 8} + * - Phase 1: Times out after (PHASE1 + 1) × PRESCALE × T_0 + * Generates an interrupt, WDT cannot be stopped after phase 1 + * - Phase 2: starts after phase 1, times out after (PHASE2 + 1) × PRESCALE × T_0 + * Resets the system according to RST_MODE + */ + +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/math.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/reboot.h> +#include <linux/watchdog.h> + +#define OTTO_WDT_REG_CNTR 0x0 +#define OTTO_WDT_CNTR_PING BIT(31) + +#define OTTO_WDT_REG_INTR 0x4 +#define OTTO_WDT_INTR_PHASE_1 BIT(31) +#define OTTO_WDT_INTR_PHASE_2 BIT(30) + +#define OTTO_WDT_REG_CTRL 0x8 +#define OTTO_WDT_CTRL_ENABLE BIT(31) +#define OTTO_WDT_CTRL_PRESCALE GENMASK(30, 29) +#define OTTO_WDT_CTRL_PHASE1 GENMASK(26, 22) +#define OTTO_WDT_CTRL_PHASE2 GENMASK(19, 15) +#define OTTO_WDT_CTRL_RST_MODE GENMASK(1, 0) +#define OTTO_WDT_MODE_SOC 0 +#define OTTO_WDT_MODE_CPU 1 +#define OTTO_WDT_MODE_SOFTWARE 2 +#define OTTO_WDT_CTRL_DEFAULT OTTO_WDT_MODE_CPU + +#define OTTO_WDT_PRESCALE_MAX 3 + +/* + * One higher than the max values contained in PHASE{1,2}, since a value of 0 + * corresponds to one tick. + */ +#define OTTO_WDT_PHASE_TICKS_MAX 32 + +/* + * The maximum reset delay is actually 2×32 ticks, but that would require large + * pretimeout values for timeouts longer than 32 ticks. Limit the maximum timeout + * to 32 + 1 to ensure small pretimeout values can be configured as expected. + */ +#define OTTO_WDT_TIMEOUT_TICKS_MAX (OTTO_WDT_PHASE_TICKS_MAX + 1) + +struct otto_wdt_ctrl { + struct watchdog_device wdev; + struct device *dev; + void __iomem *base; + unsigned int clk_rate_khz; + int irq_phase1; +}; + +static int otto_wdt_start(struct watchdog_device *wdev) +{ + struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); + u32 v; + + v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); + v |= OTTO_WDT_CTRL_ENABLE; + iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); + + return 0; +} + +static int otto_wdt_stop(struct watchdog_device *wdev) +{ + struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); + u32 v; + + v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); + v &= ~OTTO_WDT_CTRL_ENABLE; + iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); + + return 0; +} + +static int otto_wdt_ping(struct watchdog_device *wdev) +{ + struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); + + iowrite32(OTTO_WDT_CNTR_PING, ctrl->base + OTTO_WDT_REG_CNTR); + + return 0; +} + +static int otto_wdt_tick_ms(struct otto_wdt_ctrl *ctrl, int prescale) +{ + return DIV_ROUND_CLOSEST(1 << (25 + prescale), ctrl->clk_rate_khz); +} + +/* + * The timer asserts the PHASE1/PHASE2 IRQs when the number of ticks exceeds + * the value stored in those fields. This means each phase will run for at least + * one tick, so small values need to be clamped to correctly reflect the timeout. 
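+ *
+ * Worked example for otto_wdt_determine_timeouts() below (illustrative,
+ * assuming a 200 MHz bus clock and prescale 0, i.e. tick_ms ~= 168):
+ * a request of timeout=2s with pretimeout=1s yields
+ *   total_ticks  = max(2, DIV_ROUND_UP(2000, 168)) = 12
+ *   phase1_ticks = max(1, DIV_ROUND_UP(1000, 168)) = 6
+ *   phase2_ticks = 12 - 6 = 6
+ * so the effective timeout is 12 * 168 ms ~= 2.02 s, with the phase 1
+ * interrupt firing about one second before the reset.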
+ */ +static inline unsigned int div_round_ticks(unsigned int val, unsigned int tick_duration, + unsigned int min_ticks) +{ + return max(min_ticks, DIV_ROUND_UP(val, tick_duration)); +} + +static int otto_wdt_determine_timeouts(struct watchdog_device *wdev, unsigned int timeout, + unsigned int pretimeout) +{ + struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); + unsigned int pretimeout_ms = pretimeout * 1000; + unsigned int timeout_ms = timeout * 1000; + unsigned int prescale_next = 0; + unsigned int phase1_ticks; + unsigned int phase2_ticks; + unsigned int total_ticks; + unsigned int prescale; + unsigned int tick_ms; + u32 v; + + do { + prescale = prescale_next; + if (prescale > OTTO_WDT_PRESCALE_MAX) + return -EINVAL; + + tick_ms = otto_wdt_tick_ms(ctrl, prescale); + total_ticks = div_round_ticks(timeout_ms, tick_ms, 2); + phase1_ticks = div_round_ticks(timeout_ms - pretimeout_ms, tick_ms, 1); + phase2_ticks = total_ticks - phase1_ticks; + + prescale_next++; + } while (phase1_ticks > OTTO_WDT_PHASE_TICKS_MAX + || phase2_ticks > OTTO_WDT_PHASE_TICKS_MAX); + + v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); + + v &= ~(OTTO_WDT_CTRL_PRESCALE | OTTO_WDT_CTRL_PHASE1 | OTTO_WDT_CTRL_PHASE2); + v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE1, phase1_ticks - 1); + v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE2, phase2_ticks - 1); + v |= FIELD_PREP(OTTO_WDT_CTRL_PRESCALE, prescale); + + iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); + + timeout_ms = total_ticks * tick_ms; + ctrl->wdev.timeout = timeout_ms / 1000; + + pretimeout_ms = phase2_ticks * tick_ms; + ctrl->wdev.pretimeout = pretimeout_ms / 1000; + + return 0; +} + +static int otto_wdt_set_timeout(struct watchdog_device *wdev, unsigned int val) +{ + return otto_wdt_determine_timeouts(wdev, val, min(wdev->pretimeout, val - 1)); +} + +static int otto_wdt_set_pretimeout(struct watchdog_device *wdev, unsigned int val) +{ + return otto_wdt_determine_timeouts(wdev, wdev->timeout, val); +} + +static int otto_wdt_restart(struct watchdog_device *wdev, unsigned long reboot_mode, + void *data) +{ + struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); + u32 reset_mode; + u32 v; + + disable_irq(ctrl->irq_phase1); + + switch (reboot_mode) { + case REBOOT_SOFT: + reset_mode = OTTO_WDT_MODE_SOFTWARE; + break; + case REBOOT_WARM: + reset_mode = OTTO_WDT_MODE_CPU; + break; + default: + reset_mode = OTTO_WDT_MODE_SOC; + break; + } + + /* Configure for shortest timeout and wait for reset to occur */ + v = FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, reset_mode) | OTTO_WDT_CTRL_ENABLE; + iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); + + mdelay(3 * otto_wdt_tick_ms(ctrl, 0)); + + return 0; +} + +static irqreturn_t otto_wdt_phase1_isr(int irq, void *dev_id) +{ + struct otto_wdt_ctrl *ctrl = dev_id; + + iowrite32(OTTO_WDT_INTR_PHASE_1, ctrl->base + OTTO_WDT_REG_INTR); + dev_crit(ctrl->dev, "phase 1 timeout\n"); + watchdog_notify_pretimeout(&ctrl->wdev); + + return IRQ_HANDLED; +} + +static const struct watchdog_ops otto_wdt_ops = { + .owner = THIS_MODULE, + .start = otto_wdt_start, + .stop = otto_wdt_stop, + .ping = otto_wdt_ping, + .set_timeout = otto_wdt_set_timeout, + .set_pretimeout = otto_wdt_set_pretimeout, + .restart = otto_wdt_restart, +}; + +static const struct watchdog_info otto_wdt_info = { + .identity = "Realtek Otto watchdog timer", + .options = WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE | + WDIOF_SETTIMEOUT | + WDIOF_PRETIMEOUT, +}; + +static void otto_wdt_clock_action(void *data) +{ + clk_disable_unprepare(data); +} + +static int otto_wdt_probe_clk(struct otto_wdt_ctrl 
*ctrl) +{ + struct clk *clk = devm_clk_get(ctrl->dev, NULL); + int ret; + + if (IS_ERR(clk)) + return dev_err_probe(ctrl->dev, PTR_ERR(clk), "Failed to get clock\n"); + + ret = clk_prepare_enable(clk); + if (ret) + return dev_err_probe(ctrl->dev, ret, "Failed to enable clock\n"); + + ret = devm_add_action_or_reset(ctrl->dev, otto_wdt_clock_action, clk); + if (ret) + return ret; + + ctrl->clk_rate_khz = clk_get_rate(clk) / 1000; + if (ctrl->clk_rate_khz == 0) + return dev_err_probe(ctrl->dev, -ENXIO, "Failed to get clock rate\n"); + + return 0; +} + +static int otto_wdt_probe_reset_mode(struct otto_wdt_ctrl *ctrl) +{ + static const char *mode_property = "realtek,reset-mode"; + const struct fwnode_handle *node = ctrl->dev->fwnode; + int mode_count; + u32 mode; + u32 v; + + if (!node) + return -ENXIO; + + mode_count = fwnode_property_string_array_count(node, mode_property); + if (mode_count < 0) + return mode_count; + else if (mode_count == 0) + return 0; + else if (mode_count != 1) + return -EINVAL; + + if (fwnode_property_match_string(node, mode_property, "soc") == 0) + mode = OTTO_WDT_MODE_SOC; + else if (fwnode_property_match_string(node, mode_property, "cpu") == 0) + mode = OTTO_WDT_MODE_CPU; + else if (fwnode_property_match_string(node, mode_property, "software") == 0) + mode = OTTO_WDT_MODE_SOFTWARE; + else + return -EINVAL; + + v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); + v &= ~OTTO_WDT_CTRL_RST_MODE; + v |= FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, mode); + iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); + + return 0; +} + +static int otto_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct otto_wdt_ctrl *ctrl; + unsigned int max_tick_ms; + int ret; + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + ctrl->dev = dev; + ctrl->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctrl->base)) + return PTR_ERR(ctrl->base); + + /* Clear any old interrupts and reset initial state */ + iowrite32(OTTO_WDT_INTR_PHASE_1 | OTTO_WDT_INTR_PHASE_2, + ctrl->base + OTTO_WDT_REG_INTR); + iowrite32(OTTO_WDT_CTRL_DEFAULT, ctrl->base + OTTO_WDT_REG_CTRL); + + ret = otto_wdt_probe_clk(ctrl); + if (ret) + return ret; + + ctrl->irq_phase1 = platform_get_irq_byname(pdev, "phase1"); + if (ctrl->irq_phase1 < 0) + return ctrl->irq_phase1; + + ret = devm_request_irq(dev, ctrl->irq_phase1, otto_wdt_phase1_isr, 0, + "realtek-otto-wdt", ctrl); + if (ret) + return dev_err_probe(dev, ret, "Failed to get IRQ for phase1\n"); + + ret = otto_wdt_probe_reset_mode(ctrl); + if (ret) + return dev_err_probe(dev, ret, "Invalid reset mode specified\n"); + + ctrl->wdev.parent = dev; + ctrl->wdev.info = &otto_wdt_info; + ctrl->wdev.ops = &otto_wdt_ops; + + /* + * Since pretimeout cannot be disabled, min. timeout is twice the + * subsystem resolution. Max. timeout is ca. 43s at a bus clock of 200MHz. 
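+ * For reference, at that same 200 MHz the largest tick is + * DIV_ROUND_CLOSEST(1 << 28, 200000) = 1342 ms, which multiplied by + * OTTO_WDT_TIMEOUT_TICKS_MAX (33 ticks) gives the max_hw_heartbeat_ms set below.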
+ */ + ctrl->wdev.min_timeout = 2; + max_tick_ms = otto_wdt_tick_ms(ctrl, OTTO_WDT_PRESCALE_MAX); + ctrl->wdev.max_hw_heartbeat_ms = max_tick_ms * OTTO_WDT_TIMEOUT_TICKS_MAX; + ctrl->wdev.timeout = min(30U, ctrl->wdev.max_hw_heartbeat_ms / 1000); + + watchdog_set_drvdata(&ctrl->wdev, ctrl); + watchdog_init_timeout(&ctrl->wdev, 0, dev); + watchdog_stop_on_reboot(&ctrl->wdev); + watchdog_set_restart_priority(&ctrl->wdev, 128); + + ret = otto_wdt_determine_timeouts(&ctrl->wdev, ctrl->wdev.timeout, 1); + if (ret) + return dev_err_probe(dev, ret, "Failed to set timeout\n"); + + return devm_watchdog_register_device(dev, &ctrl->wdev); +} + +static const struct of_device_id otto_wdt_ids[] = { + { .compatible = "realtek,rtl8380-wdt" }, + { .compatible = "realtek,rtl8390-wdt" }, + { .compatible = "realtek,rtl9300-wdt" }, + { } +}; +MODULE_DEVICE_TABLE(of, otto_wdt_ids); + +static struct platform_driver otto_wdt_driver = { + .probe = otto_wdt_probe, + .driver = { + .name = "realtek-otto-watchdog", + .of_match_table = otto_wdt_ids, + }, +}; +module_platform_driver(otto_wdt_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>"); +MODULE_DESCRIPTION("Realtek Otto watchdog timer driver"); diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c new file mode 100644 index 000000000000..6b426df34fd6 --- /dev/null +++ b/drivers/watchdog/rzg2l_wdt.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/G2L WDT Watchdog Driver + * + * Copyright (C) 2021 Renesas Electronics Corporation + */ +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/units.h> +#include <linux/watchdog.h> + +#define WDTCNT 0x00 +#define WDTSET 0x04 +#define WDTTIM 0x08 +#define WDTINT 0x0C +#define WDTCNT_WDTEN BIT(0) +#define WDTINT_INTDISP BIT(0) + +#define WDT_DEFAULT_TIMEOUT 60U + +/* Only 12 bits of the period can be set, in WDTSET[31:20] */ +#define WDTSET_COUNTER_MASK (0xFFF00000) +#define WDTSET_COUNTER_VAL(f) ((f) << 20) + +#define F2CYCLE_NSEC(f) (1000000000 / (f)) + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +struct rzg2l_wdt_priv { + void __iomem *base; + struct watchdog_device wdev; + struct reset_control *rstc; + unsigned long osc_clk_rate; + unsigned long delay; +}; + +static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv) +{ + /* Delay to let the WDT synchronize after changing a setting register */ + ndelay(priv->delay); +} + +static u32 rzg2l_wdt_get_cycle_usec(unsigned long cycle, u32 wdttime) +{ + u64 timer_cycle_us = 1024 * 1024 * (wdttime + 1) * MICRO; + + return div64_ul(timer_cycle_us, cycle); +} + +static void rzg2l_wdt_write(struct rzg2l_wdt_priv *priv, u32 val, unsigned int reg) +{ + if (reg == WDTSET) + val &= WDTSET_COUNTER_MASK; + + writel_relaxed(val, priv->base + reg); + /* Registers other than WDTINT are always synchronized with WDT_CLK */ + if (reg != WDTINT) + rzg2l_wdt_wait_delay(priv); +} + +static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev) +{ + struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + u32 time_out; + + /* Clear the lapsed time register and clear the interrupt */ + rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT); + /* Two
consecutive overflow cycles are needed to trigger a reset */ + time_out = (wdev->timeout * (MICRO / 2)) / + rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0); + rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(time_out), WDTSET); +} + +static int rzg2l_wdt_start(struct watchdog_device *wdev) +{ + struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + + reset_control_deassert(priv->rstc); + pm_runtime_get_sync(wdev->parent); + + /* Initialize the timeout */ + rzg2l_wdt_init_timeout(wdev); + + /* Initialize watchdog counter register */ + rzg2l_wdt_write(priv, 0, WDTTIM); + + /* Enable the watchdog timer */ + rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT); + + return 0; +} + +static int rzg2l_wdt_stop(struct watchdog_device *wdev) +{ + struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + + pm_runtime_put(wdev->parent); + reset_control_assert(priv->rstc); + + return 0; +} + +static int rzg2l_wdt_restart(struct watchdog_device *wdev, + unsigned long action, void *data) +{ + struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + + /* Reset the module before we modify any register */ + reset_control_reset(priv->rstc); + pm_runtime_get_sync(wdev->parent); + + /* Use the smallest counter value to reboot as soon as possible */ + rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(1), WDTSET); + + /* Enable the watchdog timer */ + rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT); + + return 0; +} + +static const struct watchdog_info rzg2l_wdt_ident = { + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, + .identity = "Renesas RZ/G2L WDT Watchdog", +}; + +static int rzg2l_wdt_ping(struct watchdog_device *wdev) +{ + struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + + rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT); + + return 0; +} + +static const struct watchdog_ops rzg2l_wdt_ops = { + .owner = THIS_MODULE, + .start = rzg2l_wdt_start, + .stop = rzg2l_wdt_stop, + .ping = rzg2l_wdt_ping, + .restart = rzg2l_wdt_restart, +}; + +static void rzg2l_wdt_reset_assert_pm_disable_put(void *data) +{ + struct watchdog_device *wdev = data; + struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + + pm_runtime_put(wdev->parent); + pm_runtime_disable(wdev->parent); + reset_control_assert(priv->rstc); +} + +static int rzg2l_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rzg2l_wdt_priv *priv; + unsigned long pclk_rate; + struct clk *wdt_clk; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + /* Get watchdog main clock */ + wdt_clk = clk_get(&pdev->dev, "oscclk"); + if (IS_ERR(wdt_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no oscclk"); + + priv->osc_clk_rate = clk_get_rate(wdt_clk); + clk_put(wdt_clk); + if (!priv->osc_clk_rate) + return dev_err_probe(&pdev->dev, -EINVAL, "oscclk rate is 0"); + + /* Get Peripheral clock */ + wdt_clk = clk_get(&pdev->dev, "pclk"); + if (IS_ERR(wdt_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no pclk"); + + pclk_rate = clk_get_rate(wdt_clk); + clk_put(wdt_clk); + if (!pclk_rate) + return dev_err_probe(&pdev->dev, -EINVAL, "pclk rate is 0"); + + priv->delay = F2CYCLE_NSEC(priv->osc_clk_rate) * 6 + F2CYCLE_NSEC(pclk_rate) * 9; + + priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(priv->rstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc), + "failed to get cpg reset"); + + reset_control_deassert(priv->rstc); + pm_runtime_enable(&pdev->dev); +
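+ /* + * Take a runtime PM reference so the WDT is powered for the register + * accesses that follow; the devm action registered below drops it again + * on driver unbind. + */ +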
ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_resume_and_get failed ret=%pe", ERR_PTR(ret)); + goto out_pm_get; + } + + priv->wdev.info = &rzg2l_wdt_ident; + priv->wdev.ops = &rzg2l_wdt_ops; + priv->wdev.parent = dev; + priv->wdev.min_timeout = 1; + priv->wdev.max_timeout = rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0xfff) / + USEC_PER_SEC; + priv->wdev.timeout = WDT_DEFAULT_TIMEOUT; + + watchdog_set_drvdata(&priv->wdev, priv); + ret = devm_add_action_or_reset(&pdev->dev, + rzg2l_wdt_reset_assert_pm_disable_put, + &priv->wdev); + if (ret < 0) + return ret; + + watchdog_set_nowayout(&priv->wdev, nowayout); + watchdog_stop_on_unregister(&priv->wdev); + + ret = watchdog_init_timeout(&priv->wdev, 0, dev); + if (ret) + dev_warn(dev, "Specified timeout invalid, using default"); + + return devm_watchdog_register_device(&pdev->dev, &priv->wdev); + +out_pm_get: + pm_runtime_disable(dev); + reset_control_assert(priv->rstc); + + return ret; +} + +static const struct of_device_id rzg2l_wdt_ids[] = { + { .compatible = "renesas,rzg2l-wdt", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids); + +static struct platform_driver rzg2l_wdt_driver = { + .driver = { + .name = "rzg2l_wdt", + .of_match_table = rzg2l_wdt_ids, + }, + .probe = rzg2l_wdt_probe, +}; +module_platform_driver(rzg2l_wdt_driver); + +MODULE_DESCRIPTION("Renesas RZ/G2L WDT Watchdog Driver"); +MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 2395f353e52d..6db22f2e3a4f 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -56,13 +56,58 @@ #define EXYNOS5_RST_STAT_REG_OFFSET 0x0404 #define EXYNOS5_WDT_DISABLE_REG_OFFSET 0x0408 #define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c -#define QUIRK_HAS_PMU_CONFIG (1 << 0) -#define QUIRK_HAS_RST_STAT (1 << 1) -#define QUIRK_HAS_WTCLRINT_REG (1 << 2) +#define EXYNOS850_CLUSTER0_NONCPU_OUT 0x1220 +#define EXYNOS850_CLUSTER0_NONCPU_INT_EN 0x1244 +#define EXYNOS850_CLUSTER1_NONCPU_OUT 0x1620 +#define EXYNOS850_CLUSTER1_NONCPU_INT_EN 0x1644 + +#define EXYNOS850_CLUSTER0_WDTRESET_BIT 24 +#define EXYNOS850_CLUSTER1_WDTRESET_BIT 23 + +/** + * DOC: Quirk flags for different Samsung watchdog IP-cores + * + * This driver supports multiple Samsung SoCs, each of which might have a + * different set of registers and supported features. Because the watchdog + * block sometimes requires modifying PMU registers for proper functioning, + * register differences in both the watchdog and PMU IP-cores must be accounted + * for. The quirk flags described below tell the driver about these SoC traits, + * and can be specified in the driver data for each particular supported + * device. + * + * %QUIRK_HAS_WTCLRINT_REG: Watchdog block has a WTCLRINT register. It's used to + * clear the interrupt once the interrupt service routine is complete. It's + * write-only: writing any value to this register clears the interrupt, but + * reading is not permitted. + * + * %QUIRK_HAS_PMU_MASK_RESET: PMU block has a register for disabling/enabling + * the WDT reset request. On old SoCs it's usually called MASK_WDT_RESET_REQUEST; + * new SoCs have a CLUSTERx_NONCPU_INT_EN register, whose 'mask_bit' value has + * the inverted meaning compared to the former one. + * + * %QUIRK_HAS_PMU_RST_STAT: PMU block has an RST_STAT (reset status) register, + * which contains bits indicating the reason for the most recent CPU reset.
If + * present, the driver will use this register to check whether the previous + * reboot was due to a watchdog timer reset. + * + * %QUIRK_HAS_PMU_AUTO_DISABLE: PMU block has an AUTOMATIC_WDT_RESET_DISABLE + * register. If the 'mask_bit' bit is set, the PMU will disable the WDT reset + * while the corresponding processor is in reset. + * + * %QUIRK_HAS_PMU_CNT_EN: PMU block has a register (e.g. CLUSTERx_NONCPU_OUT) + * with a "watchdog counter enable" bit. That bit must be set to keep the + * watchdog counter running. + */ +#define QUIRK_HAS_WTCLRINT_REG (1 << 0) +#define QUIRK_HAS_PMU_MASK_RESET (1 << 1) +#define QUIRK_HAS_PMU_RST_STAT (1 << 2) +#define QUIRK_HAS_PMU_AUTO_DISABLE (1 << 3) +#define QUIRK_HAS_PMU_CNT_EN (1 << 4) /* These quirks require that we have a PMU register map */ -#define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \ - QUIRK_HAS_RST_STAT) +#define QUIRKS_HAVE_PMUREG \ + (QUIRK_HAS_PMU_MASK_RESET | QUIRK_HAS_PMU_RST_STAT | \ + QUIRK_HAS_PMU_AUTO_DISABLE | QUIRK_HAS_PMU_CNT_EN) static bool nowayout = WATCHDOG_NOWAYOUT; static int tmr_margin; @@ -90,26 +135,33 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to * timer reset functionality. * @mask_reset_reg: Offset in pmureg for the register that masks the watchdog * timer reset functionality. + * @mask_reset_inv: If set, the mask_reset_reg value has an inverted meaning. * @mask_bit: Bit number for the watchdog timer in the disable register and the * mask reset register. * @rst_stat_reg: Offset in pmureg for the register that has the reset status. * @rst_stat_bit: Bit number in the rst_stat register indicating a watchdog * reset. + * @cnt_en_reg: Offset in pmureg for the register that enables the WDT counter. + * @cnt_en_bit: Bit number for "watchdog counter enable" in the cnt_en register. * @quirks: A bitfield of quirks.
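+ * + * For example, on Exynos850 @mask_reset_inv is true: the watchdog reset is + * unmasked by *setting* the bit in CLUSTERx_NONCPU_INT_EN, instead of clearing + * it as on older SoCs, which is what the (mask ^ val_inv) expression in + * s3c2410wdt_mask_wdt_reset() below implements.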
*/ struct s3c2410_wdt_variant { int disable_reg; int mask_reset_reg; + bool mask_reset_inv; int mask_bit; int rst_stat_reg; int rst_stat_bit; + int cnt_en_reg; + int cnt_en_bit; u32 quirks; }; struct s3c2410_wdt { struct device *dev; - struct clk *clock; + struct clk *bus_clk; /* for register interface (PCLK) */ + struct clk *src_clk; /* for WDT counter */ void __iomem *reg_base; unsigned int count; spinlock_t lock; @@ -136,8 +188,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5250 = { .mask_bit = 20, .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, .rst_stat_bit = 20, - .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \ - | QUIRK_HAS_WTCLRINT_REG, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \ + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE, }; static const struct s3c2410_wdt_variant drv_data_exynos5420 = { @@ -146,8 +198,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = { .mask_bit = 0, .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, .rst_stat_bit = 9, - .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \ - | QUIRK_HAS_WTCLRINT_REG, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \ + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE, }; static const struct s3c2410_wdt_variant drv_data_exynos7 = { @@ -156,8 +208,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos7 = { .mask_bit = 23, .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, .rst_stat_bit = 23, /* A57 WDTRESET */ - .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \ - | QUIRK_HAS_WTCLRINT_REG, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \ + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE, +}; + +static const struct s3c2410_wdt_variant drv_data_exynos850_cl0 = { + .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT, + .cnt_en_reg = EXYNOS850_CLUSTER0_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \ + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN, +}; + +static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = { + .mask_reset_reg = EXYNOS850_CLUSTER1_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOS850_CLUSTER1_WDTRESET_BIT, + .cnt_en_reg = EXYNOS850_CLUSTER1_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \ + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN, }; static const struct of_device_id s3c2410_wdt_match[] = { @@ -171,6 +247,8 @@ static const struct of_device_id s3c2410_wdt_match[] = { .data = &drv_data_exynos5420 }, { .compatible = "samsung,exynos7-wdt", .data = &drv_data_exynos7 }, + { .compatible = "samsung,exynos850-wdt", + .data = &drv_data_exynos850_cl0 }, {}, }; MODULE_DEVICE_TABLE(of, s3c2410_wdt_match); @@ -187,9 +265,14 @@ MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids); /* functions */ -static inline unsigned int s3c2410wdt_max_timeout(struct clk *clock) +static inline unsigned long s3c2410wdt_get_freq(struct s3c2410_wdt *wdt) { - unsigned long freq = clk_get_rate(clock); + return clk_get_rate(wdt->src_clk ? 
wdt->src_clk : wdt->bus_clk); +} + +static inline unsigned int s3c2410wdt_max_timeout(struct s3c2410_wdt *wdt) +{ + const unsigned long freq = s3c2410wdt_get_freq(wdt); return S3C2410_WTCNT_MAXCNT / (freq / (S3C2410_WTCON_PRESCALE_MAX + 1) / S3C2410_WTCON_MAXDIV); @@ -200,35 +283,74 @@ static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb) return container_of(nb, struct s3c2410_wdt, freq_transition); } -static int s3c2410wdt_mask_and_disable_reset(struct s3c2410_wdt *wdt, bool mask) +static int s3c2410wdt_disable_wdt_reset(struct s3c2410_wdt *wdt, bool mask) { + const u32 mask_val = BIT(wdt->drv_data->mask_bit); + const u32 val = mask ? mask_val : 0; int ret; - u32 mask_val = 1 << wdt->drv_data->mask_bit; - u32 val = 0; - /* No need to do anything if no PMU CONFIG needed */ - if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_CONFIG)) - return 0; + ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->disable_reg, + mask_val, val); + if (ret < 0) + dev_err(wdt->dev, "failed to update reg(%d)\n", ret); - if (mask) - val = mask_val; + return ret; +} + +static int s3c2410wdt_mask_wdt_reset(struct s3c2410_wdt *wdt, bool mask) +{ + const u32 mask_val = BIT(wdt->drv_data->mask_bit); + const bool val_inv = wdt->drv_data->mask_reset_inv; + const u32 val = (mask ^ val_inv) ? mask_val : 0; + int ret; - ret = regmap_update_bits(wdt->pmureg, - wdt->drv_data->disable_reg, - mask_val, val); + ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->mask_reset_reg, + mask_val, val); if (ret < 0) - goto error; + dev_err(wdt->dev, "failed to update reg(%d)\n", ret); + + return ret; +} - ret = regmap_update_bits(wdt->pmureg, - wdt->drv_data->mask_reset_reg, - mask_val, val); - error: +static int s3c2410wdt_enable_counter(struct s3c2410_wdt *wdt, bool en) +{ + const u32 mask_val = BIT(wdt->drv_data->cnt_en_bit); + const u32 val = en ? 
mask_val : 0; + int ret; + + ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->cnt_en_reg, + mask_val, val); if (ret < 0) dev_err(wdt->dev, "failed to update reg(%d)\n", ret); return ret; } +static int s3c2410wdt_enable(struct s3c2410_wdt *wdt, bool en) +{ + int ret; + + if (wdt->drv_data->quirks & QUIRK_HAS_PMU_AUTO_DISABLE) { + ret = s3c2410wdt_disable_wdt_reset(wdt, !en); + if (ret < 0) + return ret; + } + + if (wdt->drv_data->quirks & QUIRK_HAS_PMU_MASK_RESET) { + ret = s3c2410wdt_mask_wdt_reset(wdt, !en); + if (ret < 0) + return ret; + } + + if (wdt->drv_data->quirks & QUIRK_HAS_PMU_CNT_EN) { + ret = s3c2410wdt_enable_counter(wdt, en); + if (ret < 0) + return ret; + } + + return 0; +} + static int s3c2410wdt_keepalive(struct watchdog_device *wdd) { struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd); @@ -300,7 +422,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned int timeout) { struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd); - unsigned long freq = clk_get_rate(wdt->clock); + unsigned long freq = s3c2410wdt_get_freq(wdt); unsigned int count; unsigned int divisor = 1; unsigned long wtcon; @@ -482,7 +604,7 @@ static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt) unsigned int rst_stat; int ret; - if (!(wdt->drv_data->quirks & QUIRK_HAS_RST_STAT)) + if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_RST_STAT)) return 0; ret = regmap_read(wdt->pmureg, wdt->drv_data->rst_stat_reg, &rst_stat); @@ -498,14 +620,40 @@ static inline const struct s3c2410_wdt_variant * s3c2410_get_wdt_drv_data(struct platform_device *pdev) { const struct s3c2410_wdt_variant *variant; + struct device *dev = &pdev->dev; - variant = of_device_get_match_data(&pdev->dev); + variant = of_device_get_match_data(dev); if (!variant) { /* Device matched by platform_device_id */ variant = (struct s3c2410_wdt_variant *) platform_get_device_id(pdev)->driver_data; } +#ifdef CONFIG_OF + /* Choose Exynos850 driver data w.r.t. 
cluster index */ + if (variant == &drv_data_exynos850_cl0) { + u32 index; + int err; + + err = of_property_read_u32(dev->of_node, + "samsung,cluster-index", &index); + if (err) { + dev_err(dev, "failed to get cluster index\n"); + return NULL; + } + + switch (index) { + case 0: + return &drv_data_exynos850_cl0; + case 1: + return &drv_data_exynos850_cl1; + default: + dev_err(dev, "wrong cluster index: %u\n", index); + return NULL; + } + } +#endif + return variant; } @@ -513,9 +661,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct s3c2410_wdt *wdt; - struct resource *wdt_irq; unsigned int wtcon; - int started = 0; + int wdt_irq; int ret; wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); @@ -527,6 +674,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev) wdt->wdt_device = s3c2410_wdd; wdt->drv_data = s3c2410_get_wdt_drv_data(pdev); + if (!wdt->drv_data) + return -EINVAL; + if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) { wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node, "samsung,syscon-phandle"); @@ -536,40 +686,52 @@ static int s3c2410wdt_probe(struct platform_device *pdev) } } - wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (wdt_irq == NULL) { - dev_err(dev, "no irq resource specified\n"); - ret = -ENOENT; - goto err; - } + wdt_irq = platform_get_irq(pdev, 0); + if (wdt_irq < 0) + return wdt_irq; /* get the memory region for the watchdog timer */ wdt->reg_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(wdt->reg_base)) { - ret = PTR_ERR(wdt->reg_base); - goto err; - } + if (IS_ERR(wdt->reg_base)) + return PTR_ERR(wdt->reg_base); - wdt->clock = devm_clk_get(dev, "watchdog"); - if (IS_ERR(wdt->clock)) { - dev_err(dev, "failed to find watchdog clock source\n"); - ret = PTR_ERR(wdt->clock); - goto err; + wdt->bus_clk = devm_clk_get(dev, "watchdog"); + if (IS_ERR(wdt->bus_clk)) { + dev_err(dev, "failed to find bus clock\n"); + return PTR_ERR(wdt->bus_clk); } - ret = clk_prepare_enable(wdt->clock); + ret = clk_prepare_enable(wdt->bus_clk); if (ret < 0) { - dev_err(dev, "failed to enable clock\n"); + dev_err(dev, "failed to enable bus clock\n"); return ret; } + /* + * "watchdog_src" clock is optional; if it's not present -- just skip it + * and use "watchdog" clock as both bus and source clock. 
+ */ + wdt->src_clk = devm_clk_get_optional(dev, "watchdog_src"); + if (IS_ERR(wdt->src_clk)) { + dev_err_probe(dev, PTR_ERR(wdt->src_clk), + "failed to get source clock\n"); + ret = PTR_ERR(wdt->src_clk); + goto err_bus_clk; + } + + ret = clk_prepare_enable(wdt->src_clk); + if (ret) { + dev_err(dev, "failed to enable source clock\n"); + goto err_bus_clk; + } + wdt->wdt_device.min_timeout = 1; - wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt->clock); + wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt); ret = s3c2410wdt_cpufreq_register(wdt); if (ret < 0) { dev_err(dev, "failed to register cpufreq\n"); - goto err_clk; + goto err_src_clk; } watchdog_set_drvdata(&wdt->wdt_device, wdt); @@ -581,19 +743,19 @@ static int s3c2410wdt_probe(struct platform_device *pdev) ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device, wdt->wdt_device.timeout); if (ret) { - started = s3c2410wdt_set_heartbeat(&wdt->wdt_device, - S3C2410_WATCHDOG_DEFAULT_TIME); - - if (started == 0) - dev_info(dev, - "tmr_margin value out of range, default %d used\n", + ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device, + S3C2410_WATCHDOG_DEFAULT_TIME); + if (ret == 0) { + dev_warn(dev, "tmr_margin value out of range, default %d used\n", S3C2410_WATCHDOG_DEFAULT_TIME); - else - dev_info(dev, "default timer value is out of range, cannot start\n"); + } else { + dev_err(dev, "failed to use default timeout\n"); + goto err_cpufreq; + } } - ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0, - pdev->name, pdev); + ret = devm_request_irq(dev, wdt_irq, s3c2410wdt_irq, 0, + pdev->name, pdev); if (ret != 0) { dev_err(dev, "failed to install irq (%d)\n", ret); goto err_cpufreq; @@ -605,25 +767,29 @@ static int s3c2410wdt_probe(struct platform_device *pdev) wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt); wdt->wdt_device.parent = dev; + /* + * If "tmr_atboot" param is non-zero, start the watchdog right now. Also + * set WDOG_HW_RUNNING bit, so that watchdog core can kick the watchdog. + * + * If we're not enabling the watchdog, then ensure it is disabled if it + * has been left running from the bootloader or other source. 
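+ * + * Setting WDOG_HW_RUNNING also makes the watchdog core ping the hardware + * itself until user space opens the device, so the system is not reset while + * /dev/watchdog is still unopened.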
+ */ + if (tmr_atboot) { + dev_info(dev, "starting watchdog timer\n"); + s3c2410wdt_start(&wdt->wdt_device); + set_bit(WDOG_HW_RUNNING, &wdt->wdt_device.status); + } else { + s3c2410wdt_stop(&wdt->wdt_device); + } + ret = watchdog_register_device(&wdt->wdt_device); if (ret) goto err_cpufreq; - ret = s3c2410wdt_mask_and_disable_reset(wdt, false); + ret = s3c2410wdt_enable(wdt, true); if (ret < 0) goto err_unregister; - if (tmr_atboot && started == 0) { - dev_info(dev, "starting watchdog timer\n"); - s3c2410wdt_start(&wdt->wdt_device); - } else if (!tmr_atboot) { - /* if we're not enabling the watchdog, then ensure it is - * disabled if it has been left running from the bootloader - * or other source */ - - s3c2410wdt_stop(&wdt->wdt_device); - } - platform_set_drvdata(pdev, wdt); /* print out a statement of readiness */ @@ -643,10 +809,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev) err_cpufreq: s3c2410wdt_cpufreq_deregister(wdt); - err_clk: - clk_disable_unprepare(wdt->clock); + err_src_clk: + clk_disable_unprepare(wdt->src_clk); + + err_bus_clk: + clk_disable_unprepare(wdt->bus_clk); - err: return ret; } @@ -655,7 +823,7 @@ static int s3c2410wdt_remove(struct platform_device *dev) int ret; struct s3c2410_wdt *wdt = platform_get_drvdata(dev); - ret = s3c2410wdt_mask_and_disable_reset(wdt, true); + ret = s3c2410wdt_enable(wdt, false); if (ret < 0) return ret; @@ -663,7 +831,8 @@ static int s3c2410wdt_remove(struct platform_device *dev) s3c2410wdt_cpufreq_deregister(wdt); - clk_disable_unprepare(wdt->clock); + clk_disable_unprepare(wdt->src_clk); + clk_disable_unprepare(wdt->bus_clk); return 0; } @@ -672,8 +841,7 @@ static void s3c2410wdt_shutdown(struct platform_device *dev) { struct s3c2410_wdt *wdt = platform_get_drvdata(dev); - s3c2410wdt_mask_and_disable_reset(wdt, true); - + s3c2410wdt_enable(wdt, false); s3c2410wdt_stop(&wdt->wdt_device); } @@ -688,7 +856,7 @@ static int s3c2410wdt_suspend(struct device *dev) wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON); wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT); - ret = s3c2410wdt_mask_and_disable_reset(wdt, true); + ret = s3c2410wdt_enable(wdt, false); if (ret < 0) return ret; @@ -708,7 +876,7 @@ static int s3c2410wdt_resume(struct device *dev) writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */ writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON); - ret = s3c2410wdt_mask_and_disable_reset(wdt, false); + ret = s3c2410wdt_enable(wdt, true); if (ret < 0) return ret; |
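All of the drivers touched above register with the common watchdog core, so user space drives them through the standard /dev/watchdog chardev interface, within the limits advertised by each driver's watchdog_info options. A minimal user-space sketch of that interface (assuming a registered /dev/watchdog node; WDIOC_SETPRETIMEOUT only succeeds on drivers advertising WDIOF_PRETIMEOUT, such as realtek_otto_wdt above):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30, pretimeout = 5, i;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	/* Honoured only if the driver advertises WDIOF_SETTIMEOUT */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("timeout set to %d s\n", timeout);

	/* Only succeeds when the driver advertises WDIOF_PRETIMEOUT */
	ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);

	/* Kick the watchdog a few times, well within the timeout */
	for (i = 0; i < 5; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep((unsigned int)timeout / 2);
	}

	/* Magic close: lets the driver stop cleanly unless nowayout is set */
	if (write(fd, "V", 1) != 1)
		perror("magic close");

	return close(fd);
}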