Diffstat (limited to 'drivers')
574 files changed, 18528 insertions, 10144 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 7bc40c2735ac..b594780a57d7 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -394,6 +394,7 @@ config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD config ACPI_DEBUG bool "Debug Statements" + default y help The ACPI subsystem can produce debug output. Saying Y enables this output and increases the kernel size by around 50K. diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 7cf6101cb4c7..2a99f5eb6962 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -275,7 +275,7 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr, static int acpi_processor_get_info(struct acpi_device *device) { - union acpi_object object = { 0 }; + union acpi_object object = { .processor = { 0 } }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; struct acpi_processor *pr = acpi_driver_data(device); int device_declaration = 0; diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c index 825c2a8acea4..91d7d90c47da 100644 --- a/drivers/acpi/acpi_tad.c +++ b/drivers/acpi/acpi_tad.c @@ -233,7 +233,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr, if (ret) return ret; - return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n", + return sysfs_emit(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n", rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second, rt.tz, rt.daylight); } @@ -428,7 +428,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, { struct acpi_tad_driver_data *dd = dev_get_drvdata(dev); - return sprintf(buf, "0x%02X\n", dd->capabilities); + return sysfs_emit(buf, "0x%02X\n", dd->capabilities); } static DEVICE_ATTR_RO(caps); diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index d34497f3576a..36934d4f26fb 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -136,9 +136,9 @@ acpi_ex_trace_point(acpi_trace_event_type type, if (pathname) { ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, - "%s %s [0x%p:%s] execution.\n", + "%s %s [%s] execution.\n", acpi_ex_get_trace_event_name(type), - begin ? "Begin" : "End", aml, pathname)); + begin ? 
"Begin" : "End", pathname)); } else { ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, "%s %s [0x%p] execution.\n", diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index cd2766c69d78..77c10a7a7a9f 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -131,7 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) int apei_osc_setup(void); -int einj_get_available_error_type(u32 *type); +int einj_get_available_error_type(u32 *type, int einj_action); int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, u64 param4); int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2, diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index 9b041415a9d0..bf8dc92a373a 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -33,6 +33,8 @@ #define SLEEP_UNIT_MAX 5000 /* 5ms */ /* Firmware should respond within 1 seconds */ #define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC) +#define COMPONENT_LEN 16 +#define ACPI65_EINJV2_SUPP BIT(30) #define ACPI5_VENDOR_BIT BIT(31) #define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \ ACPI_EINJ_MEMORY_UNCORRECTABLE | \ @@ -49,6 +51,28 @@ */ static int acpi5; +struct syndrome_array { + union { + u8 acpi_id[COMPONENT_LEN]; + u8 device_id[COMPONENT_LEN]; + u8 pcie_sbdf[COMPONENT_LEN]; + u8 vendor_id[COMPONENT_LEN]; + } comp_id; + union { + u8 proc_synd[COMPONENT_LEN]; + u8 mem_synd[COMPONENT_LEN]; + u8 pcie_synd[COMPONENT_LEN]; + u8 vendor_synd[COMPONENT_LEN]; + } comp_synd; +}; + +struct einjv2_extension_struct { + u32 length; + u16 revision; + u16 component_arr_count; + struct syndrome_array component_arr[] __counted_by(component_arr_count); +}; + struct set_error_type_with_address { u32 type; u32 vendor_extension; @@ -57,11 +81,13 @@ struct set_error_type_with_address { u64 memory_address; u64 memory_address_range; u32 pcie_sbdf; + struct einjv2_extension_struct einjv2_struct; }; enum { SETWA_FLAGS_APICID = 1, SETWA_FLAGS_MEM = 2, SETWA_FLAGS_PCIE_SBDF = 4, + SETWA_FLAGS_EINJV2 = 8, }; /* @@ -83,7 +109,10 @@ static struct debugfs_blob_wrapper vendor_blob; static struct debugfs_blob_wrapper vendor_errors; static char vendor_dev[64]; +static u32 max_nr_components; static u32 available_error_type; +static u32 available_error_type_v2; +static struct syndrome_array *syndrome_data; /* * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the @@ -151,7 +180,9 @@ static DEFINE_MUTEX(einj_mutex); */ bool einj_initialized __ro_after_init; -static void *einj_param; +static void __iomem *einj_param; +static u32 v5param_size; +static bool is_v2; static void einj_exec_ctx_init(struct apei_exec_context *ctx) { @@ -159,13 +190,13 @@ static void einj_exec_ctx_init(struct apei_exec_context *ctx) EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); } -static int __einj_get_available_error_type(u32 *type) +static int __einj_get_available_error_type(u32 *type, int einj_action) { struct apei_exec_context ctx; int rc; einj_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE); + rc = apei_exec_run(&ctx, einj_action); if (rc) return rc; *type = apei_exec_ctx_get_output(&ctx); @@ -174,17 +205,34 @@ static int __einj_get_available_error_type(u32 *type) } /* Get error injection capabilities of the platform */ -int einj_get_available_error_type(u32 *type) +int einj_get_available_error_type(u32 *type, int einj_action) { int rc; mutex_lock(&einj_mutex); - rc = __einj_get_available_error_type(type); + rc 
= __einj_get_available_error_type(type, einj_action); mutex_unlock(&einj_mutex); return rc; } +static int einj_get_available_error_types(u32 *type1, u32 *type2) +{ + int rc; + + rc = einj_get_available_error_type(type1, ACPI_EINJ_GET_ERROR_TYPE); + if (rc) + return rc; + if (*type1 & ACPI65_EINJV2_SUPP) { + rc = einj_get_available_error_type(type2, + ACPI_EINJV2_GET_ERROR_TYPE); + if (rc) + return rc; + } + + return 0; +} + static int einj_timedout(u64 *t) { if ((s64)*t < SLEEP_UNIT_MIN) { @@ -216,24 +264,26 @@ static void check_vendor_extension(u64 paddr, struct set_error_type_with_address *v5param) { int offset = v5param->vendor_extension; - struct vendor_error_type_extension *v; + struct vendor_error_type_extension v; + struct vendor_error_type_extension __iomem *p; u32 sbdf; if (!offset) return; - v = acpi_os_map_iomem(paddr + offset, sizeof(*v)); - if (!v) + p = acpi_os_map_iomem(paddr + offset, sizeof(*p)); + if (!p) return; - get_oem_vendor_struct(paddr, offset, v); - sbdf = v->pcie_sbdf; + memcpy_fromio(&v, p, sizeof(v)); + get_oem_vendor_struct(paddr, offset, &v); + sbdf = v.pcie_sbdf; sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", sbdf >> 24, (sbdf >> 16) & 0xff, (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, - v->vendor_id, v->device_id, v->rev_id); - acpi_os_unmap_iomem(v, sizeof(*v)); + v.vendor_id, v.device_id, v.rev_id); + acpi_os_unmap_iomem(p, sizeof(v)); } -static void *einj_get_parameter_address(void) +static void __iomem *einj_get_parameter_address(void) { int i; u64 pa_v4 = 0, pa_v5 = 0; @@ -254,26 +304,50 @@ static void *einj_get_parameter_address(void) entry++; } if (pa_v5) { - struct set_error_type_with_address *v5param; + struct set_error_type_with_address v5param; + struct set_error_type_with_address __iomem *p; + + v5param_size = sizeof(v5param); + p = acpi_os_map_iomem(pa_v5, sizeof(*p)); + if (p) { + int offset, len; - v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param)); - if (v5param) { + memcpy_fromio(&v5param, p, v5param_size); acpi5 = 1; - check_vendor_extension(pa_v5, v5param); - return v5param; + check_vendor_extension(pa_v5, &v5param); + if (available_error_type & ACPI65_EINJV2_SUPP) { + len = v5param.einjv2_struct.length; + offset = offsetof(struct einjv2_extension_struct, component_arr); + max_nr_components = (len - offset) / + sizeof(v5param.einjv2_struct.component_arr[0]); + /* + * The first call to acpi_os_map_iomem above does not include the + * component array, instead it is used to read and calculate maximum + * number of components supported by the system. Below, the mapping + * is expanded to include the component array. 
+ */ + acpi_os_unmap_iomem(p, v5param_size); + offset = offsetof(struct set_error_type_with_address, einjv2_struct); + v5param_size = offset + struct_size(&v5param.einjv2_struct, + component_arr, max_nr_components); + p = acpi_os_map_iomem(pa_v5, v5param_size); + } + return p; } } if (param_extension && pa_v4) { - struct einj_parameter *v4param; + struct einj_parameter v4param; + struct einj_parameter __iomem *p; - v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param)); - if (!v4param) + p = acpi_os_map_iomem(pa_v4, sizeof(*p)); + if (!p) return NULL; - if (v4param->reserved1 || v4param->reserved2) { - acpi_os_unmap_iomem(v4param, sizeof(*v4param)); + memcpy_fromio(&v4param, p, sizeof(v4param)); + if (v4param.reserved1 || v4param.reserved2) { + acpi_os_unmap_iomem(p, sizeof(v4param)); return NULL; } - return v4param; + return p; } return NULL; @@ -319,7 +393,8 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region( static int __einj_error_trigger(u64 trigger_paddr, u32 type, u64 param1, u64 param2) { - struct acpi_einj_trigger *trigger_tab = NULL; + struct acpi_einj_trigger trigger_tab; + struct acpi_einj_trigger *full_trigger_tab; struct apei_exec_context trigger_ctx; struct apei_resources trigger_resources; struct acpi_whea_header *trigger_entry; @@ -327,54 +402,60 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, u32 table_size; int rc = -EIO; struct acpi_generic_address *trigger_param_region = NULL; + struct acpi_einj_trigger __iomem *p = NULL; - r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), + r = request_mem_region(trigger_paddr, sizeof(trigger_tab), "APEI EINJ Trigger Table"); if (!r) { pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n", (unsigned long long)trigger_paddr, (unsigned long long)trigger_paddr + - sizeof(*trigger_tab) - 1); + sizeof(trigger_tab) - 1); goto out; } - trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); - if (!trigger_tab) { + p = ioremap_cache(trigger_paddr, sizeof(*p)); + if (!p) { pr_err("Failed to map trigger table!\n"); goto out_rel_header; } - rc = einj_check_trigger_header(trigger_tab); + memcpy_fromio(&trigger_tab, p, sizeof(trigger_tab)); + rc = einj_check_trigger_header(&trigger_tab); if (rc) { pr_warn(FW_BUG "Invalid trigger error action table.\n"); goto out_rel_header; } /* No action structures in the TRIGGER_ERROR table, nothing to do */ - if (!trigger_tab->entry_count) + if (!trigger_tab.entry_count) goto out_rel_header; rc = -EIO; - table_size = trigger_tab->table_size; - r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), - table_size - sizeof(*trigger_tab), + table_size = trigger_tab.table_size; + full_trigger_tab = kmalloc(table_size, GFP_KERNEL); + if (!full_trigger_tab) + goto out_rel_header; + r = request_mem_region(trigger_paddr + sizeof(trigger_tab), + table_size - sizeof(trigger_tab), "APEI EINJ Trigger Table"); if (!r) { pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", - (unsigned long long)trigger_paddr + sizeof(*trigger_tab), + (unsigned long long)trigger_paddr + sizeof(trigger_tab), (unsigned long long)trigger_paddr + table_size - 1); - goto out_rel_header; + goto out_free_trigger_tab; } - iounmap(trigger_tab); - trigger_tab = ioremap_cache(trigger_paddr, table_size); - if (!trigger_tab) { + iounmap(p); + p = ioremap_cache(trigger_paddr, table_size); + if (!p) { pr_err("Failed to map trigger table!\n"); goto out_rel_entry; } + memcpy_fromio(full_trigger_tab, p, table_size); trigger_entry = (struct acpi_whea_header *) - 
((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); + ((char *)full_trigger_tab + sizeof(struct acpi_einj_trigger)); apei_resources_init(&trigger_resources); apei_exec_ctx_init(&trigger_ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), - trigger_entry, trigger_tab->entry_count); + trigger_entry, trigger_tab.entry_count); rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); if (rc) goto out_fini; @@ -392,7 +473,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, apei_resources_init(&addr_resources); trigger_param_region = einj_get_trigger_parameter_region( - trigger_tab, param1, param2); + full_trigger_tab, param1, param2); if (trigger_param_region) { rc = apei_resources_add(&addr_resources, trigger_param_region->address, @@ -421,23 +502,33 @@ out_release: out_fini: apei_resources_fini(&trigger_resources); out_rel_entry: - release_mem_region(trigger_paddr + sizeof(*trigger_tab), - table_size - sizeof(*trigger_tab)); + release_mem_region(trigger_paddr + sizeof(trigger_tab), + table_size - sizeof(trigger_tab)); +out_free_trigger_tab: + kfree(full_trigger_tab); out_rel_header: - release_mem_region(trigger_paddr, sizeof(*trigger_tab)); + release_mem_region(trigger_paddr, sizeof(trigger_tab)); out: - if (trigger_tab) - iounmap(trigger_tab); + if (p) + iounmap(p); return rc; } +static bool is_end_of_list(u8 *val) +{ + for (int i = 0; i < COMPONENT_LEN; ++i) { + if (val[i] != 0xFF) + return false; + } + return true; +} static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, u64 param4) { struct apei_exec_context ctx; u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; - int rc; + int i, rc; einj_exec_ctx_init(&ctx); @@ -446,8 +537,10 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, return rc; apei_exec_ctx_set_input(&ctx, type); if (acpi5) { - struct set_error_type_with_address *v5param = einj_param; + struct set_error_type_with_address *v5param; + v5param = kmalloc(v5param_size, GFP_KERNEL); + memcpy_fromio(v5param, einj_param, v5param_size); v5param->type = type; if (type & ACPI5_VENDOR_BIT) { switch (vendor_flags) { @@ -467,8 +560,21 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, v5param->flags = flags; v5param->memory_address = param1; v5param->memory_address_range = param2; - v5param->apicid = param3; - v5param->pcie_sbdf = param4; + + if (is_v2) { + for (i = 0; i < max_nr_components; i++) { + if (is_end_of_list(syndrome_data[i].comp_id.acpi_id)) + break; + v5param->einjv2_struct.component_arr[i].comp_id = + syndrome_data[i].comp_id; + v5param->einjv2_struct.component_arr[i].comp_synd = + syndrome_data[i].comp_synd; + } + v5param->einjv2_struct.component_arr_count = i; + } else { + v5param->apicid = param3; + v5param->pcie_sbdf = param4; + } } else { switch (type) { case ACPI_EINJ_PROCESSOR_CORRECTABLE: @@ -492,15 +598,19 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, break; } } + memcpy_toio(einj_param, v5param, v5param_size); + kfree(v5param); } else { rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); if (rc) return rc; if (einj_param) { - struct einj_parameter *v4param = einj_param; + struct einj_parameter v4param; - v4param->param1 = param1; - v4param->param2 = param2; + memcpy_fromio(&v4param, einj_param, sizeof(v4param)); + v4param.param1 = param1; + v4param.param2 = param2; + memcpy_toio(einj_param, &v4param, sizeof(v4param)); } } rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); @@ -551,10 +661,15 @@ int 
einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, u64 base_addr, size; /* If user manually set "flags", make sure it is legal */ - if (flags && (flags & - ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF))) + if (flags && (flags & ~(SETWA_FLAGS_APICID | SETWA_FLAGS_MEM | + SETWA_FLAGS_PCIE_SBDF | SETWA_FLAGS_EINJV2))) return -EINVAL; + /* check if type is a valid EINJv2 error type */ + if (is_v2) { + if (!(type & available_error_type_v2)) + return -EINVAL; + } /* * We need extra sanity checks for memory errors. * Other types leap directly to injection. @@ -632,6 +747,8 @@ static u64 error_param2; static u64 error_param3; static u64 error_param4; static struct dentry *einj_debug_dir; +static char einj_buf[32]; +static bool einj_v2_enabled; static struct { u32 mask; const char *str; } const einj_error_type_string[] = { { BIT(0), "Processor Correctable" }, { BIT(1), "Processor Uncorrectable non-fatal" }, @@ -648,6 +765,12 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = { { BIT(31), "Vendor Defined Error Types" }, }; +static struct { u32 mask; const char *str; } const einjv2_error_type_string[] = { + { BIT(0), "EINJV2 Processor Error" }, + { BIT(1), "EINJV2 Memory Error" }, + { BIT(2), "EINJV2 PCI Express Error" }, +}; + static int available_error_type_show(struct seq_file *m, void *v) { @@ -655,17 +778,22 @@ static int available_error_type_show(struct seq_file *m, void *v) if (available_error_type & einj_error_type_string[pos].mask) seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask, einj_error_type_string[pos].str); - + if ((available_error_type & ACPI65_EINJV2_SUPP) && einj_v2_enabled) { + for (int pos = 0; pos < ARRAY_SIZE(einjv2_error_type_string); pos++) { + if (available_error_type_v2 & einjv2_error_type_string[pos].mask) + seq_printf(m, "V2_0x%08x\t%s\n", einjv2_error_type_string[pos].mask, + einjv2_error_type_string[pos].str); + } + } return 0; } DEFINE_SHOW_ATTRIBUTE(available_error_type); -static int error_type_get(void *data, u64 *val) +static ssize_t error_type_get(struct file *file, char __user *buf, + size_t count, loff_t *ppos) { - *val = error_type; - - return 0; + return simple_read_from_buffer(buf, count, ppos, einj_buf, strlen(einj_buf)); } bool einj_is_cxl_error_type(u64 type) @@ -692,15 +820,35 @@ int einj_validate_error_type(u64 type) if (tval & (tval - 1)) return -EINVAL; if (!vendor) - if (!(type & available_error_type)) + if (!(type & (available_error_type | available_error_type_v2))) return -EINVAL; return 0; } -static int error_type_set(void *data, u64 val) +static ssize_t error_type_set(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) { int rc; + u64 val; + + /* Leave the last character for the NUL terminator */ + if (count > sizeof(einj_buf) - 1) + return -EINVAL; + + memset(einj_buf, 0, sizeof(einj_buf)); + if (copy_from_user(einj_buf, buf, count)) + return -EFAULT; + + if (strncmp(einj_buf, "V2_", 3) == 0) { + if (!sscanf(einj_buf, "V2_%llx", &val)) + return -EINVAL; + is_v2 = true; + } else { + if (!sscanf(einj_buf, "%llx", &val)) + return -EINVAL; + is_v2 = false; + } rc = einj_validate_error_type(val); if (rc) @@ -708,17 +856,24 @@ static int error_type_set(void *data, u64 val) error_type = val; - return 0; + return count; } -DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set, - "0x%llx\n"); +static const struct file_operations error_type_fops = { + .read = error_type_get, + .write = error_type_set, +}; static int error_inject_set(void 
*data, u64 val) { if (!error_type) return -EINVAL; + if (is_v2) + error_flags |= SETWA_FLAGS_EINJV2; + else + error_flags &= ~SETWA_FLAGS_EINJV2; + return einj_error_inject(error_type, error_flags, error_param1, error_param2, error_param3, error_param4); } @@ -741,6 +896,98 @@ static int einj_check_table(struct acpi_table_einj *einj_tab) return 0; } +static ssize_t u128_read(struct file *f, char __user *buf, size_t count, loff_t *off) +{ + char output[2 * COMPONENT_LEN + 1]; + u8 *data = f->f_inode->i_private; + int i; + + if (*off >= sizeof(output)) + return 0; + + for (i = 0; i < COMPONENT_LEN; i++) + sprintf(output + 2 * i, "%.02x", data[COMPONENT_LEN - i - 1]); + output[2 * COMPONENT_LEN] = '\n'; + + return simple_read_from_buffer(buf, count, off, output, sizeof(output)); +} + +static ssize_t u128_write(struct file *f, const char __user *buf, size_t count, loff_t *off) +{ + char input[2 + 2 * COMPONENT_LEN + 2]; + u8 *save = f->f_inode->i_private; + u8 tmp[COMPONENT_LEN]; + char byte[3] = {}; + char *s, *e; + ssize_t c; + long val; + int i; + + /* Require that user supply whole input line in one write(2) syscall */ + if (*off) + return -EINVAL; + + c = simple_write_to_buffer(input, sizeof(input), off, buf, count); + if (c < 0) + return c; + + if (c < 1 || input[c - 1] != '\n') + return -EINVAL; + + /* Empty line means invalidate this entry */ + if (c == 1) { + memset(save, 0xff, COMPONENT_LEN); + return c; + } + + if (input[0] == '0' && (input[1] == 'x' || input[1] == 'X')) + s = input + 2; + else + s = input; + e = input + c - 1; + + for (i = 0; i < COMPONENT_LEN; i++) { + byte[1] = *--e; + byte[0] = e > s ? *--e : '0'; + if (kstrtol(byte, 16, &val)) + return -EINVAL; + tmp[i] = val; + if (e <= s) + break; + } + while (++i < COMPONENT_LEN) + tmp[i] = 0; + + memcpy(save, tmp, COMPONENT_LEN); + + return c; +} + +static const struct file_operations u128_fops = { + .read = u128_read, + .write = u128_write, +}; + +static bool setup_einjv2_component_files(void) +{ + char name[32]; + + syndrome_data = kcalloc(max_nr_components, sizeof(syndrome_data[0]), GFP_KERNEL); + if (!syndrome_data) + return false; + + for (int i = 0; i < max_nr_components; i++) { + sprintf(name, "component_id%d", i); + debugfs_create_file(name, 0600, einj_debug_dir, + &syndrome_data[i].comp_id, &u128_fops); + sprintf(name, "component_syndrome%d", i); + debugfs_create_file(name, 0600, einj_debug_dir, + &syndrome_data[i].comp_synd, &u128_fops); + } + + return true; +} + static int __init einj_probe(struct faux_device *fdev) { int rc; @@ -764,7 +1011,7 @@ static int __init einj_probe(struct faux_device *fdev) goto err_put_table; } - rc = einj_get_available_error_type(&available_error_type); + rc = einj_get_available_error_types(&available_error_type, &available_error_type_v2); if (rc) goto err_put_table; @@ -812,6 +1059,8 @@ static int __init einj_probe(struct faux_device *fdev) &error_param4); debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, einj_debug_dir, ¬rigger); + if (available_error_type & ACPI65_EINJV2_SUPP) + einj_v2_enabled = setup_einjv2_component_files(); } if (vendor_dev[0]) { @@ -848,7 +1097,7 @@ static void __exit einj_remove(struct faux_device *fdev) if (einj_param) { acpi_size size = (acpi5) ? 
- sizeof(struct set_error_type_with_address) : + v5param_size : sizeof(struct einj_parameter); acpi_os_unmap_iomem(einj_param, size); @@ -860,6 +1109,7 @@ static void __exit einj_remove(struct faux_device *fdev) apei_resources_release(&einj_resources); apei_resources_fini(&einj_resources); debugfs_remove_recursive(einj_debug_dir); + kfree(syndrome_data); acpi_put_table((struct acpi_table_header *)einj_tab); } diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c index 78da9ae543a2..e70a416ec925 100644 --- a/drivers/acpi/apei/einj-cxl.c +++ b/drivers/acpi/apei/einj-cxl.c @@ -30,7 +30,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v) int cxl_err, rc; u32 available_error_type = 0; - rc = einj_get_available_error_type(&available_error_type); + rc = einj_get_available_error_type(&available_error_type, ACPI_EINJ_GET_ERROR_TYPE); if (rc) return rc; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index f0584ccad451..a0d54993edb3 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -464,28 +464,41 @@ static void ghes_clear_estatus(struct ghes *ghes, ghes_ack_error(ghes->generic_v2); } -/* - * Called as task_work before returning to user-space. - * Ensure any queued work has been done before we return to the context that - * triggered the notification. +/** + * struct ghes_task_work - for synchronous RAS event + * + * @twork: callback_head for task work + * @pfn: page frame number of corrupted page + * @flags: work control flags + * + * Structure to pass task work to be handled before + * returning to user-space via task_work_add(). */ -static void ghes_kick_task_work(struct callback_head *head) +struct ghes_task_work { + struct callback_head twork; + u64 pfn; + int flags; +}; + +static void memory_failure_cb(struct callback_head *twork) { - struct acpi_hest_generic_status *estatus; - struct ghes_estatus_node *estatus_node; - u32 node_len; + struct ghes_task_work *twcb = container_of(twork, struct ghes_task_work, twork); + int ret; - estatus_node = container_of(head, struct ghes_estatus_node, task_work); - if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) - memory_failure_queue_kick(estatus_node->task_work_cpu); + ret = memory_failure(twcb->pfn, twcb->flags); + gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb)); - estatus = GHES_ESTATUS_FROM_NODE(estatus_node); - node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus)); - gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); + if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP) + return; + + pr_err("%#llx: Sending SIGBUS to %s:%d due to hardware memory corruption\n", + twcb->pfn, current->comm, task_pid_nr(current)); + force_sig(SIGBUS); } static bool ghes_do_memory_failure(u64 physical_addr, int flags) { + struct ghes_task_work *twcb; unsigned long pfn; if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) @@ -499,6 +512,18 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) return false; } + if (flags == MF_ACTION_REQUIRED && current->mm) { + twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb)); + if (!twcb) + return false; + + twcb->pfn = pfn; + twcb->flags = flags; + init_task_work(&twcb->twork, memory_failure_cb); + task_work_add(current, &twcb->twork, TWA_RESUME); + return true; + } + memory_failure_queue(pfn, flags); return true; } @@ -842,7 +867,7 @@ int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) } EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL"); -static bool ghes_do_proc(struct ghes *ghes, 
+static void ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { int sev, sec_sev; @@ -902,7 +927,16 @@ static bool ghes_do_proc(struct ghes *ghes, } } - return queued; + /* + * If no memory failure work is queued for abnormal synchronous + * errors, do a force kill. + */ + if (sync && !queued) { + dev_err(ghes->dev, + HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n", + current->comm, task_pid_nr(current)); + force_sig(SIGBUS); + } } static void __ghes_print_estatus(const char *pfx, @@ -1088,6 +1122,8 @@ static void __ghes_panic(struct ghes *ghes, __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus); + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); + ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); if (!panic_timeout) @@ -1206,9 +1242,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) struct ghes_estatus_node *estatus_node; struct acpi_hest_generic *generic; struct acpi_hest_generic_status *estatus; - bool task_work_pending; u32 len, node_len; - int ret; llnode = llist_del_all(&ghes_estatus_llist); /* @@ -1223,25 +1257,16 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) estatus = GHES_ESTATUS_FROM_NODE(estatus_node); len = cper_estatus_len(estatus); node_len = GHES_ESTATUS_NODE_LEN(len); - task_work_pending = ghes_do_proc(estatus_node->ghes, estatus); + + ghes_do_proc(estatus_node->ghes, estatus); + if (!ghes_estatus_cached(estatus)) { generic = estatus_node->generic; if (ghes_print_estatus(NULL, generic, estatus)) ghes_estatus_cache_add(generic, estatus); } - - if (task_work_pending && current->mm) { - estatus_node->task_work.func = ghes_kick_task_work; - estatus_node->task_work_cpu = smp_processor_id(); - ret = task_work_add(current, &estatus_node->task_work, - TWA_RESUME); - if (ret) - estatus_node->task_work.func = NULL; - } - - if (!estatus_node->task_work.func) - gen_pool_free(ghes_estatus_pool, - (unsigned long)estatus_node, node_len); + gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, + node_len); llnode = next; } @@ -1302,7 +1327,6 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, estatus_node->ghes = ghes; estatus_node->generic = ghes->generic; - estatus_node->task_work.func = NULL; estatus = GHES_ESTATUS_FROM_NODE(estatus_node); if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) { diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index c2ab2783303f..a984ccd4a2a0 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1406,7 +1406,7 @@ static int __init acpi_bus_init(void) goto error1; /* - * Register the for all standard device notifications. + * Register for all standard device notifications. 
*/ status = acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY, diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index dbd4446025ec..4e0583274b8f 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1119,6 +1119,8 @@ int acpi_subsys_prepare(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); + dev_pm_set_strict_midlayer(dev, true); + if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) { int ret = dev->driver->pm->prepare(dev); @@ -1147,6 +1149,8 @@ void acpi_subsys_complete(struct device *dev) */ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) pm_request_resume(dev); + + dev_pm_set_strict_midlayer(dev, false); } EXPORT_SYMBOL_GPL(acpi_subsys_complete); @@ -1362,6 +1366,8 @@ static int acpi_subsys_poweroff_noirq(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ +static void acpi_dev_pm_detach(struct device *dev, bool power_off); + static struct dev_pm_domain acpi_general_pm_domain = { .ops = { .runtime_suspend = acpi_subsys_runtime_suspend, @@ -1382,6 +1388,7 @@ static struct dev_pm_domain acpi_general_pm_domain = { .restore_early = acpi_subsys_restore_early, #endif }, + .detach = acpi_dev_pm_detach, }; /** @@ -1465,7 +1472,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) acpi_device_wakeup_disable(adev); } - dev->pm_domain->detach = acpi_dev_pm_detach; return 1; } EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index e8caf4106ff9..776914f31b9e 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -238,6 +238,8 @@ static const struct acpi_device_id int3407_device_ids[] = { {"INTC10A5", 0}, {"INTC10D8", 0}, {"INTC10D9", 0}, + {"INTC1100", 0}, + {"INTC1101", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3407_device_ids); diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index aef7aca2161d..a222df059a16 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -61,6 +61,13 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { {"INTC10D7"}, {"INTC10D8"}, {"INTC10D9"}, + {"INTC10FC"}, + {"INTC10FD"}, + {"INTC10FE"}, + {"INTC10FF"}, + {"INTC1100"}, + {"INTC1101"}, + {"INTC1102"}, {""}, }; diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index 15eba1c70e66..8a28a72a7c6a 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -20,6 +20,7 @@ {"INTC106A", }, /* Fan for Lunar Lake generation */ \ {"INTC10A2", }, /* Fan for Raptor Lake generation */ \ {"INTC10D6", }, /* Fan for Panther Lake generation */ \ + {"INTC10FE", }, /* Fan for Wildcat Lake generation */ \ {"PNP0C0B", } /* Generic ACPI fan */ #define ACPI_FPS_NAME_LEN 20 diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c index 22d29ac2447c..c1afb7b5ed3d 100644 --- a/drivers/acpi/fan_attr.c +++ b/drivers/acpi/fan_attr.c @@ -22,9 +22,9 @@ static ssize_t show_state(struct device *dev, struct device_attribute *attr, cha int count; if (fps->control == 0xFFFFFFFF || fps->control > 100) - count = scnprintf(buf, PAGE_SIZE, "not-defined:"); + count = sysfs_emit(buf, "not-defined:"); else - count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control); + count = sysfs_emit(buf, "%lld:", fps->control); if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) count += sysfs_emit_at(buf, count, "not-defined:"); @@ -59,7 +59,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr, if (status) return status; - return 
sprintf(buf, "%lld\n", fst.speed); + return sysfs_emit(buf, "%lld\n", fst.speed); } static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf) @@ -67,7 +67,7 @@ static ssize_t show_fine_grain_control(struct device *dev, struct device_attribu struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev); struct acpi_fan *fan = acpi_driver_data(acpi_dev); - return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl); + return sysfs_emit(buf, "%d\n", fan->fif.fine_grain_ctrl); } int acpi_fan_create_attributes(struct acpi_device *device) diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c index 8ad12ad3aaaf..095502086b41 100644 --- a/drivers/acpi/fan_core.c +++ b/drivers/acpi/fan_core.c @@ -102,7 +102,7 @@ match_fps: break; } if (i == fan->fps_count) { - dev_dbg(&device->dev, "Invalid control value returned\n"); + dev_dbg(&device->dev, "No matching fps control value\n"); return -EINVAL; } diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 3902759abcba..bce6f6a18426 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -55,10 +55,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm, { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); unsigned long security_flags = 0; - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_get_security_state cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_GET_SECURITY_STATE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -120,10 +119,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm, static int intel_security_freeze(struct nvdimm *nvdimm) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_freeze_lock cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_FREEZE_LOCK, .nd_family = NVDIMM_FAMILY_INTEL, @@ -153,10 +151,9 @@ static int intel_security_change_key(struct nvdimm *nvdimm, unsigned int cmd = ptype == NVDIMM_MASTER ? NVDIMM_INTEL_SET_MASTER_PASSPHRASE : NVDIMM_INTEL_SET_PASSPHRASE; - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_set_passphrase cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_family = NVDIMM_FAMILY_INTEL, .nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2, @@ -195,10 +192,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, const struct nvdimm_key_data *key_data) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_unlock_unit cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_UNLOCK_UNIT, .nd_family = NVDIMM_FAMILY_INTEL, @@ -234,10 +230,9 @@ static int intel_security_disable(struct nvdimm *nvdimm, { int rc; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_disable_passphrase cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -277,10 +272,9 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); unsigned int cmd = ptype == NVDIMM_MASTER ? 
NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE; - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_secure_erase cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_family = NVDIMM_FAMILY_INTEL, .nd_size_in = ND_INTEL_PASSPHRASE_SIZE, @@ -318,10 +312,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) { int rc; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_query_overwrite cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_QUERY_OVERWRITE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -354,10 +347,9 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, { int rc; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_overwrite cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_OVERWRITE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -407,10 +399,9 @@ const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops; static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc, struct nd_intel_bus_fw_activate_businfo *info) { - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_bus_fw_activate_businfo cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, .nd_family = NVDIMM_BUS_FAMILY_INTEL, @@ -518,33 +509,31 @@ static enum nvdimm_fwa_capability intel_bus_fwa_capability( static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_bus_fw_activate cmd; - } nd_cmd = { - .pkg = { - .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE, - .nd_family = NVDIMM_BUS_FAMILY_INTEL, - .nd_size_in = sizeof(nd_cmd.cmd.iodev_state), - .nd_size_out = - sizeof(struct nd_intel_bus_fw_activate), - .nd_fw_size = - sizeof(struct nd_intel_bus_fw_activate), - }, + ) nd_cmd; + int rc; + + nd_cmd.pkg = (struct nd_cmd_pkg) { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.iodev_state), + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate), + }; + nd_cmd.cmd = (struct nd_intel_bus_fw_activate) { /* * Even though activate is run from a suspended context, * for safety, still ask platform firmware to force * quiesce devices by default. Let a module * parameter override that policy. */ - .cmd = { - .iodev_state = acpi_desc->fwa_noidle - ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE - : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE, - }, + .iodev_state = acpi_desc->fwa_noidle + ? 
ND_INTEL_BUS_FWA_IODEV_OS_IDLE + : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE, }; - int rc; - switch (intel_bus_fwa_state(nd_desc)) { case NVDIMM_FWA_ARMED: case NVDIMM_FWA_ARM_OVERFLOW: @@ -582,10 +571,9 @@ const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops; static int intel_fwa_dimminfo(struct nvdimm *nvdimm, struct nd_intel_fw_activate_dimminfo *info) { - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_fw_activate_dimminfo cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, .nd_family = NVDIMM_FAMILY_INTEL, @@ -688,27 +676,24 @@ static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_fw_activate_arm cmd; - } nd_cmd = { - .pkg = { - .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM, - .nd_family = NVDIMM_FAMILY_INTEL, - .nd_size_in = sizeof(nd_cmd.cmd.activate_arm), - .nd_size_out = - sizeof(struct nd_intel_fw_activate_arm), - .nd_fw_size = - sizeof(struct nd_intel_fw_activate_arm), - }, - .cmd = { - .activate_arm = arm == NVDIMM_FWA_ARM - ? ND_INTEL_DIMM_FWA_ARM - : ND_INTEL_DIMM_FWA_DISARM, - }, - }; + ) nd_cmd; int rc; + nd_cmd.pkg = (struct nd_cmd_pkg) { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.activate_arm), + .nd_size_out = sizeof(struct nd_intel_fw_activate_arm), + .nd_fw_size = sizeof(struct nd_intel_fw_activate_arm), + }; + nd_cmd.cmd = (struct nd_intel_fw_activate_arm) { + .activate_arm = arm == NVDIMM_FWA_ARM ? + ND_INTEL_DIMM_FWA_ARM : + ND_INTEL_DIMM_FWA_DISARM, + }; + switch (intel_fwa_state(nvdimm)) { case NVDIMM_FWA_INVALID: return -ENXIO; diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 08e10b6226dc..e4560b33b8ad 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -268,7 +268,7 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link) link->irq.active = irq; - acpi_handle_debug(handle, "Link at IRQ %d \n", link->irq.active); + acpi_handle_debug(handle, "Link at IRQ %d\n", link->irq.active); end: return result; diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c index 031d1ba81b86..318683744ed1 100644 --- a/drivers/acpi/pfr_update.c +++ b/drivers/acpi/pfr_update.c @@ -127,8 +127,11 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr, pfru_dev->rev_id, PFRU_FUNC_QUERY_UPDATE_CAP, NULL, ACPI_TYPE_PACKAGE); - if (!out_obj) + if (!out_obj) { + dev_dbg(pfru_dev->parent_dev, + "Query cap failed with no object\n"); return ret; + } if (out_obj->package.count < CAP_NR_IDX || out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER || @@ -141,13 +144,17 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr, out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER || out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER || out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER || - out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) + out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) { + dev_dbg(pfru_dev->parent_dev, + "Query cap failed with invalid package count/type\n"); goto free_acpi_buffer; + } cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value; if (cap_hdr->status 
!= DSM_SUCCEED) { ret = -EBUSY; - dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status); + dev_dbg(pfru_dev->parent_dev, "Query cap Error Status:%d\n", + cap_hdr->status); goto free_acpi_buffer; } @@ -193,24 +200,32 @@ static int query_buffer(struct pfru_com_buf_info *info, out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF, NULL, ACPI_TYPE_PACKAGE); - if (!out_obj) + if (!out_obj) { + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with no object\n"); return ret; + } if (out_obj->package.count < BUF_NR_IDX || out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER || out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER || out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER || out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER || - out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) + out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) { + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with invalid package count/type\n"); goto free_acpi_buffer; + } info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value; info->ext_status = out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value; if (info->status != DSM_SUCCEED) { ret = -EBUSY; - dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status); - dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status); + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with Error Status:%d\n", info->status); + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with Error Extended Status:%d\n", info->ext_status); goto free_acpi_buffer; } @@ -295,12 +310,16 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap, m_img_hdr = data + size; type = get_image_type(m_img_hdr, pfru_dev); - if (type < 0) + if (type < 0) { + dev_dbg(pfru_dev->parent_dev, "Invalid image type\n"); return false; + } size = adjust_efi_size(m_img_hdr, size); - if (size < 0) + if (size < 0) { + dev_dbg(pfru_dev->parent_dev, "Invalid image size\n"); return false; + } auth = data + size; size += sizeof(u64) + auth->auth_info.hdr.len; @@ -346,8 +365,11 @@ static int start_update(int action, struct pfru_device *pfru_dev) out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, pfru_dev->rev_id, PFRU_FUNC_START, &in_obj, ACPI_TYPE_PACKAGE); - if (!out_obj) + if (!out_obj) { + dev_dbg(pfru_dev->parent_dev, + "Update failed to start with no object\n"); return ret; + } if (out_obj->package.count < UPDATE_NR_IDX || out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER || @@ -355,8 +377,11 @@ static int start_update(int action, struct pfru_device *pfru_dev) out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER || out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER || out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER || - out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) + out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) { + dev_dbg(pfru_dev->parent_dev, + "Update failed with invalid package count/type\n"); goto free_acpi_buffer; + } update_result.status = out_obj->package.elements[UPDATE_STATUS_IDX].integer.value; @@ -365,8 +390,10 @@ static int start_update(int action, struct pfru_device *pfru_dev) if (update_result.status != DSM_SUCCEED) { ret = -EBUSY; - dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status); - 
dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", + dev_dbg(pfru_dev->parent_dev, + "Update failed with Error Status:%d\n", update_result.status); + dev_dbg(pfru_dev->parent_dev, + "Update failed with Error Extended Status:%d\n", update_result.ext_status); goto free_acpi_buffer; @@ -450,8 +477,10 @@ static ssize_t pfru_write(struct file *file, const char __user *buf, if (ret) return ret; - if (len > buf_info.buf_size) + if (len > buf_info.buf_size) { + dev_dbg(pfru_dev->parent_dev, "Capsule image size too large\n"); return -EINVAL; + } iov.iov_base = (void __user *)buf; iov.iov_len = len; @@ -460,10 +489,14 @@ static ssize_t pfru_write(struct file *file, const char __user *buf, /* map the communication buffer */ phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo); buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB); - if (!buf_ptr) + if (!buf_ptr) { + dev_dbg(pfru_dev->parent_dev, "Failed to remap the buffer\n"); return -ENOMEM; + } if (!copy_from_iter_full(buf_ptr, len, &iter)) { + dev_dbg(pfru_dev->parent_dev, + "Failed to copy the data from the user space buffer\n"); ret = -EINVAL; goto unmap; } diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index e549914a636c..be033bbb126a 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -85,8 +85,6 @@ static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa) } } - pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa); - return 0; } @@ -154,13 +152,37 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end) guid_copy(&th->guid, (guid_t *)handler_info->handler_guid); th->handler_addr = (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address); + /* + * Print a warning message if handler_addr is zero which is not expected to + * ever happen. + */ + if (unlikely(!th->handler_addr)) + pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->handler_address); th->static_data_buffer_addr = efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address); + /* + * According to the PRM specification, static_data_buffer_address can be zero, + * so avoid printing a warning message in that case. Otherwise, if the + * return value of efi_pa_va_lookup() is zero, print the message. + */ + if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address)) + pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->static_data_buffer_address); th->acpi_param_buffer_addr = efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address); + /* + * According to the PRM specification, acpi_param_buffer_address can be zero, + * so avoid printing a warning message in that case. Otherwise, if the + * return value of efi_pa_va_lookup() is zero, print the message. 
+ */ + if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address)) + pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->acpi_param_buffer_address); + } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info))); return 0; diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 4322f2da6d10..c08ead07252b 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/export.h> +#include <linux/string_choices.h> #include <linux/suspend.h> #include <linux/bcd.h> #include <linux/acpi.h> @@ -30,17 +30,16 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) if (!dev->wakeup.flags.valid) continue; - seq_printf(seq, "%s\t S%d\t", + seq_printf(seq, "%s\t S%llu\t", dev->pnp.bus_id, - (u32) dev->wakeup.sleep_state); + dev->wakeup.sleep_state); mutex_lock(&dev->physical_node_lock); if (!dev->physical_node_count) { seq_printf(seq, "%c%-8s\n", dev->wakeup.flags.valid ? '*' : ' ', - device_may_wakeup(&dev->dev) ? - "enabled" : "disabled"); + str_enabled_disabled(device_may_wakeup(&dev->dev))); } else { struct device *ldev; list_for_each_entry(entry, &dev->physical_node_list, @@ -55,9 +54,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) seq_printf(seq, "%c%-8s %s:%s\n", dev->wakeup.flags.valid ? '*' : ' ', - (device_may_wakeup(&dev->dev) || - device_may_wakeup(ldev)) ? - "enabled" : "disabled", + str_enabled_disabled(device_may_wakeup(ldev) || + device_may_wakeup(&dev->dev)), ldev->bus ? ldev->bus->name : "no-bus", dev_name(ldev)); put_device(ldev); @@ -141,6 +139,5 @@ static const struct proc_ops acpi_system_wakeup_device_proc_ops = { void __init acpi_sleep_proc_init(void) { /* 'wakeup device' [R/W] */ - proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR, - acpi_root_dir, &acpi_system_wakeup_device_proc_ops); + proc_create("wakeup", 0644, acpi_root_dir, &acpi_system_wakeup_device_proc_ops); } diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 64b8d1e19594..755003bf3a45 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -173,11 +173,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) { unsigned int cpu; + if (ignore_ppc == 1) + return; + for_each_cpu(cpu, policy->related_cpus) { struct acpi_processor *pr = per_cpu(processors, cpu); int ret; - if (!pr) + if (!pr || !pr->performance) continue; /* @@ -193,6 +196,11 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) if (ret < 0) pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, ret); + + ret = acpi_processor_get_platform_limit(pr); + if (ret) + pr_err("Failed to update freq constraint for CPU%d (%d)\n", + cpu, ret); } } diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index d1541a386fbc..f9c2bc1d4a3a 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -235,7 +235,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data) if (pr->throttling_platform_limit > target_state) target_state = pr->throttling_platform_limit; if (target_state >= p_throttling->state_count) { - pr_warn("Exceed the limit of T-state \n"); + pr_warn("Exceed the limit of T-state\n"); target_state = p_throttling->state_count - 1; } p_tstate->target_state = target_state; diff --git 
a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index b02bf770aead..ff6dc957bc11 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -42,7 +42,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state) list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, wakeup_list) { if (!dev->wakeup.flags.valid - || sleep_state > (u32) dev->wakeup.sleep_state + || sleep_state > dev->wakeup.sleep_state || !(device_may_wakeup(&dev->dev) || dev->wakeup.prepare_count)) continue; @@ -67,7 +67,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state) list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, wakeup_list) { if (!dev->wakeup.flags.valid - || sleep_state > (u32) dev->wakeup.sleep_state + || sleep_state > dev->wakeup.sleep_state || !(device_may_wakeup(&dev->dev) || dev->wakeup.prepare_count)) continue; diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c index 258440b899a9..6daa6372f980 100644 --- a/drivers/acpi/x86/lpss.c +++ b/drivers/acpi/x86/lpss.c @@ -387,9 +387,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) }, { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, - /* Wildcat Point LPSS devices */ - { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) }, - { } }; diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 71482d639a6d..74e34a07ef72 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -138,7 +138,7 @@ static int amba_read_periphid(struct amba_device *dev) void __iomem *tmp; int i, ret; - ret = dev_pm_domain_attach(&dev->dev, true); + ret = dev_pm_domain_attach(&dev->dev, PD_FLAG_ATTACH_POWER_ON); if (ret) { dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret); goto err_out; @@ -291,7 +291,7 @@ static int amba_probe(struct device *dev) if (ret < 0) break; - ret = dev_pm_domain_attach(dev, true); + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (ret) break; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index c463ca4a8fff..1ba2192ae667 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5384,10 +5384,9 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg, void __user *ubuf = (void __user *)arg; struct binder_write_read bwr; - if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { - ret = -EFAULT; - goto out; - } + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) + return -EFAULT; + binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d write %lld at %016llx, read %lld at %016llx\n", proc->pid, thread->pid, @@ -5402,8 +5401,6 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg, trace_binder_write_done(ret); if (ret < 0) { bwr.read_consumed = 0; - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) - ret = -EFAULT; goto out; } } @@ -5417,22 +5414,17 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg, if (!binder_worklist_empty_ilocked(&proc->todo)) binder_wakeup_proc_ilocked(proc); binder_inner_proc_unlock(proc); - if (ret < 0) { - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) - ret = -EFAULT; + if (ret < 0) goto out; - } } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d wrote %lld of %lld, read return %lld of %lld\n", proc->pid, thread->pid, (u64)bwr.write_consumed, (u64)bwr.write_size, (u64)bwr.read_consumed, (u64)bwr.read_size); - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { - ret = -EFAULT; - goto out; - } out: + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) + ret = -EFAULT; return ret; } @@ -6128,7 +6120,7 @@ static int binder_release(struct inode *nodp, struct file *filp) debugfs_remove(proc->debugfs_entry); if 
(proc->binderfs_entry) { - binderfs_remove_file(proc->binderfs_entry); + simple_recursive_removal(proc->binderfs_entry, NULL); proc->binderfs_entry = NULL; } diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 1ba5caf1d88d..a5fd23dcafcd 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -81,7 +81,6 @@ extern bool is_binderfs_device(const struct inode *inode); extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name, const struct file_operations *fops, void *data); -extern void binderfs_remove_file(struct dentry *dentry); #else static inline bool is_binderfs_device(const struct inode *inode) { @@ -94,7 +93,6 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir, { return NULL; } -static inline void binderfs_remove_file(struct dentry *dentry) {} #endif #ifdef CONFIG_ANDROID_BINDERFS diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 024275dbfdd8..acc946bb1457 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -500,21 +500,6 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent, return dentry; } -void binderfs_remove_file(struct dentry *dentry) -{ - struct inode *parent_inode; - - parent_inode = d_inode(dentry->d_parent); - inode_lock(parent_inode); - if (simple_positive(dentry)) { - dget(dentry); - simple_unlink(parent_inode, dentry); - d_delete(dentry); - dput(dentry); - } - inode_unlock(parent_inode); -} - struct dentry *binderfs_create_file(struct dentry *parent, const char *name, const struct file_operations *fops, void *data) diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index e00536b49552..120a2b7067fc 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -117,23 +117,39 @@ config SATA_AHCI config SATA_MOBILE_LPM_POLICY int "Default SATA Link Power Management policy" - range 0 4 + range 0 5 default 3 depends on SATA_AHCI help Select the Default SATA Link Power Management (LPM) policy to use for chipsets / "South Bridges" supporting low-power modes. Such chipsets are ubiquitous across laptops, desktops and servers. - - The value set has the following meanings: + Each policy combines power saving states and features: + - Partial: The Phy logic is powered but is in a reduced power + state. The exit latency from this state is no longer than + 10us). + - Slumber: The Phy logic is powered but is in an even lower power + state. The exit latency from this state is potentially + longer, but no longer than 10ms. + - DevSleep: The Phy logic may be powered down. The exit latency from + this state is no longer than 20 ms, unless otherwise + specified by DETO in the device Identify Device Data log. + - HIPM: Host Initiated Power Management (host automatically + transitions to partial and slumber). + - DIPM: Device Initiated Power Management (device automatically + transitions to partial and slumber). + + The possible values for the default SATA link power management + policies are: 0 => Keep firmware settings - 1 => Maximum performance - 2 => Medium power - 3 => Medium power with Device Initiated PM enabled - 4 => Minimum power - - Note "Minimum power" is known to cause issues, including disk - corruption, with some disks and should not be used. 
+ 1 => No power savings (maximum performance) + 2 => HIPM (Partial) + 3 => HIPM (Partial) and DIPM (Partial and Slumber) + 4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber) + 5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber) + + Excluding the value 0, higher values represent policies with higher + power savings. config SATA_AHCI_PLATFORM tristate "Platform AHCI SATA support" diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index aa93b0ecbbc6..e1c24bbacf64 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -110,17 +110,17 @@ static const struct scsi_host_template ahci_sht = { static struct ata_port_operations ahci_vt8251_ops = { .inherits = &ahci_ops, - .hardreset = ahci_vt8251_hardreset, + .reset.hardreset = ahci_vt8251_hardreset, }; static struct ata_port_operations ahci_p5wdh_ops = { .inherits = &ahci_ops, - .hardreset = ahci_p5wdh_hardreset, + .reset.hardreset = ahci_p5wdh_hardreset, }; static struct ata_port_operations ahci_avn_ops = { .inherits = &ahci_ops, - .hardreset = ahci_avn_hardreset, + .reset.hardreset = ahci_avn_hardreset, }; static const struct ata_port_info ahci_port_info[] = { @@ -674,7 +674,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); static int mobile_lpm_policy = -1; module_param(mobile_lpm_policy, int, 0644); -MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets"); +MODULE_PARM_DESC(mobile_lpm_policy, + "Default LPM policy. Despite its name, this parameter applies " + "to all chipsets, including desktop and server chipsets"); static char *ahci_mask_port_map; module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444); @@ -1774,15 +1776,26 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap) * LPM if the port advertises itself as an external port. */ if (ap->pflags & ATA_PFLAG_EXTERNAL) { - ata_port_dbg(ap, "external port, not enabling LPM\n"); + ap->flags |= ATA_FLAG_NO_LPM; + ap->target_lpm_policy = ATA_LPM_MAX_POWER; return; } + /* If no Partial or no Slumber, we cannot support DIPM. */ + if ((ap->host->flags & ATA_HOST_NO_PART) || + (ap->host->flags & ATA_HOST_NO_SSC)) { + ata_port_dbg(ap, "Host does not support DIPM\n"); + ap->flags |= ATA_FLAG_NO_DIPM; + } + /* If no LPM states are supported by the HBA, do not bother with LPM */ if ((ap->host->flags & ATA_HOST_NO_PART) && (ap->host->flags & ATA_HOST_NO_SSC) && (ap->host->flags & ATA_HOST_NO_DEVSLP)) { - ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n"); + ata_port_dbg(ap, + "No LPM states supported, forcing LPM max_power\n"); + ap->flags |= ATA_FLAG_NO_LPM; + ap->target_lpm_policy = ATA_LPM_MAX_POWER; return; } diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c index ca0924dc5bd2..f97566c420f8 100644 --- a/drivers/ata/ahci_da850.c +++ b/drivers/ata/ahci_da850.c @@ -137,13 +137,13 @@ static int ahci_da850_hardreset(struct ata_link *link, static struct ata_port_operations ahci_da850_port_ops = { .inherits = &ahci_platform_ops, - .softreset = ahci_da850_softreset, + .reset.softreset = ahci_da850_softreset, /* * No need to override .pmp_softreset - it's only used for actual * PMP-enabled ports. 
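/*
 * [Editor's note] A sketch, not part of the patch above: the Kconfig values
 * 0-5 described in the SATA_MOBILE_LPM_POLICY help text (and the
 * mobile_lpm_policy module parameter) map ordinally onto enum ata_lpm_policy,
 * which the ahci driver uses as the initial ap->target_lpm_policy in
 * ahci_update_initial_lpm_policy(). The enum below is assumed from the
 * identifiers used elsewhere in this diff and is reproduced only to make the
 * 0-5 mapping explicit.
 */
enum ata_lpm_policy {
	ATA_LPM_UNKNOWN,			/* 0 => keep firmware settings */
	ATA_LPM_MAX_POWER,			/* 1 => no power savings */
	ATA_LPM_MED_POWER,			/* 2 => HIPM (Partial) */
	ATA_LPM_MED_POWER_WITH_DIPM,		/* 3 => HIPM (Partial) + DIPM */
	ATA_LPM_MIN_POWER_WITH_PARTIAL,		/* 4 => HIPM (Partial, DevSleep) + DIPM */
	ATA_LPM_MIN_POWER,			/* 5 => HIPM (Slumber, DevSleep) + DIPM */
};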
*/ - .hardreset = ahci_da850_hardreset, - .pmp_hardreset = ahci_da850_hardreset, + .reset.hardreset = ahci_da850_hardreset, + .pmp_reset.hardreset = ahci_da850_hardreset, }; static const struct ata_port_info ahci_da850_port_info = { diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c index b08547b877a1..93faed2cfeb6 100644 --- a/drivers/ata/ahci_dm816.c +++ b/drivers/ata/ahci_dm816.c @@ -124,7 +124,7 @@ static int ahci_dm816_softreset(struct ata_link *link, static struct ata_port_operations ahci_dm816_port_ops = { .inherits = &ahci_platform_ops, - .softreset = ahci_dm816_softreset, + .reset.softreset = ahci_dm816_softreset, }; static const struct ata_port_info ahci_dm816_port_info = { diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index f01f08048f97..86aedd5923ac 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -642,18 +642,19 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class, int ret; if (imxpriv->type == AHCI_IMX53) - ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline); + ret = ahci_pmp_retry_srst_ops.reset.softreset(link, class, + deadline); else - ret = ahci_ops.softreset(link, class, deadline); + ret = ahci_ops.reset.softreset(link, class, deadline); return ret; } static struct ata_port_operations ahci_imx_ops = { - .inherits = &ahci_ops, - .host_stop = ahci_imx_host_stop, - .error_handler = ahci_imx_error_handler, - .softreset = ahci_imx_softreset, + .inherits = &ahci_ops, + .host_stop = ahci_imx_host_stop, + .error_handler = ahci_imx_error_handler, + .reset.softreset = ahci_imx_softreset, }; static const struct ata_port_info ahci_imx_port_info = { diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 30e39885b64e..0dec1a17e5b1 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -146,8 +146,8 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, } static struct ata_port_operations ahci_qoriq_ops = { - .inherits = &ahci_ops, - .hardreset = ahci_qoriq_hardreset, + .inherits = &ahci_ops, + .reset.hardreset = ahci_qoriq_hardreset, }; static const struct ata_port_info ahci_qoriq_port_info = { diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index dfbd8c53abcb..5d5a51a77f5d 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -613,11 +613,11 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance) static struct ata_port_operations xgene_ahci_v1_ops = { .inherits = &ahci_ops, .host_stop = xgene_ahci_host_stop, - .hardreset = xgene_ahci_hardreset, + .reset.hardreset = xgene_ahci_hardreset, + .reset.softreset = xgene_ahci_softreset, + .pmp_reset.softreset = xgene_ahci_pmp_softreset, .read_id = xgene_ahci_read_id, .qc_issue = xgene_ahci_qc_issue, - .softreset = xgene_ahci_softreset, - .pmp_softreset = xgene_ahci_pmp_softreset }; static const struct ata_port_info xgene_ahci_v1_port_info = { @@ -630,7 +630,7 @@ static const struct ata_port_info xgene_ahci_v1_port_info = { static struct ata_port_operations xgene_ahci_v2_ops = { .inherits = &ahci_ops, .host_stop = xgene_ahci_host_stop, - .hardreset = xgene_ahci_hardreset, + .reset.hardreset = xgene_ahci_hardreset, .read_id = xgene_ahci_read_id, }; diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d441246fa357..229429ba5027 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -1074,7 +1074,7 @@ static struct ata_port_operations piix_pata_ops = { .cable_detect = ata_cable_40wire, .set_piomode = piix_set_piomode, .set_dmamode = 
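/*
 * [Editor's note] struct ata_reset_operations itself is introduced in the
 * <linux/libata.h> part of this series, which is not among the hunks shown
 * here. Judging from the accessors used throughout this diff
 * (.reset.prereset, .reset.softreset, .reset.hardreset, .reset.postreset and
 * the matching .pmp_reset group, plus reset_ops-> dereferences in
 * ata_eh_reset()), it presumably groups the former individual port-ops reset
 * callbacks roughly as follows (sketch only; field order assumed):
 */
struct ata_reset_operations {
	ata_prereset_fn_t	prereset;
	ata_reset_fn_t		softreset;
	ata_reset_fn_t		hardreset;
	ata_postreset_fn_t	postreset;
};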
piix_set_dmamode, - .prereset = piix_pata_prereset, + .reset.prereset = piix_pata_prereset, }; static struct ata_port_operations piix_vmw_ops = { @@ -1102,7 +1102,7 @@ static const struct scsi_host_template piix_sidpr_sht = { static struct ata_port_operations piix_sidpr_sata_ops = { .inherits = &piix_sata_ops, - .hardreset = sata_std_hardreset, + .reset.hardreset = sata_std_hardreset, .scr_read = piix_sidpr_scr_read, .scr_write = piix_sidpr_scr_write, .set_lpm = piix_sidpr_set_lpm, diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 4e9c82f36df1..b335fb7e5cb4 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -162,10 +162,10 @@ struct ata_port_operations ahci_ops = { .freeze = ahci_freeze, .thaw = ahci_thaw, - .softreset = ahci_softreset, - .hardreset = ahci_hardreset, - .postreset = ahci_postreset, - .pmp_softreset = ahci_softreset, + .reset.softreset = ahci_softreset, + .reset.hardreset = ahci_hardreset, + .reset.postreset = ahci_postreset, + .pmp_reset.softreset = ahci_softreset, .error_handler = ahci_error_handler, .post_internal_cmd = ahci_post_internal_cmd, .dev_config = ahci_dev_config, @@ -192,7 +192,7 @@ EXPORT_SYMBOL_GPL(ahci_ops); struct ata_port_operations ahci_pmp_retry_srst_ops = { .inherits = &ahci_ops, - .softreset = ahci_pmp_retry_softreset, + .reset.softreset = ahci_pmp_retry_softreset, }; EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 79b20da0a256..97d9f0488cc1 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -65,8 +65,8 @@ #include "libata-transport.h" const struct ata_port_operations ata_base_port_ops = { - .prereset = ata_std_prereset, - .postreset = ata_std_postreset, + .reset.prereset = ata_std_prereset, + .reset.postreset = ata_std_postreset, .error_handler = ata_std_error_handler, .sched_eh = ata_std_sched_eh, .end_eh = ata_std_end_eh, @@ -2154,14 +2154,46 @@ retry: return err_mask; } +static inline void ata_clear_log_directory(struct ata_device *dev) +{ + memset(dev->gp_log_dir, 0, ATA_SECT_SIZE); +} + +static int ata_read_log_directory(struct ata_device *dev) +{ + u16 version; + + /* If the log page is already cached, do nothing. */ + version = get_unaligned_le16(&dev->gp_log_dir[0]); + if (version == 0x0001) + return 0; + + if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->gp_log_dir, 1)) { + ata_clear_log_directory(dev); + return -EIO; + } + + version = get_unaligned_le16(&dev->gp_log_dir[0]); + if (version != 0x0001) { + ata_dev_err(dev, "Invalid log directory version 0x%04x\n", + version); + ata_clear_log_directory(dev); + dev->quirks |= ATA_QUIRK_NO_LOG_DIR; + return -EINVAL; + } + + return 0; +} + static int ata_log_supported(struct ata_device *dev, u8 log) { if (dev->quirks & ATA_QUIRK_NO_LOG_DIR) return 0; - if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1)) + if (ata_read_log_directory(dev)) return 0; - return get_unaligned_le16(&dev->sector_buf[log * 2]); + + return get_unaligned_le16(&dev->gp_log_dir[log * 2]); } static bool ata_identify_page_supported(struct ata_device *dev, u8 page) @@ -2421,18 +2453,7 @@ static void ata_dev_config_zac(struct ata_device *dev) dev->zac_zones_optimal_nonseq = U32_MAX; dev->zac_zones_max_open = U32_MAX; - /* - * Always set the 'ZAC' flag for Host-managed devices. - */ - if (dev->class == ATA_DEV_ZAC) - dev->flags |= ATA_DFLAG_ZAC; - else if (ata_id_zoned_cap(dev->id) == 0x01) - /* - * Check for host-aware devices. 
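/*
 * [Editor's note] dev->gp_log_dir, used by ata_read_log_directory() and
 * ata_log_supported() above, is not declared in the hunks shown here. From
 * the memset(..., ATA_SECT_SIZE) and get_unaligned_le16() accesses it is
 * presumably a new ATA_SECT_SIZE buffer in struct ata_device that caches the
 * READ LOG DIRECTORY page, replacing the shared dev->sector_buf for this
 * purpose: the directory is read once and served from the cache until
 * ata_clear_log_directory() invalidates it. The page layout assumed by that
 * code is word 0 = directory version (0x0001) and word <log address> =
 * number of pages supported for that log, i.e. roughly:
 */
static inline u16 gp_log_dir_nr_pages(const u8 *gp_log_dir, u8 log)
{
	/* One little-endian word per log address, as read by ata_log_supported() */
	return get_unaligned_le16(&gp_log_dir[log * 2]);
}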
- */ - dev->flags |= ATA_DFLAG_ZAC; - - if (!(dev->flags & ATA_DFLAG_ZAC)) + if (!ata_dev_is_zac(dev)) return; if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) { @@ -2495,7 +2516,7 @@ static void ata_dev_config_trusted(struct ata_device *dev) dev->flags |= ATA_DFLAG_TRUSTED; } -void ata_dev_cleanup_cdl_resources(struct ata_device *dev) +static void ata_dev_cleanup_cdl_resources(struct ata_device *dev) { kfree(dev->cdl); dev->cdl = NULL; @@ -2801,17 +2822,70 @@ out: kfree(buf); } +/* + * Configure features related to link power management. + */ +static void ata_dev_config_lpm(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + unsigned int err_mask; + + if (ap->flags & ATA_FLAG_NO_LPM) { + /* + * When the port does not support LPM, we cannot support it on + * the device either. + */ + dev->quirks |= ATA_QUIRK_NOLPM; + } else { + /* + * Some WD SATA-1 drives have issues with LPM, turn on NOLPM for + * them. + */ + if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) && + (dev->id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) + dev->quirks |= ATA_QUIRK_NOLPM; + + /* ATI specific quirk */ + if ((dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI) && + ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) + dev->quirks |= ATA_QUIRK_NOLPM; + } + + if (dev->quirks & ATA_QUIRK_NOLPM && + ap->target_lpm_policy != ATA_LPM_MAX_POWER) { + ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); + ap->target_lpm_policy = ATA_LPM_MAX_POWER; + } + + /* + * Device Initiated Power Management (DIPM) is normally disabled by + * default on a device. However, DIPM may have been enabled and that + * setting kept even after COMRESET because of the Software Settings + * Preservation feature. So if the port does not support DIPM and the + * device does, disable DIPM on the device. + */ + if (ap->flags & ATA_FLAG_NO_DIPM && ata_id_has_dipm(dev->id)) { + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_DISABLE, SATA_DIPM); + if (err_mask && err_mask != AC_ERR_DEV) + ata_dev_err(dev, "Disable DIPM failed, Emask 0x%x\n", + err_mask); + } +} + static void ata_dev_print_features(struct ata_device *dev) { if (!(dev->flags & ATA_DFLAG_FEATURES_MASK)) return; ata_dev_info(dev, - "Features:%s%s%s%s%s%s%s%s\n", + "Features:%s%s%s%s%s%s%s%s%s%s\n", dev->flags & ATA_DFLAG_FUA ? " FUA" : "", dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "", dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "", dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "", + ata_id_has_hipm(dev->id) ? " HIPM" : "", + ata_id_has_dipm(dev->id) ? " DIPM" : "", dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "", dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "", dev->flags & ATA_DFLAG_CDL ? " CDL" : "", @@ -2848,6 +2922,9 @@ int ata_dev_configure(struct ata_device *dev) return 0; } + /* Clear the general purpose log directory cache. 
*/ + ata_clear_log_directory(dev); + /* Set quirks */ dev->quirks |= ata_dev_quirks(dev); ata_force_quirks(dev); @@ -2871,23 +2948,6 @@ int ata_dev_configure(struct ata_device *dev) if (rc) return rc; - /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ - if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) && - (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) - dev->quirks |= ATA_QUIRK_NOLPM; - - if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI && - ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) - dev->quirks |= ATA_QUIRK_NOLPM; - - if (ap->flags & ATA_FLAG_NO_LPM) - dev->quirks |= ATA_QUIRK_NOLPM; - - if (dev->quirks & ATA_QUIRK_NOLPM) { - ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); - dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; - } - /* let ACPI work its magic */ rc = ata_acpi_on_devcfg(dev); if (rc) @@ -2974,6 +3034,7 @@ int ata_dev_configure(struct ata_device *dev) ata_dev_config_chs(dev); } + ata_dev_config_lpm(dev); ata_dev_config_fua(dev); ata_dev_config_devslp(dev); ata_dev_config_sense_reporting(dev); @@ -3449,7 +3510,7 @@ static int ata_dev_set_mode(struct ata_device *dev) } /** - * ata_do_set_mode - Program timings and issue SET FEATURES - XFER + * ata_set_mode - Program timings and issue SET FEATURES - XFER * @link: link on which timings will be programmed * @r_failed_dev: out parameter for failed device * @@ -3465,7 +3526,7 @@ static int ata_dev_set_mode(struct ata_device *dev) * 0 on success, negative errno otherwise */ -int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) +int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) { struct ata_port *ap = link->ap; struct ata_device *dev; @@ -3546,7 +3607,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) *r_failed_dev = dev; return rc; } -EXPORT_SYMBOL_GPL(ata_do_set_mode); +EXPORT_SYMBOL_GPL(ata_set_mode); /** * ata_wait_ready - wait for link to become ready diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 1f90fcd2b3c4..2946ae6d4b2c 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -153,8 +153,6 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { #undef CMDS static void __ata_port_freeze(struct ata_port *ap); -static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, - struct ata_device **r_failed_dev); #ifdef CONFIG_PM static void ata_eh_handle_port_suspend(struct ata_port *ap); static void ata_eh_handle_port_resume(struct ata_port *ap); @@ -825,7 +823,7 @@ void ata_port_wait_eh(struct ata_port *ap) retry: spin_lock_irqsave(ap->lock, flags); - while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { + while (ata_port_eh_scheduled(ap)) { prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(ap->lock, flags); schedule(); @@ -909,7 +907,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t) * LOCKING: * spin_lock_irqsave(host lock) */ -static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) +static void ata_eh_set_pending(struct ata_port *ap, bool fastdrain) { unsigned int cnt; @@ -949,7 +947,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; qc->flags |= ATA_QCFLAG_EH; - ata_eh_set_pending(ap, 1); + ata_eh_set_pending(ap, true); /* The following will fail if timeout has already expired. * ata_scsi_error() takes care of such scmds on EH entry. 
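/*
 * [Editor's note] ata_port_eh_scheduled(), used above in ata_port_wait_eh()
 * and later in __ata_scsi_queuecmd(), is the new helper this series adds to
 * drivers/ata/libata.h (see the libata.h hunk further down); it is
 * reproduced here verbatim for readability:
 */
static inline bool ata_port_eh_scheduled(struct ata_port *ap)
{
	return ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS);
}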
@@ -971,7 +969,7 @@ void ata_std_sched_eh(struct ata_port *ap) if (ap->pflags & ATA_PFLAG_INITIALIZING) return; - ata_eh_set_pending(ap, 1); + ata_eh_set_pending(ap, true); scsi_schedule_eh(ap->scsi_host); trace_ata_std_sched_eh(ap); @@ -1022,7 +1020,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) int tag, nr_aborted = 0; /* we're gonna abort all commands, no need for fast drain */ - ata_eh_set_pending(ap, 0); + ata_eh_set_pending(ap, false); /* include internal tag in iteration */ ata_qc_for_each_with_internal(ap, qc, tag) { @@ -2073,6 +2071,183 @@ out: ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE); } +/* + * Check if a link is established. This is a relaxed version of + * ata_phys_link_online() which accounts for the fact that this is potentially + * called after changing the link power management policy, which may not be + * reflected immediately in the SSTAUS register (e.g., we may still be seeing + * the PHY in partial, slumber or devsleep Partial power management state. + * So check that: + * - A device is still present, that is, DET is 1h (Device presence detected + * but Phy communication not established) or 3h (Device presence detected and + * Phy communication established) + * - Communication is established, that is, IPM is not 0h, indicating that PHY + * is online or in a low power state. + */ +static bool ata_eh_link_established(struct ata_link *link) +{ + u32 sstatus; + u8 det, ipm; + + if (sata_scr_read(link, SCR_STATUS, &sstatus)) + return false; + + det = sstatus & 0x0f; + ipm = (sstatus >> 8) & 0x0f; + + return (det & 0x01) && ipm; +} + +/** + * ata_eh_link_set_lpm - configure SATA interface power management + * @link: link to configure + * @policy: the link power management policy + * @r_failed_dev: out parameter for failed device + * + * Enable SATA Interface power management. This will enable + * Device Interface Power Management (DIPM) for min_power and + * medium_power_with_dipm policies, and then call driver specific + * callbacks for enabling Host Initiated Power management. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +static int ata_eh_link_set_lpm(struct ata_link *link, + enum ata_lpm_policy policy, + struct ata_device **r_failed_dev) +{ + struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; + enum ata_lpm_policy old_policy = link->lpm_policy; + bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM); + unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; + unsigned int err_mask; + int rc; + + /* if the link or host doesn't do LPM, noop */ + if (!IS_ENABLED(CONFIG_SATA_HOST) || + (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) + return 0; + + /* + * This function currently assumes that it will never be supplied policy + * ATA_LPM_UNKNOWN. + */ + if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN)) + return 0; + + ata_link_dbg(link, "Set LPM policy: %d -> %d\n", old_policy, policy); + + /* + * DIPM is enabled only for ATA_LPM_MIN_POWER, + * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as + * some devices misbehave when the host NACKs transition to SLUMBER. 
+ */ + ata_for_each_dev(dev, link, ENABLED) { + bool dev_has_hipm = ata_id_has_hipm(dev->id); + bool dev_has_dipm = ata_id_has_dipm(dev->id); + + /* find the first enabled and LPM enabled devices */ + if (!link_dev) + link_dev = dev; + + if (!lpm_dev && + (dev_has_hipm || (dev_has_dipm && host_has_dipm))) + lpm_dev = dev; + + hints &= ~ATA_LPM_EMPTY; + if (!dev_has_hipm) + hints &= ~ATA_LPM_HIPM; + + /* disable DIPM before changing link config */ + if (dev_has_dipm) { + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_DISABLE, SATA_DIPM); + if (err_mask && err_mask != AC_ERR_DEV) { + ata_dev_warn(dev, + "failed to disable DIPM, Emask 0x%x\n", + err_mask); + rc = -EIO; + goto fail; + } + } + } + + if (ap) { + rc = ap->ops->set_lpm(link, policy, hints); + if (!rc && ap->slave_link) + rc = ap->ops->set_lpm(ap->slave_link, policy, hints); + } else + rc = sata_pmp_set_lpm(link, policy, hints); + + /* + * Attribute link config failure to the first (LPM) enabled + * device on the link. + */ + if (rc) { + if (rc == -EOPNOTSUPP) { + link->flags |= ATA_LFLAG_NO_LPM; + return 0; + } + dev = lpm_dev ? lpm_dev : link_dev; + goto fail; + } + + /* + * Low level driver acked the transition. Issue DIPM command + * with the new policy set. + */ + link->lpm_policy = policy; + if (ap && ap->slave_link) + ap->slave_link->lpm_policy = policy; + + /* + * Host config updated, enable DIPM if transitioning to + * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or + * ATA_LPM_MED_POWER_WITH_DIPM. + */ + ata_for_each_dev(dev, link, ENABLED) { + bool dev_has_dipm = ata_id_has_dipm(dev->id); + + if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm && + dev_has_dipm) { + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_ENABLE, SATA_DIPM); + if (err_mask && err_mask != AC_ERR_DEV) { + ata_dev_warn(dev, + "failed to enable DIPM, Emask 0x%x\n", + err_mask); + rc = -EIO; + goto fail; + } + } + } + + link->last_lpm_change = jiffies; + link->flags |= ATA_LFLAG_CHANGED; + + return 0; + +fail: + /* restore the old policy */ + link->lpm_policy = old_policy; + if (ap && ap->slave_link) + ap->slave_link->lpm_policy = old_policy; + + /* if no device or only one more chance is left, disable LPM */ + if (!dev || ehc->tries[dev->devno] <= 2) { + ata_link_warn(link, "disabling LPM on the link\n"); + link->flags |= ATA_LFLAG_NO_LPM; + } + if (r_failed_dev) + *r_failed_dev = dev; + return rc; +} + /** * ata_eh_link_autopsy - analyze error and determine recovery action * @link: host link to perform autopsy on @@ -2606,25 +2781,28 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, return reset(link, classes, deadline); } -static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) +static bool ata_eh_followup_srst_needed(struct ata_link *link, int rc) { if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) - return 0; + return false; if (rc == -EAGAIN) - return 1; + return true; if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) - return 1; - return 0; + return true; + return false; } int ata_eh_reset(struct ata_link *link, int classify, - ata_prereset_fn_t prereset, ata_reset_fn_t softreset, - ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) + struct ata_reset_operations *reset_ops) { struct ata_port *ap = link->ap; struct ata_link *slave = ap->slave_link; struct ata_eh_context *ehc = &link->eh_context; struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; + ata_reset_fn_t hardreset = reset_ops->hardreset; + ata_reset_fn_t softreset = reset_ops->softreset; + ata_prereset_fn_t prereset = reset_ops->prereset; + ata_postreset_fn_t postreset = reset_ops->postreset; unsigned int *classes = ehc->classes; unsigned int lflags = link->flags; int verbose = !(ehc->i.flags & ATA_EHI_QUIET); @@ -3123,13 +3301,13 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, * to ap->target_lpm_policy after revalidation is done. */ if (link->lpm_policy > ATA_LPM_MAX_POWER) { - rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER, - r_failed_dev); + rc = ata_eh_link_set_lpm(link, ATA_LPM_MAX_POWER, + r_failed_dev); if (rc) goto err; } - if (ata_phys_link_offline(ata_dev_phys_link(dev))) { + if (!ata_eh_link_established(ata_dev_phys_link(dev))) { rc = -EIO; goto err; } @@ -3233,12 +3411,12 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, } /** - * ata_set_mode - Program timings and issue SET FEATURES - XFER + * ata_eh_set_mode - Program timings and issue SET FEATURES - XFER * @link: link on which timings will be programmed * @r_failed_dev: out parameter for failed device * * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If - * ata_set_mode() fails, pointer to the failing device is + * ata_eh_set_mode() fails, pointer to the failing device is * returned in @r_failed_dev. * * LOCKING: @@ -3247,7 +3425,8 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, * RETURNS: * 0 on success, negative errno otherwise */ -int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) +static int ata_eh_set_mode(struct ata_link *link, + struct ata_device **r_failed_dev) { struct ata_port *ap = link->ap; struct ata_device *dev; @@ -3268,7 +3447,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) if (ap->ops->set_mode) rc = ap->ops->set_mode(link, r_failed_dev); else - rc = ata_do_set_mode(link, r_failed_dev); + rc = ata_set_mode(link, r_failed_dev); /* if transfer mode has changed, set DUBIOUS_XFER on device */ ata_for_each_dev(dev, link, ENABLED) { @@ -3408,153 +3587,6 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev) return rc; } -/** - * ata_eh_set_lpm - configure SATA interface power management - * @link: link to configure power management - * @policy: the link power management policy - * @r_failed_dev: out parameter for failed device - * - * Enable SATA Interface power management. This will enable - * Device Interface Power Management (DIPM) for min_power and - * medium_power_with_dipm policies, and then call driver specific - * callbacks for enabling Host Initiated Power management. - * - * LOCKING: - * EH context. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, - struct ata_device **r_failed_dev) -{ - struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; - struct ata_eh_context *ehc = &link->eh_context; - struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; - enum ata_lpm_policy old_policy = link->lpm_policy; - bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM); - unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; - unsigned int err_mask; - int rc; - - /* if the link or host doesn't do LPM, noop */ - if (!IS_ENABLED(CONFIG_SATA_HOST) || - (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) - return 0; - - /* - * This function currently assumes that it will never be supplied policy - * ATA_LPM_UNKNOWN. 
- */ - if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN)) - return 0; - - /* - * DIPM is enabled only for ATA_LPM_MIN_POWER, - * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as - * some devices misbehave when the host NACKs transition to SLUMBER. - */ - ata_for_each_dev(dev, link, ENABLED) { - bool dev_has_hipm = ata_id_has_hipm(dev->id); - bool dev_has_dipm = ata_id_has_dipm(dev->id); - - /* find the first enabled and LPM enabled devices */ - if (!link_dev) - link_dev = dev; - - if (!lpm_dev && - (dev_has_hipm || (dev_has_dipm && host_has_dipm))) - lpm_dev = dev; - - hints &= ~ATA_LPM_EMPTY; - if (!dev_has_hipm) - hints &= ~ATA_LPM_HIPM; - - /* disable DIPM before changing link config */ - if (dev_has_dipm) { - err_mask = ata_dev_set_feature(dev, - SETFEATURES_SATA_DISABLE, SATA_DIPM); - if (err_mask && err_mask != AC_ERR_DEV) { - ata_dev_warn(dev, - "failed to disable DIPM, Emask 0x%x\n", - err_mask); - rc = -EIO; - goto fail; - } - } - } - - if (ap) { - rc = ap->ops->set_lpm(link, policy, hints); - if (!rc && ap->slave_link) - rc = ap->ops->set_lpm(ap->slave_link, policy, hints); - } else - rc = sata_pmp_set_lpm(link, policy, hints); - - /* - * Attribute link config failure to the first (LPM) enabled - * device on the link. - */ - if (rc) { - if (rc == -EOPNOTSUPP) { - link->flags |= ATA_LFLAG_NO_LPM; - return 0; - } - dev = lpm_dev ? lpm_dev : link_dev; - goto fail; - } - - /* - * Low level driver acked the transition. Issue DIPM command - * with the new policy set. - */ - link->lpm_policy = policy; - if (ap && ap->slave_link) - ap->slave_link->lpm_policy = policy; - - /* - * Host config updated, enable DIPM if transitioning to - * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or - * ATA_LPM_MED_POWER_WITH_DIPM. - */ - ata_for_each_dev(dev, link, ENABLED) { - bool dev_has_dipm = ata_id_has_dipm(dev->id); - - if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm && - dev_has_dipm) { - err_mask = ata_dev_set_feature(dev, - SETFEATURES_SATA_ENABLE, SATA_DIPM); - if (err_mask && err_mask != AC_ERR_DEV) { - ata_dev_warn(dev, - "failed to enable DIPM, Emask 0x%x\n", - err_mask); - rc = -EIO; - goto fail; - } - } - } - - link->last_lpm_change = jiffies; - link->flags |= ATA_LFLAG_CHANGED; - - return 0; - -fail: - /* restore the old policy */ - link->lpm_policy = old_policy; - if (ap && ap->slave_link) - ap->slave_link->lpm_policy = old_policy; - - /* if no device or only one more chance is left, disable LPM */ - if (!dev || ehc->tries[dev->devno] <= 2) { - ata_link_warn(link, "disabling LPM on the link\n"); - link->flags |= ATA_LFLAG_NO_LPM; - } - if (r_failed_dev) - *r_failed_dev = dev; - return rc; -} - int ata_link_nr_enabled(struct ata_link *link) { struct ata_device *dev; @@ -3727,10 +3759,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) /** * ata_eh_recover - recover host port after error * @ap: host port to recover - * @prereset: prereset method (can be NULL) - * @softreset: softreset method (can be NULL) - * @hardreset: hardreset method (can be NULL) - * @postreset: postreset method (can be NULL) + * @reset_ops: The set of reset operations to use * @r_failed_link: out parameter for failed link * * This is the alpha and omega, eum and yang, heart and soul of @@ -3746,9 +3775,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) * RETURNS: * 0 on success, -errno on failure. 
*/ -int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, - ata_reset_fn_t softreset, ata_reset_fn_t hardreset, - ata_postreset_fn_t postreset, +int ata_eh_recover(struct ata_port *ap, struct ata_reset_operations *reset_ops, struct ata_link **r_failed_link) { struct ata_link *link; @@ -3816,8 +3843,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, if (!(ehc->i.action & ATA_EH_RESET)) continue; - rc = ata_eh_reset(link, ata_link_nr_vacant(link), - prereset, softreset, hardreset, postreset); + rc = ata_eh_reset(link, ata_link_nr_vacant(link), reset_ops); if (rc) { ata_link_err(link, "reset failed, giving up\n"); goto out; @@ -3898,7 +3924,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, /* configure transfer mode if necessary */ if (ehc->i.flags & ATA_EHI_SETMODE) { - rc = ata_set_mode(link, &dev); + rc = ata_eh_set_mode(link, &dev); if (rc) goto rest_fail; ehc->i.flags &= ~ATA_EHI_SETMODE; @@ -3943,7 +3969,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, config_lpm: /* configure link power saving */ if (link->lpm_policy != ap->target_lpm_policy) { - rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); + rc = ata_eh_link_set_lpm(link, ap->target_lpm_policy, + &dev); if (rc) goto rest_fail; } @@ -4037,59 +4064,39 @@ void ata_eh_finish(struct ata_port *ap) } /** - * ata_do_eh - do standard error handling + * ata_std_error_handler - standard error handler * @ap: host port to handle error for * - * @prereset: prereset method (can be NULL) - * @softreset: softreset method (can be NULL) - * @hardreset: hardreset method (can be NULL) - * @postreset: postreset method (can be NULL) - * * Perform standard error handling sequence. * * LOCKING: * Kernel thread context (may sleep). */ -void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, - ata_reset_fn_t softreset, ata_reset_fn_t hardreset, - ata_postreset_fn_t postreset) +void ata_std_error_handler(struct ata_port *ap) { - struct ata_device *dev; + struct ata_reset_operations *reset_ops = &ap->ops->reset; + struct ata_link *link = &ap->link; int rc; + /* Ignore built-in hardresets if SCR access is not available */ + if ((reset_ops->hardreset == sata_std_hardreset || + reset_ops->hardreset == sata_sff_hardreset) && + !sata_scr_valid(link)) + link->flags |= ATA_LFLAG_NO_HRST; + ata_eh_autopsy(ap); ata_eh_report(ap); - rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, - NULL); + rc = ata_eh_recover(ap, reset_ops, NULL); if (rc) { - ata_for_each_dev(dev, &ap->link, ALL) + struct ata_device *dev; + + ata_for_each_dev(dev, link, ALL) ata_dev_disable(dev); } ata_eh_finish(ap); } - -/** - * ata_std_error_handler - standard error handler - * @ap: host port to handle error for - * - * Standard error handler - * - * LOCKING: - * Kernel thread context (may sleep). 
- */ -void ata_std_error_handler(struct ata_port *ap) -{ - struct ata_port_operations *ops = ap->ops; - ata_reset_fn_t hardreset = ops->hardreset; - - /* ignore built-in hardreset if SCR access is not available */ - if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) - hardreset = NULL; - - ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); -} EXPORT_SYMBOL_GPL(ata_std_error_handler); #ifdef CONFIG_PM diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index d5d189328ae6..57023324a56f 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -15,9 +15,9 @@ const struct ata_port_operations sata_pmp_port_ops = { .inherits = &sata_port_ops, - .pmp_prereset = ata_std_prereset, - .pmp_hardreset = sata_std_hardreset, - .pmp_postreset = ata_std_postreset, + .pmp_reset.prereset = ata_std_prereset, + .pmp_reset.hardreset = sata_std_hardreset, + .pmp_reset.postreset = ata_std_postreset, .error_handler = sata_pmp_error_handler, }; @@ -727,10 +727,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev) /** * sata_pmp_eh_recover_pmp - recover PMP * @ap: ATA port PMP is attached to - * @prereset: prereset method (can be NULL) - * @softreset: softreset method - * @hardreset: hardreset method - * @postreset: postreset method (can be NULL) + * @reset_ops: The set of reset operations to use * * Recover PMP attached to @ap. Recovery procedure is somewhat * similar to that of ata_eh_recover() except that reset should @@ -744,8 +741,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev) * 0 on success, -errno on failure. */ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, - ata_prereset_fn_t prereset, ata_reset_fn_t softreset, - ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) + struct ata_reset_operations *reset_ops) { struct ata_link *link = &ap->link; struct ata_eh_context *ehc = &link->eh_context; @@ -767,8 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, struct ata_link *tlink; /* reset */ - rc = ata_eh_reset(link, 0, prereset, softreset, hardreset, - postreset); + rc = ata_eh_reset(link, 0, reset_ops); if (rc) { ata_link_err(link, "failed to reset PMP, giving up\n"); goto fail; @@ -932,8 +927,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap) retry: /* PMP attached? 
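/*
 * [Editor's note] A hypothetical example, not taken from this patch: with
 * ata_do_eh() folded into ata_std_error_handler() and the reset methods now
 * carried in the port operations' .reset group, a low-level driver no longer
 * passes reset callbacks at error-handling time. It fills in .reset and
 * inherits the standard error handler (via ata_base_port_ops), e.g. (the
 * "foo" driver and its callback are made up for illustration):
 */
static int foo_hardreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	/* Controller-specific link reset preparation would go here. */
	return sata_std_hardreset(link, class, deadline);
}

static struct ata_port_operations foo_port_ops = {
	.inherits	 = &sata_port_ops,
	.reset.hardreset = foo_hardreset,
};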
*/ if (!sata_pmp_attached(ap)) { - rc = ata_eh_recover(ap, ops->prereset, ops->softreset, - ops->hardreset, ops->postreset, NULL); + rc = ata_eh_recover(ap, &ops->reset, NULL); if (rc) { ata_for_each_dev(dev, &ap->link, ALL) ata_dev_disable(dev); @@ -951,8 +945,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap) } /* recover pmp */ - rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset, - ops->hardreset, ops->postreset); + rc = sata_pmp_eh_recover_pmp(ap, &ops->reset); if (rc) goto pmp_fail; @@ -978,8 +971,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap) goto pmp_fail; /* recover links */ - rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset, - ops->pmp_hardreset, ops->pmp_postreset, &link); + rc = ata_eh_recover(ap, &ops->pmp_reset, &link); if (rc) goto link_fail; diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index cb46ce276bb1..4734465d3b1e 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -924,6 +924,11 @@ static ssize_t ata_scsi_lpm_store(struct device *device, spin_lock_irqsave(ap->lock, flags); + if (ap->flags & ATA_FLAG_NO_LPM) { + count = -EOPNOTSUPP; + goto out_unlock; + } + ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, &ap->link, ENABLED) { if (dev->quirks & ATA_QUIRK_NOLPM) { @@ -1699,6 +1704,6 @@ const struct ata_port_operations sata_port_ops = { .inherits = &ata_base_port_ops, .qc_defer = ata_std_qc_defer, - .hardreset = sata_std_hardreset, + .reset.hardreset = sata_std_hardreset, }; EXPORT_SYMBOL_GPL(sata_port_ops); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a21c9895408d..27b15176db56 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1923,8 +1923,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_device *dev, }; for (i = 0; i < sizeof(pages); i++) { - if (pages[i] == 0xb6 && - !(dev->flags & ATA_DFLAG_ZAC)) + if (pages[i] == 0xb6 && !ata_dev_is_zac(dev)) continue; rbuf[num_pages + 4] = pages[i]; num_pages++; @@ -2181,7 +2180,7 @@ static unsigned int ata_scsiop_inq_b2(struct ata_device *dev, static unsigned int ata_scsiop_inq_b6(struct ata_device *dev, struct scsi_cmnd *cmd, u8 *rbuf) { - if (!(dev->flags & ATA_DFLAG_ZAC)) { + if (!ata_dev_is_zac(dev)) { ata_scsi_set_invalid_field(dev, cmd, 2, 0xff); return 0; } @@ -4317,9 +4316,10 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) * scsi_queue_rq() will defer commands if scsi_host_in_recovery(). * However, this check is done without holding the ap->lock (a libata * specific lock), so we can have received an error irq since then, - * therefore we must check if EH is pending, while holding ap->lock. + * therefore we must check if EH is pending or running, while holding + * ap->lock. */ - if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) + if (ata_port_eh_scheduled(ap)) return SCSI_MLQUEUE_DEVICE_BUSY; if (unlikely(!scmd->cmd_len)) @@ -4634,24 +4634,23 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) * ata_scsi_offline_dev - offline attached SCSI device * @dev: ATA device to offline attached SCSI device for * - * This function is called from ata_eh_hotplug() and responsible - * for taking the SCSI device attached to @dev offline. This - * function is called with host lock which protects dev->sdev - * against clearing. + * This function is called from ata_eh_detach_dev() and is responsible for + * taking the SCSI device attached to @dev offline. This function is + * called with host lock which protects dev->sdev against clearing. 
* * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: - * 1 if attached SCSI device exists, 0 otherwise. + * true if attached SCSI device exists, false otherwise. */ -int ata_scsi_offline_dev(struct ata_device *dev) +bool ata_scsi_offline_dev(struct ata_device *dev) { if (dev->sdev) { scsi_device_set_state(dev->sdev, SDEV_OFFLINE); - return 1; + return true; } - return 0; + return false; } /** diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 5a46c066abc3..7fc407255eb4 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -31,10 +31,10 @@ const struct ata_port_operations ata_sff_port_ops = { .freeze = ata_sff_freeze, .thaw = ata_sff_thaw, - .prereset = ata_sff_prereset, - .softreset = ata_sff_softreset, - .hardreset = sata_sff_hardreset, - .postreset = ata_sff_postreset, + .reset.prereset = ata_sff_prereset, + .reset.softreset = ata_sff_softreset, + .reset.hardreset = sata_sff_hardreset, + .reset.postreset = ata_sff_postreset, .error_handler = ata_sff_error_handler, .sff_dev_select = ata_sff_dev_select, @@ -2054,8 +2054,6 @@ EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); */ void ata_sff_error_handler(struct ata_port *ap) { - ata_reset_fn_t softreset = ap->ops->softreset; - ata_reset_fn_t hardreset = ap->ops->hardreset; struct ata_queued_cmd *qc; unsigned long flags; @@ -2077,13 +2075,7 @@ void ata_sff_error_handler(struct ata_port *ap) spin_unlock_irqrestore(ap->lock, flags); - /* ignore built-in hardresets if SCR access is not available */ - if ((hardreset == sata_std_hardreset || - hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) - hardreset = NULL; - - ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, - ap->ops->postreset); + ata_std_error_handler(ap); } EXPORT_SYMBOL_GPL(ata_sff_error_handler); diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index e898be49df6b..62415fe67a11 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -202,7 +202,7 @@ show_ata_port_##name(struct device *dev, \ { \ struct ata_port *ap = transport_class_to_port(dev); \ \ - return scnprintf(buf, 20, format_string, cast ap->field); \ + return sysfs_emit(buf, format_string, cast ap->field); \ } #define ata_port_simple_attr(field, name, format_string, type) \ @@ -389,7 +389,7 @@ show_ata_dev_##field(struct device *dev, \ { \ struct ata_device *ata_dev = transport_class_to_dev(dev); \ \ - return scnprintf(buf, 20, format_string, cast ata_dev->field); \ + return sysfs_emit(buf, format_string, cast ata_dev->field); \ } #define ata_dev_simple_attr(field, format_string, type) \ diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index ce5c628fa6fd..e5b977a8d3e1 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -44,6 +44,18 @@ static inline bool ata_sstatus_online(u32 sstatus) return (sstatus & 0xf) == 0x3; } +static inline bool ata_dev_is_zac(struct ata_device *dev) +{ + /* Host managed device or host aware device */ + return dev->class == ATA_DEV_ZAC || + ata_id_zoned_cap(dev->id) == 0x01; +} + +static inline bool ata_port_eh_scheduled(struct ata_port *ap) +{ + return ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS); +} + #ifdef CONFIG_ATA_FORCE extern void ata_force_cbl(struct ata_port *ap); #else @@ -90,7 +102,6 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); extern const char *sata_spd_string(unsigned int spd); extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log, u8 page, void *buf, unsigned int sectors); -void 
ata_dev_cleanup_cdl_resources(struct ata_device *dev); #define to_ata_port(d) container_of(d, struct ata_port, tdev) @@ -137,7 +148,7 @@ extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap, extern int ata_scsi_add_hosts(struct ata_host *host, const struct scsi_host_template *sht); extern void ata_scsi_scan_host(struct ata_port *ap, int sync); -extern int ata_scsi_offline_dev(struct ata_device *dev); +extern bool ata_scsi_offline_dev(struct ata_device *dev); extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq); extern void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq); @@ -169,12 +180,9 @@ extern void ata_eh_autopsy(struct ata_port *ap); const char *ata_get_cmd_name(u8 command); extern void ata_eh_report(struct ata_port *ap); extern int ata_eh_reset(struct ata_link *link, int classify, - ata_prereset_fn_t prereset, ata_reset_fn_t softreset, - ata_reset_fn_t hardreset, ata_postreset_fn_t postreset); -extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); -extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, - ata_reset_fn_t softreset, ata_reset_fn_t hardreset, - ata_postreset_fn_t postreset, + struct ata_reset_operations *reset_ops); +extern int ata_eh_recover(struct ata_port *ap, + struct ata_reset_operations *reset_ops, struct ata_link **r_failed_disk); extern void ata_eh_finish(struct ata_port *ap); extern int ata_ering_map(struct ata_ering *ering, diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index ab38871b5e00..23fff10af2ac 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c @@ -216,7 +216,7 @@ static struct ata_port_operations pacpi_ops = { .mode_filter = pacpi_mode_filter, .set_piomode = pacpi_set_piomode, .set_dmamode = pacpi_set_dmamode, - .prereset = pacpi_pre_reset, + .reset.prereset = pacpi_pre_reset, .port_start = pacpi_port_start, }; diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index bb790edd6036..9d5cb9c34c52 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -392,11 +392,11 @@ static struct ata_port_operations ali_20_port_ops = { * Port operations for DMA capable ALi with cable detect */ static struct ata_port_operations ali_c2_port_ops = { - .inherits = &ali_dma_base_ops, - .check_atapi_dma = ali_check_atapi_dma, - .cable_detect = ali_c2_cable_detect, - .dev_config = ali_lock_sectors, - .postreset = ali_c2_c3_postreset, + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .cable_detect = ali_c2_cable_detect, + .dev_config = ali_lock_sectors, + .reset.postreset = ali_c2_c3_postreset, }; /* diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 5b02b89748b7..a2fecadc927d 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -394,7 +394,7 @@ static const struct scsi_host_template amd_sht = { static const struct ata_port_operations amd_base_port_ops = { .inherits = &ata_bmdma32_port_ops, - .prereset = amd_pre_reset, + .reset.prereset = amd_pre_reset, }; static struct ata_port_operations amd33_port_ops = { @@ -429,7 +429,7 @@ static const struct ata_port_operations nv_base_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_ignore, .mode_filter = nv_mode_filter, - .prereset = nv_pre_reset, + .reset.prereset = nv_pre_reset, .host_stop = nv_host_stop, }; diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index 40544282f455..6160414172a3 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c @@ -301,7 +301,7 @@ 
static struct ata_port_operations artop6210_ops = { .cable_detect = ata_cable_40wire, .set_piomode = artop6210_set_piomode, .set_dmamode = artop6210_set_dmamode, - .prereset = artop62x0_pre_reset, + .reset.prereset = artop62x0_pre_reset, .qc_defer = artop6210_qc_defer, }; @@ -310,7 +310,7 @@ static struct ata_port_operations artop6260_ops = { .cable_detect = artop6260_cable_detect, .set_piomode = artop6260_set_piomode, .set_dmamode = artop6260_set_dmamode, - .prereset = artop62x0_pre_reset, + .reset.prereset = artop62x0_pre_reset, }; static void atp8xx_fixup(struct pci_dev *pdev) diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 8c5cc803aab3..4c612f9543f6 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -264,7 +264,7 @@ static struct ata_port_operations atiixp_port_ops = { .bmdma_start = atiixp_bmdma_start, .bmdma_stop = atiixp_bmdma_stop, - .prereset = atiixp_prereset, + .reset.prereset = atiixp_prereset, .cable_detect = atiixp_cable_detect, .set_piomode = atiixp_set_piomode, .set_dmamode = atiixp_set_dmamode, diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index 2e6eccf2902f..6fe49b303fee 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -243,7 +243,7 @@ static struct ata_port_operations efar_ops = { .cable_detect = efar_cable_detect, .set_piomode = efar_set_piomode, .set_dmamode = efar_set_dmamode, - .prereset = efar_pre_reset, + .reset.prereset = efar_pre_reset, }; diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index e8cda988feb5..b2b9e0058333 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -879,8 +879,8 @@ static const struct scsi_host_template ep93xx_pata_sht = { static struct ata_port_operations ep93xx_pata_port_ops = { .inherits = &ata_bmdma_port_ops, - .softreset = ep93xx_pata_softreset, - .hardreset = ATA_OP_NULL, + .reset.softreset = ep93xx_pata_softreset, + .reset.hardreset = ATA_OP_NULL, .sff_dev_select = ep93xx_pata_dev_select, .sff_set_devctl = ep93xx_pata_set_devctl, diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 5280e9960025..b96e8bd2a3f8 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -322,7 +322,7 @@ static const struct scsi_host_template hpt36x_sht = { static struct ata_port_operations hpt366_port_ops = { .inherits = &ata_bmdma_port_ops, - .prereset = hpt366_prereset, + .reset.prereset = hpt366_prereset, .cable_detect = hpt36x_cable_detect, .mode_filter = hpt366_filter, .set_piomode = hpt366_set_piomode, diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 4af22b819416..07e3a984cbb1 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -543,7 +543,7 @@ static struct ata_port_operations hpt370_port_ops = { .cable_detect = hpt37x_cable_detect, .set_piomode = hpt37x_set_piomode, .set_dmamode = hpt37x_set_dmamode, - .prereset = hpt37x_pre_reset, + .reset.prereset = hpt37x_pre_reset, }; /* @@ -567,7 +567,7 @@ static struct ata_port_operations hpt302_port_ops = { .cable_detect = hpt37x_cable_detect, .set_piomode = hpt37x_set_piomode, .set_dmamode = hpt37x_set_dmamode, - .prereset = hpt37x_pre_reset, + .reset.prereset = hpt37x_pre_reset, }; /* diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 5b1ecccf3c83..2cc57fcf2c46 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -356,7 +356,7 @@ static struct ata_port_operations hpt3xxn_port_ops = { .cable_detect = hpt3x2n_cable_detect, .set_piomode = 
hpt3x2n_set_piomode, .set_dmamode = hpt3x2n_set_dmamode, - .prereset = hpt3x2n_pre_reset, + .reset.prereset = hpt3x2n_pre_reset, }; /* diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index 61d8760f09d9..70f056e47e6b 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -336,7 +336,7 @@ static struct ata_port_operations pata_icside_port_ops = { .cable_detect = ata_cable_40wire, .set_dmamode = pata_icside_set_dmamode, - .postreset = pata_icside_postreset, + .reset.postreset = pata_icside_postreset, .port_start = ATA_OP_NULL, /* don't need PRD table */ }; diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index 9cbe2132ce59..a6f2cfc1602e 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c @@ -238,7 +238,7 @@ static struct ata_port_operations it8213_ops = { .cable_detect = it8213_cable_detect, .set_piomode = it8213_set_piomode, .set_dmamode = it8213_set_dmamode, - .prereset = it8213_pre_reset, + .reset.prereset = it8213_pre_reset, }; diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index f51fb8219762..b885f33e8980 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -113,7 +113,7 @@ static const struct scsi_host_template jmicron_sht = { static struct ata_port_operations jmicron_ops = { .inherits = &ata_bmdma_port_ops, - .prereset = jmicron_pre_reset, + .reset.prereset = jmicron_pre_reset, }; diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index 8119caaad605..deab67328388 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -99,7 +99,7 @@ static const struct scsi_host_template marvell_sht = { static struct ata_port_operations marvell_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = marvell_cable_detect, - .prereset = marvell_pre_reset, + .reset.prereset = marvell_pre_reset, }; diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index 69e4baf27d72..ce310ae7c93a 100644 --- a/drivers/ata/pata_mpiix.c +++ b/drivers/ata/pata_mpiix.c @@ -145,7 +145,7 @@ static struct ata_port_operations mpiix_port_ops = { .qc_issue = mpiix_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = mpiix_set_piomode, - .prereset = mpiix_pre_reset, + .reset.prereset = mpiix_pre_reset, .sff_data_xfer = ata_sff_data_xfer32, }; diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c index 44cc24d21d5f..bdb55c1a3280 100644 --- a/drivers/ata/pata_ns87410.c +++ b/drivers/ata/pata_ns87410.c @@ -123,7 +123,7 @@ static struct ata_port_operations ns87410_port_ops = { .qc_issue = ns87410_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = ns87410_set_piomode, - .prereset = ns87410_pre_reset, + .reset.prereset = ns87410_pre_reset, }; static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 2d32125c16fd..df42ebe98db7 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -941,7 +941,7 @@ static int octeon_cf_probe(struct platform_device *pdev) /* 16 bit but not True IDE */ base = cs0 + 0x800; octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16; - octeon_cf_ops.softreset = octeon_cf_softreset16; + octeon_cf_ops.reset.softreset = octeon_cf_softreset16; octeon_cf_ops.sff_check_status = octeon_cf_check_status16; octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16; octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16; diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 3d01b7000e41..81a7f3eb5654 
100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c @@ -214,7 +214,7 @@ static struct ata_port_operations oldpiix_pata_ops = { .cable_detect = ata_cable_40wire, .set_piomode = oldpiix_set_piomode, .set_dmamode = oldpiix_set_dmamode, - .prereset = oldpiix_pre_reset, + .reset.prereset = oldpiix_pre_reset, }; diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c index 3d23f57eb128..3db1b95d1404 100644 --- a/drivers/ata/pata_opti.c +++ b/drivers/ata/pata_opti.c @@ -156,7 +156,7 @@ static struct ata_port_operations opti_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = opti_set_piomode, - .prereset = opti_pre_reset, + .reset.prereset = opti_pre_reset, }; static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index dfc36b4ec9c6..b42dba5f4e05 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c @@ -322,7 +322,9 @@ static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed) u8 r; int nybble = 4 * ap->port_no; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int rc = ata_do_set_mode(link, r_failed); + int rc; + + rc = ata_set_mode(link, r_failed); if (rc == 0) { pci_read_config_byte(pdev, 0x43, &r); @@ -344,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = { .set_piomode = optidma_set_pio_mode, .set_dmamode = optidma_set_dma_mode, .set_mode = optidma_set_mode, - .prereset = optidma_pre_reset, + .reset.prereset = optidma_pre_reset, }; static struct ata_port_operations optiplus_port_ops = { diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c index 93ebf566b54e..22bd3ff6b7ae 100644 --- a/drivers/ata/pata_parport/pata_parport.c +++ b/drivers/ata/pata_parport/pata_parport.c @@ -321,8 +321,8 @@ static void pata_parport_drain_fifo(struct ata_queued_cmd *qc) static struct ata_port_operations pata_parport_port_ops = { .inherits = &ata_sff_port_ops, - .softreset = pata_parport_softreset, - .hardreset = NULL, + .reset.softreset = pata_parport_softreset, + .reset.hardreset = NULL, .sff_dev_select = pata_parport_dev_select, .sff_set_devctl = pata_parport_set_devctl, diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index 5b602206c522..cf3810933a27 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -46,7 +46,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d struct ata_device *slave = &link->device[1]; if (!ata_dev_enabled(master) || !ata_dev_enabled(slave)) - return ata_do_set_mode(link, r_failed_dev); + return ata_set_mode(link, r_failed_dev); if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) { @@ -58,7 +58,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d ata_dev_disable(slave); } } - return ata_do_set_mode(link, r_failed_dev); + return ata_set_mode(link, r_failed_dev); } /** diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 6820c5597b14..d792ce6d97bf 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -130,7 +130,7 @@ static struct ata_port_operations pdc2027x_pata100_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma = pdc2027x_check_atapi_dma, .cable_detect = pdc2027x_cable_detect, - .prereset = pdc2027x_prereset, + .reset.prereset = pdc2027x_prereset, }; static struct ata_port_operations pdc2027x_pata133_ops = { @@ 
-387,7 +387,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed struct ata_device *dev; int rc; - rc = ata_do_set_mode(link, r_failed); + rc = ata_set_mode(link, r_failed); if (rc < 0) return rc; diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c index 09792aac7f9d..6ff4c11e937d 100644 --- a/drivers/ata/pata_rdc.c +++ b/drivers/ata/pata_rdc.c @@ -276,7 +276,7 @@ static struct ata_port_operations rdc_pata_ops = { .cable_detect = rdc_pata_cable_detect, .set_piomode = rdc_set_piomode, .set_dmamode = rdc_set_dmamode, - .prereset = rdc_pata_prereset, + .reset.prereset = rdc_pata_prereset, }; static const struct ata_port_info rdc_port_info = { @@ -359,8 +359,8 @@ static void rdc_remove_one(struct pci_dev *pdev) } static const struct pci_device_id rdc_pci_tbl[] = { - { PCI_DEVICE(0x17F3, 0x1011), }, - { PCI_DEVICE(0x17F3, 0x1012), }, + { PCI_VDEVICE(RDC, 0x1011) }, + { PCI_VDEVICE(RDC, 0x1012) }, { } /* terminate list */ }; diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index 31de06b66221..2b751e393771 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -552,7 +552,7 @@ static struct ata_port_operations sis_133_for_sata_ops = { static struct ata_port_operations sis_base_ops = { .inherits = &ata_bmdma_port_ops, - .prereset = sis_pre_reset, + .reset.prereset = sis_pre_reset, }; static struct ata_port_operations sis_133_ops = { diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 93882e976ede..2d24c6b3e9d9 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -248,7 +248,7 @@ static struct ata_port_operations sl82c105_port_ops = { .bmdma_stop = sl82c105_bmdma_stop, .cable_detect = ata_cable_40wire, .set_piomode = sl82c105_set_piomode, - .prereset = sl82c105_pre_reset, + .reset.prereset = sl82c105_pre_reset, .sff_irq_check = sl82c105_sff_irq_check, }; diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 26d448a869e2..596e86a031b3 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -170,7 +170,7 @@ static struct ata_port_operations triflex_port_ops = { .bmdma_stop = triflex_bmdma_stop, .cable_detect = ata_cable_40wire, .set_piomode = triflex_set_piomode, - .prereset = triflex_prereset, + .reset.prereset = triflex_prereset, }; static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index bb80e7800dcb..a8c9cf685b4b 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -451,7 +451,7 @@ static struct ata_port_operations via_port_ops = { .cable_detect = via_cable_detect, .set_piomode = via_set_piomode, .set_dmamode = via_set_dmamode, - .prereset = via_pre_reset, + .reset.prereset = via_pre_reset, .sff_tf_load = via_tf_load, .port_start = via_port_start, .mode_filter = via_mode_filter, diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 8e6b2599f0d5..17a5a59861c3 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -140,7 +140,7 @@ static struct ata_port_operations adma_ata_ops = { .freeze = adma_freeze, .thaw = adma_thaw, - .prereset = adma_prereset, + .reset.prereset = adma_prereset, .port_start = adma_port_start, .port_stop = adma_port_stop, diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 6e1dd0d9c035..7a4f59202156 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -1097,7 +1097,7 @@ static struct ata_port_operations sata_dwc_ops = { .inherits = 
&ata_sff_port_ops, .error_handler = sata_dwc_error_handler, - .hardreset = sata_dwc_hardreset, + .reset.hardreset = sata_dwc_hardreset, .qc_issue = sata_dwc_qc_issue, diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 87e91a937a44..84da8d6ef28e 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1395,9 +1395,9 @@ static struct ata_port_operations sata_fsl_ops = { .freeze = sata_fsl_freeze, .thaw = sata_fsl_thaw, - .softreset = sata_fsl_softreset, - .hardreset = sata_fsl_hardreset, - .pmp_softreset = sata_fsl_softreset, + .reset.softreset = sata_fsl_softreset, + .reset.hardreset = sata_fsl_hardreset, + .pmp_reset.softreset = sata_fsl_softreset, .error_handler = sata_fsl_error_handler, .post_internal_cmd = sata_fsl_post_internal_cmd, diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index c8c817c51230..3421039f4bae 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -428,7 +428,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, static struct ata_port_operations ahci_highbank_ops = { .inherits = &ahci_ops, - .hardreset = ahci_highbank_hardreset, + .reset.hardreset = ahci_highbank_hardreset, .transmit_led_message = ecx_transmit_led_message, }; diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index db9c255dc9f2..46a8c20daf18 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -730,7 +730,7 @@ static struct ata_port_operations inic_port_ops = { .freeze = inic_freeze, .thaw = inic_thaw, - .hardreset = inic_hardreset, + .reset.hardreset = inic_hardreset, .error_handler = inic_error_handler, .post_internal_cmd = inic_post_internal_cmd, diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index bcbf96867f89..ffb396f61731 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -687,7 +687,7 @@ static struct ata_port_operations mv5_ops = { .freeze = mv_eh_freeze, .thaw = mv_eh_thaw, - .hardreset = mv_hardreset, + .reset.hardreset = mv_hardreset, .scr_read = mv5_scr_read, .scr_write = mv5_scr_write, @@ -709,10 +709,10 @@ static struct ata_port_operations mv6_ops = { .freeze = mv_eh_freeze, .thaw = mv_eh_thaw, - .hardreset = mv_hardreset, - .softreset = mv_softreset, - .pmp_hardreset = mv_pmp_hardreset, - .pmp_softreset = mv_softreset, + .reset.hardreset = mv_hardreset, + .reset.softreset = mv_softreset, + .pmp_reset.hardreset = mv_pmp_hardreset, + .pmp_reset.softreset = mv_softreset, .error_handler = mv_pmp_error_handler, .scr_read = mv_scr_read, diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index f36e2915ccf1..841e7de2bba6 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -462,7 +462,7 @@ static struct ata_port_operations nv_generic_ops = { .lost_interrupt = ATA_OP_NULL, .scr_read = nv_scr_read, .scr_write = nv_scr_write, - .hardreset = nv_hardreset, + .reset.hardreset = nv_hardreset, }; static struct ata_port_operations nv_nf2_ops = { diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 2df1a070b25a..2a005aede123 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -188,7 +188,7 @@ static struct ata_port_operations pdc_sata_ops = { .scr_read = pdc_sata_scr_read, .scr_write = pdc_sata_scr_write, .port_start = pdc_sata_port_start, - .hardreset = pdc_sata_hardreset, + .reset.hardreset = pdc_sata_hardreset, }; /* First-generation chips need a more restrictive ->check_atapi_dma op, @@ -206,7 +206,7 @@ static struct ata_port_operations pdc_pata_ops = { .freeze 
= pdc_freeze, .thaw = pdc_thaw, .port_start = pdc_common_port_start, - .softreset = pdc_pata_softreset, + .reset.softreset = pdc_pata_softreset, }; static const struct ata_port_info pdc_port_info[] = { diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index 8a6286159044..cfb9b5b61cd7 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -123,8 +123,8 @@ static struct ata_port_operations qs_ata_ops = { .freeze = qs_freeze, .thaw = qs_thaw, - .prereset = qs_prereset, - .softreset = ATA_OP_NULL, + .reset.prereset = qs_prereset, + .reset.softreset = ATA_OP_NULL, .error_handler = qs_error_handler, .lost_interrupt = ATA_OP_NULL, diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 22820a02d740..487eadd4073f 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -624,7 +624,7 @@ static struct ata_port_operations sata_rcar_port_ops = { .freeze = sata_rcar_freeze, .thaw = sata_rcar_thaw, - .softreset = sata_rcar_softreset, + .reset.softreset = sata_rcar_softreset, .scr_read = sata_rcar_scr_read, .scr_write = sata_rcar_scr_write, diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 3a99f66198a9..1b6dc950a42a 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -351,7 +351,7 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed) u32 tmp, dev_mode[2] = { }; int rc; - rc = ata_do_set_mode(link, r_failed); + rc = ata_set_mode(link, r_failed); if (rc) return rc; diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 87f4cde6a686..d642ece9f07a 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -393,10 +393,10 @@ static struct ata_port_operations sil24_ops = { .freeze = sil24_freeze, .thaw = sil24_thaw, - .softreset = sil24_softreset, - .hardreset = sil24_hardreset, - .pmp_softreset = sil24_softreset, - .pmp_hardreset = sil24_pmp_hardreset, + .reset.softreset = sil24_softreset, + .reset.hardreset = sil24_hardreset, + .pmp_reset.softreset = sil24_softreset, + .pmp_reset.hardreset = sil24_pmp_hardreset, .error_handler = sil24_error_handler, .post_internal_cmd = sil24_post_internal_cmd, .dev_config = sil24_dev_config, diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 598a872f6a08..c5d6aa36c9c3 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -340,8 +340,8 @@ static const struct scsi_host_template k2_sata_sht = { static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, - .softreset = k2_sata_softreset, - .hardreset = k2_sata_hardreset, + .reset.softreset = k2_sata_softreset, + .reset.hardreset = k2_sata_hardreset, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index f7f5131af937..0986ebd1eb4e 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -241,7 +241,7 @@ static struct ata_port_operations pdc_20621_ops = { .freeze = pdc_freeze, .thaw = pdc_thaw, - .softreset = pdc_softreset, + .reset.softreset = pdc_softreset, .error_handler = pdc_error_handler, .lost_interrupt = ATA_OP_NULL, .post_internal_cmd = pdc_post_internal_cmd, diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index 52894ff49dcb..44985796cc47 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c @@ -67,7 +67,7 @@ static struct ata_port_operations uli_ops = { .inherits = &ata_bmdma_port_ops, .scr_read = uli_scr_read, .scr_write = uli_scr_write, - .hardreset = ATA_OP_NULL, + 
.reset.hardreset = ATA_OP_NULL, }; static const struct ata_port_info uli_port_info = { diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 4ecd8f33b082..68e9003ec2d4 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -120,7 +120,7 @@ static struct ata_port_operations svia_base_ops = { static struct ata_port_operations vt6420_sata_ops = { .inherits = &svia_base_ops, .freeze = svia_noop_freeze, - .prereset = vt6420_prereset, + .reset.prereset = vt6420_prereset, .bmdma_start = vt6420_bmdma_start, }; @@ -140,7 +140,7 @@ static struct ata_port_operations vt6421_sata_ops = { static struct ata_port_operations vt8251_ops = { .inherits = &svia_base_ops, - .hardreset = sata_std_hardreset, + .reset.hardreset = sata_std_hardreset, .scr_read = vt8251_scr_read, .scr_write = vt8251_scr_write, }; diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index dba7c8e13a53..44cd3f85b659 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -217,7 +217,7 @@ static int auxiliary_bus_probe(struct device *dev) struct auxiliary_device *auxdev = to_auxiliary_dev(dev); int ret; - ret = dev_pm_domain_attach(dev, true); + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (ret) { dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret); return ret; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index b526e0e0f52d..13ab98e033ea 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -25,6 +25,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/async.h> +#include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/devinfo.h> #include <linux/slab.h> @@ -552,6 +553,7 @@ static void device_unbind_cleanup(struct device *dev) dev->dma_range_map = NULL; device_set_driver(dev, NULL); dev_set_drvdata(dev, NULL); + dev_pm_domain_detach(dev, dev->power.detach_power_off); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); pm_runtime_reinit(dev); diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 44486b2c7172..6942c62fa59d 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -822,26 +822,6 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, {} #endif -/* - * Reject firmware file names with ".." path components. - * There are drivers that construct firmware file names from device-supplied - * strings, and we don't want some device to be able to tell us "I would like to - * be sent my firmware from ../../../etc/shadow, please". - * - * Search for ".." surrounded by either '/' or start/end of string. - * - * This intentionally only looks at the firmware name, not at the firmware base - * directory or at symlink contents. - */ -static bool name_contains_dotdot(const char *name) -{ - size_t name_len = strlen(name); - - return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || - strstr(name, "/../") != NULL || - (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); -} - /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -862,6 +842,17 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; } + + /* + * Reject firmware file names with ".." path components. 
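As a rough illustration of the ".." rule spelled out in the comment above (and in the removed name_contains_dotdot() helper earlier in this hunk), these are the kinds of names the check is meant to accept or reject. The table is a hypothetical sketch for orientation only, not code taken from the patch:

	/* Illustrative sketch: expected outcomes of the ".." path-component check. */
	static const struct {
		const char *name;
		bool rejected;
	} dotdot_cases[] = {
		{ "..",                  true  },	/* whole name is ".." */
		{ "../firmware.bin",     true  },	/* leading "../" */
		{ "vendor/../../shadow", true  },	/* "/../" in the middle */
		{ "vendor/fw/..",        true  },	/* trailing "/.." */
		{ "vendor/fw..bin",      false },	/* ".." is not a path component here */
		{ "vendor/fw.bin",       false },
	};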
+ * There are drivers that construct firmware file names from + * device-supplied strings, and we don't want some device to be + * able to tell us "I would like to be sent my firmware from + * ../../../etc/shadow, please". + * + * This intentionally only looks at the firmware name, not at + * the firmware base directory or at symlink contents. + */ if (name_contains_dotdot(name)) { dev_warn(device, "Firmware load for '%s' refused, path contains '..' component\n", diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 075ec1d1b73a..09450349cf32 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1396,15 +1396,13 @@ static int platform_probe(struct device *_dev) if (ret < 0) return ret; - ret = dev_pm_domain_attach(_dev, true); + ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON | + PD_FLAG_DETACH_POWER_OFF); if (ret) goto out; - if (drv->probe) { + if (drv->probe) ret = drv->probe(dev); - if (ret) - dev_pm_domain_detach(_dev, true); - } out: if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { @@ -1422,7 +1420,6 @@ static void platform_remove(struct device *_dev) if (drv->remove) drv->remove(dev); - dev_pm_domain_detach(_dev, true); } static void platform_shutdown(struct device *_dev) diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 781968a128ff..6ecf9ce4a4e6 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); /** * dev_pm_domain_attach - Attach a device to its PM domain. * @dev: Device to attach. - * @power_on: Used to indicate whether we should power on the device. + * @flags: indicate whether we should power on/off the device on attach/detach * * The @dev may only be attached to a single PM domain. By iterating through * the available alternatives we try to find a valid PM domain for the device. @@ -100,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); * Returns 0 on successfully attached PM domain, or when it is found that the * device doesn't need a PM domain, else a negative error code. */ -int dev_pm_domain_attach(struct device *dev, bool power_on) +int dev_pm_domain_attach(struct device *dev, u32 flags) { int ret; if (dev->pm_domain) return 0; - ret = acpi_dev_pm_attach(dev, power_on); + ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON)); if (!ret) ret = genpd_dev_pm_attach(dev); + if (dev->pm_domain) + dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF); + return ret < 0 ? ret : 0; } EXPORT_SYMBOL_GPL(dev_pm_domain_attach); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 7a50af416cac..8aa06d59a2ee 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -647,14 +647,27 @@ static void dpm_async_resume_children(struct device *dev, async_func_t func) /* * Start processing "async" children of the device unless it's been * started already for them. - * - * This could have been done for the device's "async" consumers too, but - * they either need to wait for their parents or the processing has - * already started for them after their parents were processed. */ device_for_each_child(dev, func, dpm_async_with_cleanup); } +static void dpm_async_resume_subordinate(struct device *dev, async_func_t func) +{ + struct device_link *link; + int idx; + + dpm_async_resume_children(dev, func); + + idx = device_links_read_lock(); + + /* Start processing the device's "async" consumers. 
*/ + list_for_each_entry_rcu(link, &dev->links.consumers, s_node) + if (READ_ONCE(link->status) != DL_STATE_DORMANT) + dpm_async_with_cleanup(link->consumer, func); + + device_links_read_unlock(idx); +} + static void dpm_clear_async_state(struct device *dev) { reinit_completion(&dev->power.completion); @@ -663,7 +676,14 @@ static void dpm_clear_async_state(struct device *dev) static bool dpm_root_device(struct device *dev) { - return !dev->parent; + lockdep_assert_held(&dpm_list_mtx); + + /* + * Since this function is required to run under dpm_list_mtx, the + * list_empty() below will only return true if the device's list of + * consumers is actually empty before calling it. + */ + return !dev->parent && list_empty(&dev->links.suppliers); } static void async_resume_noirq(void *data, async_cookie_t cookie); @@ -747,12 +767,12 @@ Out: TRACE_RESUME(error); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); } - dpm_async_resume_children(dev, async_resume_noirq); + dpm_async_resume_subordinate(dev, async_resume_noirq); } static void async_resume_noirq(void *data, async_cookie_t cookie) @@ -804,7 +824,7 @@ static void dpm_noirq_resume_devices(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, "noirq"); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); @@ -890,12 +910,12 @@ Out: complete_all(&dev->power.completion); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? " async early" : " early", error); } - dpm_async_resume_children(dev, async_resume_early); + dpm_async_resume_subordinate(dev, async_resume_early); } static void async_resume_early(void *data, async_cookie_t cookie) @@ -951,7 +971,7 @@ void dpm_resume_early(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, "early"); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME_EARLY); trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); @@ -1066,12 +1086,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) TRACE_RESUME(error); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async" : "", error); } - dpm_async_resume_children(dev, async_resume); + dpm_async_resume_subordinate(dev, async_resume); } static void async_resume(void *data, async_cookie_t cookie) @@ -1095,7 +1115,6 @@ void dpm_resume(pm_message_t state) ktime_t starttime = ktime_get(); trace_suspend_resume(TPS("dpm_resume"), state.event, true); - might_sleep(); pm_transition = state; async_error = 0; @@ -1131,7 +1150,7 @@ void dpm_resume(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, NULL); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME); cpufreq_resume(); @@ -1198,7 +1217,6 @@ void dpm_complete(pm_message_t state) struct list_head list; trace_suspend_resume(TPS("dpm_complete"), state.event, true); - might_sleep(); INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); @@ -1258,10 +1276,15 @@ static bool dpm_leaf_device(struct device *dev) return false; } - return true; + /* + * Since this function is required to run under dpm_list_mtx, the + * list_empty() below will only return true if the device's list of + * consumers is actually empty before calling it. + */ + return list_empty(&dev->links.consumers); } -static void dpm_async_suspend_parent(struct device *dev, async_func_t func) +static bool dpm_async_suspend_parent(struct device *dev, async_func_t func) { guard(mutex)(&dpm_list_mtx); @@ -1273,11 +1296,31 @@ static void dpm_async_suspend_parent(struct device *dev, async_func_t func) * deleted before it. */ if (!device_pm_initialized(dev)) - return; + return false; /* Start processing the device's parent if it is "async". */ if (dev->parent) dpm_async_with_cleanup(dev->parent, func); + + return true; +} + +static void dpm_async_suspend_superior(struct device *dev, async_func_t func) +{ + struct device_link *link; + int idx; + + if (!dpm_async_suspend_parent(dev, func)) + return; + + idx = device_links_read_lock(); + + /* Start processing the device's "async" suppliers. */ + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + if (READ_ONCE(link->status) != DL_STATE_DORMANT) + dpm_async_with_cleanup(link->supplier, func); + + device_links_read_unlock(idx); } static void dpm_async_suspend_complete_all(struct list_head *device_list) @@ -1344,7 +1387,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie); * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async) +static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1355,7 +1398,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy dpm_wait_for_subordinate(dev, async); - if (async_error) + if (READ_ONCE(async_error)) goto Complete; if (dev->power.syscore || dev->power.direct_complete) @@ -1388,7 +1431,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy Run: error = dpm_run_callback(callback, dev, state, info); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async noirq" : " noirq", error); goto Complete; @@ -1414,12 +1457,10 @@ Complete: complete_all(&dev->power.completion); TRACE_SUSPEND(error); - if (error || async_error) - return error; - - dpm_async_suspend_parent(dev, async_suspend_noirq); + if (error || READ_ONCE(async_error)) + return; - return 0; + dpm_async_suspend_superior(dev, async_suspend_noirq); } static void async_suspend_noirq(void *data, async_cookie_t cookie) @@ -1434,7 +1475,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state) { ktime_t starttime = ktime_get(); struct device *dev; - int error = 0; + int error; trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); @@ -1465,13 +1506,13 @@ static int dpm_noirq_suspend_devices(pm_message_t state) mutex_unlock(&dpm_list_mtx); - error = device_suspend_noirq(dev, state, false); + device_suspend_noirq(dev, state, false); put_device(dev); mutex_lock(&dpm_list_mtx); - if (error || async_error) { + if (READ_ONCE(async_error)) { dpm_async_suspend_complete_all(&dpm_late_early_list); /* * Move all devices to the target list to resume them @@ -1485,9 +1526,8 @@ static int dpm_noirq_suspend_devices(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - if (!error) - error = async_error; + error = READ_ONCE(async_error); if (error) dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); @@ -1542,7 +1582,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie); * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_suspend_late(struct device *dev, pm_message_t state, bool async) +static void device_suspend_late(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1559,11 +1599,11 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn dpm_wait_for_subordinate(dev, async); - if (async_error) + if (READ_ONCE(async_error)) goto Complete; if (pm_wakeup_pending()) { - async_error = -EBUSY; + WRITE_ONCE(async_error, -EBUSY); goto Complete; } @@ -1597,7 +1637,7 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn Run: error = dpm_run_callback(callback, dev, state, info); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async late" : " late", error); goto Complete; @@ -1611,12 +1651,10 @@ Complete: TRACE_SUSPEND(error); complete_all(&dev->power.completion); - if (error || async_error) - return error; - - dpm_async_suspend_parent(dev, async_suspend_late); + if (error || READ_ONCE(async_error)) + return; - return 0; + dpm_async_suspend_superior(dev, async_suspend_late); } static void async_suspend_late(void *data, async_cookie_t cookie) @@ -1635,7 +1673,7 @@ int dpm_suspend_late(pm_message_t state) { ktime_t starttime = ktime_get(); struct device *dev; - int error = 0; + int error; trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); @@ -1668,13 +1706,13 @@ int dpm_suspend_late(pm_message_t state) mutex_unlock(&dpm_list_mtx); - error = device_suspend_late(dev, state, false); + device_suspend_late(dev, state, false); put_device(dev); mutex_lock(&dpm_list_mtx); - if (error || async_error) { + if (READ_ONCE(async_error)) { dpm_async_suspend_complete_all(&dpm_suspended_list); /* * Move all devices to the target list to resume them @@ -1688,9 +1726,8 @@ int dpm_suspend_late(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - if (!error) - error = async_error; + error = READ_ONCE(async_error); if (error) { dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); @@ -1779,7 +1816,7 @@ static void async_suspend(void *data, async_cookie_t cookie); * @state: PM transition of the system being carried out. * @async: If true, the device is being suspended asynchronously. */ -static int device_suspend(struct device *dev, pm_message_t state, bool async) +static void device_suspend(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1791,7 +1828,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) dpm_wait_for_subordinate(dev, async); - if (async_error) { + if (READ_ONCE(async_error)) { dev->power.direct_complete = false; goto Complete; } @@ -1811,7 +1848,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) if (pm_wakeup_pending()) { dev->power.direct_complete = false; - async_error = -EBUSY; + WRITE_ONCE(async_error, -EBUSY); goto Complete; } @@ -1895,7 +1932,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) Complete: if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async" : "", error); } @@ -1903,12 +1940,10 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) complete_all(&dev->power.completion); TRACE_SUSPEND(error); - if (error || async_error) - return error; - - dpm_async_suspend_parent(dev, async_suspend); + if (error || READ_ONCE(async_error)) + return; - return 0; + dpm_async_suspend_superior(dev, async_suspend); } static void async_suspend(void *data, async_cookie_t cookie) @@ -1927,7 +1962,7 @@ int dpm_suspend(pm_message_t state) { ktime_t starttime = ktime_get(); struct device *dev; - int error = 0; + int error; trace_suspend_resume(TPS("dpm_suspend"), state.event, true); might_sleep(); @@ -1962,13 +1997,13 @@ int dpm_suspend(pm_message_t state) mutex_unlock(&dpm_list_mtx); - error = device_suspend(dev, state, false); + device_suspend(dev, state, false); put_device(dev); mutex_lock(&dpm_list_mtx); - if (error || async_error) { + if (READ_ONCE(async_error)) { dpm_async_suspend_complete_all(&dpm_prepared_list); /* * Move all devices to the target list to resume them @@ -1982,9 +2017,8 @@ int dpm_suspend(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - if (!error) - error = async_error; + error = READ_ONCE(async_error); if (error) dpm_save_failed_step(SUSPEND_SUSPEND); @@ -2129,7 +2163,6 @@ int dpm_prepare(pm_message_t state) int error = 0; trace_suspend_resume(TPS("dpm_prepare"), state.event, true); - might_sleep(); /* * Give a chance for the known devices to complete their probes, before diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index c55a7c70bc1a..f835b85bd4d9 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -19,10 +19,24 @@ typedef int (*pm_callback_t)(struct device *); +static inline pm_callback_t get_callback_ptr(const void *start, size_t offset) +{ + return *(pm_callback_t *)(start + offset); +} + +static pm_callback_t __rpm_get_driver_callback(struct device *dev, + size_t cb_offset) +{ + if (dev->driver && dev->driver->pm) + return get_callback_ptr(dev->driver->pm, cb_offset); + + return NULL; +} + static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) { - pm_callback_t cb; const struct dev_pm_ops *ops; + pm_callback_t cb = NULL; if (dev->pm_domain) ops = &dev->pm_domain->ops; @@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) ops = NULL; if (ops) - cb = *(pm_callback_t *)((void *)ops + cb_offset); - else - cb = NULL; + cb = get_callback_ptr(ops, cb_offset); - if (!cb && dev->driver && dev->driver->pm) - cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); + if (!cb) + cb = __rpm_get_driver_callback(dev, cb_offset); return cb; } @@ -1191,10 +1203,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume); * * Return -EINVAL if runtime PM is disabled for @dev. * - * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either - * @ign_usage_count is %true or the runtime PM usage counter of @dev is not - * zero, increment the usage counter of @dev and return 1. Otherwise, return 0 - * without changing the usage counter. + * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count + * is set, or (2) @dev is not ignoring children and its active child count is + * nonero, or (3) the runtime PM usage counter of @dev is not zero, increment + * the usage counter of @dev and return 1. + * + * Otherwise, return 0 without changing the usage counter. 
* * If @ign_usage_count is %true, this function can be used to prevent suspending * the device when its runtime PM status is %RPM_ACTIVE. @@ -1216,7 +1230,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count) retval = -EINVAL; } else if (dev->power.runtime_status != RPM_ACTIVE) { retval = 0; - } else if (ign_usage_count) { + } else if (ign_usage_count || (!dev->power.ignore_children && + atomic_read(&dev->power.child_count) > 0)) { retval = 1; atomic_inc(&dev->power.usage_count); } else { @@ -1249,10 +1264,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); * @dev: Target device. * * Increment the runtime PM usage counter of @dev if its runtime PM status is - * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case - * it returns 1. If the device is in a different state or its usage_count is 0, - * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device, - * in which case also the usage_count will remain unmodified. + * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not + * ignoring children and its active child count is nonzero. 1 is returned in + * this case. + * + * If @dev is in a different state or it is not in use (that is, its usage + * counter is 0, or it is ignoring children, or its active child count is 0), + * 0 is returned. + * + * -EINVAL is returned if runtime PM is disabled for the device, in which case + * also the usage counter of @dev is not updated. */ int pm_runtime_get_if_in_use(struct device *dev) { @@ -1827,7 +1848,7 @@ void pm_runtime_init(struct device *dev) dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; - dev->power.needs_force_resume = 0; + dev->power.needs_force_resume = false; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; @@ -1854,6 +1875,11 @@ void pm_runtime_reinit(struct device *dev) pm_runtime_put(dev->parent); } } + /* + * Clear power.needs_force_resume in case it has been set by + * pm_runtime_force_suspend() invoked from a driver remove callback. + */ + dev->power.needs_force_resume = false; } /** @@ -1941,13 +1967,23 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); } -bool pm_runtime_need_not_resume(struct device *dev) +static pm_callback_t get_callback(struct device *dev, size_t cb_offset) { - return atomic_read(&dev->power.usage_count) <= 1 && - (atomic_read(&dev->power.child_count) == 0 || - dev->power.ignore_children); + /* + * Setting power.strict_midlayer means that the middle layer + * code does not want its runtime PM callbacks to be invoked via + * pm_runtime_force_suspend() and pm_runtime_force_resume(), so + * return a direct pointer to the driver callback in that case. + */ + if (dev_pm_strict_midlayer_is_set(dev)) + return __rpm_get_driver_callback(dev, cb_offset); + + return __rpm_get_callback(dev, cb_offset); } +#define GET_CALLBACK(dev, callback) \ + get_callback(dev, offsetof(struct dev_pm_ops, callback)) + /** * pm_runtime_force_suspend - Force a device into suspend state if needed. * @dev: Device to suspend. @@ -1964,10 +2000,6 @@ bool pm_runtime_need_not_resume(struct device *dev) * sure the device is put into low power state and it should only be used during * system-wide PM transitions to sleep states. It assumes that the analogous * pm_runtime_force_resume() will be used to resume the device. 
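The kernel-doc above relies on pm_runtime_force_suspend() being paired with pm_runtime_force_resume() during system sleep. In a driver, that pairing typically looks like the sketch below (hypothetical foo_* callbacks, not part of the diff; the SET_RUNTIME_PM_OPS/SET_SYSTEM_SLEEP_PM_OPS macros are the stock ones from linux/pm.h):

	static int foo_runtime_suspend(struct device *dev)
	{
		/* put the hardware into its low-power state */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* bring the hardware back to full power */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
		/* reuse the runtime PM callbacks for system-wide suspend/resume */
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
	};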
- * - * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent - * state where this function has called the ->runtime_suspend callback but the - * PM core marks the driver as runtime active. */ int pm_runtime_force_suspend(struct device *dev) { @@ -1975,10 +2007,10 @@ int pm_runtime_force_suspend(struct device *dev) int ret; pm_runtime_disable(dev); - if (pm_runtime_status_suspended(dev)) + if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume) return 0; - callback = RPM_GET_CALLBACK(dev, runtime_suspend); + callback = GET_CALLBACK(dev, runtime_suspend); dev_pm_enable_wake_irq_check(dev, true); ret = callback ? callback(dev) : 0; @@ -1990,15 +2022,16 @@ int pm_runtime_force_suspend(struct device *dev) /* * If the device can stay in suspend after the system-wide transition * to the working state that will follow, drop the children counter of - * its parent, but set its status to RPM_SUSPENDED anyway in case this - * function will be called again for it in the meantime. + * its parent and the usage counters of its suppliers. Otherwise, set + * power.needs_force_resume to let pm_runtime_force_resume() know that + * the device needs to be taken care of and to prevent this function + * from handling the device again in case the device is passed to it + * once more subsequently. */ - if (pm_runtime_need_not_resume(dev)) { + if (pm_runtime_need_not_resume(dev)) pm_runtime_set_suspended(dev); - } else { - __update_runtime_status(dev, RPM_SUSPENDED); - dev->power.needs_force_resume = 1; - } + else + dev->power.needs_force_resume = true; return 0; @@ -2009,33 +2042,37 @@ err: } EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); +#ifdef CONFIG_PM_SLEEP + /** * pm_runtime_force_resume - Force a device into resume state if needed. * @dev: Device to resume. * - * Prior invoking this function we expect the user to have brought the device - * into low power state by a call to pm_runtime_force_suspend(). Here we reverse - * those actions and bring the device into full power, if it is expected to be - * used on system resume. In the other case, we defer the resume to be managed - * via runtime PM. + * This function expects that either pm_runtime_force_suspend() has put the + * device into a low-power state prior to calling it, or the device had been + * runtime-suspended before the preceding system-wide suspend transition and it + * was left in suspend during that transition. * - * Typically this function may be invoked from a system resume callback. + * The actions carried out by pm_runtime_force_suspend(), or by a runtime + * suspend in general, are reversed and the device is brought back into full + * power if it is expected to be used on system resume, which is the case when + * its needs_force_resume flag is set or when its smart_suspend flag is set and + * its runtime PM status is "active". + * + * In other cases, the resume is deferred to be managed via runtime PM. + * + * Typically, this function may be invoked from a system resume callback. */ int pm_runtime_force_resume(struct device *dev) { int (*callback)(struct device *); int ret = 0; - if (!dev->power.needs_force_resume) + if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) || + pm_runtime_status_suspended(dev))) goto out; - /* - * The value of the parent's children counter is correct already, so - * just update the status of the device. 
- */ - __update_runtime_status(dev, RPM_ACTIVE); - - callback = RPM_GET_CALLBACK(dev, runtime_resume); + callback = GET_CALLBACK(dev, runtime_resume); dev_pm_disable_wake_irq_check(dev, false); ret = callback ? callback(dev) : 0; @@ -2046,9 +2083,30 @@ int pm_runtime_force_resume(struct device *dev) } pm_runtime_mark_last_busy(dev); + out: - dev->power.needs_force_resume = 0; + /* + * The smart_suspend flag can be cleared here because it is not going + * to be necessary until the next system-wide suspend transition that + * will update it again. + */ + dev->power.smart_suspend = false; + /* + * Also clear needs_force_resume to make this function skip devices that + * have been seen by it once. + */ + dev->power.needs_force_resume = false; + pm_runtime_enable(dev); return ret; } EXPORT_SYMBOL_GPL(pm_runtime_force_resume); + +bool pm_runtime_need_not_resume(struct device *dev) +{ + return atomic_read(&dev->power.usage_count) <= 1 && + (atomic_read(&dev->power.child_count) == 0 || + dev->power.ignore_children); +} + +#endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index fb84cda92a75..c9b4c04b1cf6 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -470,10 +470,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file, if (err) return count; - err = debugfs_file_get(file->f_path.dentry); - if (err) - return err; - map->lock(map->lock_arg); if (new_val && !map->cache_only) { @@ -486,7 +482,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file, map->cache_only = new_val; map->unlock(map->lock_arg); - debugfs_file_put(file->f_path.dentry); if (require_sync) { err = regcache_sync(map); @@ -517,10 +512,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, if (err) return count; - err = debugfs_file_get(file->f_path.dentry); - if (err) - return err; - map->lock(map->lock_arg); if (new_val && !map->cache_bypass) { @@ -532,7 +523,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, map->cache_bypass = new_val; map->unlock(map->lock_arg); - debugfs_file_put(file->f_path.dentry); return count; } diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index 64ea340950b6..95c5bf2a78ee 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -736,7 +736,7 @@ static void stride(struct kunit *test) } } -static struct regmap_range_cfg test_range = { +static const struct regmap_range_cfg test_range = { .selector_reg = 1, .selector_mask = 0xff, diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 0f70e2374e7f..df38fb364904 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -256,49 +256,6 @@ config BLK_DEV_RAM_SIZE The default value is 4096 kilobytes. Only change this if you know what you are doing. -config CDROM_PKTCDVD - tristate "Packet writing on CD/DVD media (DEPRECATED)" - depends on !UML - depends on SCSI - select CDROM - help - Note: This driver is deprecated and will be removed from the - kernel in the near future! - - If you have a CDROM/DVD drive that supports packet writing, say - Y to include support. It should work with any MMC/Mt Fuji - compliant ATAPI or SCSI drive, which is just about any newer - DVD/CD writer. - - Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs - is possible. - DVD-RW disks must be in restricted overwrite mode. 
- - See the file <file:Documentation/cdrom/packet-writing.rst> - for further information on the use of this driver. - - To compile this driver as a module, choose M here: the - module will be called pktcdvd. - -config CDROM_PKTCDVD_BUFFERS - int "Free buffers for data gathering" - depends on CDROM_PKTCDVD - default "8" - help - This controls the maximum number of active concurrent packets. More - concurrent packets can increase write performance, but also require - more memory. Each concurrent packet will require approximately 64Kb - of non-swappable kernel memory, memory which will be allocated when - a disc is opened for writing. - -config CDROM_PKTCDVD_WCACHE - bool "Enable write caching" - depends on CDROM_PKTCDVD - help - If enabled, write caching will be set for the CD-R/W device. For now - this option is dangerous unless the CD-RW media is known good, as we - don't do deferred write error handling yet. - config ATA_OVER_ETH tristate "ATA over Ethernet support" depends on NET diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 097707aca725..a695ce74ef22 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o obj-$(CONFIG_N64CART) += n64cart.o obj-$(CONFIG_BLK_DEV_RAM) += brd.o obj-$(CONFIG_BLK_DEV_LOOP) += loop.o -obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o obj-$(CONFIG_SUNVDC) += sunvdc.o obj-$(CONFIG_BLK_DEV_NBD) += nbd.o diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index e5a2e5f7887b..975024cf03c5 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2500,7 +2500,11 @@ static int handle_write_conflicts(struct drbd_device *device, peer_req->w.cb = superseded ? e_send_superseded : e_send_retry_write; list_add_tail(&peer_req->w.list, &device->done_ee); - queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work); + /* put is in drbd_send_acks_wf() */ + kref_get(&device->kref); + if (!queue_work(connection->ack_sender, + &peer_req->peer_device->send_acks_work)) + kref_put(&device->kref, drbd_destroy_device); err = -ENOENT; goto out; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index e97432032f01..24be0c2c4075 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3411,7 +3411,7 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, struct floppy_max_errors max_errors; struct floppy_drive_params dp; } inparam; /* parameters coming from user space */ - const void *outparam; /* parameters passed back to user space */ + const void *outparam = NULL; /* parameters passed back to user space */ /* convert compatibility eject ioctls into floppy eject ioctl. * We do this in order to provide a means to eject floppy disks before diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 8d994cae3b83..1b6ee91f8eb9 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1431,17 +1431,34 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) return 0; } -static int loop_set_block_size(struct loop_device *lo, unsigned long arg) +static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode, + struct block_device *bdev, unsigned long arg) { struct queue_limits lim; unsigned int memflags; int err = 0; - if (lo->lo_state != Lo_bound) - return -ENXIO; + /* + * If we don't hold exclusive handle for the device, upgrade to it + * here to avoid changing device under exclusive owner. 
+ */ + if (!(mode & BLK_OPEN_EXCL)) { + err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL); + if (err) + return err; + } + + err = mutex_lock_killable(&lo->lo_mutex); + if (err) + goto abort_claim; + + if (lo->lo_state != Lo_bound) { + err = -ENXIO; + goto unlock; + } if (lo->lo_queue->limits.logical_block_size == arg) - return 0; + goto unlock; sync_blockdev(lo->lo_device); invalidate_bdev(lo->lo_device); @@ -1454,6 +1471,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue, memflags); +unlock: + mutex_unlock(&lo->lo_mutex); +abort_claim: + if (!(mode & BLK_OPEN_EXCL)) + bd_abort_claiming(bdev, loop_set_block_size); return err; } @@ -1472,9 +1494,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, case LOOP_SET_DIRECT_IO: err = loop_set_dio(lo, arg); break; - case LOOP_SET_BLOCK_SIZE: - err = loop_set_block_size(lo, arg); - break; default: err = -EINVAL; } @@ -1529,9 +1548,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, break; case LOOP_GET_STATUS64: return loop_get_status64(lo, argp); + case LOOP_SET_BLOCK_SIZE: + if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + return loop_set_block_size(lo, mode, bdev, arg); case LOOP_SET_CAPACITY: case LOOP_SET_DIRECT_IO: - case LOOP_SET_BLOCK_SIZE: if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) return -EPERM; fallthrough; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 66ce6b81c7d9..8fc7761397bd 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -2040,11 +2040,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, * @dir Direction (read or write) * * return value - * None + * 0 The IO completed successfully. + * -ENOMEM The DMA mapping failed. 
*/ -static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, - struct mtip_cmd *command, - struct blk_mq_hw_ctx *hctx) +static int mtip_hw_submit_io(struct driver_data *dd, struct request *rq, + struct mtip_cmd *command, + struct blk_mq_hw_ctx *hctx) { struct mtip_cmd_hdr *hdr = dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag; @@ -2056,12 +2057,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, unsigned int nents; /* Map the scatter list for DMA access */ - nents = blk_rq_map_sg(rq, command->sg); - nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); + command->scatter_ents = blk_rq_map_sg(rq, command->sg); + nents = dma_map_sg(&dd->pdev->dev, command->sg, + command->scatter_ents, dma_dir); + if (!nents) + return -ENOMEM; - prefetch(&port->flags); - command->scatter_ents = nents; + prefetch(&port->flags); /* * The number of retries for this command before it is @@ -2112,11 +2115,13 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) { set_bit(rq->tag, port->cmds_to_issue); set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); - return; + return 0; } /* Issue the command to the hardware */ mtip_issue_ncq_command(port, rq->tag); + + return 0; } /* @@ -3315,7 +3320,9 @@ static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(rq); - mtip_hw_submit_io(dd, rq, cmd, hctx); + if (mtip_hw_submit_io(dd, rq, cmd, hctx)) + return BLK_STS_IOERR; + return BLK_STS_OK; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 2592bd19ebc1..6463d0e8d0ce 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1473,7 +1473,17 @@ static int nbd_start_device(struct nbd_device *nbd) return -EINVAL; } - blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); +retry: + mutex_unlock(&nbd->config_lock); + blk_mq_update_nr_hw_queues(&nbd->tag_set, num_connections); + mutex_lock(&nbd->config_lock); + + /* if another code path updated nr_hw_queues, retry until succeed */ + if (num_connections != config->num_connections) { + num_connections = config->num_connections; + goto retry; + } + nbd->pid = task_pid_nr(current); nbd_parse_flags(nbd); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c deleted file mode 100644 index d5cc7bd2875c..000000000000 --- a/drivers/block/pktcdvd.c +++ /dev/null @@ -1,2916 +0,0 @@ -/* - * Copyright (C) 2000 Jens Axboe <axboe@suse.de> - * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com> - * Copyright (C) 2006 Thomas Maier <balagi@justmail.de> - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and - * DVD-RAM devices. - * - * Theory of operation: - * - * At the lowest level, there is the standard driver for the CD/DVD device, - * such as drivers/scsi/sr.c. This driver can handle read and write requests, - * but it doesn't know anything about the special restrictions that apply to - * packet writing. One restriction is that write requests must be aligned to - * packet boundaries on the physical media, and the size of a write request - * must be equal to the packet size. Another restriction is that a - * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read - * command, if the previous command was a write. 
- * - * The purpose of the packet writing driver is to hide these restrictions from - * higher layers, such as file systems, and present a block device that can be - * randomly read and written using 2kB-sized blocks. - * - * The lowest layer in the packet writing driver is the packet I/O scheduler. - * Its data is defined by the struct packet_iosched and includes two bio - * queues with pending read and write requests. These queues are processed - * by the pkt_iosched_process_queue() function. The write requests in this - * queue are already properly aligned and sized. This layer is responsible for - * issuing the flush cache commands and scheduling the I/O in a good order. - * - * The next layer transforms unaligned write requests to aligned writes. This - * transformation requires reading missing pieces of data from the underlying - * block device, assembling the pieces to full packets and queuing them to the - * packet I/O scheduler. - * - * At the top layer there is a custom ->submit_bio function that forwards - * read requests directly to the iosched queue and puts write requests in the - * unaligned write queue. A kernel thread performs the necessary read - * gathering to convert the unaligned writes to aligned writes and then feeds - * them to the packet I/O scheduler. - * - *************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/backing-dev.h> -#include <linux/compat.h> -#include <linux/debugfs.h> -#include <linux/device.h> -#include <linux/errno.h> -#include <linux/file.h> -#include <linux/freezer.h> -#include <linux/kernel.h> -#include <linux/kthread.h> -#include <linux/miscdevice.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/nospec.h> -#include <linux/pktcdvd.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/types.h> -#include <linux/uaccess.h> - -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_ioctl.h> - -#include <linux/unaligned.h> - -#define DRIVER_NAME "pktcdvd" - -#define MAX_SPEED 0xffff - -static DEFINE_MUTEX(pktcdvd_mutex); -static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; -static struct proc_dir_entry *pkt_proc; -static int pktdev_major; -static int write_congestion_on = PKT_WRITE_CONGESTION_ON; -static int write_congestion_off = PKT_WRITE_CONGESTION_OFF; -static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */ -static mempool_t psd_pool; -static struct bio_set pkt_bio_set; - -/* /sys/class/pktcdvd */ -static struct class class_pktcdvd; -static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */ - -/* forward declaration */ -static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev); -static int pkt_remove_dev(dev_t pkt_dev); - -static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) -{ - return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); -} - -/********************************************************** - * sysfs interface for pktcdvd - * by (C) 2006 Thomas Maier <balagi@justmail.de> - - /sys/class/pktcdvd/pktcdvd[0-7]/ - stat/reset - stat/packets_started - stat/packets_finished - stat/kb_written - stat/kb_read - stat/kb_read_gather - write_queue/size - write_queue/congestion_off - write_queue/congestion_on - **********************************************************/ - -static ssize_t packets_started_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct 
pktcdvd_device *pd = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started); -} -static DEVICE_ATTR_RO(packets_started); - -static ssize_t packets_finished_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended); -} -static DEVICE_ATTR_RO(packets_finished); - -static ssize_t kb_written_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1); -} -static DEVICE_ATTR_RO(kb_written); - -static ssize_t kb_read_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1); -} -static DEVICE_ATTR_RO(kb_read); - -static ssize_t kb_read_gather_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1); -} -static DEVICE_ATTR_RO(kb_read_gather); - -static ssize_t reset_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - - if (len > 0) { - pd->stats.pkt_started = 0; - pd->stats.pkt_ended = 0; - pd->stats.secs_w = 0; - pd->stats.secs_rg = 0; - pd->stats.secs_r = 0; - } - return len; -} -static DEVICE_ATTR_WO(reset); - -static struct attribute *pkt_stat_attrs[] = { - &dev_attr_packets_finished.attr, - &dev_attr_packets_started.attr, - &dev_attr_kb_read.attr, - &dev_attr_kb_written.attr, - &dev_attr_kb_read_gather.attr, - &dev_attr_reset.attr, - NULL, -}; - -static const struct attribute_group pkt_stat_group = { - .name = "stat", - .attrs = pkt_stat_attrs, -}; - -static ssize_t size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - int n; - - spin_lock(&pd->lock); - n = sysfs_emit(buf, "%d\n", pd->bio_queue_size); - spin_unlock(&pd->lock); - return n; -} -static DEVICE_ATTR_RO(size); - -static void init_write_congestion_marks(int* lo, int* hi) -{ - if (*hi > 0) { - *hi = max(*hi, 500); - *hi = min(*hi, 1000000); - if (*lo <= 0) - *lo = *hi - 100; - else { - *lo = min(*lo, *hi - 100); - *lo = max(*lo, 100); - } - } else { - *hi = -1; - *lo = -1; - } -} - -static ssize_t congestion_off_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - int n; - - spin_lock(&pd->lock); - n = sysfs_emit(buf, "%d\n", pd->write_congestion_off); - spin_unlock(&pd->lock); - return n; -} - -static ssize_t congestion_off_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - int val, ret; - - ret = kstrtoint(buf, 10, &val); - if (ret) - return ret; - - spin_lock(&pd->lock); - pd->write_congestion_off = val; - init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); - spin_unlock(&pd->lock); - return len; -} -static DEVICE_ATTR_RW(congestion_off); - -static ssize_t congestion_on_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - int n; - - spin_lock(&pd->lock); - n = sysfs_emit(buf, "%d\n", pd->write_congestion_on); - spin_unlock(&pd->lock); - return n; -} - -static ssize_t 
congestion_on_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct pktcdvd_device *pd = dev_get_drvdata(dev); - int val, ret; - - ret = kstrtoint(buf, 10, &val); - if (ret) - return ret; - - spin_lock(&pd->lock); - pd->write_congestion_on = val; - init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); - spin_unlock(&pd->lock); - return len; -} -static DEVICE_ATTR_RW(congestion_on); - -static struct attribute *pkt_wq_attrs[] = { - &dev_attr_congestion_on.attr, - &dev_attr_congestion_off.attr, - &dev_attr_size.attr, - NULL, -}; - -static const struct attribute_group pkt_wq_group = { - .name = "write_queue", - .attrs = pkt_wq_attrs, -}; - -static const struct attribute_group *pkt_groups[] = { - &pkt_stat_group, - &pkt_wq_group, - NULL, -}; - -static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) -{ - if (class_is_registered(&class_pktcdvd)) { - pd->dev = device_create_with_groups(&class_pktcdvd, NULL, - MKDEV(0, 0), pd, pkt_groups, - "%s", pd->disk->disk_name); - if (IS_ERR(pd->dev)) - pd->dev = NULL; - } -} - -static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) -{ - if (class_is_registered(&class_pktcdvd)) - device_unregister(pd->dev); -} - - -/******************************************************************** - /sys/class/pktcdvd/ - add map block device - remove unmap packet dev - device_map show mappings - *******************************************************************/ - -static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr, - char *data) -{ - int n = 0; - int idx; - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - for (idx = 0; idx < MAX_WRITERS; idx++) { - struct pktcdvd_device *pd = pkt_devs[idx]; - if (!pd) - continue; - n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n", - pd->disk->disk_name, - MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), - MAJOR(file_bdev(pd->bdev_file)->bd_dev), - MINOR(file_bdev(pd->bdev_file)->bd_dev)); - } - mutex_unlock(&ctl_mutex); - return n; -} -static CLASS_ATTR_RO(device_map); - -static ssize_t add_store(const struct class *c, const struct class_attribute *attr, - const char *buf, size_t count) -{ - unsigned int major, minor; - - if (sscanf(buf, "%u:%u", &major, &minor) == 2) { - /* pkt_setup_dev() expects caller to hold reference to self */ - if (!try_module_get(THIS_MODULE)) - return -ENODEV; - - pkt_setup_dev(MKDEV(major, minor), NULL); - - module_put(THIS_MODULE); - - return count; - } - - return -EINVAL; -} -static CLASS_ATTR_WO(add); - -static ssize_t remove_store(const struct class *c, const struct class_attribute *attr, - const char *buf, size_t count) -{ - unsigned int major, minor; - if (sscanf(buf, "%u:%u", &major, &minor) == 2) { - pkt_remove_dev(MKDEV(major, minor)); - return count; - } - return -EINVAL; -} -static CLASS_ATTR_WO(remove); - -static struct attribute *class_pktcdvd_attrs[] = { - &class_attr_add.attr, - &class_attr_remove.attr, - &class_attr_device_map.attr, - NULL, -}; -ATTRIBUTE_GROUPS(class_pktcdvd); - -static struct class class_pktcdvd = { - .name = DRIVER_NAME, - .class_groups = class_pktcdvd_groups, -}; - -static int pkt_sysfs_init(void) -{ - /* - * create control files in sysfs - * /sys/class/pktcdvd/... 
- */ - return class_register(&class_pktcdvd); -} - -static void pkt_sysfs_cleanup(void) -{ - class_unregister(&class_pktcdvd); -} - -/******************************************************************** - entries in debugfs - - /sys/kernel/debug/pktcdvd[0-7]/ - info - - *******************************************************************/ - -static void pkt_count_states(struct pktcdvd_device *pd, int *states) -{ - struct packet_data *pkt; - int i; - - for (i = 0; i < PACKET_NUM_STATES; i++) - states[i] = 0; - - spin_lock(&pd->cdrw.active_list_lock); - list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { - states[pkt->state]++; - } - spin_unlock(&pd->cdrw.active_list_lock); -} - -static int pkt_seq_show(struct seq_file *m, void *p) -{ - struct pktcdvd_device *pd = m->private; - char *msg; - int states[PACKET_NUM_STATES]; - - seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, - file_bdev(pd->bdev_file)); - - seq_printf(m, "\nSettings:\n"); - seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); - - if (pd->settings.write_type == 0) - msg = "Packet"; - else - msg = "Unknown"; - seq_printf(m, "\twrite type:\t\t%s\n", msg); - - seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); - seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); - - seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); - - if (pd->settings.block_mode == PACKET_BLOCK_MODE1) - msg = "Mode 1"; - else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) - msg = "Mode 2"; - else - msg = "Unknown"; - seq_printf(m, "\tblock mode:\t\t%s\n", msg); - - seq_printf(m, "\nStatistics:\n"); - seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); - seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); - seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); - seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); - seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); - - seq_printf(m, "\nMisc:\n"); - seq_printf(m, "\treference count:\t%d\n", pd->refcnt); - seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); - seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); - seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); - seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); - seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); - - seq_printf(m, "\nQueue state:\n"); - seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); - seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); - seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector); - - pkt_count_states(pd, states); - seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", - states[0], states[1], states[2], states[3], states[4], states[5]); - - seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n", - pd->write_congestion_off, - pd->write_congestion_on); - return 0; -} -DEFINE_SHOW_ATTRIBUTE(pkt_seq); - -static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) -{ - if (!pkt_debugfs_root) - return; - pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root); - - pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root, - pd, &pkt_seq_fops); -} - -static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) -{ - if (!pkt_debugfs_root) - return; - debugfs_remove(pd->dfs_f_info); - debugfs_remove(pd->dfs_d_root); - pd->dfs_f_info = NULL; - pd->dfs_d_root = NULL; -} - -static void pkt_debugfs_init(void) -{ - pkt_debugfs_root = 
debugfs_create_dir(DRIVER_NAME, NULL); -} - -static void pkt_debugfs_cleanup(void) -{ - debugfs_remove(pkt_debugfs_root); - pkt_debugfs_root = NULL; -} - -/* ----------------------------------------------------------*/ - - -static void pkt_bio_finished(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - - BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); - if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { - dev_dbg(ddev, "queue empty\n"); - atomic_set(&pd->iosched.attention, 1); - wake_up(&pd->wqueue); - } -} - -/* - * Allocate a packet_data struct - */ -static struct packet_data *pkt_alloc_packet_data(int frames) -{ - int i; - struct packet_data *pkt; - - pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL); - if (!pkt) - goto no_pkt; - - pkt->frames = frames; - pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL); - if (!pkt->w_bio) - goto no_bio; - - for (i = 0; i < frames / FRAMES_PER_PAGE; i++) { - pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO); - if (!pkt->pages[i]) - goto no_page; - } - - spin_lock_init(&pkt->lock); - bio_list_init(&pkt->orig_bios); - - for (i = 0; i < frames; i++) { - pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL); - if (!pkt->r_bios[i]) - goto no_rd_bio; - } - - return pkt; - -no_rd_bio: - for (i = 0; i < frames; i++) - kfree(pkt->r_bios[i]); -no_page: - for (i = 0; i < frames / FRAMES_PER_PAGE; i++) - if (pkt->pages[i]) - __free_page(pkt->pages[i]); - kfree(pkt->w_bio); -no_bio: - kfree(pkt); -no_pkt: - return NULL; -} - -/* - * Free a packet_data struct - */ -static void pkt_free_packet_data(struct packet_data *pkt) -{ - int i; - - for (i = 0; i < pkt->frames; i++) - kfree(pkt->r_bios[i]); - for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++) - __free_page(pkt->pages[i]); - kfree(pkt->w_bio); - kfree(pkt); -} - -static void pkt_shrink_pktlist(struct pktcdvd_device *pd) -{ - struct packet_data *pkt, *next; - - BUG_ON(!list_empty(&pd->cdrw.pkt_active_list)); - - list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { - pkt_free_packet_data(pkt); - } - INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); -} - -static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) -{ - struct packet_data *pkt; - - BUG_ON(!list_empty(&pd->cdrw.pkt_free_list)); - - while (nr_packets > 0) { - pkt = pkt_alloc_packet_data(pd->settings.size >> 2); - if (!pkt) { - pkt_shrink_pktlist(pd); - return 0; - } - pkt->id = nr_packets; - pkt->pd = pd; - list_add(&pkt->list, &pd->cdrw.pkt_free_list); - nr_packets--; - } - return 1; -} - -static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) -{ - struct rb_node *n = rb_next(&node->rb_node); - if (!n) - return NULL; - return rb_entry(n, struct pkt_rb_node, rb_node); -} - -static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) -{ - rb_erase(&node->rb_node, &pd->bio_queue); - mempool_free(node, &pd->rb_pool); - pd->bio_queue_size--; - BUG_ON(pd->bio_queue_size < 0); -} - -/* - * Find the first node in the pd->bio_queue rb tree with a starting sector >= s. 
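The lookup described in the comment above is an open-coded lower-bound walk over the pd->bio_queue rb tree. Purely as a sketch, assuming the generic rb_find_first() helper from <linux/rbtree.h> and reusing the driver's struct pkt_rb_node, the same ">= s" semantics could be expressed as below; pkt_lower_bound_cmp and pkt_rbtree_lower_bound are hypothetical names, not part of the driver.

#include <linux/rbtree.h>

/* Return 0 for "candidate, keep searching left" and 1 for "too small, go
 * right", so rb_find_first() yields the leftmost node with bi_sector >= *key.
 */
static int pkt_lower_bound_cmp(const void *key, const struct rb_node *node)
{
        sector_t s = *(const sector_t *)key;
        const struct pkt_rb_node *p = rb_entry(node, struct pkt_rb_node, rb_node);

        return p->bio->bi_iter.bi_sector >= s ? 0 : 1;
}

static struct pkt_rb_node *pkt_rbtree_lower_bound(struct pktcdvd_device *pd, sector_t s)
{
        struct rb_node *n = rb_find_first(&s, &pd->bio_queue, pkt_lower_bound_cmp);

        return n ? rb_entry(n, struct pkt_rb_node, rb_node) : NULL;
}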
- */ -static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) -{ - struct rb_node *n = pd->bio_queue.rb_node; - struct rb_node *next; - struct pkt_rb_node *tmp; - - if (!n) { - BUG_ON(pd->bio_queue_size > 0); - return NULL; - } - - for (;;) { - tmp = rb_entry(n, struct pkt_rb_node, rb_node); - if (s <= tmp->bio->bi_iter.bi_sector) - next = n->rb_left; - else - next = n->rb_right; - if (!next) - break; - n = next; - } - - if (s > tmp->bio->bi_iter.bi_sector) { - tmp = pkt_rbtree_next(tmp); - if (!tmp) - return NULL; - } - BUG_ON(s > tmp->bio->bi_iter.bi_sector); - return tmp; -} - -/* - * Insert a node into the pd->bio_queue rb tree. - */ -static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node) -{ - struct rb_node **p = &pd->bio_queue.rb_node; - struct rb_node *parent = NULL; - sector_t s = node->bio->bi_iter.bi_sector; - struct pkt_rb_node *tmp; - - while (*p) { - parent = *p; - tmp = rb_entry(parent, struct pkt_rb_node, rb_node); - if (s < tmp->bio->bi_iter.bi_sector) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&node->rb_node, parent, p); - rb_insert_color(&node->rb_node, &pd->bio_queue); - pd->bio_queue_size++; -} - -/* - * Send a packet_command to the underlying block device and - * wait for completion. - */ -static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) -{ - struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file)); - struct scsi_cmnd *scmd; - struct request *rq; - int ret = 0; - - rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? - REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); - if (IS_ERR(rq)) - return PTR_ERR(rq); - scmd = blk_mq_rq_to_pdu(rq); - - if (cgc->buflen) { - ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen, - GFP_NOIO); - if (ret) - goto out; - } - - scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]); - memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE); - - rq->timeout = 60*HZ; - if (cgc->quiet) - rq->rq_flags |= RQF_QUIET; - - blk_execute_rq(rq, false); - if (scmd->result) - ret = -EIO; -out: - blk_mq_free_request(rq); - return ret; -} - -static const char *sense_key_string(__u8 index) -{ - static const char * const info[] = { - "No sense", "Recovered error", "Not ready", - "Medium error", "Hardware error", "Illegal request", - "Unit attention", "Data protect", "Blank check", - }; - - return index < ARRAY_SIZE(info) ? info[index] : "INVALID"; -} - -/* - * A generic sense dump / resolve mechanism should be implemented across - * all ATAPI + SCSI devices. - */ -static void pkt_dump_sense(struct pktcdvd_device *pd, - struct packet_command *cgc) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct scsi_sense_hdr *sshdr = cgc->sshdr; - - if (sshdr) - dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n", - CDROM_PACKET_SIZE, cgc->cmd, - sshdr->sense_key, sshdr->asc, sshdr->ascq, - sense_key_string(sshdr->sense_key)); - else - dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); -} - -/* - * flush the drive cache to media - */ -static int pkt_flush_cache(struct pktcdvd_device *pd) -{ - struct packet_command cgc; - - init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.cmd[0] = GPCMD_FLUSH_CACHE; - cgc.quiet = 1; - - /* - * the IMMED bit -- we default to not setting it, although that - * would allow a much faster close, this is safer - */ -#if 0 - cgc.cmd[1] = 1 << 1; -#endif - return pkt_generic_packet(pd, &cgc); -} - -/* - * speed is given as the normal factor, e.g. 
4 for 4x - */ -static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, - unsigned write_speed, unsigned read_speed) -{ - struct packet_command cgc; - struct scsi_sense_hdr sshdr; - int ret; - - init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.sshdr = &sshdr; - cgc.cmd[0] = GPCMD_SET_SPEED; - put_unaligned_be16(read_speed, &cgc.cmd[2]); - put_unaligned_be16(write_speed, &cgc.cmd[4]); - - ret = pkt_generic_packet(pd, &cgc); - if (ret) - pkt_dump_sense(pd, &cgc); - - return ret; -} - -/* - * Queue a bio for processing by the low-level CD device. Must be called - * from process context. - */ -static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) -{ - /* - * Some CDRW drives can not handle writes larger than one packet, - * even if the size is a multiple of the packet size. - */ - bio->bi_opf |= REQ_NOMERGE; - - spin_lock(&pd->iosched.lock); - if (bio_data_dir(bio) == READ) - bio_list_add(&pd->iosched.read_queue, bio); - else - bio_list_add(&pd->iosched.write_queue, bio); - spin_unlock(&pd->iosched.lock); - - atomic_set(&pd->iosched.attention, 1); - wake_up(&pd->wqueue); -} - -/* - * Process the queued read/write requests. This function handles special - * requirements for CDRW drives: - * - A cache flush command must be inserted before a read request if the - * previous request was a write. - * - Switching between reading and writing is slow, so don't do it more often - * than necessary. - * - Optimize for throughput at the expense of latency. This means that streaming - * writes will never be interrupted by a read, but if the drive has to seek - * before the next write, switch to reading instead if there are any pending - * read requests. - * - Set the read speed according to current usage pattern. When only reading - * from the device, it's best to use the highest possible read speed, but - * when switching often between reading and writing, it's better to have the - * same read and write speeds. 
- */ -static void pkt_iosched_process_queue(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - - if (atomic_read(&pd->iosched.attention) == 0) - return; - atomic_set(&pd->iosched.attention, 0); - - for (;;) { - struct bio *bio; - int reads_queued, writes_queued; - - spin_lock(&pd->iosched.lock); - reads_queued = !bio_list_empty(&pd->iosched.read_queue); - writes_queued = !bio_list_empty(&pd->iosched.write_queue); - spin_unlock(&pd->iosched.lock); - - if (!reads_queued && !writes_queued) - break; - - if (pd->iosched.writing) { - int need_write_seek = 1; - spin_lock(&pd->iosched.lock); - bio = bio_list_peek(&pd->iosched.write_queue); - spin_unlock(&pd->iosched.lock); - if (bio && (bio->bi_iter.bi_sector == - pd->iosched.last_write)) - need_write_seek = 0; - if (need_write_seek && reads_queued) { - if (atomic_read(&pd->cdrw.pending_bios) > 0) { - dev_dbg(ddev, "write, waiting\n"); - break; - } - pkt_flush_cache(pd); - pd->iosched.writing = 0; - } - } else { - if (!reads_queued && writes_queued) { - if (atomic_read(&pd->cdrw.pending_bios) > 0) { - dev_dbg(ddev, "read, waiting\n"); - break; - } - pd->iosched.writing = 1; - } - } - - spin_lock(&pd->iosched.lock); - if (pd->iosched.writing) - bio = bio_list_pop(&pd->iosched.write_queue); - else - bio = bio_list_pop(&pd->iosched.read_queue); - spin_unlock(&pd->iosched.lock); - - if (!bio) - continue; - - if (bio_data_dir(bio) == READ) - pd->iosched.successive_reads += - bio->bi_iter.bi_size >> 10; - else { - pd->iosched.successive_reads = 0; - pd->iosched.last_write = bio_end_sector(bio); - } - if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { - if (pd->read_speed == pd->write_speed) { - pd->read_speed = MAX_SPEED; - pkt_set_speed(pd, pd->write_speed, pd->read_speed); - } - } else { - if (pd->read_speed != pd->write_speed) { - pd->read_speed = pd->write_speed; - pkt_set_speed(pd, pd->write_speed, pd->read_speed); - } - } - - atomic_inc(&pd->cdrw.pending_bios); - submit_bio_noacct(bio); - } -} - -/* - * Special care is needed if the underlying block device has a small - * max_phys_segments value. 
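To make the segment check in pkt_set_segment_merging() below concrete, here is a worked example; the numbers are illustrative only, assuming the usual 32 KiB fixed packet and 4 KiB pages.

/*
 * pd->settings.size is kept in 512-byte sectors, so a 32 KiB packet means
 * settings.size = 64 and:
 *
 *   (settings.size << 9) / CD_FRAMESIZE = 32768 / 2048 = 16
 *       -> one bvec per 2048-byte frame needs a queue with >= 16 segments
 *
 *   (settings.size << 9) / PAGE_SIZE    = 32768 / 4096 = 8
 *       -> the PACKET_MERGE_SEGS fallback needs only >= 8 segments (handled,
 *          per the function's own comment, at the cost of some extra memory
 *          copies during writes)
 */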
- */ -static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) -{ - struct device *ddev = disk_to_dev(pd->disk); - - if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) { - /* - * The cdrom device can handle one segment/frame - */ - clear_bit(PACKET_MERGE_SEGS, &pd->flags); - return 0; - } - - if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) { - /* - * We can handle this case at the expense of some extra memory - * copies during write operations - */ - set_bit(PACKET_MERGE_SEGS, &pd->flags); - return 0; - } - - dev_err(ddev, "cdrom max_phys_segments too small\n"); - return -EIO; -} - -static void pkt_end_io_read(struct bio *bio) -{ - struct packet_data *pkt = bio->bi_private; - struct pktcdvd_device *pd = pkt->pd; - BUG_ON(!pd); - - dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n", - bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status); - - if (bio->bi_status) - atomic_inc(&pkt->io_errors); - bio_uninit(bio); - if (atomic_dec_and_test(&pkt->io_wait)) { - atomic_inc(&pkt->run_sm); - wake_up(&pd->wqueue); - } - pkt_bio_finished(pd); -} - -static void pkt_end_io_packet_write(struct bio *bio) -{ - struct packet_data *pkt = bio->bi_private; - struct pktcdvd_device *pd = pkt->pd; - BUG_ON(!pd); - - dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status); - - pd->stats.pkt_ended++; - - bio_uninit(bio); - pkt_bio_finished(pd); - atomic_dec(&pkt->io_wait); - atomic_inc(&pkt->run_sm); - wake_up(&pd->wqueue); -} - -/* - * Schedule reads for the holes in a packet - */ -static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) -{ - struct device *ddev = disk_to_dev(pd->disk); - int frames_read = 0; - struct bio *bio; - int f; - char written[PACKET_MAX_SIZE]; - - BUG_ON(bio_list_empty(&pkt->orig_bios)); - - atomic_set(&pkt->io_wait, 0); - atomic_set(&pkt->io_errors, 0); - - /* - * Figure out which frames we need to read before we can write. - */ - memset(written, 0, sizeof(written)); - spin_lock(&pkt->lock); - bio_list_for_each(bio, &pkt->orig_bios) { - int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / - (CD_FRAMESIZE >> 9); - int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; - pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); - BUG_ON(first_frame < 0); - BUG_ON(first_frame + num_frames > pkt->frames); - for (f = first_frame; f < first_frame + num_frames; f++) - written[f] = 1; - } - spin_unlock(&pkt->lock); - - if (pkt->cache_valid) { - dev_dbg(ddev, "zone %llx cached\n", pkt->sector); - goto out_account; - } - - /* - * Schedule reads for missing parts of the packet. 
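The read scheduling that follows targets the packet's page array; a brief sketch of the layout it relies on, assuming 4 KiB pages (so FRAMES_PER_PAGE is 2):

/*
 * pkt->pages[] holds the packet payload, two 2048-byte CD frames per page:
 *
 *   page   = (f * CD_FRAMESIZE) / PAGE_SIZE     e.g. frame 5 -> pages[2]
 *   offset = (f * CD_FRAMESIZE) % PAGE_SIZE     e.g. frame 5 -> offset 2048
 *
 * so a 16-frame (32 KiB) packet occupies pages[0..7], and each missing frame
 * gets its own single-vector read bio pointing into the matching page slot.
 */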
- */ - for (f = 0; f < pkt->frames; f++) { - int p, offset; - - if (written[f]) - continue; - - bio = pkt->r_bios[f]; - bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1, - REQ_OP_READ); - bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); - bio->bi_end_io = pkt_end_io_read; - bio->bi_private = pkt; - - p = (f * CD_FRAMESIZE) / PAGE_SIZE; - offset = (f * CD_FRAMESIZE) % PAGE_SIZE; - dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f, - pkt->pages[p], offset); - if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) - BUG(); - - atomic_inc(&pkt->io_wait); - pkt_queue_bio(pd, bio); - frames_read++; - } - -out_account: - dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector); - pd->stats.pkt_started++; - pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); -} - -/* - * Find a packet matching zone, or the least recently used packet if - * there is no match. - */ -static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone) -{ - struct packet_data *pkt; - - list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) { - if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { - list_del_init(&pkt->list); - if (pkt->sector != zone) - pkt->cache_valid = 0; - return pkt; - } - } - BUG(); - return NULL; -} - -static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) -{ - if (pkt->cache_valid) { - list_add(&pkt->list, &pd->cdrw.pkt_free_list); - } else { - list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); - } -} - -static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt, - enum packet_data_state state) -{ - static const char *state_name[] = { - "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED" - }; - enum packet_data_state old_state = pkt->state; - - dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n", - pkt->id, pkt->sector, state_name[old_state], state_name[state]); - - pkt->state = state; -} - -/* - * Scan the work queue to see if we can start a new packet. - * returns non-zero if any work was done. - */ -static int pkt_handle_queue(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct packet_data *pkt, *p; - struct bio *bio = NULL; - sector_t zone = 0; /* Suppress gcc warning */ - struct pkt_rb_node *node, *first_node; - struct rb_node *n; - - atomic_set(&pd->scan_queue, 0); - - if (list_empty(&pd->cdrw.pkt_free_list)) { - dev_dbg(ddev, "no pkt\n"); - return 0; - } - - /* - * Try to find a zone we are not already working on. 
- */ - spin_lock(&pd->lock); - first_node = pkt_rbtree_find(pd, pd->current_sector); - if (!first_node) { - n = rb_first(&pd->bio_queue); - if (n) - first_node = rb_entry(n, struct pkt_rb_node, rb_node); - } - node = first_node; - while (node) { - bio = node->bio; - zone = get_zone(bio->bi_iter.bi_sector, pd); - list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { - if (p->sector == zone) { - bio = NULL; - goto try_next_bio; - } - } - break; -try_next_bio: - node = pkt_rbtree_next(node); - if (!node) { - n = rb_first(&pd->bio_queue); - if (n) - node = rb_entry(n, struct pkt_rb_node, rb_node); - } - if (node == first_node) - node = NULL; - } - spin_unlock(&pd->lock); - if (!bio) { - dev_dbg(ddev, "no bio\n"); - return 0; - } - - pkt = pkt_get_packet_data(pd, zone); - - pd->current_sector = zone + pd->settings.size; - pkt->sector = zone; - BUG_ON(pkt->frames != pd->settings.size >> 2); - pkt->write_size = 0; - - /* - * Scan work queue for bios in the same zone and link them - * to this packet. - */ - spin_lock(&pd->lock); - dev_dbg(ddev, "looking for zone %llx\n", zone); - while ((node = pkt_rbtree_find(pd, zone)) != NULL) { - sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd); - - bio = node->bio; - dev_dbg(ddev, "found zone=%llx\n", tmp); - if (tmp != zone) - break; - pkt_rbtree_erase(pd, node); - spin_lock(&pkt->lock); - bio_list_add(&pkt->orig_bios, bio); - pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE; - spin_unlock(&pkt->lock); - } - /* check write congestion marks, and if bio_queue_size is - * below, wake up any waiters - */ - if (pd->congested && - pd->bio_queue_size <= pd->write_congestion_off) { - pd->congested = false; - wake_up_var(&pd->congested); - } - spin_unlock(&pd->lock); - - pkt->sleep_time = max(PACKET_WAIT_TIME, 1); - pkt_set_state(ddev, pkt, PACKET_WAITING_STATE); - atomic_set(&pkt->run_sm, 1); - - spin_lock(&pd->cdrw.active_list_lock); - list_add(&pkt->list, &pd->cdrw.pkt_active_list); - spin_unlock(&pd->cdrw.active_list_lock); - - return 1; -} - -/** - * bio_list_copy_data - copy contents of data buffers from one chain of bios to - * another - * @src: source bio list - * @dst: destination bio list - * - * Stops when it reaches the end of either the @src list or @dst list - that is, - * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of - * bios). - */ -static void bio_list_copy_data(struct bio *dst, struct bio *src) -{ - struct bvec_iter src_iter = src->bi_iter; - struct bvec_iter dst_iter = dst->bi_iter; - - while (1) { - if (!src_iter.bi_size) { - src = src->bi_next; - if (!src) - break; - - src_iter = src->bi_iter; - } - - if (!dst_iter.bi_size) { - dst = dst->bi_next; - if (!dst) - break; - - dst_iter = dst->bi_iter; - } - - bio_copy_data_iter(dst, &dst_iter, src, &src_iter); - } -} - -/* - * Assemble a bio to write one packet and queue the bio for processing - * by the underlying block device. - */ -static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) -{ - struct device *ddev = disk_to_dev(pd->disk); - int f; - - bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs, - pkt->frames, REQ_OP_WRITE); - pkt->w_bio->bi_iter.bi_sector = pkt->sector; - pkt->w_bio->bi_end_io = pkt_end_io_packet_write; - pkt->w_bio->bi_private = pkt; - - /* XXX: locking? 
*/ - for (f = 0; f < pkt->frames; f++) { - struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; - unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE; - - if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset)) - BUG(); - } - dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt); - - /* - * Fill-in bvec with data from orig_bios. - */ - spin_lock(&pkt->lock); - bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head); - - pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE); - spin_unlock(&pkt->lock); - - dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector); - - if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) - pkt->cache_valid = 1; - else - pkt->cache_valid = 0; - - /* Start the write request */ - atomic_set(&pkt->io_wait, 1); - pkt_queue_bio(pd, pkt->w_bio); -} - -static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status) -{ - struct bio *bio; - - if (status) - pkt->cache_valid = 0; - - /* Finish all bios corresponding to this packet */ - while ((bio = bio_list_pop(&pkt->orig_bios))) { - bio->bi_status = status; - bio_endio(bio); - } -} - -static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) -{ - struct device *ddev = disk_to_dev(pd->disk); - - dev_dbg(ddev, "pkt %d\n", pkt->id); - - for (;;) { - switch (pkt->state) { - case PACKET_WAITING_STATE: - if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0)) - return; - - pkt->sleep_time = 0; - pkt_gather_data(pd, pkt); - pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE); - break; - - case PACKET_READ_WAIT_STATE: - if (atomic_read(&pkt->io_wait) > 0) - return; - - if (atomic_read(&pkt->io_errors) > 0) { - pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE); - } else { - pkt_start_write(pd, pkt); - } - break; - - case PACKET_WRITE_WAIT_STATE: - if (atomic_read(&pkt->io_wait) > 0) - return; - - if (!pkt->w_bio->bi_status) { - pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE); - } else { - pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE); - } - break; - - case PACKET_RECOVERY_STATE: - dev_dbg(ddev, "No recovery possible\n"); - pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE); - break; - - case PACKET_FINISHED_STATE: - pkt_finish_packet(pkt, pkt->w_bio->bi_status); - return; - - default: - BUG(); - break; - } - } -} - -static void pkt_handle_packets(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct packet_data *pkt, *next; - - /* - * Run state machine for active packets - */ - list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { - if (atomic_read(&pkt->run_sm) > 0) { - atomic_set(&pkt->run_sm, 0); - pkt_run_state_machine(pd, pkt); - } - } - - /* - * Move no longer active packets to the free list - */ - spin_lock(&pd->cdrw.active_list_lock); - list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) { - if (pkt->state == PACKET_FINISHED_STATE) { - list_del(&pkt->list); - pkt_put_packet_data(pd, pkt); - pkt_set_state(ddev, pkt, PACKET_IDLE_STATE); - atomic_set(&pd->scan_queue, 1); - } - } - spin_unlock(&pd->cdrw.active_list_lock); -} - -/* - * kcdrwd is woken up when writes have been queued for one of our - * registered devices - */ -static int kcdrwd(void *foobar) -{ - struct pktcdvd_device *pd = foobar; - struct device *ddev = disk_to_dev(pd->disk); - struct packet_data *pkt; - int states[PACKET_NUM_STATES]; - long min_sleep_time, residue; - - set_user_nice(current, MIN_NICE); - set_freezable(); - - for (;;) { - DECLARE_WAITQUEUE(wait, current); - - /* - * Wait until there is something to do 
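The sleep bookkeeping in the wait loop below is easier to follow with a small worked example; the numbers are illustrative, the variables are the ones used in kcdrwd().

/*
 * Suppose two active packets are waiting with sleep_time = 2 and 5 jiffies.
 * min_sleep_time is then 2, so schedule_timeout(2) is called.  If the thread
 * is woken early after 1 jiffy, residue == 1 and each packet's budget is
 * reduced by the time actually slept:
 *
 *   sleep_time -= min_sleep_time - residue;     i.e. 2 - 1 = 1 jiffy
 *
 * leaving 1 and 4.  A packet whose budget reaches zero gets run_sm bumped so
 * its state machine is run on the next pass.
 */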
- */ - add_wait_queue(&pd->wqueue, &wait); - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - - /* Check if we need to run pkt_handle_queue */ - if (atomic_read(&pd->scan_queue) > 0) - goto work_to_do; - - /* Check if we need to run the state machine for some packet */ - list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { - if (atomic_read(&pkt->run_sm) > 0) - goto work_to_do; - } - - /* Check if we need to process the iosched queues */ - if (atomic_read(&pd->iosched.attention) != 0) - goto work_to_do; - - /* Otherwise, go to sleep */ - pkt_count_states(pd, states); - dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", - states[0], states[1], states[2], states[3], states[4], states[5]); - - min_sleep_time = MAX_SCHEDULE_TIMEOUT; - list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { - if (pkt->sleep_time && pkt->sleep_time < min_sleep_time) - min_sleep_time = pkt->sleep_time; - } - - dev_dbg(ddev, "sleeping\n"); - residue = schedule_timeout(min_sleep_time); - dev_dbg(ddev, "wake up\n"); - - /* make swsusp happy with our thread */ - try_to_freeze(); - - list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { - if (!pkt->sleep_time) - continue; - pkt->sleep_time -= min_sleep_time - residue; - if (pkt->sleep_time <= 0) { - pkt->sleep_time = 0; - atomic_inc(&pkt->run_sm); - } - } - - if (kthread_should_stop()) - break; - } -work_to_do: - set_current_state(TASK_RUNNING); - remove_wait_queue(&pd->wqueue, &wait); - - if (kthread_should_stop()) - break; - - /* - * if pkt_handle_queue returns true, we can queue - * another request. - */ - while (pkt_handle_queue(pd)) - ; - - /* - * Handle packet state machine - */ - pkt_handle_packets(pd); - - /* - * Handle iosched queues - */ - pkt_iosched_process_queue(pd); - } - - return 0; -} - -static void pkt_print_settings(struct pktcdvd_device *pd) -{ - dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n", - pd->settings.fp ? "Fixed" : "Variable", - pd->settings.size >> 2, - pd->settings.block_mode == 8 ? 
'1' : '2'); -} - -static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control) -{ - memset(cgc->cmd, 0, sizeof(cgc->cmd)); - - cgc->cmd[0] = GPCMD_MODE_SENSE_10; - cgc->cmd[2] = page_code | (page_control << 6); - put_unaligned_be16(cgc->buflen, &cgc->cmd[7]); - cgc->data_direction = CGC_DATA_READ; - return pkt_generic_packet(pd, cgc); -} - -static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc) -{ - memset(cgc->cmd, 0, sizeof(cgc->cmd)); - memset(cgc->buffer, 0, 2); - cgc->cmd[0] = GPCMD_MODE_SELECT_10; - cgc->cmd[1] = 0x10; /* PF */ - put_unaligned_be16(cgc->buflen, &cgc->cmd[7]); - cgc->data_direction = CGC_DATA_WRITE; - return pkt_generic_packet(pd, cgc); -} - -static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) -{ - struct packet_command cgc; - int ret; - - /* set up command and get the disc info */ - init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); - cgc.cmd[0] = GPCMD_READ_DISC_INFO; - cgc.cmd[8] = cgc.buflen = 2; - cgc.quiet = 1; - - ret = pkt_generic_packet(pd, &cgc); - if (ret) - return ret; - - /* not all drives have the same disc_info length, so requeue - * packet with the length the drive tells us it can supply - */ - cgc.buflen = be16_to_cpu(di->disc_information_length) + - sizeof(di->disc_information_length); - - if (cgc.buflen > sizeof(disc_information)) - cgc.buflen = sizeof(disc_information); - - cgc.cmd[8] = cgc.buflen; - return pkt_generic_packet(pd, &cgc); -} - -static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti) -{ - struct packet_command cgc; - int ret; - - init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); - cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; - cgc.cmd[1] = type & 3; - put_unaligned_be16(track, &cgc.cmd[4]); - cgc.cmd[8] = 8; - cgc.quiet = 1; - - ret = pkt_generic_packet(pd, &cgc); - if (ret) - return ret; - - cgc.buflen = be16_to_cpu(ti->track_information_length) + - sizeof(ti->track_information_length); - - if (cgc.buflen > sizeof(track_information)) - cgc.buflen = sizeof(track_information); - - cgc.cmd[8] = cgc.buflen; - return pkt_generic_packet(pd, &cgc); -} - -static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, - long *last_written) -{ - disc_information di; - track_information ti; - __u32 last_track; - int ret; - - ret = pkt_get_disc_info(pd, &di); - if (ret) - return ret; - - last_track = (di.last_track_msb << 8) | di.last_track_lsb; - ret = pkt_get_track_info(pd, last_track, 1, &ti); - if (ret) - return ret; - - /* if this track is blank, try the previous. */ - if (ti.blank) { - last_track--; - ret = pkt_get_track_info(pd, last_track, 1, &ti); - if (ret) - return ret; - } - - /* if last recorded field is valid, return it. 
*/ - if (ti.lra_v) { - *last_written = be32_to_cpu(ti.last_rec_address); - } else { - /* make it up instead */ - *last_written = be32_to_cpu(ti.track_start) + - be32_to_cpu(ti.track_size); - if (ti.free_blocks) - *last_written -= (be32_to_cpu(ti.free_blocks) + 7); - } - return 0; -} - -/* - * write mode select package based on pd->settings - */ -static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct packet_command cgc; - struct scsi_sense_hdr sshdr; - write_param_page *wp; - char buffer[128]; - int ret, size; - - /* doesn't apply to DVD+RW or DVD-RAM */ - if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12)) - return 0; - - memset(buffer, 0, sizeof(buffer)); - init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ); - cgc.sshdr = &sshdr; - ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); - if (ret) { - pkt_dump_sense(pd, &cgc); - return ret; - } - - size = 2 + get_unaligned_be16(&buffer[0]); - pd->mode_offset = get_unaligned_be16(&buffer[6]); - if (size > sizeof(buffer)) - size = sizeof(buffer); - - /* - * now get it all - */ - init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ); - cgc.sshdr = &sshdr; - ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); - if (ret) { - pkt_dump_sense(pd, &cgc); - return ret; - } - - /* - * write page is offset header + block descriptor length - */ - wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; - - wp->fp = pd->settings.fp; - wp->track_mode = pd->settings.track_mode; - wp->write_type = pd->settings.write_type; - wp->data_block_type = pd->settings.block_mode; - - wp->multi_session = 0; - -#ifdef PACKET_USE_LS - wp->link_size = 7; - wp->ls_v = 1; -#endif - - if (wp->data_block_type == PACKET_BLOCK_MODE1) { - wp->session_format = 0; - wp->subhdr2 = 0x20; - } else if (wp->data_block_type == PACKET_BLOCK_MODE2) { - wp->session_format = 0x20; - wp->subhdr2 = 8; -#if 0 - wp->mcn[0] = 0x80; - memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1); -#endif - } else { - /* - * paranoia - */ - dev_err(ddev, "write mode wrong %d\n", wp->data_block_type); - return 1; - } - wp->packet_size = cpu_to_be32(pd->settings.size >> 2); - - cgc.buflen = cgc.cmd[8] = size; - ret = pkt_mode_select(pd, &cgc); - if (ret) { - pkt_dump_sense(pd, &cgc); - return ret; - } - - pkt_print_settings(pd); - return 0; -} - -/* - * 1 -- we can write to this track, 0 -- we can't - */ -static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) -{ - struct device *ddev = disk_to_dev(pd->disk); - - switch (pd->mmc3_profile) { - case 0x1a: /* DVD+RW */ - case 0x12: /* DVD-RAM */ - /* The track is always writable on DVD+RW/DVD-RAM */ - return 1; - default: - break; - } - - if (!ti->packet || !ti->fp) - return 0; - - /* - * "good" settings as per Mt Fuji. 
- */ - if (ti->rt == 0 && ti->blank == 0) - return 1; - - if (ti->rt == 0 && ti->blank == 1) - return 1; - - if (ti->rt == 1 && ti->blank == 0) - return 1; - - dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); - return 0; -} - -/* - * 1 -- we can write to this disc, 0 -- we can't - */ -static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) -{ - struct device *ddev = disk_to_dev(pd->disk); - - switch (pd->mmc3_profile) { - case 0x0a: /* CD-RW */ - case 0xffff: /* MMC3 not supported */ - break; - case 0x1a: /* DVD+RW */ - case 0x13: /* DVD-RW */ - case 0x12: /* DVD-RAM */ - return 1; - default: - dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile); - return 0; - } - - /* - * for disc type 0xff we should probably reserve a new track. - * but i'm not sure, should we leave this to user apps? probably. - */ - if (di->disc_type == 0xff) { - dev_notice(ddev, "unknown disc - no track?\n"); - return 0; - } - - if (di->disc_type != 0x20 && di->disc_type != 0) { - dev_err(ddev, "wrong disc type (%x)\n", di->disc_type); - return 0; - } - - if (di->erasable == 0) { - dev_err(ddev, "disc not erasable\n"); - return 0; - } - - if (di->border_status == PACKET_SESSION_RESERVED) { - dev_err(ddev, "can't write to last track (reserved)\n"); - return 0; - } - - return 1; -} - -static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct packet_command cgc; - unsigned char buf[12]; - disc_information di; - track_information ti; - int ret, track; - - init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); - cgc.cmd[0] = GPCMD_GET_CONFIGURATION; - cgc.cmd[8] = 8; - ret = pkt_generic_packet(pd, &cgc); - pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]); - - memset(&di, 0, sizeof(disc_information)); - memset(&ti, 0, sizeof(track_information)); - - ret = pkt_get_disc_info(pd, &di); - if (ret) { - dev_err(ddev, "failed get_disc\n"); - return ret; - } - - if (!pkt_writable_disc(pd, &di)) - return -EROFS; - - pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; - - track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ - ret = pkt_get_track_info(pd, track, 1, &ti); - if (ret) { - dev_err(ddev, "failed get_track\n"); - return ret; - } - - if (!pkt_writable_track(pd, &ti)) { - dev_err(ddev, "can't write to this track\n"); - return -EROFS; - } - - /* - * we keep packet size in 512 byte units, makes it easier to - * deal with request calculations. - */ - pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; - if (pd->settings.size == 0) { - dev_notice(ddev, "detected zero packet size!\n"); - return -ENXIO; - } - if (pd->settings.size > PACKET_MAX_SECTORS) { - dev_err(ddev, "packet size is too big\n"); - return -EROFS; - } - pd->settings.fp = ti.fp; - pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); - - if (ti.nwa_v) { - pd->nwa = be32_to_cpu(ti.next_writable); - set_bit(PACKET_NWA_VALID, &pd->flags); - } - - /* - * in theory we could use lra on -RW media as well and just zero - * blocks that haven't been written yet, but in practice that - * is just a no-go. we'll use that for -R, naturally. 
- */ - if (ti.lra_v) { - pd->lra = be32_to_cpu(ti.last_rec_address); - set_bit(PACKET_LRA_VALID, &pd->flags); - } else { - pd->lra = 0xffffffff; - set_bit(PACKET_LRA_VALID, &pd->flags); - } - - /* - * fine for now - */ - pd->settings.link_loss = 7; - pd->settings.write_type = 0; /* packet */ - pd->settings.track_mode = ti.track_mode; - - /* - * mode1 or mode2 disc - */ - switch (ti.data_mode) { - case PACKET_MODE1: - pd->settings.block_mode = PACKET_BLOCK_MODE1; - break; - case PACKET_MODE2: - pd->settings.block_mode = PACKET_BLOCK_MODE2; - break; - default: - dev_err(ddev, "unknown data mode\n"); - return -EROFS; - } - return 0; -} - -/* - * enable/disable write caching on drive - */ -static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct packet_command cgc; - struct scsi_sense_hdr sshdr; - unsigned char buf[64]; - bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE); - int ret; - - init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); - cgc.sshdr = &sshdr; - cgc.buflen = pd->mode_offset + 12; - - /* - * caching mode page might not be there, so quiet this command - */ - cgc.quiet = 1; - - ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0); - if (ret) - return ret; - - /* - * use drive write caching -- we need deferred error handling to be - * able to successfully recover with this option (drive will return good - * status as soon as the cdb is validated). - */ - buf[pd->mode_offset + 10] |= (set << 2); - - cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]); - ret = pkt_mode_select(pd, &cgc); - if (ret) { - dev_err(ddev, "write caching control failed\n"); - pkt_dump_sense(pd, &cgc); - } else if (!ret && set) - dev_notice(ddev, "enabled write caching\n"); - return ret; -} - -static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) -{ - struct packet_command cgc; - - init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; - cgc.cmd[4] = lockflag ? 1 : 0; - return pkt_generic_packet(pd, &cgc); -} - -/* - * Returns drive maximum write speed - */ -static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, - unsigned *write_speed) -{ - struct packet_command cgc; - struct scsi_sense_hdr sshdr; - unsigned char buf[256+18]; - unsigned char *cap_buf; - int ret, offset; - - cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; - init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN); - cgc.sshdr = &sshdr; - - ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); - if (ret) { - cgc.buflen = pd->mode_offset + cap_buf[1] + 2 + - sizeof(struct mode_page_header); - ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); - if (ret) { - pkt_dump_sense(pd, &cgc); - return ret; - } - } - - offset = 20; /* Obsoleted field, used by older drives */ - if (cap_buf[1] >= 28) - offset = 28; /* Current write speed selected */ - if (cap_buf[1] >= 30) { - /* If the drive reports at least one "Logical Unit Write - * Speed Performance Descriptor Block", use the information - * in the first block. 
(contains the highest speed) - */ - int num_spdb = get_unaligned_be16(&cap_buf[30]); - if (num_spdb > 0) - offset = 34; - } - - *write_speed = get_unaligned_be16(&cap_buf[offset]); - return 0; -} - -/* These tables from cdrecord - I don't have orange book */ -/* standard speed CD-RW (1-4x) */ -static char clv_to_speed[16] = { - /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ - 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 -}; -/* high speed CD-RW (-10x) */ -static char hs_clv_to_speed[16] = { - /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ - 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 -}; -/* ultra high speed CD-RW */ -static char us_clv_to_speed[16] = { - /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ - 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0 -}; - -/* - * reads the maximum media speed from ATIP - */ -static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, - unsigned *speed) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct packet_command cgc; - struct scsi_sense_hdr sshdr; - unsigned char buf[64]; - unsigned int size, st, sp; - int ret; - - init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ); - cgc.sshdr = &sshdr; - cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; - cgc.cmd[1] = 2; - cgc.cmd[2] = 4; /* READ ATIP */ - cgc.cmd[8] = 2; - ret = pkt_generic_packet(pd, &cgc); - if (ret) { - pkt_dump_sense(pd, &cgc); - return ret; - } - size = 2 + get_unaligned_be16(&buf[0]); - if (size > sizeof(buf)) - size = sizeof(buf); - - init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); - cgc.sshdr = &sshdr; - cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; - cgc.cmd[1] = 2; - cgc.cmd[2] = 4; - cgc.cmd[8] = size; - ret = pkt_generic_packet(pd, &cgc); - if (ret) { - pkt_dump_sense(pd, &cgc); - return ret; - } - - if (!(buf[6] & 0x40)) { - dev_notice(ddev, "disc type is not CD-RW\n"); - return 1; - } - if (!(buf[6] & 0x4)) { - dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n"); - return 1; - } - - st = (buf[6] >> 3) & 0x7; /* disc sub-type */ - - sp = buf[16] & 0xf; /* max speed from ATIP A1 field */ - - /* Info from cdrecord */ - switch (st) { - case 0: /* standard speed */ - *speed = clv_to_speed[sp]; - break; - case 1: /* high speed */ - *speed = hs_clv_to_speed[sp]; - break; - case 2: /* ultra high speed */ - *speed = us_clv_to_speed[sp]; - break; - default: - dev_notice(ddev, "unknown disc sub-type %d\n", st); - return 1; - } - if (*speed) { - dev_info(ddev, "maximum media speed: %d\n", *speed); - return 0; - } else { - dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st); - return 1; - } -} - -static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - struct packet_command cgc; - struct scsi_sense_hdr sshdr; - int ret; - - dev_dbg(ddev, "Performing OPC\n"); - - init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.sshdr = &sshdr; - cgc.timeout = 60*HZ; - cgc.cmd[0] = GPCMD_SEND_OPC; - cgc.cmd[1] = 1; - ret = pkt_generic_packet(pd, &cgc); - if (ret) - pkt_dump_sense(pd, &cgc); - return ret; -} - -static int pkt_open_write(struct pktcdvd_device *pd) -{ - struct device *ddev = disk_to_dev(pd->disk); - int ret; - unsigned int write_speed, media_write_speed, read_speed; - - ret = pkt_probe_settings(pd); - if (ret) { - dev_dbg(ddev, "failed probe\n"); - return ret; - } - - ret = pkt_set_write_settings(pd); - if (ret) { - dev_notice(ddev, "failed saving write settings\n"); - return -EIO; - } - - pkt_write_caching(pd); - - ret = pkt_get_max_speed(pd, &write_speed); - if (ret) - write_speed = 16 * 177; - 
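The constants around the speed selection come from the MMC convention that drive speeds are reported in kB/s of raw 2352-byte sectors; a short worked conversion follows (the two helpers are made-up illustrations, not part of the driver).

/*
 * 1x CD speed = 75 raw sectors/s * 2352 bytes = 176400 B/s, i.e. ~176 kB/s,
 * which is why the fallback above uses 16 * 177 (~16x) and the "write speed
 * %ux" report below divides by 176.
 */
static inline unsigned int cd_x_to_kb(unsigned int x)  { return x * 177; }
static inline unsigned int cd_kb_to_x(unsigned int kb) { return kb / 176; }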
switch (pd->mmc3_profile) { - case 0x13: /* DVD-RW */ - case 0x1a: /* DVD+RW */ - case 0x12: /* DVD-RAM */ - dev_notice(ddev, "write speed %ukB/s\n", write_speed); - break; - default: - ret = pkt_media_speed(pd, &media_write_speed); - if (ret) - media_write_speed = 16; - write_speed = min(write_speed, media_write_speed * 177); - dev_notice(ddev, "write speed %ux\n", write_speed / 176); - break; - } - read_speed = write_speed; - - ret = pkt_set_speed(pd, write_speed, read_speed); - if (ret) { - dev_notice(ddev, "couldn't set write speed\n"); - return -EIO; - } - pd->write_speed = write_speed; - pd->read_speed = read_speed; - - ret = pkt_perform_opc(pd); - if (ret) - dev_notice(ddev, "Optimum Power Calibration failed\n"); - - return 0; -} - -/* - * called at open time. - */ -static int pkt_open_dev(struct pktcdvd_device *pd, bool write) -{ - struct device *ddev = disk_to_dev(pd->disk); - int ret; - long lba; - struct request_queue *q; - struct file *bdev_file; - - /* - * We need to re-open the cdrom device without O_NONBLOCK to be able - * to read/write from/to it. It is already opened in O_NONBLOCK mode - * so open should not fail. - */ - bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev, - BLK_OPEN_READ, pd, NULL); - if (IS_ERR(bdev_file)) { - ret = PTR_ERR(bdev_file); - goto out; - } - pd->f_open_bdev = bdev_file; - - ret = pkt_get_last_written(pd, &lba); - if (ret) { - dev_err(ddev, "pkt_get_last_written failed\n"); - goto out_putdev; - } - - set_capacity(pd->disk, lba << 2); - set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2); - - q = bdev_get_queue(file_bdev(pd->bdev_file)); - if (write) { - ret = pkt_open_write(pd); - if (ret) - goto out_putdev; - set_bit(PACKET_WRITABLE, &pd->flags); - } else { - pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); - clear_bit(PACKET_WRITABLE, &pd->flags); - } - - ret = pkt_set_segment_merging(pd, q); - if (ret) - goto out_putdev; - - if (write) { - if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { - dev_err(ddev, "not enough memory for buffers\n"); - ret = -ENOMEM; - goto out_putdev; - } - dev_info(ddev, "%lukB available on disc\n", lba << 1); - } - set_blocksize(bdev_file, CD_FRAMESIZE); - - return 0; - -out_putdev: - fput(bdev_file); -out: - return ret; -} - -/* - * called when the device is closed. makes sure that the device flushes - * the internal cache before we close. 
- */ -static void pkt_release_dev(struct pktcdvd_device *pd, int flush) -{ - struct device *ddev = disk_to_dev(pd->disk); - - if (flush && pkt_flush_cache(pd)) - dev_notice(ddev, "not flushing cache\n"); - - pkt_lock_door(pd, 0); - - pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); - fput(pd->f_open_bdev); - pd->f_open_bdev = NULL; - - pkt_shrink_pktlist(pd); -} - -static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) -{ - if (dev_minor >= MAX_WRITERS) - return NULL; - - dev_minor = array_index_nospec(dev_minor, MAX_WRITERS); - return pkt_devs[dev_minor]; -} - -static int pkt_open(struct gendisk *disk, blk_mode_t mode) -{ - struct pktcdvd_device *pd = NULL; - int ret; - - mutex_lock(&pktcdvd_mutex); - mutex_lock(&ctl_mutex); - pd = pkt_find_dev_from_minor(disk->first_minor); - if (!pd) { - ret = -ENODEV; - goto out; - } - BUG_ON(pd->refcnt < 0); - - pd->refcnt++; - if (pd->refcnt > 1) { - if ((mode & BLK_OPEN_WRITE) && - !test_bit(PACKET_WRITABLE, &pd->flags)) { - ret = -EBUSY; - goto out_dec; - } - } else { - ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE); - if (ret) - goto out_dec; - } - mutex_unlock(&ctl_mutex); - mutex_unlock(&pktcdvd_mutex); - return 0; - -out_dec: - pd->refcnt--; -out: - mutex_unlock(&ctl_mutex); - mutex_unlock(&pktcdvd_mutex); - return ret; -} - -static void pkt_release(struct gendisk *disk) -{ - struct pktcdvd_device *pd = disk->private_data; - - mutex_lock(&pktcdvd_mutex); - mutex_lock(&ctl_mutex); - pd->refcnt--; - BUG_ON(pd->refcnt < 0); - if (pd->refcnt == 0) { - int flush = test_bit(PACKET_WRITABLE, &pd->flags); - pkt_release_dev(pd, flush); - } - mutex_unlock(&ctl_mutex); - mutex_unlock(&pktcdvd_mutex); -} - - -static void pkt_end_io_read_cloned(struct bio *bio) -{ - struct packet_stacked_data *psd = bio->bi_private; - struct pktcdvd_device *pd = psd->pd; - - psd->bio->bi_status = bio->bi_status; - bio_put(bio); - bio_endio(psd->bio); - mempool_free(psd, &psd_pool); - pkt_bio_finished(pd); -} - -static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) -{ - struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio, - GFP_NOIO, &pkt_bio_set); - struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO); - - psd->pd = pd; - psd->bio = bio; - cloned_bio->bi_private = psd; - cloned_bio->bi_end_io = pkt_end_io_read_cloned; - pd->stats.secs_r += bio_sectors(bio); - pkt_queue_bio(pd, cloned_bio); -} - -static void pkt_make_request_write(struct bio *bio) -{ - struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data; - sector_t zone; - struct packet_data *pkt; - int was_empty, blocked_bio; - struct pkt_rb_node *node; - - zone = get_zone(bio->bi_iter.bi_sector, pd); - - /* - * If we find a matching packet in state WAITING or READ_WAIT, we can - * just append this bio to that packet. 
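A quick illustration of the zone arithmetic used here, assuming get_zone() rounds a sector down to its fixed-packet boundary (the track start offset is ignored in this example):

/*
 * With 32 KiB packets, pd->settings.size = 64 sectors, so for a write at
 * sector 1000:
 *
 *   zone = 1000 & ~(64 - 1) = 960
 *
 * Every bio touching sectors 960..1023 is therefore gathered into the same
 * packet_data and eventually written out as one packet.
 */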
- */ - spin_lock(&pd->cdrw.active_list_lock); - blocked_bio = 0; - list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { - if (pkt->sector == zone) { - spin_lock(&pkt->lock); - if ((pkt->state == PACKET_WAITING_STATE) || - (pkt->state == PACKET_READ_WAIT_STATE)) { - bio_list_add(&pkt->orig_bios, bio); - pkt->write_size += - bio->bi_iter.bi_size / CD_FRAMESIZE; - if ((pkt->write_size >= pkt->frames) && - (pkt->state == PACKET_WAITING_STATE)) { - atomic_inc(&pkt->run_sm); - wake_up(&pd->wqueue); - } - spin_unlock(&pkt->lock); - spin_unlock(&pd->cdrw.active_list_lock); - return; - } else { - blocked_bio = 1; - } - spin_unlock(&pkt->lock); - } - } - spin_unlock(&pd->cdrw.active_list_lock); - - /* - * Test if there is enough room left in the bio work queue - * (queue size >= congestion on mark). - * If not, wait till the work queue size is below the congestion off mark. - */ - spin_lock(&pd->lock); - if (pd->write_congestion_on > 0 - && pd->bio_queue_size >= pd->write_congestion_on) { - struct wait_bit_queue_entry wqe; - - init_wait_var_entry(&wqe, &pd->congested, 0); - for (;;) { - prepare_to_wait_event(__var_waitqueue(&pd->congested), - &wqe.wq_entry, - TASK_UNINTERRUPTIBLE); - if (pd->bio_queue_size <= pd->write_congestion_off) - break; - pd->congested = true; - spin_unlock(&pd->lock); - schedule(); - spin_lock(&pd->lock); - } - } - spin_unlock(&pd->lock); - - /* - * No matching packet found. Store the bio in the work queue. - */ - node = mempool_alloc(&pd->rb_pool, GFP_NOIO); - node->bio = bio; - spin_lock(&pd->lock); - BUG_ON(pd->bio_queue_size < 0); - was_empty = (pd->bio_queue_size == 0); - pkt_rbtree_insert(pd, node); - spin_unlock(&pd->lock); - - /* - * Wake up the worker thread. - */ - atomic_set(&pd->scan_queue, 1); - if (was_empty) { - /* This wake_up is required for correct operation */ - wake_up(&pd->wqueue); - } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { - /* - * This wake up is not required for correct operation, - * but improves performance in some cases. - */ - wake_up(&pd->wqueue); - } -} - -static void pkt_submit_bio(struct bio *bio) -{ - struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data; - struct device *ddev = disk_to_dev(pd->disk); - struct bio *split; - - bio = bio_split_to_limits(bio); - if (!bio) - return; - - dev_dbg(ddev, "start = %6llx stop = %6llx\n", - bio->bi_iter.bi_sector, bio_end_sector(bio)); - - /* - * Clone READ bios so we can have our own bi_end_io callback. 
- */ - if (bio_data_dir(bio) == READ) { - pkt_make_request_read(pd, bio); - return; - } - - if (!test_bit(PACKET_WRITABLE, &pd->flags)) { - dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector); - goto end_io; - } - - if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { - dev_err(ddev, "wrong bio size\n"); - goto end_io; - } - - do { - sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); - sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); - - if (last_zone != zone) { - BUG_ON(last_zone != zone + pd->settings.size); - - split = bio_split(bio, last_zone - - bio->bi_iter.bi_sector, - GFP_NOIO, &pkt_bio_set); - bio_chain(split, bio); - } else { - split = bio; - } - - pkt_make_request_write(split); - } while (split != bio); - - return; -end_io: - bio_io_error(bio); -} - -static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) -{ - struct device *ddev = disk_to_dev(pd->disk); - int i; - struct file *bdev_file; - struct scsi_device *sdev; - - if (pd->pkt_dev == dev) { - dev_err(ddev, "recursive setup not allowed\n"); - return -EBUSY; - } - for (i = 0; i < MAX_WRITERS; i++) { - struct pktcdvd_device *pd2 = pkt_devs[i]; - if (!pd2) - continue; - if (file_bdev(pd2->bdev_file)->bd_dev == dev) { - dev_err(ddev, "%pg already setup\n", - file_bdev(pd2->bdev_file)); - return -EBUSY; - } - if (pd2->pkt_dev == dev) { - dev_err(ddev, "can't chain pktcdvd devices\n"); - return -EBUSY; - } - } - - bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, - NULL, NULL); - if (IS_ERR(bdev_file)) - return PTR_ERR(bdev_file); - sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue); - if (!sdev) { - fput(bdev_file); - return -EINVAL; - } - put_device(&sdev->sdev_gendev); - - /* This is safe, since we have a reference from open(). */ - __module_get(THIS_MODULE); - - pd->bdev_file = bdev_file; - - atomic_set(&pd->cdrw.pending_bios, 0); - pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name); - if (IS_ERR(pd->cdrw.thread)) { - dev_err(ddev, "can't start kernel thread\n"); - goto out_mem; - } - - proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd); - dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file)); - return 0; - -out_mem: - fput(bdev_file); - /* This is safe: open() is still holding a reference. */ - module_put(THIS_MODULE); - return -ENOMEM; -} - -static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode, - unsigned int cmd, unsigned long arg) -{ - struct pktcdvd_device *pd = bdev->bd_disk->private_data; - struct device *ddev = disk_to_dev(pd->disk); - int ret; - - dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); - - mutex_lock(&pktcdvd_mutex); - switch (cmd) { - case CDROMEJECT: - /* - * The door gets locked when the device is opened, so we - * have to unlock it or else the eject command fails. 
- */ - if (pd->refcnt == 1) - pkt_lock_door(pd, 0); - fallthrough; - /* - * forward selected CDROM ioctls to CD-ROM, for UDF - */ - case CDROMMULTISESSION: - case CDROMREADTOCENTRY: - case CDROM_LAST_WRITTEN: - case CDROM_SEND_PACKET: - case SCSI_IOCTL_SEND_COMMAND: - if (!bdev->bd_disk->fops->ioctl) - ret = -ENOTTY; - else - ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); - break; - default: - dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd); - ret = -ENOTTY; - } - mutex_unlock(&pktcdvd_mutex); - - return ret; -} - -static unsigned int pkt_check_events(struct gendisk *disk, - unsigned int clearing) -{ - struct pktcdvd_device *pd = disk->private_data; - struct gendisk *attached_disk; - - if (!pd) - return 0; - if (!pd->bdev_file) - return 0; - attached_disk = file_bdev(pd->bdev_file)->bd_disk; - if (!attached_disk || !attached_disk->fops->check_events) - return 0; - return attached_disk->fops->check_events(attached_disk, clearing); -} - -static char *pkt_devnode(struct gendisk *disk, umode_t *mode) -{ - return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name); -} - -static const struct block_device_operations pktcdvd_ops = { - .owner = THIS_MODULE, - .submit_bio = pkt_submit_bio, - .open = pkt_open, - .release = pkt_release, - .ioctl = pkt_ioctl, - .compat_ioctl = blkdev_compat_ptr_ioctl, - .check_events = pkt_check_events, - .devnode = pkt_devnode, -}; - -/* - * Set up mapping from pktcdvd device to CD-ROM device. - */ -static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) -{ - struct queue_limits lim = { - .max_hw_sectors = PACKET_MAX_SECTORS, - .logical_block_size = CD_FRAMESIZE, - .features = BLK_FEAT_ROTATIONAL, - }; - int idx; - int ret = -ENOMEM; - struct pktcdvd_device *pd; - struct gendisk *disk; - - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - for (idx = 0; idx < MAX_WRITERS; idx++) - if (!pkt_devs[idx]) - break; - if (idx == MAX_WRITERS) { - pr_err("max %d writers supported\n", MAX_WRITERS); - ret = -EBUSY; - goto out_mutex; - } - - pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); - if (!pd) - goto out_mutex; - - ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE, - sizeof(struct pkt_rb_node)); - if (ret) - goto out_mem; - - INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); - INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); - spin_lock_init(&pd->cdrw.active_list_lock); - - spin_lock_init(&pd->lock); - spin_lock_init(&pd->iosched.lock); - bio_list_init(&pd->iosched.read_queue); - bio_list_init(&pd->iosched.write_queue); - init_waitqueue_head(&pd->wqueue); - pd->bio_queue = RB_ROOT; - - pd->write_congestion_on = write_congestion_on; - pd->write_congestion_off = write_congestion_off; - - disk = blk_alloc_disk(&lim, NUMA_NO_NODE); - if (IS_ERR(disk)) { - ret = PTR_ERR(disk); - goto out_mem; - } - pd->disk = disk; - disk->major = pktdev_major; - disk->first_minor = idx; - disk->minors = 1; - disk->fops = &pktcdvd_ops; - disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART; - snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx); - disk->private_data = pd; - - pd->pkt_dev = MKDEV(pktdev_major, idx); - ret = pkt_new_dev(pd, dev); - if (ret) - goto out_mem2; - - /* inherit events of the host device */ - disk->events = file_bdev(pd->bdev_file)->bd_disk->events; - - ret = add_disk(disk); - if (ret) - goto out_mem2; - - pkt_sysfs_dev_new(pd); - pkt_debugfs_dev_new(pd); - - pkt_devs[idx] = pd; - if (pkt_dev) - *pkt_dev = pd->pkt_dev; - - mutex_unlock(&ctl_mutex); - return 0; - -out_mem2: - put_disk(disk); -out_mem: - mempool_exit(&pd->rb_pool); - 
kfree(pd); -out_mutex: - mutex_unlock(&ctl_mutex); - pr_err("setup of pktcdvd device failed\n"); - return ret; -} - -/* - * Tear down mapping from pktcdvd device to CD-ROM device. - */ -static int pkt_remove_dev(dev_t pkt_dev) -{ - struct pktcdvd_device *pd; - struct device *ddev; - int idx; - int ret = 0; - - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - for (idx = 0; idx < MAX_WRITERS; idx++) { - pd = pkt_devs[idx]; - if (pd && (pd->pkt_dev == pkt_dev)) - break; - } - if (idx == MAX_WRITERS) { - pr_debug("dev not setup\n"); - ret = -ENXIO; - goto out; - } - - if (pd->refcnt > 0) { - ret = -EBUSY; - goto out; - } - - ddev = disk_to_dev(pd->disk); - - if (!IS_ERR(pd->cdrw.thread)) - kthread_stop(pd->cdrw.thread); - - pkt_devs[idx] = NULL; - - pkt_debugfs_dev_remove(pd); - pkt_sysfs_dev_remove(pd); - - fput(pd->bdev_file); - - remove_proc_entry(pd->disk->disk_name, pkt_proc); - dev_notice(ddev, "writer unmapped\n"); - - del_gendisk(pd->disk); - put_disk(pd->disk); - - mempool_exit(&pd->rb_pool); - kfree(pd); - - /* This is safe: open() is still holding a reference. */ - module_put(THIS_MODULE); - -out: - mutex_unlock(&ctl_mutex); - return ret; -} - -static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd) -{ - struct pktcdvd_device *pd; - - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); - if (pd) { - ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev); - ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); - } else { - ctrl_cmd->dev = 0; - ctrl_cmd->pkt_dev = 0; - } - ctrl_cmd->num_devices = MAX_WRITERS; - - mutex_unlock(&ctl_mutex); -} - -static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - void __user *argp = (void __user *)arg; - struct pkt_ctrl_command ctrl_cmd; - int ret = 0; - dev_t pkt_dev = 0; - - if (cmd != PACKET_CTRL_CMD) - return -ENOTTY; - - if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command))) - return -EFAULT; - - switch (ctrl_cmd.command) { - case PKT_CTRL_CMD_SETUP: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev); - ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev); - break; - case PKT_CTRL_CMD_TEARDOWN: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev)); - break; - case PKT_CTRL_CMD_STATUS: - pkt_get_status(&ctrl_cmd); - break; - default: - return -ENOTTY; - } - - if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command))) - return -EFAULT; - return ret; -} - -#ifdef CONFIG_COMPAT -static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); -} -#endif - -static const struct file_operations pkt_ctl_fops = { - .open = nonseekable_open, - .unlocked_ioctl = pkt_ctl_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = pkt_ctl_compat_ioctl, -#endif - .owner = THIS_MODULE, -}; - -static struct miscdevice pkt_misc = { - .minor = MISC_DYNAMIC_MINOR, - .name = DRIVER_NAME, - .nodename = "pktcdvd/control", - .fops = &pkt_ctl_fops -}; - -static int __init pkt_init(void) -{ - int ret; - - mutex_init(&ctl_mutex); - - ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE, - sizeof(struct packet_stacked_data)); - if (ret) - return ret; - ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0); - if (ret) { - mempool_exit(&psd_pool); - return ret; - } - - ret = register_blkdev(pktdev_major, DRIVER_NAME); - if (ret < 0) { - pr_err("unable to register 
block device\n"); - goto out2; - } - if (!pktdev_major) - pktdev_major = ret; - - ret = pkt_sysfs_init(); - if (ret) - goto out; - - pkt_debugfs_init(); - - ret = misc_register(&pkt_misc); - if (ret) { - pr_err("unable to register misc device\n"); - goto out_misc; - } - - pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL); - - return 0; - -out_misc: - pkt_debugfs_cleanup(); - pkt_sysfs_cleanup(); -out: - unregister_blkdev(pktdev_major, DRIVER_NAME); -out2: - mempool_exit(&psd_pool); - bioset_exit(&pkt_bio_set); - return ret; -} - -static void __exit pkt_exit(void) -{ - remove_proc_entry("driver/"DRIVER_NAME, NULL); - misc_deregister(&pkt_misc); - - pkt_debugfs_cleanup(); - pkt_sysfs_cleanup(); - - unregister_blkdev(pktdev_major, DRIVER_NAME); - mempool_exit(&psd_pool); - bioset_exit(&pkt_bio_set); -} - -MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives"); -MODULE_AUTHOR("Jens Axboe <axboe@suse.de>"); -MODULE_LICENSE("GPL"); - -module_init(pkt_init); -module_exit(pkt_exit); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index b5727dea15bd..7af21fe67671 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -957,8 +957,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev) dev = device_find_child(vdev->dev.parent, &port_data, vdc_device_probed); - if (dev) + if (dev) { + put_device(dev); return true; + } return false; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 9fd284fa76dc..6561d2a561fa 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -48,6 +48,8 @@ #define UBLK_MINORS (1U << MINORBITS) +#define UBLK_INVALID_BUF_IDX ((u16)-1) + /* private ioctl command mirror */ #define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC) #define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE) @@ -70,7 +72,8 @@ | UBLK_F_UPDATE_SIZE \ | UBLK_F_AUTO_BUF_REG \ | UBLK_F_QUIESCE \ - | UBLK_F_PER_IO_DAEMON) + | UBLK_F_PER_IO_DAEMON \ + | UBLK_F_BUF_REG_OFF_DAEMON) #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \ | UBLK_F_USER_RECOVERY_REISSUE \ @@ -82,14 +85,6 @@ UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \ UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT) -struct ublk_rq_data { - refcount_t ref; - - /* for auto-unregister buffer in case of UBLK_F_AUTO_BUF_REG */ - u16 buf_index; - void *buf_ctx_handle; -}; - struct ublk_uring_cmd_pdu { /* * Store requests in same batch temporarily for queuing them to @@ -110,8 +105,6 @@ struct ublk_uring_cmd_pdu { */ struct ublk_queue *ubq; - struct ublk_auto_buf_reg buf; - u16 tag; }; @@ -155,9 +148,19 @@ struct ublk_uring_cmd_pdu { /* atomic RW with ubq->cancel_lock */ #define UBLK_IO_FLAG_CANCELED 0x80000000 +/* + * Initialize refcount to a large number to include any registered buffers. + * UBLK_IO_COMMIT_AND_FETCH_REQ will release these references minus those for + * any buffers registered on the io daemon task. 
+ */ +#define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2) + struct ublk_io { /* userspace buffer address from io cmd */ - __u64 addr; + union { + __u64 addr; + struct ublk_auto_buf_reg buf; + }; unsigned int flags; int res; @@ -169,7 +172,24 @@ struct ublk_io { }; struct task_struct *task; -}; + + /* + * The number of uses of this I/O by the ublk server + * if user copy or zero copy are enabled: + * - UBLK_REFCOUNT_INIT from dispatch to the server + * until UBLK_IO_COMMIT_AND_FETCH_REQ + * - 1 for each inflight ublk_ch_{read,write}_iter() call + * - 1 for each io_uring registered buffer not registered on task + * The I/O can only be completed once all references are dropped. + * User copy and buffer registration operations are only permitted + * if the reference count is nonzero. + */ + refcount_t ref; + /* Count of buffers registered on task and not yet unregistered */ + unsigned task_registered_buffers; + + void *buf_ctx_handle; +} ____cacheline_aligned_in_smp; struct ublk_queue { int q_id; @@ -216,6 +236,9 @@ struct ublk_device { struct completion completion; unsigned int nr_queues_ready; unsigned int nr_privileged_daemon; + struct mutex cancel_mutex; + bool canceling; + pid_t ublksrv_tgid; }; /* header of ublk_params */ @@ -228,7 +251,8 @@ static void ublk_io_release(void *priv); static void ublk_stop_dev_unlocked(struct ublk_device *ub); static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq); static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, - const struct ublk_queue *ubq, int tag, size_t offset); + const struct ublk_queue *ubq, struct ublk_io *io, + size_t offset); static inline unsigned int ublk_req_build_flags(struct request *req); static inline struct ublksrv_io_desc * @@ -673,38 +697,29 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) } static inline void ublk_init_req_ref(const struct ublk_queue *ubq, - struct request *req) + struct ublk_io *io) { - if (ublk_need_req_ref(ubq)) { - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - - refcount_set(&data->ref, 1); - } + if (ublk_need_req_ref(ubq)) + refcount_set(&io->ref, UBLK_REFCOUNT_INIT); } -static inline bool ublk_get_req_ref(const struct ublk_queue *ubq, - struct request *req) +static inline bool ublk_get_req_ref(struct ublk_io *io) { - if (ublk_need_req_ref(ubq)) { - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - - return refcount_inc_not_zero(&data->ref); - } + return refcount_inc_not_zero(&io->ref); +} - return true; +static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req) +{ + if (refcount_dec_and_test(&io->ref)) + __ublk_complete_rq(req); } -static inline void ublk_put_req_ref(const struct ublk_queue *ubq, - struct request *req) +static inline bool ublk_sub_req_ref(struct ublk_io *io) { - if (ublk_need_req_ref(ubq)) { - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers; - if (refcount_dec_and_test(&data->ref)) - __ublk_complete_rq(req); - } else { - __ublk_complete_rq(req); - } + io->task_registered_buffers = 0; + return refcount_sub_and_test(sub_refs, &io->ref); } static inline bool ublk_need_get_data(const struct ublk_queue *ubq) @@ -981,7 +996,7 @@ static inline bool ublk_need_unmap_req(const struct request *req) } static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req, - struct ublk_io *io) + const struct ublk_io *io) { const unsigned int rq_bytes = blk_rq_bytes(req); @@ -1005,7 +1020,7 @@ static int ublk_map_io(const struct 
ublk_queue *ubq, const struct request *req, static int ublk_unmap_io(const struct ublk_queue *ubq, const struct request *req, - struct ublk_io *io) + const struct ublk_io *io) { const unsigned int rq_bytes = blk_rq_bytes(req); @@ -1140,7 +1155,7 @@ static inline void __ublk_complete_rq(struct request *req) if (blk_update_request(req, BLK_STS_OK, io->res)) blk_mq_requeue_request(req, true); - else + else if (likely(!blk_should_fake_timeout(req->q))) __blk_mq_end_request(req, BLK_STS_OK); return; @@ -1188,39 +1203,33 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq, blk_mq_end_request(rq, BLK_STS_IOERR); } -static void ublk_auto_buf_reg_fallback(struct request *req) +static void +ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io) { - const struct ublk_queue *ubq = req->mq_hctx->driver_data; - struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + unsigned tag = io - ubq->ios; + struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag); iod->op_flags |= UBLK_IO_F_NEED_REG_BUF; - refcount_set(&data->ref, 1); } -static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io, - unsigned int issue_flags) +static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req, + struct ublk_io *io, unsigned int issue_flags) { - struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd); - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); int ret; ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release, - pdu->buf.index, issue_flags); + io->buf.index, issue_flags); if (ret) { - if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) { - ublk_auto_buf_reg_fallback(req); + if (io->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) { + ublk_auto_buf_reg_fallback(ubq, io); return true; } blk_mq_end_request(req, BLK_STS_IOERR); return false; } - /* one extra reference is dropped by ublk_io_release */ - refcount_set(&data->ref, 2); - data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd); - /* store buffer index in request payload */ - data->buf_index = pdu->buf.index; + io->task_registered_buffers = 1; + io->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd); io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG; return true; } @@ -1229,10 +1238,10 @@ static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq, struct request *req, struct ublk_io *io, unsigned int issue_flags) { + ublk_init_req_ref(ubq, io); if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) - return ublk_auto_buf_reg(req, io, issue_flags); + return ublk_auto_buf_reg(ubq, req, io, issue_flags); - ublk_init_req_ref(ubq, req); return true; } @@ -1356,14 +1365,23 @@ static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l) static enum blk_eh_timer_return ublk_timeout(struct request *rq) { struct ublk_queue *ubq = rq->mq_hctx->driver_data; - struct ublk_io *io = &ubq->ios[rq->tag]; + pid_t tgid = ubq->dev->ublksrv_tgid; + struct task_struct *p; + struct pid *pid; - if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) { - send_sig(SIGKILL, io->task, 0); - return BLK_EH_DONE; - } + if (!(ubq->flags & UBLK_F_UNPRIVILEGED_DEV)) + return BLK_EH_RESET_TIMER; - return BLK_EH_RESET_TIMER; + if (unlikely(!tgid)) + return BLK_EH_RESET_TIMER; + + rcu_read_lock(); + pid = find_vpid(tgid); + p = pid_task(pid, PIDTYPE_PID); + if (p) + send_sig(SIGKILL, p, 0); + rcu_read_unlock(); + return BLK_EH_DONE; } static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq, @@ -1504,6 +1522,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, 
struct ublk_queue *ubq) put_task_struct(io->task); io->task = NULL; } + + WARN_ON_ONCE(refcount_read(&io->ref)); + WARN_ON_ONCE(io->task_registered_buffers); } } @@ -1515,6 +1536,7 @@ static int ublk_ch_open(struct inode *inode, struct file *filp) if (test_and_set_bit(UB_STATE_OPEN, &ub->state)) return -EBUSY; filp->private_data = ub; + ub->ublksrv_tgid = current->tgid; return 0; } @@ -1529,6 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub) ub->mm = NULL; ub->nr_queues_ready = 0; ub->nr_privileged_daemon = 0; + ub->ublksrv_tgid = -1; } static struct gendisk *ublk_get_disk(struct ublk_device *ub) @@ -1550,6 +1573,27 @@ static void ublk_put_disk(struct gendisk *disk) put_device(disk_to_dev(disk)); } +/* + * Use this function to ensure that ->canceling is consistently set for + * the device and all queues. Do not set these flags directly. + * + * Caller must ensure that: + * - cancel_mutex is held. This ensures that there is no concurrent + * access to ub->canceling and no concurrent writes to ubq->canceling. + * - there are no concurrent reads of ubq->canceling from the queue_rq + * path. This can be done by quiescing the queue, or through other + * means. + */ +static void ublk_set_canceling(struct ublk_device *ub, bool canceling) + __must_hold(&ub->cancel_mutex) +{ + int i; + + ub->canceling = canceling; + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) + ublk_get_queue(ub, i)->canceling = canceling; +} + static int ublk_ch_release(struct inode *inode, struct file *filp) { struct ublk_device *ub = filp->private_data; @@ -1578,12 +1622,11 @@ static int ublk_ch_release(struct inode *inode, struct file *filp) * All requests may be inflight, so ->canceling may not be set, set * it now. */ - for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { - struct ublk_queue *ubq = ublk_get_queue(ub, i); - - ubq->canceling = true; - ublk_abort_queue(ub, ubq); - } + mutex_lock(&ub->cancel_mutex); + ublk_set_canceling(ub, true); + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) + ublk_abort_queue(ub, ublk_get_queue(ub, i)); + mutex_unlock(&ub->cancel_mutex); blk_mq_kick_requeue_list(disk->queue); /* @@ -1706,23 +1749,17 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) } } -/* Must be called when queue is frozen */ -static void ublk_mark_queue_canceling(struct ublk_queue *ubq) +static void ublk_start_cancel(struct ublk_device *ub) { - spin_lock(&ubq->cancel_lock); - if (!ubq->canceling) - ubq->canceling = true; - spin_unlock(&ubq->cancel_lock); -} - -static void ublk_start_cancel(struct ublk_queue *ubq) -{ - struct ublk_device *ub = ubq->dev; struct gendisk *disk = ublk_get_disk(ub); /* Our disk has been dead */ if (!disk) return; + + mutex_lock(&ub->cancel_mutex); + if (ub->canceling) + goto out; /* * Now we are serialized with ublk_queue_rq() * @@ -1731,8 +1768,10 @@ static void ublk_start_cancel(struct ublk_queue *ubq) * touch completed uring_cmd */ blk_mq_quiesce_queue(disk->queue); - ublk_mark_queue_canceling(ubq); + ublk_set_canceling(ub, true); blk_mq_unquiesce_queue(disk->queue); +out: + mutex_unlock(&ub->cancel_mutex); ublk_put_disk(disk); } @@ -1805,8 +1844,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, if (WARN_ON_ONCE(task && task != io->task)) return; - if (!ubq->canceling) - ublk_start_cancel(ubq); + ublk_start_cancel(ubq->dev); WARN_ON_ONCE(io->cmd != cmd); ublk_cancel_cmd(ubq, pdu->tag, issue_flags); @@ -1930,9 +1968,11 @@ static void ublk_reset_io_flags(struct ublk_device *ub) for (j = 0; j < ubq->q_depth; j++) ubq->ios[j].flags 
&= ~UBLK_IO_FLAG_CANCELED; spin_unlock(&ubq->cancel_lock); - ubq->canceling = false; ubq->fail_io = false; } + mutex_lock(&ub->cancel_mutex); + ublk_set_canceling(ub, false); + mutex_unlock(&ub->cancel_mutex); } /* device can only be started after all IOs are ready */ @@ -1967,12 +2007,66 @@ static inline int ublk_check_cmd_op(u32 cmd_op) return 0; } -static inline void ublk_fill_io_cmd(struct ublk_io *io, - struct io_uring_cmd *cmd, unsigned long buf_addr) +static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd) +{ + io->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr)); + + if (io->buf.reserved0 || io->buf.reserved1) + return -EINVAL; + + if (io->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK) + return -EINVAL; + return 0; +} + +static int ublk_handle_auto_buf_reg(struct ublk_io *io, + struct io_uring_cmd *cmd, + u16 *buf_idx) { + if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) { + io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG; + + /* + * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ` + * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same + * `io_ring_ctx`. + * + * If this uring_cmd's io_ring_ctx isn't same with the + * one for registering the buffer, it is ublk server's + * responsibility for unregistering the buffer, otherwise + * this ublk request gets stuck. + */ + if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd)) + *buf_idx = io->buf.index; + } + + return ublk_set_auto_buf_reg(io, cmd); +} + +/* Once we return, `io->req` can't be used any more */ +static inline struct request * +ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd) +{ + struct request *req = io->req; + io->cmd = cmd; io->flags |= UBLK_IO_FLAG_ACTIVE; + /* now this cmd slot is owned by ublk driver */ + io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; + + return req; +} + +static inline int +ublk_config_io_buf(const struct ublk_queue *ubq, struct ublk_io *io, + struct io_uring_cmd *cmd, unsigned long buf_addr, + u16 *buf_idx) +{ + if (ublk_support_auto_buf_reg(ubq)) + return ublk_handle_auto_buf_reg(io, cmd, buf_idx); + io->addr = buf_addr; + return 0; } static inline void ublk_prep_cancel(struct io_uring_cmd *cmd, @@ -1990,30 +2084,25 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd, io_uring_cmd_mark_cancelable(cmd, issue_flags); } -static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd) -{ - struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); - - pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr)); - - if (pdu->buf.reserved0 || pdu->buf.reserved1) - return -EINVAL; - - if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK) - return -EINVAL; - return 0; -} - static void ublk_io_release(void *priv) { struct request *rq = priv; struct ublk_queue *ubq = rq->mq_hctx->driver_data; + struct ublk_io *io = &ubq->ios[rq->tag]; - ublk_put_req_ref(ubq, rq); + /* + * task_registered_buffers may be 0 if buffers were registered off task + * but unregistered on task. Or after UBLK_IO_COMMIT_AND_FETCH_REQ. 
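The reference-counting scheme spelled out in the comments above — initialize io->ref to a huge value, count daemon-task buffer registrations separately, and subtract the unused remainder at UBLK_IO_COMMIT_AND_FETCH_REQ — can be exercised with plain integers. A minimal sketch with hypothetical stand-ins (demo_io, plain unsigned counters), not the driver's refcount_t code:

#include <assert.h>
#include <stdio.h>

#define REFCOUNT_INIT_BIG (1u << 30)      /* stands in for UBLK_REFCOUNT_INIT */

struct demo_io {
        unsigned int ref;                 /* stands in for refcount_t io->ref */
        unsigned int task_registered_buffers;
};

/* Buffer release: on the daemon task it only drops the task-local counter;
 * off task (or once that counter is drained) it drops a real reference. */
static int demo_buf_release(struct demo_io *io, int on_task)
{
        if (on_task && io->task_registered_buffers) {
                io->task_registered_buffers--;
                return 0;
        }
        return --io->ref == 0;            /* nonzero: complete the request */
}

/* COMMIT_AND_FETCH: release everything except buffers still registered. */
static int demo_commit(struct demo_io *io)
{
        unsigned int sub = REFCOUNT_INIT_BIG - io->task_registered_buffers;

        io->task_registered_buffers = 0;
        io->ref -= sub;
        return io->ref == 0;
}

int main(void)
{
        struct demo_io io = { .ref = REFCOUNT_INIT_BIG };

        io.task_registered_buffers++;     /* buffer registered on the daemon task */
        io.ref++;                         /* buffer registered off task takes a ref */

        assert(!demo_commit(&io));        /* two buffers still out: not complete */
        assert(!demo_buf_release(&io, 1));/* counter already zeroed, drops a ref */
        assert(demo_buf_release(&io, 0)); /* last reference: request completes */

        printf("completed with ref=%u\n", io.ref);
        return 0;
}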
+ */ + if (current == io->task && io->task_registered_buffers) + io->task_registered_buffers--; + else + ublk_put_req_ref(io, rq); } static int ublk_register_io_buf(struct io_uring_cmd *cmd, - const struct ublk_queue *ubq, unsigned int tag, + const struct ublk_queue *ubq, + struct ublk_io *io, unsigned int index, unsigned int issue_flags) { struct ublk_device *ub = cmd->file->private_data; @@ -2023,30 +2112,75 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd, if (!ublk_support_zero_copy(ubq)) return -EINVAL; - req = __ublk_check_and_get_req(ub, ubq, tag, 0); + req = __ublk_check_and_get_req(ub, ubq, io, 0); if (!req) return -EINVAL; ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index, issue_flags); if (ret) { - ublk_put_req_ref(ubq, req); + ublk_put_req_ref(io, req); return ret; } return 0; } +static int +ublk_daemon_register_io_buf(struct io_uring_cmd *cmd, + const struct ublk_queue *ubq, struct ublk_io *io, + unsigned index, unsigned issue_flags) +{ + unsigned new_registered_buffers; + struct request *req = io->req; + int ret; + + /* + * Ensure there are still references for ublk_sub_req_ref() to release. + * If not, fall back on the thread-safe buffer registration. + */ + new_registered_buffers = io->task_registered_buffers + 1; + if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT)) + return ublk_register_io_buf(cmd, ubq, io, index, issue_flags); + + if (!ublk_support_zero_copy(ubq) || !ublk_rq_has_data(req)) + return -EINVAL; + + ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index, + issue_flags); + if (ret) + return ret; + + io->task_registered_buffers = new_registered_buffers; + return 0; +} + static int ublk_unregister_io_buf(struct io_uring_cmd *cmd, - const struct ublk_queue *ubq, + const struct ublk_device *ub, unsigned int index, unsigned int issue_flags) { - if (!ublk_support_zero_copy(ubq)) + if (!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)) return -EINVAL; return io_buffer_unregister_bvec(cmd, index, issue_flags); } +static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr) +{ + if (ublk_need_map_io(ubq)) { + /* + * FETCH_RQ has to provide IO buffer if NEED GET + * DATA is not enabled + */ + if (!buf_addr && !ublk_need_get_data(ubq)) + return -EINVAL; + } else if (buf_addr) { + /* User copy requires addr to be unset */ + return -EINVAL; + } + return 0; +} + static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq, struct ublk_io *io, __u64 buf_addr) { @@ -2073,26 +2207,11 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq, WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV); - if (ublk_need_map_io(ubq)) { - /* - * FETCH_RQ has to provide IO buffer if NEED GET - * DATA is not enabled - */ - if (!buf_addr && !ublk_need_get_data(ubq)) - goto out; - } else if (buf_addr) { - /* User copy requires addr to be unset */ - ret = -EINVAL; + ublk_fill_io_cmd(io, cmd); + ret = ublk_config_io_buf(ubq, io, cmd, buf_addr, NULL); + if (ret) goto out; - } - if (ublk_support_auto_buf_reg(ubq)) { - ret = ublk_set_auto_buf_reg(cmd); - if (ret) - goto out; - } - - ublk_fill_io_cmd(io, cmd, buf_addr); WRITE_ONCE(io->task, get_task_struct(current)); ublk_mark_io_ready(ub, ubq); out: @@ -2100,10 +2219,8 @@ out: return ret; } -static int ublk_commit_and_fetch(const struct ublk_queue *ubq, - struct ublk_io *io, struct io_uring_cmd *cmd, - const struct ublksrv_io_cmd *ub_cmd, - unsigned int issue_flags) +static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq, + struct ublk_io *io, __u64 
buf_addr) { struct request *req = io->req; @@ -2112,10 +2229,10 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq, * COMMIT_AND_FETCH_REQ has to provide IO buffer if * NEED GET DATA is not enabled or it is Read IO. */ - if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || + if (!buf_addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ)) return -EINVAL; - } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) { + } else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) { /* * User copy requires addr to be unset when command is * not zone append @@ -2123,48 +2240,17 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq, return -EINVAL; } - if (ublk_support_auto_buf_reg(ubq)) { - int ret; - - /* - * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ` - * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same - * `io_ring_ctx`. - * - * If this uring_cmd's io_ring_ctx isn't same with the - * one for registering the buffer, it is ublk server's - * responsibility for unregistering the buffer, otherwise - * this ublk request gets stuck. - */ - if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) { - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - - if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd)) - io_buffer_unregister_bvec(cmd, data->buf_index, - issue_flags); - io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG; - } - - ret = ublk_set_auto_buf_reg(cmd); - if (ret) - return ret; - } - - ublk_fill_io_cmd(io, cmd, ub_cmd->addr); - - /* now this cmd slot is owned by ublk driver */ - io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; - io->res = ub_cmd->result; - - if (req_op(req) == REQ_OP_ZONE_APPEND) - req->__sector = ub_cmd->zone_append_lba; - - if (likely(!blk_should_fake_timeout(req->q))) - ublk_put_req_ref(ubq, req); - return 0; } +static bool ublk_need_complete_req(const struct ublk_queue *ubq, + struct ublk_io *io) +{ + if (ublk_need_req_ref(ubq)) + return ublk_sub_req_ref(io); + return true; +} + static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io, struct request *req) { @@ -2187,19 +2273,33 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags, const struct ublksrv_io_cmd *ub_cmd) { + u16 buf_idx = UBLK_INVALID_BUF_IDX; struct ublk_device *ub = cmd->file->private_data; - struct task_struct *task; struct ublk_queue *ubq; struct ublk_io *io; u32 cmd_op = cmd->cmd_op; unsigned tag = ub_cmd->tag; - int ret = -EINVAL; struct request *req; + int ret; + bool compl; pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n", __func__, cmd->cmd_op, ub_cmd->q_id, tag, ub_cmd->result); + ret = ublk_check_cmd_op(cmd_op); + if (ret) + goto out; + + /* + * io_buffer_unregister_bvec() doesn't access the ubq or io, + * so no need to validate the q_id, tag, or task + */ + if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF) + return ublk_unregister_io_buf(cmd, ub, ub_cmd->addr, + issue_flags); + + ret = -EINVAL; if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues) goto out; @@ -2209,21 +2309,37 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, goto out; io = &ubq->ios[tag]; - task = READ_ONCE(io->task); - if (task && task != current) + /* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */ + if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) { + ret = ublk_check_fetch_buf(ubq, ub_cmd->addr); + if (ret) + goto out; + ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr); + if (ret) + goto out; + + ublk_prep_cancel(cmd, issue_flags, ubq, tag); + return -EIOCBQUEUED; + } + + if (READ_ONCE(io->task) != current) { 
+ /* + * ublk_register_io_buf() accesses only the io's refcount, + * so can be handled on any task + */ + if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF) + return ublk_register_io_buf(cmd, ubq, io, ub_cmd->addr, + issue_flags); + goto out; + } /* there is pending io cmd, something must be wrong */ - if (io->flags & UBLK_IO_FLAG_ACTIVE) { + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) { ret = -EBUSY; goto out; } - /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */ - if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) && - _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ) - goto out; - /* * ensure that the user issues UBLK_IO_NEED_GET_DATA * iff the driver have set the UBLK_IO_FLAG_NEED_GET_DATA. @@ -2232,23 +2348,27 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA)) goto out; - ret = ublk_check_cmd_op(cmd_op); - if (ret) - goto out; - - ret = -EINVAL; switch (_IOC_NR(cmd_op)) { case UBLK_IO_REGISTER_IO_BUF: - return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); - case UBLK_IO_UNREGISTER_IO_BUF: - return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags); - case UBLK_IO_FETCH_REQ: - ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr); + return ublk_daemon_register_io_buf(cmd, ubq, io, ub_cmd->addr, + issue_flags); + case UBLK_IO_COMMIT_AND_FETCH_REQ: + ret = ublk_check_commit_and_fetch(ubq, io, ub_cmd->addr); if (ret) goto out; - break; - case UBLK_IO_COMMIT_AND_FETCH_REQ: - ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags); + io->res = ub_cmd->result; + req = ublk_fill_io_cmd(io, cmd); + ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, &buf_idx); + compl = ublk_need_complete_req(ubq, io); + + /* can't touch 'ublk_io' any more */ + if (buf_idx != UBLK_INVALID_BUF_IDX) + io_buffer_unregister_bvec(cmd, buf_idx, issue_flags); + if (req_op(req) == REQ_OP_ZONE_APPEND) + req->__sector = ub_cmd->zone_append_lba; + if (compl) + __ublk_complete_rq(req); + if (ret) goto out; break; @@ -2258,9 +2378,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, * uring_cmd active first and prepare for handling new requeued * request */ - req = io->req; - ublk_fill_io_cmd(io, cmd, ub_cmd->addr); - io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; + req = ublk_fill_io_cmd(io, cmd); + ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, NULL); + WARN_ON_ONCE(ret); if (likely(ublk_get_data(ubq, io, req))) { __ublk_prep_compl_io_cmd(io, req); return UBLK_IO_RES_OK; @@ -2279,15 +2399,20 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, } static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, - const struct ublk_queue *ubq, int tag, size_t offset) + const struct ublk_queue *ubq, struct ublk_io *io, size_t offset) { + unsigned tag = io - ubq->ios; struct request *req; + /* + * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ, + * which would overwrite it with io->cmd + */ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); if (!req) return NULL; - if (!ublk_get_req_ref(ubq, req)) + if (!ublk_get_req_ref(io)) return NULL; if (unlikely(!blk_mq_request_started(req) || req->tag != tag)) @@ -2301,7 +2426,7 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, return req; fail_put: - ublk_put_req_ref(ubq, req); + ublk_put_req_ref(io, req); return NULL; } @@ -2368,7 +2493,8 @@ static inline bool ublk_check_ubuf_dir(const struct request *req, } static struct request *ublk_check_and_get_req(struct kiocb *iocb, - struct iov_iter *iter, size_t *off, int 
dir) + struct iov_iter *iter, size_t *off, int dir, + struct ublk_io **io) { struct ublk_device *ub = iocb->ki_filp->private_data; struct ublk_queue *ubq; @@ -2402,7 +2528,8 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb, if (tag >= ubq->q_depth) return ERR_PTR(-EINVAL); - req = __ublk_check_and_get_req(ub, ubq, tag, buf_off); + *io = &ubq->ios[tag]; + req = __ublk_check_and_get_req(ub, ubq, *io, buf_off); if (!req) return ERR_PTR(-EINVAL); @@ -2415,42 +2542,40 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb, *off = buf_off; return req; fail: - ublk_put_req_ref(ubq, req); + ublk_put_req_ref(*io, req); return ERR_PTR(-EACCES); } static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct ublk_queue *ubq; struct request *req; + struct ublk_io *io; size_t buf_off; size_t ret; - req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST); + req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io); if (IS_ERR(req)) return PTR_ERR(req); ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST); - ubq = req->mq_hctx->driver_data; - ublk_put_req_ref(ubq, req); + ublk_put_req_ref(io, req); return ret; } static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct ublk_queue *ubq; struct request *req; + struct ublk_io *io; size_t buf_off; size_t ret; - req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE); + req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io); if (IS_ERR(req)) return PTR_ERR(req); ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE); - ubq = req->mq_hctx->driver_data; - ublk_put_req_ref(ubq, req); + ublk_put_req_ref(io, req); return ret; } @@ -2475,6 +2600,8 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id) struct ublk_io *io = &ubq->ios[i]; if (io->task) put_task_struct(io->task); + WARN_ON_ONCE(refcount_read(&io->ref)); + WARN_ON_ONCE(io->task_registered_buffers); } if (ubq->io_cmd_buf) @@ -2513,7 +2640,7 @@ static void ublk_deinit_queues(struct ublk_device *ub) for (i = 0; i < nr_queues; i++) ublk_deinit_queue(ub, i); - kfree(ub->__queues); + kvfree(ub->__queues); } static int ublk_init_queues(struct ublk_device *ub) @@ -2524,7 +2651,7 @@ static int ublk_init_queues(struct ublk_device *ub) int i, ret = -ENOMEM; ub->queue_size = ubq_size; - ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL); + ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL); if (!ub->__queues) return ret; @@ -2580,6 +2707,7 @@ static void ublk_cdev_rel(struct device *dev) ublk_deinit_queues(ub); ublk_free_dev_number(ub); mutex_destroy(&ub->mutex); + mutex_destroy(&ub->cancel_mutex); kfree(ub); } @@ -2627,7 +2755,6 @@ static int ublk_add_tag_set(struct ublk_device *ub) ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues; ub->tag_set.queue_depth = ub->dev_info.queue_depth; ub->tag_set.numa_node = NUMA_NO_NODE; - ub->tag_set.cmd_size = sizeof(struct ublk_rq_data); ub->tag_set.driver_data = ub; return blk_mq_alloc_tag_set(&ub->tag_set); } @@ -2729,6 +2856,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, if (wait_for_completion_interruptible(&ub->completion) != 0) return -EINTR; + if (ub->ublksrv_tgid != ublksrv_pid) + return -EINVAL; + mutex_lock(&ub->mutex); if (ub->dev_info.state == UBLK_S_DEV_LIVE || test_bit(UB_STATE_USED, &ub->state)) { @@ -2933,6 +3063,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) goto out_unlock; mutex_init(&ub->mutex); spin_lock_init(&ub->lock); + mutex_init(&ub->cancel_mutex); ret = 
ublk_alloc_dev_number(ub, header->dev_id); if (ret < 0) @@ -2953,7 +3084,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | UBLK_F_URING_CMD_COMP_IN_TASK | - UBLK_F_PER_IO_DAEMON; + UBLK_F_PER_IO_DAEMON | + UBLK_F_BUF_REG_OFF_DAEMON; /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | @@ -3003,6 +3135,7 @@ out_free_dev_number: ublk_free_dev_number(ub); out_free_ub: mutex_destroy(&ub->mutex); + mutex_destroy(&ub->cancel_mutex); kfree(ub); out_unlock: mutex_unlock(&ublk_ctl_mutex); @@ -3227,6 +3360,9 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub, pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__, header->dev_id); + if (ub->ublksrv_tgid != ublksrv_pid) + return -EINVAL; + mutex_lock(&ub->mutex); if (ublk_nosrv_should_stop_dev(ub)) goto out_unlock; @@ -3340,7 +3476,7 @@ static int ublk_ctrl_quiesce_dev(struct ublk_device *ub, /* zero means wait forever */ u64 timeout_ms = header->data[0]; struct gendisk *disk; - int i, ret = -ENODEV; + int ret = -ENODEV; if (!(ub->dev_info.flags & UBLK_F_QUIESCE)) return -EOPNOTSUPP; @@ -3357,14 +3493,12 @@ static int ublk_ctrl_quiesce_dev(struct ublk_device *ub, if (ub->dev_info.state != UBLK_S_DEV_LIVE) goto put_disk; - /* Mark all queues as canceling */ + /* Mark the device as canceling */ + mutex_lock(&ub->cancel_mutex); blk_mq_quiesce_queue(disk->queue); - for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { - struct ublk_queue *ubq = ublk_get_queue(ub, i); - - ubq->canceling = true; - } + ublk_set_canceling(ub, true); blk_mq_unquiesce_queue(disk->queue); + mutex_unlock(&ub->cancel_mutex); if (!timeout_ms) timeout_ms = UINT_MAX; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 30bca8cb7106..e649fa67bac1 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -976,9 +976,8 @@ static int init_vq(struct virtio_blk *vblk) return -EINVAL; } - num_vqs = min_t(unsigned int, - min_not_zero(num_request_queues, nr_cpu_ids), - num_vqs); + num_vqs = blk_mq_num_possible_queues( + min_not_zero(num_request_queues, num_vqs)); num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1); diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index d26a58c67e95..b1bd1daa0060 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -8,6 +8,7 @@ #include <linux/sched.h> #include <linux/cpuhotplug.h> #include <linux/vmalloc.h> +#include <linux/sysfs.h> #include "zcomp.h" @@ -89,23 +90,21 @@ bool zcomp_available_algorithm(const char *comp) } /* show available compressors */ -ssize_t zcomp_available_show(const char *comp, char *buf) +ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at) { - ssize_t sz = 0; int i; for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) { if (!strcmp(comp, backends[i]->name)) { - sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "[%s] ", backends[i]->name); + at += sysfs_emit_at(buf, at, "[%s] ", + backends[i]->name); } else { - sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "%s ", backends[i]->name); + at += sysfs_emit_at(buf, at, "%s ", backends[i]->name); } } - sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n"); - return sz; + at += sysfs_emit_at(buf, at, "\n"); + return at; } struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 4acffe671a5e..eacfd3f7d61d 100644 --- a/drivers/block/zram/zcomp.h +++ 
b/drivers/block/zram/zcomp.h @@ -79,7 +79,7 @@ struct zcomp { int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node); int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node); -ssize_t zcomp_available_show(const char *comp, char *buf); +ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at); bool zcomp_available_algorithm(const char *comp); struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 54c57103715f..8acad3cc6e6e 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -373,7 +373,7 @@ static ssize_t initstate_show(struct device *dev, val = init_done(zram); up_read(&zram->init_lock); - return scnprintf(buf, PAGE_SIZE, "%u\n", val); + return sysfs_emit(buf, "%u\n", val); } static ssize_t disksize_show(struct device *dev, @@ -381,7 +381,7 @@ static ssize_t disksize_show(struct device *dev, { struct zram *zram = dev_to_zram(dev); - return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize); + return sysfs_emit(buf, "%llu\n", zram->disksize); } static ssize_t mem_limit_store(struct device *dev, @@ -532,7 +532,7 @@ static ssize_t writeback_limit_enable_show(struct device *dev, spin_unlock(&zram->wb_limit_lock); up_read(&zram->init_lock); - return scnprintf(buf, PAGE_SIZE, "%d\n", val); + return sysfs_emit(buf, "%d\n", val); } static ssize_t writeback_limit_store(struct device *dev, @@ -567,7 +567,7 @@ static ssize_t writeback_limit_show(struct device *dev, spin_unlock(&zram->wb_limit_lock); up_read(&zram->init_lock); - return scnprintf(buf, PAGE_SIZE, "%llu\n", val); + return sysfs_emit(buf, "%llu\n", val); } static void reset_bdev(struct zram *zram) @@ -1225,12 +1225,13 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg) zram->comp_algs[prio] = alg; } -static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf) +static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, + char *buf, ssize_t at) { ssize_t sz; down_read(&zram->init_lock); - sz = zcomp_available_show(zram->comp_algs[prio], buf); + sz = zcomp_available_show(zram->comp_algs[prio], buf, at); up_read(&zram->init_lock); return sz; @@ -1387,7 +1388,7 @@ static ssize_t comp_algorithm_show(struct device *dev, { struct zram *zram = dev_to_zram(dev); - return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf); + return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf, 0); } static ssize_t comp_algorithm_store(struct device *dev, @@ -1415,8 +1416,8 @@ static ssize_t recomp_algorithm_show(struct device *dev, if (!zram->comp_algs[prio]) continue; - sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio); - sz += __comp_algorithm_show(zram, prio, buf + sz); + sz += sysfs_emit_at(buf, sz, "#%d: ", prio); + sz += __comp_algorithm_show(zram, prio, buf, sz); } return sz; @@ -1488,7 +1489,7 @@ static ssize_t io_stat_show(struct device *dev, ssize_t ret; down_read(&zram->init_lock); - ret = scnprintf(buf, PAGE_SIZE, + ret = sysfs_emit(buf, "%8llu %8llu 0 %8llu\n", (u64)atomic64_read(&zram->stats.failed_reads), (u64)atomic64_read(&zram->stats.failed_writes), @@ -1518,7 +1519,7 @@ static ssize_t mm_stat_show(struct device *dev, orig_size = atomic64_read(&zram->stats.pages_stored); max_used = atomic_long_read(&zram->stats.max_used_pages); - ret = scnprintf(buf, PAGE_SIZE, + ret = sysfs_emit(buf, "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n", orig_size << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.compr_data_size), @@ 
-1543,8 +1544,8 @@ static ssize_t bd_stat_show(struct device *dev, ssize_t ret; down_read(&zram->init_lock); - ret = scnprintf(buf, PAGE_SIZE, - "%8llu %8llu %8llu\n", + ret = sysfs_emit(buf, + "%8llu %8llu %8llu\n", FOUR_K((u64)atomic64_read(&zram->stats.bd_count)), FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)), FOUR_K((u64)atomic64_read(&zram->stats.bd_writes))); @@ -1562,7 +1563,7 @@ static ssize_t debug_stat_show(struct device *dev, ssize_t ret; down_read(&zram->init_lock); - ret = scnprintf(buf, PAGE_SIZE, + ret = sysfs_emit(buf, "version: %d\n0 %8llu\n", version, (u64)atomic64_read(&zram->stats.miss_free)); @@ -2810,7 +2811,7 @@ static ssize_t hot_add_show(const struct class *class, if (ret < 0) return ret; - return scnprintf(buf, PAGE_SIZE, "%d\n", ret); + return sysfs_emit(buf, "%d\n", ret); } /* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */ static struct class_attribute class_attr_hot_add = diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 7671bd158545..c1c0a4759c7e 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -943,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, struct fsl_mc_obj_desc endpoint_desc = {{ 0 }}; struct dprc_endpoint endpoint1 = {{ 0 }}; struct dprc_endpoint endpoint2 = {{ 0 }}; + struct fsl_mc_bus *mc_bus; int state, err; mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); @@ -966,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, strcpy(endpoint_desc.type, endpoint2.type); endpoint_desc.id = endpoint2.id; endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev); + if (endpoint) + return endpoint; /* * We know that the device has an endpoint because we verified by @@ -973,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, * yet discovered by the fsl-mc bus, thus the lookup returned NULL. * Force a rescan of the devices in this container and retry the lookup. 
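The comment above describes the usual "look up, force a rescan, look up again" flow. A toy stand-alone version of that control flow; lookup_endpoint(), rescan_bus() and the single-retry policy are illustrative stand-ins for fsl_mc_device_lookup() and dprc_scan_objects(), not the bus code itself.

#include <stdio.h>

/* Toy bus model: the "device" only becomes visible after a rescan has run. */
static int bus_scanned;

static int lookup_endpoint(int id)
{
        return bus_scanned ? id : -1;            /* -1: not (yet) discovered */
}

static int rescan_bus(void)
{
        bus_scanned = 1;
        return 0;
}

static int get_endpoint(int id)
{
        int ep = lookup_endpoint(id);

        if (ep >= 0)
                return ep;                       /* fast path: already known */

        /* Not found: force a rescan of the container, then retry once. */
        if (rescan_bus() < 0)
                return -1;
        return lookup_endpoint(id);
}

int main(void)
{
        printf("endpoint: %d\n", get_endpoint(7));   /* prints 7 after the rescan */
        return 0;
}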
*/ - if (!endpoint) { - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); - - if (mutex_trylock(&mc_bus->scan_mutex)) { - err = dprc_scan_objects(mc_bus_dev, true); - mutex_unlock(&mc_bus->scan_mutex); - } - - if (err < 0) - return ERR_PTR(err); + mc_bus = to_fsl_mc_bus(mc_bus_dev); + if (mutex_trylock(&mc_bus->scan_mutex)) { + err = dprc_scan_objects(mc_bus_dev, true); + mutex_unlock(&mc_bus->scan_mutex); } + if (err < 0) + return ERR_PTR(err); endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev); /* diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 21a10552da61..31ba1f8c1f78 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -624,9 +624,6 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi) if (check_media_type == 1) cdi->options |= (int) CDO_CHECK_TYPE; - if (CDROM_CAN(CDC_MRW_W)) - cdi->exit = cdrom_mrw_exit; - if (cdi->ops->read_cdda_bpc) cdi->cdda_method = CDDA_BPC_FULL; else @@ -651,9 +648,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi) list_del(&cdi->list); mutex_unlock(&cdrom_mutex); - if (cdi->exit) - cdi->exit(cdi); - cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); } EXPORT_SYMBOL(unregister_cdrom); @@ -1264,6 +1258,8 @@ void cdrom_release(struct cdrom_device_info *cdi) cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name); cdrom_dvd_rw_close_write(cdi); + if (CDROM_CAN(CDC_MRW_W)) + cdrom_mrw_exit(cdi); if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) { cd_dbg(CD_CLOSE, "Unlocking door!\n"); diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 4c0bbba64ee5..691813d2a5a2 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -32,7 +32,7 @@ static int tpm_bios_measurements_open(struct inode *inode, struct tpm_chip *chip; inode_lock(inode); - if (!inode->i_private) { + if (!inode->i_nlink) { inode_unlock(inode); return -ENODEV; } @@ -105,7 +105,7 @@ static int tpm_read_log(struct tpm_chip *chip) void tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); - unsigned int cnt; + struct dentry *dentry; int log_version; int rc = 0; @@ -117,14 +117,12 @@ void tpm_bios_log_setup(struct tpm_chip *chip) return; log_version = rc; - cnt = 0; - chip->bios_dir[cnt] = securityfs_create_dir(name, NULL); + chip->bios_dir = securityfs_create_dir(name, NULL); /* NOTE: securityfs_create_dir can return ENODEV if securityfs is * compiled out. The caller should ignore the ENODEV return code. 
*/ - if (IS_ERR(chip->bios_dir[cnt])) - goto err; - cnt++; + if (IS_ERR(chip->bios_dir)) + return; chip->bin_log_seqops.chip = chip; if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) @@ -135,14 +133,13 @@ void tpm_bios_log_setup(struct tpm_chip *chip) &tpm1_binary_b_measurements_seqops; - chip->bios_dir[cnt] = + dentry = securityfs_create_file("binary_bios_measurements", - 0440, chip->bios_dir[0], + 0440, chip->bios_dir, (void *)&chip->bin_log_seqops, &tpm_bios_measurements_ops); - if (IS_ERR(chip->bios_dir[cnt])) + if (IS_ERR(dentry)) goto err; - cnt++; if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { @@ -150,42 +147,23 @@ void tpm_bios_log_setup(struct tpm_chip *chip) chip->ascii_log_seqops.seqops = &tpm1_ascii_b_measurements_seqops; - chip->bios_dir[cnt] = + dentry = securityfs_create_file("ascii_bios_measurements", - 0440, chip->bios_dir[0], + 0440, chip->bios_dir, (void *)&chip->ascii_log_seqops, &tpm_bios_measurements_ops); - if (IS_ERR(chip->bios_dir[cnt])) + if (IS_ERR(dentry)) goto err; - cnt++; } return; err: - chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); return; } void tpm_bios_log_teardown(struct tpm_chip *chip) { - int i; - struct inode *inode; - - /* securityfs_remove currently doesn't take care of handling sync - * between removal and opening of pseudo files. To handle this, a - * workaround is added by making i_private = NULL here during removal - * and to check it during open(), both within inode_lock()/unlock(). - * This design ensures that open() either safely gets kref or fails. - */ - for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { - if (chip->bios_dir[i]) { - inode = d_inode(chip->bios_dir[i]); - inode_lock(inode); - inode->i_private = NULL; - inode_unlock(inode); - securityfs_remove(chip->bios_dir[i]); - } - } + securityfs_remove(chip->bios_dir); } diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c index 930fe43d5daf..92cec9722ee4 100644 --- a/drivers/char/tpm/eventlog/of.c +++ b/drivers/char/tpm/eventlog/of.c @@ -24,16 +24,10 @@ static int tpm_read_log_memory_region(struct tpm_chip *chip) { - struct device_node *node; struct resource res; int rc; - node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0); - if (!node) - return -ENODEV; - - rc = of_address_to_resource(node, 0, &res); - of_node_put(node); + rc = of_reserved_mem_region_to_resource(chip->dev.parent->of_node, 0, &res); if (rc) return rc; diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index c0771980bc2f..2ed7815e4899 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -300,7 +300,7 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) * send TPM commands through the I2C bus. 
*/ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, - size_t len) + size_t bufsiz, size_t len) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u32 status, i, size, ordinal; diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 8d7e4da6ed53..b71725827743 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -82,6 +82,13 @@ static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status) return chip->ops->req_canceled(chip, status); } +static bool tpm_transmit_completed(u8 status, struct tpm_chip *chip) +{ + u8 status_masked = status & chip->ops->req_complete_mask; + + return status_masked == chip->ops->req_complete_val; +} + static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) { struct tpm_header *header = buf; @@ -106,7 +113,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) return -E2BIG; } - rc = chip->ops->send(chip, buf, count); + rc = chip->ops->send(chip, buf, bufsiz, count); if (rc < 0) { if (rc != -EPIPE) dev_err(&chip->dev, @@ -114,8 +121,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) return rc; } - /* A sanity check. send() should just return zero on success e.g. - * not the command length. + /* + * Synchronous devices return the response directly during the send() + * call in the same buffer. + */ + if (chip->flags & TPM_CHIP_FLAG_SYNC) { + len = rc; + rc = 0; + goto out_sync; + } + + /* + * A sanity check. send() of asynchronous devices should just return + * zero on success e.g. not the command length. */ if (rc > 0) { dev_warn(&chip->dev, @@ -129,8 +147,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); do { u8 status = tpm_chip_status(chip); - if ((status & chip->ops->req_complete_mask) == - chip->ops->req_complete_val) + if (tpm_transmit_completed(status, chip)) goto out_recv; if (tpm_chip_req_canceled(chip, status)) { @@ -142,6 +159,13 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) rmb(); } while (time_before(jiffies, stop)); + /* + * Check for completion one more time, just in case the device reported + * it while the driver was sleeping in the busy loop above. + */ + if (tpm_transmit_completed(tpm_chip_status(chip), chip)) + goto out_recv; + tpm_chip_cancel(chip); dev_err(&chip->dev, "Operation Timed out\n"); return -ETIME; @@ -151,7 +175,10 @@ out_recv: if (len < 0) { rc = len; dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc); - } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length)) + return rc; + } +out_sync: + if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length)) rc = -EFAULT; return rc ? 
rc : len; diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index 7b5049b3d476..bdb119453dfb 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -390,7 +390,7 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, * on every operation, so we weld the hmac init and final functions in * here to give it the same usage characteristics as a regular hash */ -static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len) +static void tpm2_hmac_init(struct sha256_ctx *sctx, u8 *key, u32 key_len) { u8 pad[SHA256_BLOCK_SIZE]; int i; @@ -406,7 +406,7 @@ static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len) sha256_update(sctx, pad, sizeof(pad)); } -static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len, +static void tpm2_hmac_final(struct sha256_ctx *sctx, u8 *key, u32 key_len, u8 *out) { u8 pad[SHA256_BLOCK_SIZE]; @@ -440,7 +440,7 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u, const __be32 bits = cpu_to_be32(bytes * 8); while (bytes > 0) { - struct sha256_state sctx; + struct sha256_ctx sctx; __be32 c = cpu_to_be32(counter); tpm2_hmac_init(&sctx, key, key_len); @@ -467,7 +467,7 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u, static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v, u8 *out) { - struct sha256_state sctx; + struct sha256_ctx sctx; /* * this should be an iterative counter, but because we know * we're only taking 32 bytes for the point using a sha256 @@ -592,7 +592,7 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) u8 *hmac = NULL; u32 attrs; u8 cphash[SHA256_DIGEST_SIZE]; - struct sha256_state sctx; + struct sha256_ctx sctx; if (!auth) return; @@ -750,7 +750,7 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, off_t offset_s, offset_p; u8 rphash[SHA256_DIGEST_SIZE]; u32 attrs, cc; - struct sha256_state sctx; + struct sha256_ctx sctx; u16 tag = be16_to_cpu(head->tag); int parm_len, len, i, handles; diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 54a0360a3c95..f25faf468bba 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -148,7 +148,8 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) return size; } -static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); int i; diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 876edf2705ab..ed97344f2324 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -426,7 +426,7 @@ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id) } #endif -static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); int rc = 0; diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c index 4ead61f01299..755b77b32ea4 100644 --- a/drivers/char/tpm/tpm_crb_ffa.c +++ b/drivers/char/tpm/tpm_crb_ffa.c @@ -10,8 +10,16 @@ #define pr_fmt(fmt) "CRB_FFA: " fmt #include <linux/arm_ffa.h> +#include <linux/delay.h> +#include <linux/moduleparam.h> #include "tpm_crb_ffa.h" +static unsigned int busy_timeout_ms = 2000; + +module_param(busy_timeout_ms, uint, 0644); 
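The tpm_try_transmit() rework above re-checks completion once more after the timed polling loop, so a response that lands during the loop's final sleep is not misreported as a timeout. A stand-alone sketch of that shape; poll_done(), the 30 ms "device" and the 10 ms poll interval are made up for illustration and are not the TPM code.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static struct timespec t0;

static double elapsed_ms(void)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - t0.tv_sec) * 1e3 + (now.tv_nsec - t0.tv_nsec) / 1e6;
}

/* Hypothetical device: "completes" roughly 30 ms after the command was sent. */
static bool poll_done(void)
{
        return elapsed_ms() >= 30.0;
}

static int wait_for_completion_ms(double timeout_ms)
{
        while (elapsed_ms() < timeout_ms) {
                if (poll_done())
                        return 0;
                usleep(10000);                   /* sleep 10 ms between polls */
        }
        /* Check one last time: completion may have arrived during the final
         * sleep, after the loop condition was evaluated. */
        if (poll_done())
                return 0;
        return -1;                               /* genuine timeout: cancel here */
}

int main(void)
{
        clock_gettime(CLOCK_MONOTONIC, &t0);
        printf("result: %d\n", wait_for_completion_ms(35.0));
        return 0;
}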
+MODULE_PARM_DESC(busy_timeout_ms, + "Maximum time in ms to retry before giving up on busy"); + /* TPM service function status codes */ #define CRB_FFA_OK 0x05000001 #define CRB_FFA_OK_RESULTS_RETURNED 0x05000002 @@ -115,6 +123,7 @@ struct tpm_crb_ffa { }; static struct tpm_crb_ffa *tpm_crb_ffa; +static struct ffa_driver tpm_crb_ffa_driver; static int tpm_crb_ffa_to_linux_errno(int errno) { @@ -168,50 +177,51 @@ static int tpm_crb_ffa_to_linux_errno(int errno) */ int tpm_crb_ffa_init(void) { + int ret = 0; + + if (!IS_MODULE(CONFIG_TCG_ARM_CRB_FFA)) { + ret = ffa_register(&tpm_crb_ffa_driver); + if (ret) { + tpm_crb_ffa = ERR_PTR(-ENODEV); + return ret; + } + } + if (!tpm_crb_ffa) - return -ENOENT; + ret = -ENOENT; if (IS_ERR_VALUE(tpm_crb_ffa)) - return -ENODEV; + ret = -ENODEV; - return 0; + return ret; } EXPORT_SYMBOL_GPL(tpm_crb_ffa_init); -static int __tpm_crb_ffa_send_recieve(unsigned long func_id, - unsigned long a0, - unsigned long a1, - unsigned long a2) +static int __tpm_crb_ffa_try_send_receive(unsigned long func_id, + unsigned long a0, unsigned long a1, + unsigned long a2) { const struct ffa_msg_ops *msg_ops; int ret; - if (!tpm_crb_ffa) - return -ENOENT; - msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops; if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { - memset(&tpm_crb_ffa->direct_msg_data2, 0x00, - sizeof(struct ffa_send_direct_data2)); - - tpm_crb_ffa->direct_msg_data2.data[0] = func_id; - tpm_crb_ffa->direct_msg_data2.data[1] = a0; - tpm_crb_ffa->direct_msg_data2.data[2] = a1; - tpm_crb_ffa->direct_msg_data2.data[3] = a2; + tpm_crb_ffa->direct_msg_data2 = (struct ffa_send_direct_data2){ + .data = { func_id, a0, a1, a2 }, + }; ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev, &tpm_crb_ffa->direct_msg_data2); if (!ret) ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]); } else { - memset(&tpm_crb_ffa->direct_msg_data, 0x00, - sizeof(struct ffa_send_direct_data)); - - tpm_crb_ffa->direct_msg_data.data1 = func_id; - tpm_crb_ffa->direct_msg_data.data2 = a0; - tpm_crb_ffa->direct_msg_data.data3 = a1; - tpm_crb_ffa->direct_msg_data.data4 = a2; + tpm_crb_ffa->direct_msg_data = (struct ffa_send_direct_data){ + .data1 = func_id, + .data2 = a0, + .data3 = a1, + .data4 = a2, + }; ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev, &tpm_crb_ffa->direct_msg_data); @@ -219,6 +229,33 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id, ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1); } + return ret; +} + +static int __tpm_crb_ffa_send_receive(unsigned long func_id, unsigned long a0, + unsigned long a1, unsigned long a2) +{ + ktime_t start, stop; + int ret; + + if (!tpm_crb_ffa) + return -ENOENT; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(busy_timeout_ms)); + + for (;;) { + ret = __tpm_crb_ffa_try_send_receive(func_id, a0, a1, a2); + if (ret != -EBUSY) + break; + + usleep_range(50, 100); + if (ktime_after(ktime_get(), stop)) { + dev_warn(&tpm_crb_ffa->ffa_dev->dev, + "Busy retry timed out\n"); + break; + } + } return ret; } @@ -236,7 +273,7 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id, * * Return: 0 on success, negative error code on failure. 
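busy_timeout_ms above bounds how long __tpm_crb_ffa_send_receive() keeps retrying a direct message that comes back -EBUSY. The same retry-until-deadline shape reduced to portable C; try_send() and its three busy attempts are hypothetical, standing in for the FF-A direct-message call.

#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int attempts;

/* Hypothetical operation: reports -EBUSY for the first few attempts. */
static int try_send(void)
{
        return ++attempts < 4 ? -EBUSY : 0;
}

static int send_with_retry(unsigned int busy_timeout_ms)
{
        struct timespec start, now;
        int ret;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                ret = try_send();
                if (ret != -EBUSY)
                        break;                   /* success or a hard error */

                usleep(100);                     /* brief back-off, like usleep_range(50, 100) */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1e3 +
                    (now.tv_nsec - start.tv_nsec) / 1e6 > busy_timeout_ms) {
                        fprintf(stderr, "busy retry timed out\n");
                        break;                   /* give up with the last -EBUSY */
                }
        }
        return ret;
}

int main(void)
{
        int ret = send_with_retry(2000);

        printf("ret=%d after %d attempts\n", ret, attempts);
        return 0;
}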
*/ -int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) +static int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) { int rc; @@ -251,7 +288,7 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) guard(mutex)(&tpm_crb_ffa->msg_data_lock); - rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00); + rc = __tpm_crb_ffa_send_receive(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00); if (!rc) { if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]); @@ -264,7 +301,6 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) return rc; } -EXPORT_SYMBOL_GPL(tpm_crb_ffa_get_interface_version); /** * tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB @@ -289,7 +325,7 @@ int tpm_crb_ffa_start(int request_type, int locality) guard(mutex)(&tpm_crb_ffa->msg_data_lock); - return __tpm_crb_ffa_send_recieve(CRB_FFA_START, request_type, locality, 0x00); + return __tpm_crb_ffa_send_receive(CRB_FFA_START, request_type, locality, 0x00); } EXPORT_SYMBOL_GPL(tpm_crb_ffa_start); @@ -369,7 +405,9 @@ static struct ffa_driver tpm_crb_ffa_driver = { .id_table = tpm_crb_ffa_device_id, }; +#ifdef MODULE module_ffa_driver(tpm_crb_ffa_driver); +#endif MODULE_AUTHOR("Arm"); MODULE_DESCRIPTION("TPM CRB FFA driver"); diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h index 645c41ede10e..d7e1344ea003 100644 --- a/drivers/char/tpm/tpm_crb_ffa.h +++ b/drivers/char/tpm/tpm_crb_ffa.h @@ -11,11 +11,9 @@ #if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA) int tpm_crb_ffa_init(void); -int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor); int tpm_crb_ffa_start(int request_type, int locality); #else static inline int tpm_crb_ffa_init(void) { return 0; } -static inline int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) { return 0; } static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; } #endif diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 53ba28ccd5d3..4e63c30aeaf1 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -31,45 +31,19 @@ static const uuid_t ftpm_ta_uuid = 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96); /** - * ftpm_tee_tpm_op_recv() - retrieve fTPM response. - * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h. - * @buf: the buffer to store data. - * @count: the number of bytes to read. - * - * Return: - * In case of success the number of bytes received. - * On failure, -errno. - */ -static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) -{ - struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent); - size_t len; - - len = pvt_data->resp_len; - if (count < len) { - dev_err(&chip->dev, - "%s: Invalid size in recv: count=%zd, resp_len=%zd\n", - __func__, count, len); - return -EIO; - } - - memcpy(buf, pvt_data->resp_buf, len); - pvt_data->resp_len = 0; - - return len; -} - -/** - * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory. + * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory + * and retrieve the response. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h - * @buf: the buffer to send. - * @len: the number of bytes to send. + * @buf: the buffer to send and to store the response. + * @bufsiz: the size of the buffer. + * @cmd_len: the number of bytes to send. 
* * Return: - * In case of success, returns 0. + * In case of success, returns the number of bytes received. * On failure, -errno */ -static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t cmd_len) { struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent); size_t resp_len; @@ -80,16 +54,15 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) struct tee_param command_params[4]; struct tee_shm *shm = pvt_data->shm; - if (len > MAX_COMMAND_SIZE) { + if (cmd_len > MAX_COMMAND_SIZE) { dev_err(&chip->dev, "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n", - __func__, len); + __func__, cmd_len); return -EIO; } memset(&transceive_args, 0, sizeof(transceive_args)); memset(command_params, 0, sizeof(command_params)); - pvt_data->resp_len = 0; /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */ transceive_args = (struct tee_ioctl_invoke_arg) { @@ -103,7 +76,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT, .u.memref = { .shm = shm, - .size = len, + .size = cmd_len, .shm_offs = 0, }, }; @@ -115,7 +88,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) return PTR_ERR(temp_buf); } memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE)); - memcpy(temp_buf, buf, len); + memcpy(temp_buf, buf, cmd_len); command_params[1] = (struct tee_param) { .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT, @@ -156,17 +129,20 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) __func__, resp_len); return -EIO; } + if (resp_len > bufsiz) { + dev_err(&chip->dev, + "%s: resp_len=%zd exceeds bufsiz=%zd\n", + __func__, resp_len, bufsiz); + return -EIO; + } - /* sanity checks look good, cache the response */ - memcpy(pvt_data->resp_buf, temp_buf, resp_len); - pvt_data->resp_len = resp_len; + memcpy(buf, temp_buf, resp_len); - return 0; + return resp_len; } static const struct tpm_class_ops ftpm_tee_tpm_ops = { .flags = TPM_OPS_AUTO_STARTUP, - .recv = ftpm_tee_tpm_op_recv, .send = ftpm_tee_tpm_op_send, }; @@ -251,7 +227,7 @@ static int ftpm_tee_probe(struct device *dev) } pvt_data->chip = chip; - pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2; + pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_SYNC; /* Create a character device for the fTPM */ rc = tpm_chip_register(pvt_data->chip); diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h index e39903b7ea07..8d5c3f0d2879 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.h +++ b/drivers/char/tpm/tpm_ftpm_tee.h @@ -22,16 +22,12 @@ * struct ftpm_tee_private - fTPM's private data * @chip: struct tpm_chip instance registered with tpm framework. * @session: fTPM TA session identifier. - * @resp_len: cached response buffer length. - * @resp_buf: cached response buffer. * @ctx: TEE context handler. * @shm: Memory pool shared with fTPM TA in TEE. 
*/ struct ftpm_tee_private { struct tpm_chip *chip; u32 session; - size_t resp_len; - u8 resp_buf[MAX_RESPONSE_SIZE]; struct tee_context *ctx; struct tee_shm *shm; }; diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index d1d27fdfe523..4f229656a8e2 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -37,7 +37,8 @@ struct priv_data { u8 buffer[sizeof(struct tpm_header) + 25]; }; -static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct i2c_client *client = to_i2c_client(chip->dev.parent); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 81d8a78dc655..bdf1f329a679 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -514,7 +514,8 @@ out: return size; } -static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { int rc, status; ssize_t burstcnt; diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index 3c3ee5f551db..d44903b29929 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -350,7 +350,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct device *dev = chip->dev.parent; diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 76d048f63d55..4734a69406ce 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -191,13 +191,15 @@ static int tpm_ibmvtpm_resume(struct device *dev) * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct * @buf: buffer contains data to send - * @count: size of buffer + * @bufsiz: size of the buffer + * @count: length of the command * * Return: * 0 on success, * -errno on error */ -static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); bool retry = true; diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 2d2ae37153ba..7638b65b851b 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -312,7 +312,8 @@ recv_begin: return -EIO; } -static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) +static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { int i; int ret; diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 0f62bbc940da..879ac88f5783 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -178,7 +178,8 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) return size; } -static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) +static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); u8 data; diff --git a/drivers/char/tpm/tpm_ppi.c 
b/drivers/char/tpm/tpm_ppi.c index bc7b1b4501b3..d53fce1c9d6f 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -52,7 +52,7 @@ static ssize_t tpm_show_ppi_version(struct device *dev, { struct tpm_chip *chip = to_tpm_chip(dev); - return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version); + return sysfs_emit(buf, "%s\n", chip->ppi_version); } static ssize_t tpm_show_ppi_request(struct device *dev, @@ -87,12 +87,10 @@ static ssize_t tpm_show_ppi_request(struct device *dev, else { req = obj->package.elements[1].integer.value; if (tpm_ppi_req_has_parameter(req)) - size = scnprintf(buf, PAGE_SIZE, - "%llu %llu\n", req, - obj->package.elements[2].integer.value); + size = sysfs_emit(buf, "%llu %llu\n", req, + obj->package.elements[2].integer.value); else - size = scnprintf(buf, PAGE_SIZE, - "%llu\n", req); + size = sysfs_emit(buf, "%llu\n", req); } } else if (obj->package.count == 2 && obj->package.elements[0].type == ACPI_TYPE_INTEGER && @@ -100,8 +98,8 @@ static ssize_t tpm_show_ppi_request(struct device *dev, if (obj->package.elements[0].integer.value) size = -EFAULT; else - size = scnprintf(buf, PAGE_SIZE, "%llu\n", - obj->package.elements[1].integer.value); + size = sysfs_emit(buf, "%llu\n", + obj->package.elements[1].integer.value); } ACPI_FREE(obj); @@ -211,10 +209,10 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev, } if (ret < ARRAY_SIZE(info) - 1) - status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]); + status = sysfs_emit(buf, "%d: %s\n", ret, info[ret]); else - status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, - info[ARRAY_SIZE(info)-1]); + status = sysfs_emit(buf, "%d: %s\n", ret, + info[ARRAY_SIZE(info) - 1]); return status; } @@ -255,23 +253,23 @@ static ssize_t tpm_show_ppi_response(struct device *dev, res = ret_obj[2].integer.value; if (req) { if (res == 0) - status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, - "0: Success"); + status = sysfs_emit(buf, "%llu %s\n", req, + "0: Success"); else if (res == 0xFFFFFFF0) - status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, - "0xFFFFFFF0: User Abort"); + status = sysfs_emit(buf, "%llu %s\n", req, + "0xFFFFFFF0: User Abort"); else if (res == 0xFFFFFFF1) - status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, - "0xFFFFFFF1: BIOS Failure"); + status = sysfs_emit(buf, "%llu %s\n", req, + "0xFFFFFFF1: BIOS Failure"); else if (res >= 1 && res <= 0x00000FFF) - status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", - req, res, "Corresponding TPM error"); + status = sysfs_emit(buf, "%llu %llu: %s\n", + req, res, "Corresponding TPM error"); else - status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", - req, res, "Error"); + status = sysfs_emit(buf, "%llu %llu: %s\n", + req, res, "Error"); } else { - status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n", - req, "No Recent Request"); + status = sysfs_emit(buf, "%llu: %s\n", + req, "No Recent Request"); } cleanup: @@ -284,7 +282,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, { int i; u32 ret; - char *str = buf; + int len = 0; union acpi_object *obj, tmp; union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp); @@ -314,11 +312,11 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, } if (ret > 0 && ret < ARRAY_SIZE(info)) - str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n", - i, ret, info[ret]); + len += sysfs_emit_at(buf, len, "%d %d: %s\n", + i, ret, info[ret]); } - return str - buf; + return len; } static ssize_t tpm_show_ppi_tcg_operations(struct device *dev, diff --git 
a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c index 4280edf427d6..f5ba0f64850b 100644 --- a/drivers/char/tpm/tpm_svsm.c +++ b/drivers/char/tpm/tpm_svsm.c @@ -25,37 +25,32 @@ struct tpm_svsm_priv { void *buffer; }; -static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t cmd_len) { struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev); int ret; - ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, len); + ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, cmd_len); if (ret) return ret; /* * The SVSM call uses the same buffer for the command and for the - * response, so after this call, the buffer will contain the response - * that can be used by .recv() op. + * response, so after this call, the buffer will contain the response. + * + * Note: we have to use an internal buffer because the device in SVSM + * expects the svsm_vtpm header + data to be physically contiguous. */ - return snp_svsm_vtpm_send_command(priv->buffer); -} - -static int tpm_svsm_recv(struct tpm_chip *chip, u8 *buf, size_t len) -{ - struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev); + ret = snp_svsm_vtpm_send_command(priv->buffer); + if (ret) + return ret; - /* - * The internal buffer contains the response after we send the command - * to SVSM. - */ - return svsm_vtpm_cmd_response_parse(priv->buffer, buf, len); + return svsm_vtpm_cmd_response_parse(priv->buffer, buf, bufsiz); } static struct tpm_class_ops tpm_chip_ops = { .flags = TPM_OPS_AUTO_STARTUP, - .recv = tpm_svsm_recv, .send = tpm_svsm_send, }; @@ -84,6 +79,7 @@ static int __init tpm_svsm_probe(struct platform_device *pdev) dev_set_drvdata(&chip->dev, priv); + chip->flags |= TPM_CHIP_FLAG_SYNC; err = tpm2_probe(chip); if (err) return err; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index ed0d3d8449b3..4b12c4b9da8b 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -580,7 +580,8 @@ out_err: return rc; } -static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { int rc, irq; struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c index 3b55a7b05c46..fc6891a0b693 100644 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c @@ -546,13 +546,15 @@ out_err: * tpm_cr50_i2c_tis_send() - TPM transmission callback. * @chip: A TPM chip. * @buf: Buffer to send. - * @len: Buffer length. + * @bufsiz: Buffer size. + * @len: Command length. * * Return: * - 0: Success. * - -errno: A POSIX error code. */ -static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { size_t burstcnt, limit, sent = 0; u8 tpm_go[4] = { TPM_STS_GO }; diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 8fe4a01eea12..0818bb517805 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -321,12 +321,14 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, * * @chip: tpm chip to use * @buf: send buffer + * @bufsiz: size of the buffer * @count: bytes to send * * Return: * 0 in case of success, negative error value otherwise. 
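/*
 * Illustrative sketch only; not part of the patch above. The recurring change
 * in the tpm driver hunks of this series is the widened ->send() callback,
 * which now also receives the size of the caller's buffer. Drivers that set
 * TPM_CHIP_FLAG_SYNC (tpm_ftpm_tee and tpm_svsm above) return the response in
 * the same buffer and report its length. A hypothetical synchronous driver
 * could implement the callback roughly as below; example_transport_xfer() is
 * a made-up helper standing in for the real transport.
 */
static int example_sync_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
			     size_t cmd_len)
{
	size_t resp_len;
	int rc;

	/* hand cmd_len bytes to the (hypothetical) transport; it writes the
	 * response back into buf and reports its length */
	rc = example_transport_xfer(chip, buf, cmd_len, &resp_len);
	if (rc)
		return rc;

	/* never claim more data than the caller's buffer can hold */
	if (resp_len > bufsiz)
		return -EIO;

	/* with TPM_CHIP_FLAG_SYNC the byte count is the return value */
	return resp_len;
}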
*/ -static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 80cca3b83b22..556bf2256716 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -131,7 +131,8 @@ static size_t shr_data_offset(struct vtpm_shared_page *shr) return struct_size(shr, extra_pages, shr->nr_extra_pages); } -static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c index 3ba01622d8f0..90dd1f1855c2 100644 --- a/drivers/clk/qcom/apcs-sdx55.c +++ b/drivers/clk/qcom/apcs-sdx55.c @@ -111,7 +111,7 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev) * driver, there seems to be no better place to do this. So do it here! */ cpu_dev = get_cpu_device(0); - ret = dev_pm_domain_attach(cpu_dev, true); + ret = dev_pm_domain_attach(cpu_dev, PD_FLAG_ATTACH_POWER_ON); if (ret) { dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret); goto err; diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c index 9efb9fd24b42..1a9a1cb869e2 100644 --- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c +++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c @@ -385,7 +385,8 @@ static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(mbus_clk, "mbus", mbus_parents, 0, 0, /* no P */ 24, 3, /* mux */ BIT(31), /* gate */ - 0, CCU_FEATURE_UPDATE_BIT); + CLK_IS_CRITICAL, + CCU_FEATURE_UPDATE_BIT); static const struct clk_hw *mbus_hws[] = { &mbus_clk.common.hw }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 579a81bb46df..52e4369664c5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -350,7 +350,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x104, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); -static const char * const tcon_parents[] = { "pll-video" }; +static const char * const tcon_parents[] = { "pll-video", "pll-periph0" }; static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents, 0x118, 0, 4, 24, 3, BIT(31), 0); @@ -362,11 +362,11 @@ static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk", csi_mclk_parents, 0x130, 0, 5, 8, 3, BIT(15), 0); -static const char * const csi1_sclk_parents[] = { "pll-video", "pll-isp" }; -static SUNXI_CCU_M_WITH_MUX_GATE(csi1_sclk_clk, "csi-sclk", csi1_sclk_parents, +static const char * const csi_sclk_parents[] = { "pll-video", "pll-isp" }; +static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents, 0x134, 16, 4, 24, 3, BIT(31), 0); -static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi-mclk", csi_mclk_parents, +static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi1-mclk", csi_mclk_parents, 0x134, 0, 5, 8, 3, BIT(15), 0); static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", @@ -452,7 +452,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = { &tcon_clk.common, &csi_misc_clk.common, &csi0_mclk_clk.common, - &csi1_sclk_clk.common, + &csi_sclk_clk.common, &csi1_mclk_clk.common, &ve_clk.common, &ac_dig_clk.common, @@ -551,7 +551,7 @@ 
static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_TCON0] = &tcon_clk.common.hw, [CLK_CSI_MISC] = &csi_misc_clk.common.hw, [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw, - [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw, + [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw, [CLK_VE] = &ve_clk.common.hw, [CLK_AC_DIG] = &ac_dig_clk.common.hw, @@ -633,7 +633,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = { [CLK_TCON0] = &tcon_clk.common.hw, [CLK_CSI_MISC] = &csi_misc_clk.common.hw, [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw, - [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw, + [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw, [CLK_VE] = &ve_clk.common.hw, [CLK_AC_DIG] = &ac_dig_clk.common.hw, diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c index 49e86cb70a7a..61f1e27fc41e 100644 --- a/drivers/clocksource/timer-orion.c +++ b/drivers/clocksource/timer-orion.c @@ -43,7 +43,7 @@ static struct delay_timer orion_delay_timer = { .read_current_timer = orion_read_timer, }; -static void orion_delay_timer_init(unsigned long rate) +static void __init orion_delay_timer_init(unsigned long rate) { orion_delay_timer.freq = rate; register_current_timer_delay(&orion_delay_timer); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 0d46402e3094..9be0503df55a 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -28,7 +28,6 @@ config ARM_APPLE_SOC_CPUFREQ tristate "Apple Silicon SoC CPUFreq support" depends on ARCH_APPLE || (COMPILE_TEST && 64BIT) select PM_OPP - default ARCH_APPLE help This adds the CPUFreq driver for Apple Silicon machines (e.g. Apple M1). @@ -238,7 +237,7 @@ config ARM_TEGRA20_CPUFREQ This adds the CPUFreq driver support for Tegra20/30 SOCs. config ARM_TEGRA124_CPUFREQ - bool "Tegra124 CPUFreq support" + tristate "Tegra124 CPUFreq support" depends on ARCH_TEGRA || COMPILE_TEST depends on CPUFREQ_DT default ARCH_TEGRA diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 5a3545bd0d8d..d96c1718f7f8 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables) { int opps_index, nb_cpus = num_possible_cpus(); - for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) { + for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) { int i; /* If cpu_dev is NULL then we reached the end of the array */ @@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void) int ret = 0, opps_index = 0, cpu, nb_cpus; struct freq_table *freq_tables; struct device_node *node; - static struct cpumask cpus; + static struct cpumask cpus, shared_cpus; node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match, NULL); @@ -154,7 +154,6 @@ static int __init armada_8k_cpufreq_init(void) * divisions of it). 
*/ for_each_cpu(cpu, &cpus) { - struct cpumask shared_cpus; struct device *cpu_dev; struct clk *clk; diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 7b841a086acc..5940d262374f 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -765,7 +765,7 @@ static void brcm_avs_cpufreq_remove(struct platform_device *pdev) } static const struct of_device_id brcm_avs_cpufreq_match[] = { - { .compatible = BRCM_AVS_CPU_DATA }, + { .compatible = "brcm,avs-cpu-data-mem" }, { } }; MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index b7c688a5659c..4a17162a392d 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -26,14 +26,6 @@ #include <acpi/cppc_acpi.h> -/* - * This list contains information parsed from per CPU ACPI _CPC and _PSD - * structures: e.g. the highest and lowest supported performance, capabilities, - * desired performance, level requested etc. Depending on the share_type, not - * all CPUs will have an entry in the list. - */ -static LIST_HEAD(cpu_data_list); - static struct cpufreq_driver cppc_cpufreq_driver; #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE @@ -352,7 +344,6 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) #if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL) static DEFINE_PER_CPU(unsigned int, efficiency_class); -static void cppc_cpufreq_register_em(struct cpufreq_policy *policy); /* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */ #define CPPC_EM_CAP_STEP (20) @@ -488,7 +479,19 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz, return 0; } -static int populate_efficiency_class(void) +static void cppc_cpufreq_register_em(struct cpufreq_policy *policy) +{ + struct cppc_cpudata *cpu_data; + struct em_data_callback em_cb = + EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost); + + cpu_data = policy->driver_data; + em_dev_register_perf_domain(get_cpu_device(policy->cpu), + get_perf_level_count(policy), &em_cb, + cpu_data->shared_cpu_map, 0); +} + +static void populate_efficiency_class(void) { struct acpi_madt_generic_interrupt *gicc; DECLARE_BITMAP(used_classes, 256) = {}; @@ -503,7 +506,7 @@ static int populate_efficiency_class(void) if (bitmap_weight(used_classes, 256) <= 1) { pr_debug("Efficiency classes are all equal (=%d). 
" "No EM registered", class); - return -EINVAL; + return; } /* @@ -520,26 +523,11 @@ static int populate_efficiency_class(void) index++; } cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em; - - return 0; -} - -static void cppc_cpufreq_register_em(struct cpufreq_policy *policy) -{ - struct cppc_cpudata *cpu_data; - struct em_data_callback em_cb = - EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost); - - cpu_data = policy->driver_data; - em_dev_register_perf_domain(get_cpu_device(policy->cpu), - get_perf_level_count(policy), &em_cb, - cpu_data->shared_cpu_map, 0); } #else -static int populate_efficiency_class(void) +static void populate_efficiency_class(void) { - return 0; } #endif @@ -567,8 +555,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu) goto free_mask; } - list_add(&cpu_data->node, &cpu_data_list); - return cpu_data; free_mask: @@ -583,7 +569,6 @@ static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy) { struct cppc_cpudata *cpu_data = policy->driver_data; - list_del(&cpu_data->node); free_cpumask_var(cpu_data->shared_cpu_map); kfree(cpu_data); policy->driver_data = NULL; @@ -925,7 +910,7 @@ static struct freq_attr *cppc_cpufreq_attr[] = { }; static struct cpufreq_driver cppc_cpufreq_driver = { - .flags = CPUFREQ_CONST_LOOPS, + .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, .verify = cppc_verify_policy, .target = cppc_cpufreq_set_target, .get = cppc_cpufreq_get_rate, @@ -954,24 +939,10 @@ static int __init cppc_cpufreq_init(void) return ret; } -static inline void free_cpu_data(void) -{ - struct cppc_cpudata *iter, *tmp; - - list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) { - free_cpumask_var(iter->shared_cpu_map); - list_del(&iter->node); - kfree(iter); - } - -} - static void __exit cppc_cpufreq_exit(void) { cpufreq_unregister_driver(&cppc_cpufreq_driver); cppc_freq_invariance_exit(); - - free_cpu_data(); } module_exit(cppc_cpufreq_exit); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a010da0f6337..015dd393eaba 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -143,6 +143,7 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "nvidia,tegra20", }, { .compatible = "nvidia,tegra30", }, + { .compatible = "nvidia,tegra114", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, { .compatible = "nvidia,tegra234", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index e80dd982a3e2..506437489b4d 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -329,6 +329,17 @@ static struct platform_driver dt_cpufreq_platdrv = { }; module_platform_driver(dt_cpufreq_platdrv); +struct platform_device *cpufreq_dt_pdev_register(struct device *dev) +{ + struct platform_device_info cpufreq_dt_devinfo = {}; + + cpufreq_dt_devinfo.name = "cpufreq-dt"; + cpufreq_dt_devinfo.parent = dev; + + return platform_device_register_full(&cpufreq_dt_devinfo); +} +EXPORT_SYMBOL_GPL(cpufreq_dt_pdev_register); + MODULE_ALIAS("platform:cpufreq-dt"); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h index 28c8af7ec5ef..fc1889aeb4f1 100644 --- a/drivers/cpufreq/cpufreq-dt.h +++ b/drivers/cpufreq/cpufreq-dt.h @@ -22,4 +22,6 @@ struct cpufreq_dt_platform_data { int (*resume)(struct cpufreq_policy *policy); }; +struct platform_device 
*cpufreq_dt_pdev_register(struct device *dev); + #endif /* __CPUFREQ_DT_H__ */ diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d7426e1d8bdd..fc7eace8b65b 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -109,6 +109,8 @@ void disable_cpufreq(void) { off = 1; } +EXPORT_SYMBOL_GPL(disable_cpufreq); + static DEFINE_MUTEX(cpufreq_governor_mutex); bool have_governor_per_policy(void) @@ -967,6 +969,7 @@ static struct attribute *cpufreq_attrs[] = { &cpuinfo_min_freq.attr, &cpuinfo_max_freq.attr, &cpuinfo_transition_latency.attr, + &scaling_cur_freq.attr, &scaling_min_freq.attr, &scaling_max_freq.attr, &affected_cpus.attr, @@ -1095,10 +1098,6 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) return ret; } - ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); - if (ret) - return ret; - if (cpufreq_driver->bios_limit) { ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); if (ret) @@ -1284,6 +1283,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) goto err_free_real_cpus; } + init_rwsem(&policy->rwsem); + freq_constraints_init(&policy->constraints); policy->nb_min.notifier_call = cpufreq_notifier_min; @@ -1306,7 +1307,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) } INIT_LIST_HEAD(&policy->policy_list); - init_rwsem(&policy->rwsem); spin_lock_init(&policy->transition_lock); init_waitqueue_head(&policy->transition_wait); INIT_WORK(&policy->update, handle_update); @@ -1694,14 +1694,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy) return; } - if (has_target()) + if (has_target()) { strscpy(policy->last_governor, policy->governor->name, CPUFREQ_NAME_LEN); - else - policy->last_policy = policy->policy; - - if (has_target()) cpufreq_exit_governor(policy); + } else { + policy->last_policy = policy->policy; + } /* * Perform the ->offline() during light-weight tear-down, as @@ -1803,6 +1802,9 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b { unsigned int new_freq; + if (!cpufreq_driver->get) + return 0; + new_freq = cpufreq_driver->get(policy->cpu); if (!new_freq) return 0; @@ -1925,10 +1927,7 @@ unsigned int cpufreq_get(unsigned int cpu) guard(cpufreq_policy_read)(policy); - if (cpufreq_driver->get) - return __cpufreq_get(policy); - - return 0; + return __cpufreq_get(policy); } EXPORT_SYMBOL(cpufreq_get); @@ -2482,8 +2481,7 @@ int cpufreq_start_governor(struct cpufreq_policy *policy) pr_debug("%s: for CPU %u\n", __func__, policy->cpu); - if (cpufreq_driver->get) - cpufreq_verify_current_freq(policy, false); + cpufreq_verify_current_freq(policy, false); if (policy->governor->start) { ret = policy->governor->start(policy); @@ -2715,10 +2713,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, pr_debug("starting governor %s failed\n", policy->governor->name); if (old_gov) { policy->governor = old_gov; - if (cpufreq_init_governor(policy)) + if (cpufreq_init_governor(policy)) { policy->governor = NULL; - else - cpufreq_start_governor(policy); + } else if (cpufreq_start_governor(policy)) { + cpufreq_exit_governor(policy); + policy->governor = NULL; + } } return ret; @@ -2944,15 +2944,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) cpufreq_driver = driver_data; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - /* - * Mark support for the scheduler's frequency invariance engine for - * drivers that implement target(), target_index() or fast_switch(). 
- */ - if (!cpufreq_driver->setpolicy) { - static_branch_enable_cpuslocked(&cpufreq_freq_invariance); - pr_debug("supports frequency invariance"); - } - if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; @@ -2983,6 +2974,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) hp_online = ret; ret = 0; + /* + * Mark support for the scheduler's frequency invariance engine for + * drivers that implement target(), target_index() or fast_switch(). + */ + if (!cpufreq_driver->setpolicy) { + static_branch_enable_cpuslocked(&cpufreq_freq_invariance); + pr_debug("supports frequency invariance"); + } + pr_debug("driver %s up and running\n", driver_data->name); goto out; diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 2c42fee76daa..77d62152cd38 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -134,6 +134,7 @@ static struct cpufreq_governor cpufreq_gov_userspace = { .store_setspeed = cpufreq_set, .show_setspeed = show_speed, .owner = THIS_MODULE, + .flags = CPUFREQ_GOV_STRICT_TARGET, }; MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, " diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 64587d318267..06a1c7dd081f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2775,6 +2775,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(INTEL_TIGERLAKE, core_funcs), X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs), X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs), + X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs), + X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); @@ -3249,8 +3251,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, int max_pstate = policy->strict_target ? 
target_pstate : cpu->max_perf_ratio; - intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0, - fast_switch); + intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, + target_pstate, fast_switch); } else if (target_pstate != old_pstate) { intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); } diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c index 514146d98bca..f8a76bbecef9 100644 --- a/drivers/cpufreq/tegra124-cpufreq.c +++ b/drivers/cpufreq/tegra124-cpufreq.c @@ -16,6 +16,10 @@ #include <linux/pm_opp.h> #include <linux/types.h> +#include "cpufreq-dt.h" + +static struct platform_device *tegra124_cpufreq_pdev; + struct tegra124_cpufreq_priv { struct clk *cpu_clk; struct clk *pllp_clk; @@ -55,7 +59,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) struct device_node *np __free(device_node) = of_cpu_device_node_get(0); struct tegra124_cpufreq_priv *priv; struct device *cpu_dev; - struct platform_device_info cpufreq_dt_devinfo = {}; int ret; if (!np) @@ -95,11 +98,7 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) if (ret) goto out_put_pllp_clk; - cpufreq_dt_devinfo.name = "cpufreq-dt"; - cpufreq_dt_devinfo.parent = &pdev->dev; - - priv->cpufreq_dt_pdev = - platform_device_register_full(&cpufreq_dt_devinfo); + priv->cpufreq_dt_pdev = cpufreq_dt_pdev_register(&pdev->dev); if (IS_ERR(priv->cpufreq_dt_pdev)) { ret = PTR_ERR(priv->cpufreq_dt_pdev); goto out_put_pllp_clk; @@ -173,6 +172,21 @@ disable_cpufreq: return err; } +static void tegra124_cpufreq_remove(struct platform_device *pdev) +{ + struct tegra124_cpufreq_priv *priv = dev_get_drvdata(&pdev->dev); + + if (!IS_ERR(priv->cpufreq_dt_pdev)) { + platform_device_unregister(priv->cpufreq_dt_pdev); + priv->cpufreq_dt_pdev = ERR_PTR(-ENODEV); + } + + clk_put(priv->pllp_clk); + clk_put(priv->pllx_clk); + clk_put(priv->dfll_clk); + clk_put(priv->cpu_clk); +} + static const struct dev_pm_ops tegra124_cpufreq_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend, tegra124_cpufreq_resume) @@ -182,15 +196,16 @@ static struct platform_driver tegra124_cpufreq_platdrv = { .driver.name = "cpufreq-tegra124", .driver.pm = &tegra124_cpufreq_pm_ops, .probe = tegra124_cpufreq_probe, + .remove = tegra124_cpufreq_remove, }; static int __init tegra_cpufreq_init(void) { int ret; - struct platform_device *pdev; - if (!(of_machine_is_compatible("nvidia,tegra124") || - of_machine_is_compatible("nvidia,tegra210"))) + if (!(of_machine_is_compatible("nvidia,tegra114") || + of_machine_is_compatible("nvidia,tegra124") || + of_machine_is_compatible("nvidia,tegra210"))) return -ENODEV; /* @@ -201,15 +216,25 @@ static int __init tegra_cpufreq_init(void) if (ret) return ret; - pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0); - if (IS_ERR(pdev)) { + tegra124_cpufreq_pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0); + if (IS_ERR(tegra124_cpufreq_pdev)) { platform_driver_unregister(&tegra124_cpufreq_platdrv); - return PTR_ERR(pdev); + return PTR_ERR(tegra124_cpufreq_pdev); } return 0; } module_init(tegra_cpufreq_init); +static void __exit tegra_cpufreq_module_exit(void) +{ + if (!IS_ERR_OR_NULL(tegra124_cpufreq_pdev)) + platform_device_unregister(tegra124_cpufreq_pdev); + + platform_driver_unregister(&tegra124_cpufreq_platdrv); +} +module_exit(tegra_cpufreq_module_exit); + MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>"); MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index 97feb7d8fb23..558d49838990 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -98,7 +98,6 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx, { int cpu; struct device_node *cpu_node, *curr_state_node; - bool valid = true; /* * Compare idle state phandles for index idx on all CPUs in the @@ -107,20 +106,17 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx, * retrieved from. If a mismatch is found bail out straight * away since we certainly hit a firmware misconfiguration. */ - for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); - cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { + cpu = cpumask_first(cpumask) + 1; + for_each_cpu_from(cpu, cpumask) { cpu_node = of_cpu_device_node_get(cpu); curr_state_node = of_get_cpu_state_node(cpu_node, idx); - if (state_node != curr_state_node) - valid = false; - of_node_put(curr_state_node); of_node_put(cpu_node); - if (!valid) - break; + if (state_node != curr_state_node) + return false; } - return valid; + return true; } /** diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index e050f5ff5efb..a8f735390f0d 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -705,17 +705,17 @@ static int img_hash_cra_md5_init(struct crypto_tfm *tfm) static int img_hash_cra_sha1_init(struct crypto_tfm *tfm) { - return img_hash_cra_init(tfm, "sha1-generic"); + return img_hash_cra_init(tfm, "sha1-lib"); } static int img_hash_cra_sha224_init(struct crypto_tfm *tfm) { - return img_hash_cra_init(tfm, "sha224-generic"); + return img_hash_cra_init(tfm, "sha224-lib"); } static int img_hash_cra_sha256_init(struct crypto_tfm *tfm) { - return img_hash_cra_init(tfm, "sha256-generic"); + return img_hash_cra_init(tfm, "sha256-lib"); } static void img_hash_cra_exit(struct crypto_tfm *tfm) diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 9ca80d082c4f..c3b2b22934b7 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1218,7 +1218,6 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_xts_aes, &safexcel_alg_gcm, &safexcel_alg_ccm, - &safexcel_alg_crc32, &safexcel_alg_cbcmac, &safexcel_alg_xcbcmac, &safexcel_alg_cmac, diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 0c79ad78d1c0..0f27367a85fa 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -959,7 +959,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes; extern struct safexcel_alg_template safexcel_alg_xts_aes; extern struct safexcel_alg_template safexcel_alg_gcm; extern struct safexcel_alg_template safexcel_alg_ccm; -extern struct safexcel_alg_template safexcel_alg_crc32; extern struct safexcel_alg_template safexcel_alg_cbcmac; extern struct safexcel_alg_template safexcel_alg_xcbcmac; extern struct safexcel_alg_template safexcel_alg_cmac; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index d2b632193beb..fd34dc8f5707 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -289,14 +289,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, return 1; } - if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM && - ctx->alg == 
CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) { - /* Undo final XOR with 0xffffffff ...*/ - *(__le32 *)areq->result = ~sreq->state[0]; - } else { - memcpy(areq->result, sreq->state, - crypto_ahash_digestsize(ahash)); - } + memcpy(areq->result, sreq->state, + crypto_ahash_digestsize(ahash)); } cache_len = safexcel_queued_len(sreq); @@ -1881,88 +1875,6 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { }, }; -static int safexcel_crc32_cra_init(struct crypto_tfm *tfm) -{ - struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); - int ret = safexcel_ahash_cra_init(tfm); - - /* Default 'key' is all zeroes */ - memset(&ctx->base.ipad, 0, sizeof(u32)); - return ret; -} - -static int safexcel_crc32_init(struct ahash_request *areq) -{ - struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); - - memset(req, 0, sizeof(*req)); - - /* Start from loaded key */ - req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]); - /* Set processed to non-zero to enable invalidation detection */ - req->len = sizeof(u32); - req->processed = sizeof(u32); - - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32; - req->digest = CONTEXT_CONTROL_DIGEST_XCM; - req->state_sz = sizeof(u32); - req->digest_sz = sizeof(u32); - req->block_sz = sizeof(u32); - - return 0; -} - -static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - - if (keylen != sizeof(u32)) - return -EINVAL; - - memcpy(&ctx->base.ipad, key, sizeof(u32)); - return 0; -} - -static int safexcel_crc32_digest(struct ahash_request *areq) -{ - return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq); -} - -struct safexcel_alg_template safexcel_alg_crc32 = { - .type = SAFEXCEL_ALG_TYPE_AHASH, - .algo_mask = 0, - .alg.ahash = { - .init = safexcel_crc32_init, - .update = safexcel_ahash_update, - .final = safexcel_ahash_final, - .finup = safexcel_ahash_finup, - .digest = safexcel_crc32_digest, - .setkey = safexcel_crc32_setkey, - .export = safexcel_ahash_export, - .import = safexcel_ahash_import, - .halg = { - .digestsize = sizeof(u32), - .statesize = sizeof(struct safexcel_ahash_export_state), - .base = { - .cra_name = "crc32", - .cra_driver_name = "safexcel-crc32", - .cra_priority = SAFEXCEL_CRA_PRIORITY, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_ALLOCATES_MEMORY | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), - .cra_init = safexcel_crc32_cra_init, - .cra_exit = safexcel_ahash_cra_exit, - .cra_module = THIS_MODULE, - }, - }, - }, -}; - static int safexcel_cbcmac_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c index 2c60a1047bc3..6cfe0238f615 100644 --- a/drivers/crypto/starfive/jh7110-hash.c +++ b/drivers/crypto/starfive/jh7110-hash.c @@ -493,25 +493,25 @@ static int starfive_hash_setkey(struct crypto_ahash *hash, static int starfive_sha224_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "sha224-generic", + return starfive_hash_init_tfm(hash, "sha224-lib", STARFIVE_HASH_SHA224, 0); } static int starfive_sha256_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "sha256-generic", + return starfive_hash_init_tfm(hash, "sha256-lib", STARFIVE_HASH_SHA256, 0); } static int 
starfive_sha384_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "sha384-generic", + return starfive_hash_init_tfm(hash, "sha384-lib", STARFIVE_HASH_SHA384, 0); } static int starfive_sha512_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "sha512-generic", + return starfive_hash_init_tfm(hash, "sha512-lib", STARFIVE_HASH_SHA512, 0); } @@ -523,25 +523,25 @@ static int starfive_sm3_init_tfm(struct crypto_ahash *hash) static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "hmac(sha224-generic)", + return starfive_hash_init_tfm(hash, "hmac-sha224-lib", STARFIVE_HASH_SHA224, 1); } static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "hmac(sha256-generic)", + return starfive_hash_init_tfm(hash, "hmac-sha256-lib", STARFIVE_HASH_SHA256, 1); } static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "hmac(sha384-generic)", + return starfive_hash_init_tfm(hash, "hmac-sha384-lib", STARFIVE_HASH_SHA384, 1); } static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash) { - return starfive_hash_init_tfm(hash, "hmac(sha512-generic)", + return starfive_hash_init_tfm(hash, "hmac-sha512-lib", STARFIVE_HASH_SHA512, 1); } diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index 49dfd161e9b9..d6dc848c82ee 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig @@ -1,13 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -config CRYPTO_DEV_STM32_CRC - tristate "Support for STM32 crc accelerators" - depends on ARCH_STM32 - select CRYPTO_HASH - select CRC32 - help - This enables support for the CRC32 hw accelerator which can be found - on STMicroelectronics STM32 SOC. 
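/*
 * Illustrative sketch only; not part of the patch above. The img-hash and
 * starfive hunks retarget their software fallbacks from the "*-generic"
 * drivers to the "*-lib" driver names. Looking up a specific implementation
 * by driver name goes through the usual crypto allocation API; shown here for
 * a synchronous hash, while the drivers above request their fallbacks through
 * their own init helpers.
 */
#include <crypto/hash.h>

static struct crypto_shash *example_get_sha256_lib(void)
{
	/*
	 * CRYPTO_ALG_NEED_FALLBACK in the mask filters out implementations
	 * that would themselves need a fallback; the caller must check the
	 * result with IS_ERR().
	 */
	return crypto_alloc_shash("sha256-lib", 0, CRYPTO_ALG_NEED_FALLBACK);
}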
- config CRYPTO_DEV_STM32_HASH tristate "Support for STM32 hash accelerators" depends on ARCH_STM32 || ARCH_U8500 diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile index 518e0e0b11a9..c63004026afb 100644 --- a/drivers/crypto/stm32/Makefile +++ b/drivers/crypto/stm32/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c deleted file mode 100644 index fd29785a3ecf..000000000000 --- a/drivers/crypto/stm32/stm32-crc32.c +++ /dev/null @@ -1,480 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) STMicroelectronics SA 2017 - * Author: Fabien Dessenne <fabien.dessenne@st.com> - */ - -#include <linux/bitrev.h> -#include <linux/clk.h> -#include <linux/crc32.h> -#include <linux/crc32poly.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mod_devicetable.h> -#include <linux/platform_device.h> -#include <linux/pm_runtime.h> - -#include <crypto/internal/hash.h> - -#include <linux/unaligned.h> - -#define DRIVER_NAME "stm32-crc32" -#define CHKSUM_DIGEST_SIZE 4 -#define CHKSUM_BLOCK_SIZE 1 - -/* Registers */ -#define CRC_DR 0x00000000 -#define CRC_CR 0x00000008 -#define CRC_INIT 0x00000010 -#define CRC_POL 0x00000014 - -/* Registers values */ -#define CRC_CR_RESET BIT(0) -#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) -#define CRC_CR_REV_IN_BYTE BIT(5) -#define CRC_CR_REV_OUT BIT(7) -#define CRC32C_INIT_DEFAULT 0xFFFFFFFF - -#define CRC_AUTOSUSPEND_DELAY 50 - -static unsigned int burst_size; -module_param(burst_size, uint, 0644); -MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)"); - -struct stm32_crc { - struct list_head list; - struct device *dev; - void __iomem *regs; - struct clk *clk; - spinlock_t lock; -}; - -struct stm32_crc_list { - struct list_head dev_list; - spinlock_t lock; /* protect dev_list */ -}; - -static struct stm32_crc_list crc_list = { - .dev_list = LIST_HEAD_INIT(crc_list.dev_list), - .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock), -}; - -struct stm32_crc_ctx { - u32 key; - u32 poly; -}; - -struct stm32_crc_desc_ctx { - u32 partial; /* crc32c: partial in first 4 bytes of that struct */ -}; - -static int stm32_crc32_cra_init(struct crypto_tfm *tfm) -{ - struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - - mctx->key = 0; - mctx->poly = CRC32_POLY_LE; - return 0; -} - -static int stm32_crc32c_cra_init(struct crypto_tfm *tfm) -{ - struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - - mctx->key = CRC32C_INIT_DEFAULT; - mctx->poly = CRC32C_POLY_LE; - return 0; -} - -static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm); - - if (keylen != sizeof(u32)) - return -EINVAL; - - mctx->key = get_unaligned_le32(key); - return 0; -} - -static struct stm32_crc *stm32_crc_get_next_crc(void) -{ - struct stm32_crc *crc; - - spin_lock_bh(&crc_list.lock); - crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list); - if (crc) - list_move_tail(&crc->list, &crc_list.dev_list); - spin_unlock_bh(&crc_list.lock); - - return crc; -} - -static int stm32_crc_init(struct shash_desc *desc) -{ - struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); - struct stm32_crc *crc; - unsigned long flags; 
- - crc = stm32_crc_get_next_crc(); - if (!crc) - return -ENODEV; - - pm_runtime_get_sync(crc->dev); - - spin_lock_irqsave(&crc->lock, flags); - - /* Reset, set key, poly and configure in bit reverse mode */ - writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); - writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); - writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, - crc->regs + CRC_CR); - - /* Store partial result */ - ctx->partial = readl_relaxed(crc->regs + CRC_DR); - - spin_unlock_irqrestore(&crc->lock, flags); - - pm_runtime_mark_last_busy(crc->dev); - pm_runtime_put_autosuspend(crc->dev); - - return 0; -} - -static int burst_update(struct shash_desc *desc, const u8 *d8, - size_t length) -{ - struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); - struct stm32_crc *crc; - - crc = stm32_crc_get_next_crc(); - if (!crc) - return -ENODEV; - - pm_runtime_get_sync(crc->dev); - - if (!spin_trylock(&crc->lock)) { - /* Hardware is busy, calculate crc32 by software */ - if (mctx->poly == CRC32_POLY_LE) - ctx->partial = crc32_le(ctx->partial, d8, length); - else - ctx->partial = crc32c(ctx->partial, d8, length); - - goto pm_out; - } - - /* - * Restore previously calculated CRC for this context as init value - * Restore polynomial configuration - * Configure in register for word input data, - * Configure out register in reversed bit mode data. - */ - writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); - writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); - writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, - crc->regs + CRC_CR); - - if (d8 != PTR_ALIGN(d8, sizeof(u32))) { - /* Configure for byte data */ - writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, - crc->regs + CRC_CR); - while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) { - writeb_relaxed(*d8++, crc->regs + CRC_DR); - length--; - } - /* Configure for word data */ - writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, - crc->regs + CRC_CR); - } - - for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32)) - writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR); - - if (length) { - /* Configure for byte data */ - writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, - crc->regs + CRC_CR); - while (length--) - writeb_relaxed(*d8++, crc->regs + CRC_DR); - } - - /* Store partial result */ - ctx->partial = readl_relaxed(crc->regs + CRC_DR); - - spin_unlock(&crc->lock); - -pm_out: - pm_runtime_mark_last_busy(crc->dev); - pm_runtime_put_autosuspend(crc->dev); - - return 0; -} - -static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, - unsigned int length) -{ - const unsigned int burst_sz = burst_size; - unsigned int rem_sz; - const u8 *cur; - size_t size; - int ret; - - if (!burst_sz) - return burst_update(desc, d8, length); - - /* Digest first bytes not 32bit aligned at first pass in the loop */ - size = min_t(size_t, length, burst_sz + (size_t)d8 - - ALIGN_DOWN((size_t)d8, sizeof(u32))); - for (rem_sz = length, cur = d8; rem_sz; - rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) { - ret = burst_update(desc, cur, size); - if (ret) - return ret; - } - - return 0; -} - -static int stm32_crc_final(struct shash_desc *desc, u8 *out) -{ - struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); - - /* Send computed CRC */ - put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ? 
- ~ctx->partial : ctx->partial, out); - - return 0; -} - -static int stm32_crc_finup(struct shash_desc *desc, const u8 *data, - unsigned int length, u8 *out) -{ - return stm32_crc_update(desc, data, length) ?: - stm32_crc_final(desc, out); -} - -static int stm32_crc_digest(struct shash_desc *desc, const u8 *data, - unsigned int length, u8 *out) -{ - return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out); -} - -static unsigned int refcnt; -static DEFINE_MUTEX(refcnt_lock); -static struct shash_alg algs[] = { - /* CRC-32 */ - { - .setkey = stm32_crc_setkey, - .init = stm32_crc_init, - .update = stm32_crc_update, - .final = stm32_crc_final, - .finup = stm32_crc_finup, - .digest = stm32_crc_digest, - .descsize = sizeof(struct stm32_crc_desc_ctx), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32", - .cra_driver_name = "stm32-crc32-crc32", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct stm32_crc_ctx), - .cra_module = THIS_MODULE, - .cra_init = stm32_crc32_cra_init, - } - }, - /* CRC-32Castagnoli */ - { - .setkey = stm32_crc_setkey, - .init = stm32_crc_init, - .update = stm32_crc_update, - .final = stm32_crc_final, - .finup = stm32_crc_finup, - .digest = stm32_crc_digest, - .descsize = sizeof(struct stm32_crc_desc_ctx), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32c", - .cra_driver_name = "stm32-crc32-crc32c", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct stm32_crc_ctx), - .cra_module = THIS_MODULE, - .cra_init = stm32_crc32c_cra_init, - } - } -}; - -static int stm32_crc_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct stm32_crc *crc; - int ret; - - crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); - if (!crc) - return -ENOMEM; - - crc->dev = dev; - - crc->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(crc->regs)) { - dev_err(dev, "Cannot map CRC IO\n"); - return PTR_ERR(crc->regs); - } - - crc->clk = devm_clk_get(dev, NULL); - if (IS_ERR(crc->clk)) { - dev_err(dev, "Could not get clock\n"); - return PTR_ERR(crc->clk); - } - - ret = clk_prepare_enable(crc->clk); - if (ret) { - dev_err(crc->dev, "Failed to enable clock\n"); - return ret; - } - - pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(dev); - - pm_runtime_get_noresume(dev); - pm_runtime_set_active(dev); - pm_runtime_irq_safe(dev); - pm_runtime_enable(dev); - - spin_lock_init(&crc->lock); - - platform_set_drvdata(pdev, crc); - - spin_lock(&crc_list.lock); - list_add(&crc->list, &crc_list.dev_list); - spin_unlock(&crc_list.lock); - - mutex_lock(&refcnt_lock); - if (!refcnt) { - ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); - if (ret) { - mutex_unlock(&refcnt_lock); - dev_err(dev, "Failed to register\n"); - clk_disable_unprepare(crc->clk); - return ret; - } - } - refcnt++; - mutex_unlock(&refcnt_lock); - - dev_info(dev, "Initialized\n"); - - pm_runtime_put_sync(dev); - - return 0; -} - -static void stm32_crc_remove(struct platform_device *pdev) -{ - struct stm32_crc *crc = platform_get_drvdata(pdev); - int ret = pm_runtime_get_sync(crc->dev); - - spin_lock(&crc_list.lock); - list_del(&crc->list); - spin_unlock(&crc_list.lock); - - mutex_lock(&refcnt_lock); - if (!--refcnt) - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); - mutex_unlock(&refcnt_lock); - - pm_runtime_disable(crc->dev); - 
pm_runtime_put_noidle(crc->dev); - - if (ret >= 0) - clk_disable(crc->clk); - clk_unprepare(crc->clk); -} - -static int __maybe_unused stm32_crc_suspend(struct device *dev) -{ - struct stm32_crc *crc = dev_get_drvdata(dev); - int ret; - - ret = pm_runtime_force_suspend(dev); - if (ret) - return ret; - - clk_unprepare(crc->clk); - - return 0; -} - -static int __maybe_unused stm32_crc_resume(struct device *dev) -{ - struct stm32_crc *crc = dev_get_drvdata(dev); - int ret; - - ret = clk_prepare(crc->clk); - if (ret) { - dev_err(crc->dev, "Failed to prepare clock\n"); - return ret; - } - - return pm_runtime_force_resume(dev); -} - -static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev) -{ - struct stm32_crc *crc = dev_get_drvdata(dev); - - clk_disable(crc->clk); - - return 0; -} - -static int __maybe_unused stm32_crc_runtime_resume(struct device *dev) -{ - struct stm32_crc *crc = dev_get_drvdata(dev); - int ret; - - ret = clk_enable(crc->clk); - if (ret) { - dev_err(crc->dev, "Failed to enable clock\n"); - return ret; - } - - return 0; -} - -static const struct dev_pm_ops stm32_crc_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend, - stm32_crc_resume) - SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend, - stm32_crc_runtime_resume, NULL) -}; - -static const struct of_device_id stm32_dt_ids[] = { - { .compatible = "st,stm32f7-crc", }, - {}, -}; -MODULE_DEVICE_TABLE(of, stm32_dt_ids); - -static struct platform_driver stm32_crc_driver = { - .probe = stm32_crc_probe, - .remove = stm32_crc_remove, - .driver = { - .name = DRIVER_NAME, - .pm = &stm32_crc_pm_ops, - .of_match_table = stm32_dt_ids, - }, -}; - -module_platform_driver(stm32_crc_driver); - -MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>"); -MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 3c4862a752b5..c999c4a1e567 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -90,6 +90,17 @@ config ARM_EXYNOS_BUS_DEVFREQ and adjusts the operating frequencies and voltages with OPP support. This does not yet operate with optimal voltages. +config ARM_HISI_UNCORE_DEVFREQ + tristate "HiSilicon uncore DEVFREQ Driver" + depends on ACPI && ACPI_PPTT && PCC + select DEVFREQ_GOV_PERFORMANCE + select DEVFREQ_GOV_USERSPACE + help + This adds a DEVFREQ driver that manages uncore frequency scaling for + HiSilicon Kunpeng SoCs. This enables runtime management of uncore + frequency scaling from kernel and userspace. The uncore domain + contains system interconnects and L3 cache. 
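
The help text above mentions runtime control of the uncore frequency from both kernel and userspace. For the userspace side this goes through the standard devfreq sysfs ABI: switch the device to the "userspace" governor, then write a frequency in Hz to the governor's set_freq attribute (whose store handler is also touched later in this diff). A minimal sketch, assuming a hypothetical devfreq instance name and target frequency:

/*
 * Minimal userspace sketch: drive a devfreq device through its standard
 * sysfs ABI.  The device name below is hypothetical; the real name depends
 * on how the platform device is enumerated.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0) {
		perror(path);
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Hypothetical devfreq instance name. */
	const char *base = "/sys/class/devfreq/HISI04F1:00";
	char path[256];

	/* Hand control to the userspace governor... */
	snprintf(path, sizeof(path), "%s/governor", base);
	if (write_attr(path, "userspace"))
		return 1;

	/* ...then request an example frequency; set_freq takes Hz. */
	snprintf(path, sizeof(path), "%s/userspace/set_freq", base);
	return write_attr(path, "2000000000") ? 1 : 0;
}
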
+ config ARM_IMX_BUS_DEVFREQ tristate "i.MX Generic Bus DEVFREQ Driver" depends on ARCH_MXC || COMPILE_TEST diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index bf40d04928d0..404179d79a9d 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o # DEVFREQ Drivers obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o +obj-$(CONFIG_ARM_HISI_UNCORE_DEVFREQ) += hisi_uncore_freq.o obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 98657d3b9435..2e8d01d47f69 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -152,11 +152,8 @@ void devfreq_get_freq_range(struct devfreq *devfreq, (unsigned long)HZ_PER_KHZ * qos_max_freq); /* Apply constraints from OPP interface */ - *min_freq = max(*min_freq, devfreq->scaling_min_freq); - *max_freq = min(*max_freq, devfreq->scaling_max_freq); - - if (*min_freq > *max_freq) - *min_freq = *max_freq; + *max_freq = clamp(*max_freq, devfreq->scaling_min_freq, devfreq->scaling_max_freq); + *min_freq = clamp(*min_freq, devfreq->scaling_min_freq, *max_freq); } EXPORT_SYMBOL(devfreq_get_freq_range); @@ -807,7 +804,6 @@ struct devfreq *devfreq_add_device(struct device *dev, { struct devfreq *devfreq; struct devfreq_governor *governor; - unsigned long min_freq, max_freq; int err = 0; if (!dev || !profile || !governor_name) { @@ -835,6 +831,7 @@ struct devfreq *devfreq_add_device(struct device *dev, mutex_lock(&devfreq->lock); devfreq->dev.parent = dev; devfreq->dev.class = devfreq_class; + devfreq->dev.groups = profile->dev_groups; devfreq->dev.release = devfreq_dev_release; INIT_LIST_HEAD(&devfreq->node); devfreq->profile = profile; @@ -875,8 +872,6 @@ struct devfreq *devfreq_add_device(struct device *dev, goto err_dev; } - devfreq_get_freq_range(devfreq, &min_freq, &max_freq); - devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); devfreq->opp_table = dev_pm_opp_get_opp_table(dev); if (IS_ERR(devfreq->opp_table)) @@ -1382,15 +1377,11 @@ int devfreq_remove_governor(struct devfreq_governor *governor) int ret; struct device *dev = devfreq->dev.parent; + if (!devfreq->governor) + continue; + if (!strncmp(devfreq->governor->name, governor->name, DEVFREQ_NAME_LEN)) { - /* we should have a devfreq governor! 
*/ - if (!devfreq->governor) { - dev_warn(dev, "%s: Governor %s NOT present\n", - __func__, governor->name); - continue; - /* Fall through */ - } ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL); if (ret) { @@ -1743,7 +1734,7 @@ static ssize_t trans_stat_show(struct device *dev, for (i = 0; i < max_state; i++) { if (len >= PAGE_SIZE - 1) break; - if (df->freq_table[2] == df->previous_freq) + if (df->freq_table[i] == df->previous_freq) len += sysfs_emit_at(buf, len, "*"); else len += sysfs_emit_at(buf, len, " "); diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index d1aa6806b683..175de0c0b50e 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -9,6 +9,7 @@ #include <linux/slab.h> #include <linux/device.h> #include <linux/devfreq.h> +#include <linux/kstrtox.h> #include <linux/pm.h> #include <linux/mutex.h> #include <linux/module.h> @@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, unsigned long wanted; int err = 0; + err = kstrtoul(buf, 0, &wanted); + if (err) + return err; + mutex_lock(&devfreq->lock); data = devfreq->governor_data; - sscanf(buf, "%lu", &wanted); data->user_frequency = wanted; data->valid = true; err = update_devfreq(devfreq); diff --git a/drivers/devfreq/hisi_uncore_freq.c b/drivers/devfreq/hisi_uncore_freq.c new file mode 100644 index 000000000000..96d1815059e3 --- /dev/null +++ b/drivers/devfreq/hisi_uncore_freq.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon uncore frequency scaling driver + * + * Copyright (c) 2025 HiSilicon Co., Ltd + */ + +#include <linux/acpi.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/devfreq.h> +#include <linux/device.h> +#include <linux/dev_printk.h> +#include <linux/errno.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/property.h> +#include <linux/topology.h> +#include <linux/units.h> +#include <acpi/pcc.h> + +#include "governor.h" + +struct hisi_uncore_pcc_data { + u16 status; + u16 resv; + u32 data; +}; + +struct hisi_uncore_pcc_shmem { + struct acpi_pcct_shared_memory head; + struct hisi_uncore_pcc_data pcc_data; +}; + +enum hisi_uncore_pcc_cmd_type { + HUCF_PCC_CMD_GET_CAP = 0, + HUCF_PCC_CMD_GET_FREQ, + HUCF_PCC_CMD_SET_FREQ, + HUCF_PCC_CMD_GET_MODE, + HUCF_PCC_CMD_SET_MODE, + HUCF_PCC_CMD_GET_PLAT_FREQ_NUM, + HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX, + HUCF_PCC_CMD_MAX = 256 +}; + +static int hisi_platform_gov_usage; +static DEFINE_MUTEX(hisi_platform_gov_usage_lock); + +enum hisi_uncore_freq_mode { + HUCF_MODE_PLATFORM = 0, + HUCF_MODE_OS, + HUCF_MODE_MAX +}; + +#define HUCF_CAP_PLATFORM_CTRL BIT(0) + +/** + * struct hisi_uncore_freq - hisi uncore frequency scaling device data + * @dev: device of this frequency scaling driver + * @cl: mailbox client object + * @pchan: PCC mailbox channel + * @chan_id: PCC channel ID + * @last_cmd_cmpl_time: timestamp of the last completed PCC command + * @pcc_lock: PCC channel lock + * @devfreq: devfreq data of this hisi_uncore_freq device + * @related_cpus: CPUs whose performance is majorly affected by this + * uncore frequency domain + * @cap: capability flag + */ +struct hisi_uncore_freq { + struct device *dev; + struct mbox_client cl; + struct pcc_mbox_chan 
*pchan; + int chan_id; + ktime_t last_cmd_cmpl_time; + struct mutex pcc_lock; + struct devfreq *devfreq; + struct cpumask related_cpus; + u32 cap; +}; + +/* PCC channel timeout = PCC nominal latency * NUM */ +#define HUCF_PCC_POLL_TIMEOUT_NUM 1000 +#define HUCF_PCC_POLL_INTERVAL_US 5 + +/* Default polling interval in ms for devfreq governors*/ +#define HUCF_DEFAULT_POLLING_MS 100 + +static void hisi_uncore_free_pcc_chan(struct hisi_uncore_freq *uncore) +{ + guard(mutex)(&uncore->pcc_lock); + pcc_mbox_free_channel(uncore->pchan); + uncore->pchan = NULL; +} + +static void devm_hisi_uncore_free_pcc_chan(void *data) +{ + hisi_uncore_free_pcc_chan(data); +} + +static int hisi_uncore_request_pcc_chan(struct hisi_uncore_freq *uncore) +{ + struct device *dev = uncore->dev; + struct pcc_mbox_chan *pcc_chan; + + uncore->cl = (struct mbox_client) { + .dev = dev, + .tx_block = false, + .knows_txdone = true, + }; + + pcc_chan = pcc_mbox_request_channel(&uncore->cl, uncore->chan_id); + if (IS_ERR(pcc_chan)) + return dev_err_probe(dev, PTR_ERR(pcc_chan), + "Failed to request PCC channel %u\n", uncore->chan_id); + + if (!pcc_chan->shmem_base_addr) { + pcc_mbox_free_channel(pcc_chan); + return dev_err_probe(dev, -EINVAL, + "Invalid PCC shared memory address\n"); + } + + if (pcc_chan->shmem_size < sizeof(struct hisi_uncore_pcc_shmem)) { + pcc_mbox_free_channel(pcc_chan); + return dev_err_probe(dev, -EINVAL, + "Invalid PCC shared memory size (%lluB)\n", + pcc_chan->shmem_size); + } + + uncore->pchan = pcc_chan; + + return devm_add_action_or_reset(uncore->dev, + devm_hisi_uncore_free_pcc_chan, uncore); +} + +static acpi_status hisi_uncore_pcc_reg_scan(struct acpi_resource *res, + void *ctx) +{ + struct acpi_resource_generic_register *reg; + struct hisi_uncore_freq *uncore; + + if (!res || res->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER) + return AE_OK; + + reg = &res->data.generic_reg; + if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM) + return AE_OK; + + if (!ctx) + return AE_ERROR; + + uncore = ctx; + /* PCC subspace ID stored in Access Size */ + uncore->chan_id = reg->access_size; + + return AE_CTRL_TERMINATE; +} + +static int hisi_uncore_init_pcc_chan(struct hisi_uncore_freq *uncore) +{ + acpi_handle handle = ACPI_HANDLE(uncore->dev); + acpi_status status; + int rc; + + uncore->chan_id = -1; + status = acpi_walk_resources(handle, METHOD_NAME__CRS, + hisi_uncore_pcc_reg_scan, uncore); + if (ACPI_FAILURE(status) || uncore->chan_id < 0) + return dev_err_probe(uncore->dev, -ENODEV, + "Failed to get a PCC channel\n"); + + + rc = devm_mutex_init(uncore->dev, &uncore->pcc_lock); + if (rc) + return rc; + + return hisi_uncore_request_pcc_chan(uncore); +} + +static int hisi_uncore_cmd_send(struct hisi_uncore_freq *uncore, + u8 cmd, u32 *data) +{ + struct hisi_uncore_pcc_shmem __iomem *addr; + struct hisi_uncore_pcc_shmem shmem; + struct pcc_mbox_chan *pchan; + unsigned int mrtt; + s64 time_delta; + u16 status; + int rc; + + guard(mutex)(&uncore->pcc_lock); + + pchan = uncore->pchan; + if (!pchan) + return -ENODEV; + + addr = (struct hisi_uncore_pcc_shmem __iomem *)pchan->shmem; + if (!addr) + return -EINVAL; + + /* Handle the Minimum Request Turnaround Time (MRTT) */ + mrtt = pchan->min_turnaround_time; + time_delta = ktime_us_delta(ktime_get(), uncore->last_cmd_cmpl_time); + if (mrtt > time_delta) + udelay(mrtt - time_delta); + + /* Copy data */ + shmem.head = (struct acpi_pcct_shared_memory) { + .signature = PCC_SIGNATURE | uncore->chan_id, + .command = cmd, + }; + shmem.pcc_data.data = *data; + 
memcpy_toio(addr, &shmem, sizeof(shmem)); + + /* Ring doorbell */ + rc = mbox_send_message(pchan->mchan, &cmd); + if (rc < 0) { + dev_err(uncore->dev, "Failed to send mbox message, %d\n", rc); + return rc; + } + + /* Wait status */ + rc = readw_poll_timeout(&addr->head.status, status, + status & (PCC_STATUS_CMD_COMPLETE | + PCC_STATUS_ERROR), + HUCF_PCC_POLL_INTERVAL_US, + pchan->latency * HUCF_PCC_POLL_TIMEOUT_NUM); + if (rc) { + dev_err(uncore->dev, "PCC channel response timeout, cmd=%u\n", cmd); + } else if (status & PCC_STATUS_ERROR) { + dev_err(uncore->dev, "PCC cmd error, cmd=%u\n", cmd); + rc = -EIO; + } + + uncore->last_cmd_cmpl_time = ktime_get(); + + /* Copy data back */ + memcpy_fromio(data, &addr->pcc_data.data, sizeof(*data)); + + /* Clear mailbox active req */ + mbox_client_txdone(pchan->mchan, rc); + + return rc; +} + +static int hisi_uncore_target(struct device *dev, unsigned long *freq, + u32 flags) +{ + struct hisi_uncore_freq *uncore = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + u32 data; + + if (WARN_ON(!uncore || !uncore->pchan)) + return -ENODEV; + + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) { + dev_err(dev, "Failed to get opp for freq %lu hz\n", *freq); + return PTR_ERR(opp); + } + dev_pm_opp_put(opp); + + data = (u32)(dev_pm_opp_get_freq(opp) / HZ_PER_MHZ); + + return hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_FREQ, &data); +} + +static int hisi_uncore_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + /* Not used */ + return 0; +} + +static int hisi_uncore_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct hisi_uncore_freq *uncore = dev_get_drvdata(dev); + u32 data = 0; + int rc; + + if (WARN_ON(!uncore || !uncore->pchan)) + return -ENODEV; + + rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_FREQ, &data); + + /* + * Upon a failure, 'data' remains 0 and 'freq' is set to 0 rather than a + * random value. devfreq shouldn't use 'freq' in that case though. + */ + *freq = data * HZ_PER_MHZ; + + return rc; +} + +static void devm_hisi_uncore_remove_opp(void *data) +{ + struct hisi_uncore_freq *uncore = data; + + dev_pm_opp_remove_all_dynamic(uncore->dev); +} + +static int hisi_uncore_init_opp(struct hisi_uncore_freq *uncore) +{ + struct device *dev = uncore->dev; + unsigned long freq_mhz; + u32 num, index; + u32 data = 0; + int rc; + + rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_PLAT_FREQ_NUM, + &data); + if (rc) + return dev_err_probe(dev, rc, "Failed to get plat freq num\n"); + + num = data; + + for (index = 0; index < num; index++) { + data = index; + rc = hisi_uncore_cmd_send(uncore, + HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX, + &data); + if (rc) { + dev_pm_opp_remove_all_dynamic(dev); + return dev_err_probe(dev, rc, + "Failed to get plat freq at index %u\n", index); + } + freq_mhz = data; + + /* Don't care OPP voltage, take 1V as default */ + rc = dev_pm_opp_add(dev, freq_mhz * HZ_PER_MHZ, 1000000); + if (rc) { + dev_pm_opp_remove_all_dynamic(dev); + return dev_err_probe(dev, rc, + "Add OPP %lu failed\n", freq_mhz); + } + } + + return devm_add_action_or_reset(dev, devm_hisi_uncore_remove_opp, + uncore); +} + +static int hisi_platform_gov_func(struct devfreq *df, unsigned long *freq) +{ + /* + * Platform-controlled mode doesn't care the frequency issued from + * devfreq, so just pick the max freq. 
+ */ + *freq = DEVFREQ_MAX_FREQ; + + return 0; +} + +static int hisi_platform_gov_handler(struct devfreq *df, unsigned int event, + void *val) +{ + struct hisi_uncore_freq *uncore = dev_get_drvdata(df->dev.parent); + int rc = 0; + u32 data; + + if (WARN_ON(!uncore || !uncore->pchan)) + return -ENODEV; + + switch (event) { + case DEVFREQ_GOV_START: + data = HUCF_MODE_PLATFORM; + rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data); + if (rc) + dev_err(uncore->dev, "Failed to set platform mode (%d)\n", rc); + break; + case DEVFREQ_GOV_STOP: + data = HUCF_MODE_OS; + rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data); + if (rc) + dev_err(uncore->dev, "Failed to set os mode (%d)\n", rc); + break; + default: + break; + } + + return rc; +} + +/* + * In the platform-controlled mode, the platform decides the uncore frequency + * and ignores the frequency issued from the driver. + * Thus, create a pseudo 'hisi_platform' governor that stops devfreq monitor + * from working so as to save meaningless overhead. + */ +static struct devfreq_governor hisi_platform_governor = { + .name = "hisi_platform", + /* + * Set interrupt_driven to skip the devfreq monitor mechanism, though + * this governor is not interrupt-driven. + */ + .flags = DEVFREQ_GOV_FLAG_IRQ_DRIVEN, + .get_target_freq = hisi_platform_gov_func, + .event_handler = hisi_platform_gov_handler, +}; + +static void hisi_uncore_remove_platform_gov(struct hisi_uncore_freq *uncore) +{ + u32 data = HUCF_MODE_PLATFORM; + int rc; + + if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL)) + return; + + guard(mutex)(&hisi_platform_gov_usage_lock); + + if (--hisi_platform_gov_usage == 0) { + rc = devfreq_remove_governor(&hisi_platform_governor); + if (rc) + dev_err(uncore->dev, "Failed to remove hisi_platform gov (%d)\n", rc); + } + + /* + * Set to the platform-controlled mode on exit if supported, so as to + * have a certain behaviour when the driver is detached. + */ + rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data); + if (rc) + dev_err(uncore->dev, "Failed to set platform mode on exit (%d)\n", rc); +} + +static void devm_hisi_uncore_remove_platform_gov(void *data) +{ + hisi_uncore_remove_platform_gov(data); +} + +static int hisi_uncore_add_platform_gov(struct hisi_uncore_freq *uncore) +{ + if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL)) + return 0; + + guard(mutex)(&hisi_platform_gov_usage_lock); + + if (hisi_platform_gov_usage == 0) { + int rc = devfreq_add_governor(&hisi_platform_governor); + if (rc) + return rc; + } + hisi_platform_gov_usage++; + + return devm_add_action_or_reset(uncore->dev, + devm_hisi_uncore_remove_platform_gov, + uncore); +} + +/* + * Returns: + * 0 if success, uncore->related_cpus is set. + * -EINVAL if property not found, or property found but without elements in it, + * or invalid arguments received in any of the subroutine. + * Other error codes if it goes wrong. 
+ */ +static int hisi_uncore_mark_related_cpus(struct hisi_uncore_freq *uncore, + char *property, int (*get_topo_id)(int cpu), + const struct cpumask *(*get_cpumask)(int cpu)) +{ + unsigned int i, cpu; + size_t len; + int rc; + + rc = device_property_count_u32(uncore->dev, property); + if (rc < 0) + return rc; + if (rc == 0) + return -EINVAL; + + len = rc; + u32 *num __free(kfree) = kcalloc(len, sizeof(*num), GFP_KERNEL); + if (!num) + return -ENOMEM; + + rc = device_property_read_u32_array(uncore->dev, property, num, len); + if (rc) + return rc; + + for (i = 0; i < len; i++) { + for_each_possible_cpu(cpu) { + if (get_topo_id(cpu) != num[i]) + continue; + + cpumask_or(&uncore->related_cpus, + &uncore->related_cpus, get_cpumask(cpu)); + break; + } + } + + return 0; +} + +static int get_package_id(int cpu) +{ + return topology_physical_package_id(cpu); +} + +static const struct cpumask *get_package_cpumask(int cpu) +{ + return topology_core_cpumask(cpu); +} + +static int get_cluster_id(int cpu) +{ + return topology_cluster_id(cpu); +} + +static const struct cpumask *get_cluster_cpumask(int cpu) +{ + return topology_cluster_cpumask(cpu); +} + +static int hisi_uncore_mark_related_cpus_wrap(struct hisi_uncore_freq *uncore) +{ + int rc; + + cpumask_clear(&uncore->related_cpus); + + rc = hisi_uncore_mark_related_cpus(uncore, "related-package", + get_package_id, + get_package_cpumask); + /* Success, or firmware probably broken */ + if (!rc || rc != -EINVAL) + return rc; + + /* Try another property name if rc == -EINVAL */ + return hisi_uncore_mark_related_cpus(uncore, "related-cluster", + get_cluster_id, + get_cluster_cpumask); +} + +static ssize_t related_cpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hisi_uncore_freq *uncore = dev_get_drvdata(dev->parent); + + return cpumap_print_to_pagebuf(true, buf, &uncore->related_cpus); +} + +static DEVICE_ATTR_RO(related_cpus); + +static struct attribute *hisi_uncore_freq_attrs[] = { + &dev_attr_related_cpus.attr, + NULL +}; +ATTRIBUTE_GROUPS(hisi_uncore_freq); + +static int hisi_uncore_devfreq_register(struct hisi_uncore_freq *uncore) +{ + struct devfreq_dev_profile *profile; + struct device *dev = uncore->dev; + unsigned long freq; + u32 data; + int rc; + + rc = hisi_uncore_get_cur_freq(dev, &freq); + if (rc) + return dev_err_probe(dev, rc, "Failed to get plat init freq\n"); + + profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); + if (!profile) + return -ENOMEM; + + *profile = (struct devfreq_dev_profile) { + .initial_freq = freq, + .polling_ms = HUCF_DEFAULT_POLLING_MS, + .timer = DEVFREQ_TIMER_DELAYED, + .target = hisi_uncore_target, + .get_dev_status = hisi_uncore_get_dev_status, + .get_cur_freq = hisi_uncore_get_cur_freq, + .dev_groups = hisi_uncore_freq_groups, + }; + + rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_MODE, &data); + if (rc) + return dev_err_probe(dev, rc, "Failed to get operate mode\n"); + + if (data == HUCF_MODE_PLATFORM) + uncore->devfreq = devm_devfreq_add_device(dev, profile, + hisi_platform_governor.name, NULL); + else + uncore->devfreq = devm_devfreq_add_device(dev, profile, + DEVFREQ_GOV_PERFORMANCE, NULL); + if (IS_ERR(uncore->devfreq)) + return dev_err_probe(dev, PTR_ERR(uncore->devfreq), + "Failed to add devfreq device\n"); + + return 0; +} + +static int hisi_uncore_freq_probe(struct platform_device *pdev) +{ + struct hisi_uncore_freq *uncore; + struct device *dev = &pdev->dev; + u32 cap; + int rc; + + uncore = devm_kzalloc(dev, sizeof(*uncore), GFP_KERNEL); + if (!uncore) 
+ return -ENOMEM; + + uncore->dev = dev; + platform_set_drvdata(pdev, uncore); + + rc = hisi_uncore_init_pcc_chan(uncore); + if (rc) + return dev_err_probe(dev, rc, "Failed to init PCC channel\n"); + + rc = hisi_uncore_init_opp(uncore); + if (rc) + return dev_err_probe(dev, rc, "Failed to init OPP\n"); + + rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_CAP, &cap); + if (rc) + return dev_err_probe(dev, rc, "Failed to get capability\n"); + + uncore->cap = cap; + + rc = hisi_uncore_add_platform_gov(uncore); + if (rc) + return dev_err_probe(dev, rc, "Failed to add hisi_platform governor\n"); + + rc = hisi_uncore_mark_related_cpus_wrap(uncore); + if (rc) + return dev_err_probe(dev, rc, "Failed to mark related cpus\n"); + + rc = hisi_uncore_devfreq_register(uncore); + if (rc) + return dev_err_probe(dev, rc, "Failed to register devfreq\n"); + + return 0; +} + +static const struct acpi_device_id hisi_uncore_freq_acpi_match[] = { + { "HISI04F1", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, hisi_uncore_freq_acpi_match); + +static struct platform_driver hisi_uncore_freq_drv = { + .probe = hisi_uncore_freq_probe, + .driver = { + .name = "hisi_uncore_freq", + .acpi_match_table = hisi_uncore_freq_acpi_match, + }, +}; +module_platform_driver(hisi_uncore_freq_drv); + +MODULE_DESCRIPTION("HiSilicon uncore frequency scaling driver"); +MODULE_AUTHOR("Jie Zhan <zhanjie9@hisilicon.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c index 7c6ae91ede1f..4bd5657558d6 100644 --- a/drivers/devfreq/sun8i-a33-mbus.c +++ b/drivers/devfreq/sun8i-a33-mbus.c @@ -360,7 +360,7 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev) if (IS_ERR(priv->reg_mbus)) return PTR_ERR(priv->reg_mbus); - priv->clk_bus = devm_clk_get(dev, "bus"); + priv->clk_bus = devm_clk_get_enabled(dev, "bus"); if (IS_ERR(priv->clk_bus)) return dev_err_probe(dev, PTR_ERR(priv->clk_bus), "failed to get bus clock\n"); @@ -375,24 +375,15 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(priv->clk_mbus), "failed to get mbus clock\n"); - ret = clk_prepare_enable(priv->clk_bus); - if (ret) - return dev_err_probe(dev, ret, - "failed to enable bus clock\n"); - /* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */ - ret = clk_rate_exclusive_get(priv->clk_dram); - if (ret) { - err = "failed to lock dram clock rate\n"; - goto err_disable_bus; - } + ret = devm_clk_rate_exclusive_get(dev, priv->clk_dram); + if (ret) + return dev_err_probe(dev, ret, "failed to lock dram clock rate\n"); /* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. 
*/ - ret = clk_rate_exclusive_get(priv->clk_mbus); - if (ret) { - err = "failed to lock mbus clock rate\n"; - goto err_unlock_dram; - } + ret = devm_clk_rate_exclusive_get(dev, priv->clk_mbus); + if (ret) + return dev_err_probe(dev, ret, "failed to lock mbus clock rate\n"); priv->gov_data.upthreshold = 10; priv->gov_data.downdifferential = 5; @@ -405,10 +396,8 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev) priv->profile.max_state = max_state; ret = devm_pm_opp_set_clkname(dev, "dram"); - if (ret) { - err = "failed to add OPP table\n"; - goto err_unlock_mbus; - } + if (ret) + return dev_err_probe(dev, ret, "failed to add OPP table\n"); base_freq = clk_get_rate(clk_get_parent(priv->clk_dram)); for (i = 0; i < max_state; ++i) { @@ -448,12 +437,6 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev) err_remove_opps: dev_pm_opp_remove_all_dynamic(dev); -err_unlock_mbus: - clk_rate_exclusive_put(priv->clk_mbus); -err_unlock_dram: - clk_rate_exclusive_put(priv->clk_dram); -err_disable_bus: - clk_disable_unprepare(priv->clk_bus); return dev_err_probe(dev, ret, err); } @@ -472,9 +455,6 @@ static void sun8i_a33_mbus_remove(struct platform_device *pdev) dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret); dev_pm_opp_remove_all_dynamic(dev); - clk_rate_exclusive_put(priv->clk_mbus); - clk_rate_exclusive_put(priv->clk_dram); - clk_disable_unprepare(priv->clk_bus); } static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = { diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 758fcd0546d8..ca13cd39330b 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -926,6 +926,36 @@ void dma_release_channel(struct dma_chan *chan) } EXPORT_SYMBOL_GPL(dma_release_channel); +static void dmaenginem_release_channel(void *chan) +{ + dma_release_channel(chan); +} + +/** + * devm_dma_request_chan - try to allocate an exclusive slave channel + * @dev: pointer to client device structure + * @name: slave channel name + * + * Returns pointer to appropriate DMA channel on success or an error pointer. + * + * The operation is managed and will be undone on driver detach. 
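
A minimal usage sketch for the managed helper documented above, assuming a hypothetical platform driver, a channel name of "rx", and that the declaration lands in <linux/dmaengine.h> alongside dma_request_chan(); dev_err_probe() also covers probe deferral:

/*
 * Hypothetical client probe using the managed helper added below; the
 * driver and the "rx" channel name are illustrative only.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	/* Channel is released automatically on driver detach. */
	chan = devm_dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
				     "failed to request DMA channel\n");

	/* ...set up descriptors with the usual dmaengine_prep_*() calls... */
	return 0;
}
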
+ */ + +struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name) +{ + struct dma_chan *chan = dma_request_chan(dev, name); + int ret = 0; + + if (!IS_ERR(chan)) + ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan); + + if (ret) + return ERR_PTR(ret); + + return chan; +} +EXPORT_SYMBOL_GPL(devm_dma_request_chan); + /** * dmaengine_get - register interest in dma_channels */ diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 37eb2e6c2f9f..65bf1685350a 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -2059,7 +2059,7 @@ free_drv_info: kfree(drv_info); return ret; } -module_init(ffa_init); +rootfs_initcall(ffa_init); static void __exit ffa_exit(void) { diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 560724ce21aa..f51047d8ea64 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -311,6 +311,11 @@ static const struct cs_dsp_ops cs_dsp_adsp2_ops[]; static const struct cs_dsp_ops cs_dsp_halo_ops; static const struct cs_dsp_ops cs_dsp_halo_ao_ops; +struct cs_dsp_alg_region_list_item { + struct list_head list; + struct cs_dsp_alg_region alg_region; +}; + struct cs_dsp_buf { struct list_head list; void *buf; @@ -1752,13 +1757,13 @@ static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs, struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp, int type, unsigned int id) { - struct cs_dsp_alg_region *alg_region; + struct cs_dsp_alg_region_list_item *item; lockdep_assert_held(&dsp->pwr_lock); - list_for_each_entry(alg_region, &dsp->alg_regions, list) { - if (id == alg_region->alg && type == alg_region->type) - return alg_region; + list_for_each_entry(item, &dsp->alg_regions, list) { + if (id == item->alg_region.alg && type == item->alg_region.type) + return &item->alg_region; } return NULL; @@ -1769,35 +1774,35 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp, int type, __be32 id, __be32 ver, __be32 base) { - struct cs_dsp_alg_region *alg_region; + struct cs_dsp_alg_region_list_item *item; - alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL); - if (!alg_region) + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) return ERR_PTR(-ENOMEM); - alg_region->type = type; - alg_region->alg = be32_to_cpu(id); - alg_region->ver = be32_to_cpu(ver); - alg_region->base = be32_to_cpu(base); + item->alg_region.type = type; + item->alg_region.alg = be32_to_cpu(id); + item->alg_region.ver = be32_to_cpu(ver); + item->alg_region.base = be32_to_cpu(base); - list_add_tail(&alg_region->list, &dsp->alg_regions); + list_add_tail(&item->list, &dsp->alg_regions); if (dsp->wmfw_ver > 0) - cs_dsp_ctl_fixup_base(dsp, alg_region); + cs_dsp_ctl_fixup_base(dsp, &item->alg_region); - return alg_region; + return &item->alg_region; } static void cs_dsp_free_alg_regions(struct cs_dsp *dsp) { - struct cs_dsp_alg_region *alg_region; + struct cs_dsp_alg_region_list_item *item; while (!list_empty(&dsp->alg_regions)) { - alg_region = list_first_entry(&dsp->alg_regions, - struct cs_dsp_alg_region, - list); - list_del(&alg_region->list); - kfree(alg_region); + item = list_first_entry(&dsp->alg_regions, + struct cs_dsp_alg_region_list_item, + list); + list_del(&item->list); + kfree(item); } } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 939a4955e00b..94b05e4451dd 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ 
b/drivers/firmware/efi/libstub/Makefile @@ -22,16 +22,16 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \ # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly # disable the stackleak plugin -cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \ +cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_KSTACK_ERASE) \ -fno-unwind-tables -fno-asynchronous-unwind-tables cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ -DEFI_HAVE_STRCMP -fno-builtin -fpic \ $(call cc-option,-mno-single-pic-base) \ - $(DISABLE_STACKLEAK_PLUGIN) + $(DISABLE_KSTACK_ERASE) cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \ - $(DISABLE_STACKLEAK_PLUGIN) -cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_STACKLEAK_PLUGIN) + $(DISABLE_KSTACK_ERASE) +cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_KSTACK_ERASE) cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 44f922e10db2..e43abb322fa6 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -12,6 +12,9 @@ menuconfig GPIOLIB If unsure, say N. +config GPIOLIB_LEGACY + def_bool y + if GPIOLIB config GPIOLIB_FASTPATH_LIMIT @@ -69,6 +72,14 @@ config GPIO_SYSFS use the character device /dev/gpiochipN with the appropriate ioctl() operations instead. +config GPIO_SYSFS_LEGACY + bool "Enable legacy functionalities of the sysfs interface" + depends on GPIO_SYSFS + default y if GPIO_SYSFS + help + Say Y here if you want to enable the legacy, global GPIO + numberspace-based functionalities of the sysfs interface. + config GPIO_CDEV bool "Character device (/dev/gpiochipN) support" if EXPERT default y @@ -1263,6 +1274,7 @@ config GPIO_ADP5520 config GPIO_ADP5585 tristate "GPIO Support for ADP5585" depends on MFD_ADP5585 + select GPIOLIB_IRQCHIP help This option enables support for the GPIO function found in the Analog Devices ADP5585. @@ -1464,6 +1476,16 @@ config GPIO_LP87565 This driver can also be built as a module. If so, the module will be called gpio-lp87565. +config GPIO_MACSMC + tristate "Apple Mac SMC GPIO" + depends on MFD_MACSMC + help + Support for GPIOs controlled by the SMC microcontroller on Apple Mac + systems. + + This driver can also be built as a module. If so, the module will be + called gpio-macsmc. + config GPIO_MADERA tristate "Cirrus Logic Madera class codecs" depends on PINCTRL_MADERA @@ -1501,7 +1523,7 @@ config GPIO_MAX77759 called gpio-max77759. 
config GPIO_PALMAS - bool "TI PALMAS series PMICs GPIO" + tristate "TI PALMAS series PMICs GPIO" depends on MFD_PALMAS help Select this option to enable GPIO driver for the TI PALMAS diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 88dedd298256..379f55e9ed1e 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -5,7 +5,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG obj-$(CONFIG_GPIOLIB) += gpiolib.o obj-$(CONFIG_GPIOLIB) += gpiolib-devres.o -obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o +obj-$(CONFIG_GPIOLIB_LEGACY) += gpiolib-legacy.o obj-$(CONFIG_OF_GPIO) += gpiolib-of.o obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o @@ -99,6 +99,7 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o +obj-$(CONFIG_GPIO_MACSMC) += gpio-macsmc.o obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO index 4a8b349f2483..7a09a4f58551 100644 --- a/drivers/gpio/TODO +++ b/drivers/gpio/TODO @@ -131,6 +131,11 @@ Work items: helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use this with dry-coding and sending to maintainers to test +- Move the MMIO GPIO specific fields out of struct gpio_chip into a + dedicated structure. Currently every GPIO chip has them if gpio-mmio is + enabled in Kconfig even if it itself doesn't register with the helper + library. + ------------------------------------------------------------------------------- Generic regmap GPIO @@ -183,16 +188,12 @@ remove the old ones and finally rename the new ones back to the old names. ------------------------------------------------------------------------------- -Extend the sysfs ABI to allow exporting lines by their HW offsets - -The need to support the sysfs GPIO class is one of the main obstacles to -removing the global GPIO numberspace from the kernel. In order to wean users -off using global numbers from user-space, extend the existing interface with -new per-gpiochip export/unexport attributes that allow to refer to GPIOs using -their hardware offsets within the chip. +Remove legacy sysfs features -Encourage users to switch to using them and eventually remove the existing -global export/unexport attribues. +We have two parallel per-chip class devices and per-exported-line attribute +groups in sysfs. One is using the obsolete global GPIO numberspace and the +second relies on hardware offsets of pins within the chip. Remove the former +once user-space has switched to using the latter. 
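
For reference, "the former" interface mentioned in the TODO entry above is the global-numberspace ABI under /sys/class/gpio. A minimal userspace sketch of it, assuming a hypothetical global GPIO number (512); as the Kconfig help earlier in this diff notes, new code should prefer the /dev/gpiochipN character device instead:

/*
 * Sketch of the legacy, global-numberspace sysfs ABI that the TODO entry
 * above wants to retire.  The GPIO number 512 is made up; real numbers
 * depend on the chip's dynamically assigned base.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Export by *global* number, then drive the line. */
	if (sysfs_write("/sys/class/gpio/export", "512"))
		return 1;
	sysfs_write("/sys/class/gpio/gpio512/direction", "out");
	sysfs_write("/sys/class/gpio/gpio512/value", "1");
	/* Release it again. */
	return sysfs_write("/sys/class/gpio/unexport", "512") ? 1 : 0;
}
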
------------------------------------------------------------------------------- diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c index c7ac5a9ffb1f..bd2cc5f4f851 100644 --- a/drivers/gpio/gpio-74xx-mmio.c +++ b/drivers/gpio/gpio-74xx-mmio.c @@ -8,6 +8,7 @@ #include <linux/bits.h> #include <linux/err.h> #include <linux/gpio/driver.h> +#include <linux/gpio/generic.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -18,8 +19,8 @@ #define MMIO_74XX_BIT_CNT(x) ((x) & GENMASK(7, 0)) struct mmio_74xx_gpio_priv { - struct gpio_chip gc; - unsigned flags; + struct gpio_generic_chip gen_gc; + unsigned int flags; }; static const struct of_device_id mmio_74xx_gpio_ids[] = { @@ -99,16 +100,15 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc); - if (priv->flags & MMIO_74XX_DIR_OUT) { - gc->set(gc, gpio, val); - return 0; - } + if (priv->flags & MMIO_74XX_DIR_OUT) + return gpio_generic_chip_set(&priv->gen_gc, gpio, val); return -ENOTSUPP; } static int mmio_74xx_gpio_probe(struct platform_device *pdev) { + struct gpio_generic_chip_config config = { }; struct mmio_74xx_gpio_priv *priv; void __iomem *dat; int err; @@ -123,19 +123,21 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev) if (IS_ERR(dat)) return PTR_ERR(dat); - err = bgpio_init(&priv->gc, &pdev->dev, - DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8), - dat, NULL, NULL, NULL, NULL, 0); + config.dev = &pdev->dev; + config.sz = DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8); + config.dat = dat; + + err = gpio_generic_chip_init(&priv->gen_gc, &config); if (err) return err; - priv->gc.direction_input = mmio_74xx_dir_in; - priv->gc.direction_output = mmio_74xx_dir_out; - priv->gc.get_direction = mmio_74xx_get_direction; - priv->gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags); - priv->gc.owner = THIS_MODULE; + priv->gen_gc.gc.direction_input = mmio_74xx_dir_in; + priv->gen_gc.gc.direction_output = mmio_74xx_dir_out; + priv->gen_gc.gc.get_direction = mmio_74xx_get_direction; + priv->gen_gc.gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags); + priv->gen_gc.gc.owner = THIS_MODULE; - return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv); + return devm_gpiochip_add_data(&pdev->dev, &priv->gen_gc.gc, priv); } static struct platform_driver mmio_74xx_gpio_driver = { diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c index d5c0f1b267c8..b2c8836c5f84 100644 --- a/drivers/gpio/gpio-adp5585.c +++ b/drivers/gpio/gpio-adp5585.c @@ -4,67 +4,131 @@ * * Copyright 2022 NXP * Copyright 2024 Ideas on Board Oy + * Copyright 2025 Analog Devices, Inc. */ +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/container_of.h> #include <linux/device.h> #include <linux/gpio/driver.h> #include <linux/mfd/adp5585.h> #include <linux/module.h> +#include <linux/mutex.h> +#include <linux/notifier.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/types.h> -#define ADP5585_GPIO_MAX 11 +/* + * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the + * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to + * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver + * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is + * marked as reserved in the device tree for variants that don't support it. + */ +#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0) +#define ADP5585_BIT(n) ((n) >= 6 ? 
BIT((n) - 6) : BIT(n)) + +/* + * Bank 0 covers pins "GPIO 1/R0" to "GPIO 8/R7", numbered 0 to 7 by the + * driver, bank 1 covers pins "GPIO 9/C0" to "GPIO 16/C7", numbered 8 to + * 15 and bank 3 covers pins "GPIO 17/C8" to "GPIO 19/C10", numbered 16 to 18. + */ +#define ADP5589_BANK(n) ((n) >> 3) +#define ADP5589_BIT(n) BIT((n) & 0x7) + +struct adp5585_gpio_chip { + int (*bank)(unsigned int off); + int (*bit)(unsigned int off); + unsigned int debounce_dis_a; + unsigned int rpull_cfg_a; + unsigned int gpo_data_a; + unsigned int gpo_out_a; + unsigned int gpio_dir_a; + unsigned int gpi_stat_a; + unsigned int gpi_int_lvl_a; + unsigned int gpi_ev_a; + unsigned int gpi_ev_min; + unsigned int gpi_ev_max; + bool has_bias_hole; +}; struct adp5585_gpio_dev { struct gpio_chip gpio_chip; + struct notifier_block nb; + const struct adp5585_gpio_chip *info; struct regmap *regmap; + unsigned long irq_mask; + unsigned long irq_en; + unsigned long irq_active_high; + /* used for irqchip bus locking */ + struct mutex bus_lock; }; +static int adp5585_gpio_bank(unsigned int off) +{ + return ADP5585_BANK(off); +} + +static int adp5585_gpio_bit(unsigned int off) +{ + return ADP5585_BIT(off); +} + +static int adp5589_gpio_bank(unsigned int off) +{ + return ADP5589_BANK(off); +} + +static int adp5589_gpio_bit(unsigned int off) +{ + return ADP5589_BIT(off); +} + static int adp5585_gpio_get_direction(struct gpio_chip *chip, unsigned int off) { struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); - unsigned int bank = ADP5585_BANK(off); - unsigned int bit = ADP5585_BIT(off); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; unsigned int val; - regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val); + regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off), &val); - return val & bit ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; + return val & info->bit(off) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; } static int adp5585_gpio_direction_input(struct gpio_chip *chip, unsigned int off) { struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); - unsigned int bank = ADP5585_BANK(off); - unsigned int bit = ADP5585_BIT(off); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; - return regmap_clear_bits(adp5585_gpio->regmap, - ADP5585_GPIO_DIRECTION_A + bank, bit); + return regmap_clear_bits(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off), + info->bit(off)); } static int adp5585_gpio_direction_output(struct gpio_chip *chip, unsigned int off, int val) { struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); - unsigned int bank = ADP5585_BANK(off); - unsigned int bit = ADP5585_BIT(off); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; + unsigned int bank = info->bank(off); + unsigned int bit = info->bit(off); int ret; - ret = regmap_update_bits(adp5585_gpio->regmap, - ADP5585_GPO_DATA_OUT_A + bank, bit, - val ? bit : 0); + ret = regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + bank, + bit, val ? 
bit : 0); if (ret) return ret; - return regmap_set_bits(adp5585_gpio->regmap, - ADP5585_GPIO_DIRECTION_A + bank, bit); + return regmap_set_bits(adp5585_gpio->regmap, info->gpio_dir_a + bank, + bit); } static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off) { struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); - unsigned int bank = ADP5585_BANK(off); - unsigned int bit = ADP5585_BIT(off); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; + unsigned int bank = info->bank(off); + unsigned int bit = info->bit(off); unsigned int reg; unsigned int val; @@ -79,8 +143,8 @@ static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off) * .direction_input(), .direction_output() or .set() operations racing * with this. */ - regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val); - reg = val & bit ? ADP5585_GPO_DATA_OUT_A : ADP5585_GPI_STATUS_A; + regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + bank, &val); + reg = val & bit ? info->gpo_data_a : info->gpi_stat_a; regmap_read(adp5585_gpio->regmap, reg + bank, &val); return !!(val & bit); @@ -90,17 +154,17 @@ static int adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off, int val) { struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); - unsigned int bank = ADP5585_BANK(off); - unsigned int bit = ADP5585_BIT(off); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; + unsigned int bit = adp5585_gpio->info->bit(off); - return regmap_update_bits(adp5585_gpio->regmap, - ADP5585_GPO_DATA_OUT_A + bank, + return regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + info->bank(off), bit, val ? bit : 0); } static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio, unsigned int off, unsigned int bias) { + const struct adp5585_gpio_chip *info = adp5585_gpio->info; unsigned int bit, reg, mask, val; /* @@ -108,8 +172,10 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio, * consecutive registers ADP5585_RPULL_CONFIG_*, with a hole of 4 bits * after R5. */ - bit = off * 2 + (off > 5 ? 4 : 0); - reg = ADP5585_RPULL_CONFIG_A + bit / 8; + bit = off * 2; + if (info->has_bias_hole) + bit += (off > 5 ? 4 : 0); + reg = info->rpull_cfg_a + bit / 8; mask = ADP5585_Rx_PULL_CFG_MASK << (bit % 8); val = bias << (bit % 8); @@ -119,22 +185,22 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio, static int adp5585_gpio_set_drive(struct adp5585_gpio_dev *adp5585_gpio, unsigned int off, enum pin_config_param drive) { - unsigned int bank = ADP5585_BANK(off); - unsigned int bit = ADP5585_BIT(off); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; + unsigned int bit = adp5585_gpio->info->bit(off); return regmap_update_bits(adp5585_gpio->regmap, - ADP5585_GPO_OUT_MODE_A + bank, bit, + info->gpo_out_a + info->bank(off), bit, drive == PIN_CONFIG_DRIVE_OPEN_DRAIN ? bit : 0); } static int adp5585_gpio_set_debounce(struct adp5585_gpio_dev *adp5585_gpio, unsigned int off, unsigned int debounce) { - unsigned int bank = ADP5585_BANK(off); - unsigned int bit = ADP5585_BIT(off); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; + unsigned int bit = adp5585_gpio->info->bit(off); return regmap_update_bits(adp5585_gpio->regmap, - ADP5585_DEBOUNCE_DIS_A + bank, bit, + info->debounce_dis_a + info->bank(off), bit, debounce ? 
0 : bit); } @@ -172,11 +238,175 @@ static int adp5585_gpio_set_config(struct gpio_chip *chip, unsigned int off, }; } +static int adp5585_gpio_request(struct gpio_chip *chip, unsigned int off) +{ + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; + struct device *dev = chip->parent; + struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent); + const struct adp5585_regs *regs = adp5585->regs; + int ret; + + ret = test_and_set_bit(off, adp5585->pin_usage); + if (ret) + return -EBUSY; + + /* make sure it's configured for GPIO */ + return regmap_clear_bits(adp5585_gpio->regmap, + regs->pin_cfg_a + info->bank(off), + info->bit(off)); +} + +static void adp5585_gpio_free(struct gpio_chip *chip, unsigned int off) +{ + struct device *dev = chip->parent; + struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent); + + clear_bit(off, adp5585->pin_usage); +} + +static int adp5585_gpio_key_event(struct notifier_block *nb, unsigned long key, + void *data) +{ + struct adp5585_gpio_dev *adp5585_gpio = container_of(nb, struct adp5585_gpio_dev, nb); + struct device *dev = adp5585_gpio->gpio_chip.parent; + unsigned long key_press = (unsigned long)data; + unsigned int irq, irq_type; + struct irq_data *irqd; + bool active_high; + unsigned int off; + + /* make sure the event is for me */ + if (key < adp5585_gpio->info->gpi_ev_min || key > adp5585_gpio->info->gpi_ev_max) + return NOTIFY_DONE; + + off = key - adp5585_gpio->info->gpi_ev_min; + active_high = test_bit(off, &adp5585_gpio->irq_active_high); + + irq = irq_find_mapping(adp5585_gpio->gpio_chip.irq.domain, off); + if (!irq) + return NOTIFY_BAD; + + irqd = irq_get_irq_data(irq); + if (!irqd) { + dev_err(dev, "Could not get irq(%u) data\n", irq); + return NOTIFY_BAD; + } + + dev_dbg_ratelimited(dev, "gpio-keys event(%u) press=%lu, a_high=%u\n", + off, key_press, active_high); + + if (!active_high) + key_press = !key_press; + + irq_type = irqd_get_trigger_type(irqd); + + if ((irq_type & IRQ_TYPE_EDGE_RISING && key_press) || + (irq_type & IRQ_TYPE_EDGE_FALLING && !key_press)) + handle_nested_irq(irq); + + return NOTIFY_STOP; +} + +static void adp5585_irq_bus_lock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc); + + mutex_lock(&adp5585_gpio->bus_lock); +} + +static void adp5585_irq_bus_sync_unlock(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + const struct adp5585_gpio_chip *info = adp5585_gpio->info; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + bool active_high = test_bit(hwirq, &adp5585_gpio->irq_active_high); + bool enabled = test_bit(hwirq, &adp5585_gpio->irq_en); + bool masked = test_bit(hwirq, &adp5585_gpio->irq_mask); + unsigned int bank = adp5585_gpio->info->bank(hwirq); + unsigned int bit = adp5585_gpio->info->bit(hwirq); + + if (masked && !enabled) + goto out_unlock; + if (!masked && enabled) + goto out_unlock; + + regmap_update_bits(adp5585_gpio->regmap, info->gpi_int_lvl_a + bank, bit, + active_high ? bit : 0); + regmap_update_bits(adp5585_gpio->regmap, info->gpi_ev_a + bank, bit, + masked ? 
0 : bit); + assign_bit(hwirq, &adp5585_gpio->irq_en, !masked); + +out_unlock: + mutex_unlock(&adp5585_gpio->bus_lock); +} + +static void adp5585_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + __set_bit(hwirq, &adp5585_gpio->irq_mask); + gpiochip_disable_irq(gc, hwirq); +} + +static void adp5585_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + gpiochip_enable_irq(gc, hwirq); + __clear_bit(hwirq, &adp5585_gpio->irq_mask); +} + +static int adp5585_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + if (!(type & IRQ_TYPE_EDGE_BOTH)) + return -EINVAL; + + assign_bit(hwirq, &adp5585_gpio->irq_active_high, + type == IRQ_TYPE_EDGE_RISING); + + irq_set_handler_locked(d, handle_edge_irq); + return 0; +} + +static const struct irq_chip adp5585_irq_chip = { + .name = "adp5585", + .irq_mask = adp5585_irq_mask, + .irq_unmask = adp5585_irq_unmask, + .irq_bus_lock = adp5585_irq_bus_lock, + .irq_bus_sync_unlock = adp5585_irq_bus_sync_unlock, + .irq_set_type = adp5585_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static void adp5585_gpio_unreg_notifier(void *data) +{ + struct adp5585_gpio_dev *adp5585_gpio = data; + struct device *dev = adp5585_gpio->gpio_chip.parent; + struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent); + + blocking_notifier_chain_unregister(&adp5585->event_notifier, + &adp5585_gpio->nb); +} + static int adp5585_gpio_probe(struct platform_device *pdev) { struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent); + const struct platform_device_id *id = platform_get_device_id(pdev); struct adp5585_gpio_dev *adp5585_gpio; struct device *dev = &pdev->dev; + struct gpio_irq_chip *girq; struct gpio_chip *gc; int ret; @@ -186,6 +416,10 @@ static int adp5585_gpio_probe(struct platform_device *pdev) adp5585_gpio->regmap = adp5585->regmap; + adp5585_gpio->info = (const struct adp5585_gpio_chip *)id->driver_data; + if (!adp5585_gpio->info) + return -ENODEV; + device_set_of_node_from_dev(dev, dev->parent); gc = &adp5585_gpio->gpio_chip; @@ -196,13 +430,43 @@ static int adp5585_gpio_probe(struct platform_device *pdev) gc->get = adp5585_gpio_get_value; gc->set_rv = adp5585_gpio_set_value; gc->set_config = adp5585_gpio_set_config; + gc->request = adp5585_gpio_request; + gc->free = adp5585_gpio_free; gc->can_sleep = true; gc->base = -1; - gc->ngpio = ADP5585_GPIO_MAX; + gc->ngpio = adp5585->n_pins; gc->label = pdev->name; gc->owner = THIS_MODULE; + if (device_property_present(dev->parent, "interrupt-controller")) { + if (!adp5585->irq) + return dev_err_probe(dev, -EINVAL, + "Unable to serve as interrupt controller without IRQ\n"); + + girq = &adp5585_gpio->gpio_chip.irq; + gpio_irq_chip_set_chip(girq, &adp5585_irq_chip); + girq->handler = handle_bad_irq; + girq->threaded = true; + + adp5585_gpio->nb.notifier_call = adp5585_gpio_key_event; + ret = blocking_notifier_chain_register(&adp5585->event_notifier, + &adp5585_gpio->nb); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, adp5585_gpio_unreg_notifier, + adp5585_gpio); + if (ret) + return ret; 
+ } + + /* everything masked by default */ + adp5585_gpio->irq_mask = ~0UL; + + ret = devm_mutex_init(dev, &adp5585_gpio->bus_lock); + if (ret) + return ret; ret = devm_gpiochip_add_data(dev, &adp5585_gpio->gpio_chip, adp5585_gpio); if (ret) @@ -211,8 +475,40 @@ static int adp5585_gpio_probe(struct platform_device *pdev) return 0; } +static const struct adp5585_gpio_chip adp5585_gpio_chip_info = { + .bank = adp5585_gpio_bank, + .bit = adp5585_gpio_bit, + .debounce_dis_a = ADP5585_DEBOUNCE_DIS_A, + .rpull_cfg_a = ADP5585_RPULL_CONFIG_A, + .gpo_data_a = ADP5585_GPO_DATA_OUT_A, + .gpo_out_a = ADP5585_GPO_OUT_MODE_A, + .gpio_dir_a = ADP5585_GPIO_DIRECTION_A, + .gpi_stat_a = ADP5585_GPI_STATUS_A, + .has_bias_hole = true, + .gpi_ev_min = ADP5585_GPI_EVENT_START, + .gpi_ev_max = ADP5585_GPI_EVENT_END, + .gpi_int_lvl_a = ADP5585_GPI_INT_LEVEL_A, + .gpi_ev_a = ADP5585_GPI_EVENT_EN_A, +}; + +static const struct adp5585_gpio_chip adp5589_gpio_chip_info = { + .bank = adp5589_gpio_bank, + .bit = adp5589_gpio_bit, + .debounce_dis_a = ADP5589_DEBOUNCE_DIS_A, + .rpull_cfg_a = ADP5589_RPULL_CONFIG_A, + .gpo_data_a = ADP5589_GPO_DATA_OUT_A, + .gpo_out_a = ADP5589_GPO_OUT_MODE_A, + .gpio_dir_a = ADP5589_GPIO_DIRECTION_A, + .gpi_stat_a = ADP5589_GPI_STATUS_A, + .gpi_ev_min = ADP5589_GPI_EVENT_START, + .gpi_ev_max = ADP5589_GPI_EVENT_END, + .gpi_int_lvl_a = ADP5589_GPI_INT_LEVEL_A, + .gpi_ev_a = ADP5589_GPI_EVENT_EN_A, +}; + static const struct platform_device_id adp5585_gpio_id_table[] = { - { "adp5585-gpio" }, + { "adp5585-gpio", (kernel_ulong_t)&adp5585_gpio_chip_info }, + { "adp5589-gpio", (kernel_ulong_t)&adp5589_gpio_chip_info }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(platform, adp5585_gpio_id_table); diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index e530c94dcce8..89ffde693019 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -39,7 +39,6 @@ static int arizona_gpio_direction_in(struct gpio_chip *chip, unsigned offset) return ret; if (change && persistent) { - pm_runtime_mark_last_busy(chip->parent); pm_runtime_put_autosuspend(chip->parent); } @@ -82,7 +81,6 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) return ret; } - pm_runtime_mark_last_busy(chip->parent); pm_runtime_put_autosuspend(chip->parent); } diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index e7671bcd5c07..e29a9589b3cc 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -436,10 +436,8 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev, struct device_node *np = dev->of_node; int err; - priv->irq_domain = - irq_domain_create_linear(of_fwnode_handle(np), priv->num_gpios, - &brcmstb_gpio_irq_domain_ops, - priv); + priv->irq_domain = irq_domain_create_linear(dev_fwnode(dev), priv->num_gpios, + &brcmstb_gpio_irq_domain_ops, priv); if (!priv->irq_domain) { dev_err(dev, "Couldn't allocate IRQ domain\n"); return -ENXIO; diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c index e9dd2564c54f..c647953521c7 100644 --- a/drivers/gpio/gpio-cadence.c +++ b/drivers/gpio/gpio-cadence.c @@ -8,9 +8,11 @@ * Boris Brezillon <boris.brezillon@free-electrons.com> */ -#include <linux/gpio/driver.h> +#include <linux/cleanup.h> #include <linux/clk.h> +#include <linux/gpio/driver.h> #include <linux/interrupt.h> +#include <linux/gpio/generic.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -30,7 +32,7 @@ #define CDNS_GPIO_IRQ_ANY_EDGE 0x2c struct cdns_gpio_chip { - 
struct gpio_chip gc; + struct gpio_generic_chip gen_gc; void __iomem *regs; u32 bypass_orig; }; @@ -38,29 +40,24 @@ struct cdns_gpio_chip { static int cdns_gpio_request(struct gpio_chip *chip, unsigned int offset) { struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); - unsigned long flags; - raw_spin_lock_irqsave(&chip->bgpio_lock, flags); + guard(gpio_generic_lock)(&cgpio->gen_gc); iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) & ~BIT(offset), cgpio->regs + CDNS_GPIO_BYPASS_MODE); - raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); return 0; } static void cdns_gpio_free(struct gpio_chip *chip, unsigned int offset) { struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); - unsigned long flags; - raw_spin_lock_irqsave(&chip->bgpio_lock, flags); + guard(gpio_generic_lock)(&cgpio->gen_gc); iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) | (BIT(offset) & cgpio->bypass_orig), cgpio->regs + CDNS_GPIO_BYPASS_MODE); - - raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); } static void cdns_gpio_irq_mask(struct irq_data *d) @@ -85,13 +82,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); - unsigned long flags; u32 int_value; u32 int_type; u32 mask = BIT(d->hwirq); int ret = 0; - raw_spin_lock_irqsave(&chip->bgpio_lock, flags); + guard(gpio_generic_lock)(&cgpio->gen_gc); int_value = ioread32(cgpio->regs + CDNS_GPIO_IRQ_VALUE) & ~mask; int_type = ioread32(cgpio->regs + CDNS_GPIO_IRQ_TYPE) & ~mask; @@ -108,15 +104,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type) } else if (type == IRQ_TYPE_LEVEL_LOW) { int_type |= mask; } else { - ret = -EINVAL; - goto err_irq_type; + return -EINVAL; } iowrite32(int_value, cgpio->regs + CDNS_GPIO_IRQ_VALUE); iowrite32(int_type, cgpio->regs + CDNS_GPIO_IRQ_TYPE); -err_irq_type: - raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); return ret; } @@ -150,6 +143,7 @@ static const struct irq_chip cdns_gpio_irqchip = { static int cdns_gpio_probe(struct platform_device *pdev) { + struct gpio_generic_chip_config config = { }; struct cdns_gpio_chip *cgpio; int ret, irq; u32 dir_prev; @@ -176,32 +170,33 @@ static int cdns_gpio_probe(struct platform_device *pdev) * gpiochip_lock_as_irq: * tried to flag a GPIO set as output for IRQ * Generic GPIO driver stores the direction value internally, - * so it needs to be changed before bgpio_init() is called. + * so it needs to be changed before gpio_generic_chip_init() is called. 
*/ dir_prev = ioread32(cgpio->regs + CDNS_GPIO_DIRECTION_MODE); iowrite32(GENMASK(num_gpios - 1, 0), cgpio->regs + CDNS_GPIO_DIRECTION_MODE); - ret = bgpio_init(&cgpio->gc, &pdev->dev, 4, - cgpio->regs + CDNS_GPIO_INPUT_VALUE, - cgpio->regs + CDNS_GPIO_OUTPUT_VALUE, - NULL, - NULL, - cgpio->regs + CDNS_GPIO_DIRECTION_MODE, - BGPIOF_READ_OUTPUT_REG_SET); + config.dev = &pdev->dev; + config.sz = 4; + config.dat = cgpio->regs + CDNS_GPIO_INPUT_VALUE; + config.set = cgpio->regs + CDNS_GPIO_OUTPUT_VALUE; + config.dirin = cgpio->regs + CDNS_GPIO_DIRECTION_MODE; + config.flags = BGPIOF_READ_OUTPUT_REG_SET; + + ret = gpio_generic_chip_init(&cgpio->gen_gc, &config); if (ret) { dev_err(&pdev->dev, "Failed to register generic gpio, %d\n", ret); goto err_revert_dir; } - cgpio->gc.label = dev_name(&pdev->dev); - cgpio->gc.ngpio = num_gpios; - cgpio->gc.parent = &pdev->dev; - cgpio->gc.base = -1; - cgpio->gc.owner = THIS_MODULE; - cgpio->gc.request = cdns_gpio_request; - cgpio->gc.free = cdns_gpio_free; + cgpio->gen_gc.gc.label = dev_name(&pdev->dev); + cgpio->gen_gc.gc.ngpio = num_gpios; + cgpio->gen_gc.gc.parent = &pdev->dev; + cgpio->gen_gc.gc.base = -1; + cgpio->gen_gc.gc.owner = THIS_MODULE; + cgpio->gen_gc.gc.request = cdns_gpio_request; + cgpio->gen_gc.gc.free = cdns_gpio_free; clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) { @@ -218,7 +213,7 @@ static int cdns_gpio_probe(struct platform_device *pdev) if (irq >= 0) { struct gpio_irq_chip *girq; - girq = &cgpio->gc.irq; + girq = &cgpio->gen_gc.gc.irq; gpio_irq_chip_set_chip(girq, &cdns_gpio_irqchip); girq->parent_handler = cdns_gpio_irq_handler; girq->num_parents = 1; @@ -234,7 +229,7 @@ static int cdns_gpio_probe(struct platform_device *pdev) girq->handler = handle_level_irq; } - ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio); + ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gen_gc.gc, cgpio); if (ret < 0) { dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret); goto err_revert_dir; diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c index d69a24dd4828..24ff2347d599 100644 --- a/drivers/gpio/gpio-clps711x.c +++ b/drivers/gpio/gpio-clps711x.c @@ -8,13 +8,15 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/gpio/driver.h> +#include <linux/gpio/generic.h> #include <linux/platform_device.h> static int clps711x_gpio_probe(struct platform_device *pdev) { + struct gpio_generic_chip_config config = { }; struct device_node *np = pdev->dev.of_node; + struct gpio_generic_chip *gen_gc; void __iomem *dat, *dir; - struct gpio_chip *gc; int err, id; if (!np) @@ -24,8 +26,8 @@ static int clps711x_gpio_probe(struct platform_device *pdev) if ((id < 0) || (id > 4)) return -ENODEV; - gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL); - if (!gc) + gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL); + if (!gen_gc) return -ENOMEM; dat = devm_platform_ioremap_resource(pdev, 0); @@ -36,35 +38,37 @@ static int clps711x_gpio_probe(struct platform_device *pdev) if (IS_ERR(dir)) return PTR_ERR(dir); + config.dev = &pdev->dev; + config.sz = 1; + config.dat = dat; + switch (id) { case 3: /* PORTD is inverted logic for direction register */ - err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL, - NULL, dir, 0); + config.dirin = dir; break; default: - err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL, - dir, NULL, 0); + config.dirout = dir; break; } + err = gpio_generic_chip_init(gen_gc, &config); if (err) return err; switch (id) { case 4: /* PORTE is 3 lines only */ - gc->ngpio = 3; 
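The same hunk shows the new registration flow in full: fill a gpio_generic_chip_config, call gpio_generic_chip_init(), then register the embedded gpio_chip. Condensed into one function as a sketch, with illustrative register offsets:

#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>

/* Illustrative register offsets, not from a real datasheet. */
#define EX_INPUT_REG	0x00
#define EX_OUTPUT_REG	0x04
#define EX_DIR_REG	0x08

static int example_register(struct device *dev, void __iomem *base)
{
	struct gpio_generic_chip_config config = {
		.dev	= dev,
		.sz	= 4,			/* 32-bit registers */
		.dat	= base + EX_INPUT_REG,
		.set	= base + EX_OUTPUT_REG,
		.dirin	= base + EX_DIR_REG,
		.flags	= BGPIOF_READ_OUTPUT_REG_SET,
	};
	struct gpio_generic_chip *gen;
	int ret;

	gen = devm_kzalloc(dev, sizeof(*gen), GFP_KERNEL);
	if (!gen)
		return -ENOMEM;

	ret = gpio_generic_chip_init(gen, &config);
	if (ret)
		return ret;

	gen->gc.label = dev_name(dev);
	gen->gc.base = -1;

	return devm_gpiochip_add_data(dev, &gen->gc, gen);
}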
+ gen_gc->gc.ngpio = 3; break; default: break; } - gc->base = -1; - gc->owner = THIS_MODULE; - platform_set_drvdata(pdev, gc); + gen_gc->gc.base = -1; + gen_gc->gc.owner = THIS_MODULE; - return devm_gpiochip_add_data(&pdev->dev, gc, NULL); + return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL); } static const struct of_device_id clps711x_gpio_ids[] = { diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 80a82492171e..8f3a36d0191d 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -478,7 +478,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) return irq; } - irq_domain = irq_domain_create_legacy(of_fwnode_handle(dev->of_node), ngpio, irq, 0, + irq_domain = irq_domain_create_legacy(dev_fwnode(dev), ngpio, irq, 0, &davinci_gpio_irq_ops, chips); if (!irq_domain) { dev_err(dev, "Couldn't register an IRQ domain\n"); diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index a5e6e446f39c..015f1ac32dd9 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -325,8 +325,7 @@ static int em_gio_probe(struct platform_device *pdev) irq_chip->irq_release_resources = em_gio_irq_relres; irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND; - p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node), - ngpios, 0, + p->irq_domain = irq_domain_create_simple(dev_fwnode(dev), ngpios, 0, &em_gio_irq_domain_ops, p); if (!p->irq_domain) { dev_err(dev, "cannot initialize irq domain\n"); diff --git a/drivers/gpio/gpio-en7523.c b/drivers/gpio/gpio-en7523.c index 69834db2c1cf..cf47afc578a9 100644 --- a/drivers/gpio/gpio-en7523.c +++ b/drivers/gpio/gpio-en7523.c @@ -4,6 +4,7 @@ #include <linux/io.h> #include <linux/bits.h> #include <linux/gpio/driver.h> +#include <linux/gpio/generic.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -13,28 +14,23 @@ /** * struct airoha_gpio_ctrl - Airoha GPIO driver data - * @gc: Associated gpio_chip instance. + * @gen_gc: Associated gpio_generic_chip instance. * @data: The data register. * @dir: [0] The direction register for the lower 16 pins. * [1]: The direction register for the higher 16 pins. * @output: The output enable register. 
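The davinci and em changes above (like brcmstb earlier, and grgpio, mvebu, mxc and mxs later) are the same mechanical substitution when creating IRQ domains: dev_fwnode(dev) instead of of_fwnode_handle(dev->of_node). The former returns whatever firmware node is attached to the device (OF, ACPI or a software node), so it is the more general spelling. A compact sketch, with placeholder ops and data:

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/property.h>

static struct irq_domain *example_create_domain(struct device *dev,
						unsigned int ngpios,
						const struct irq_domain_ops *ops,
						void *priv)
{
	/*
	 * Old, OF-only form:
	 *	irq_domain_create_linear(of_fwnode_handle(dev->of_node), ...);
	 * New form below works for any fwnode attached to the device.
	 */
	return irq_domain_create_linear(dev_fwnode(dev), ngpios, ops, priv);
}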
*/ struct airoha_gpio_ctrl { - struct gpio_chip gc; + struct gpio_generic_chip gen_gc; void __iomem *data; void __iomem *dir[2]; void __iomem *output; }; -static struct airoha_gpio_ctrl *gc_to_ctrl(struct gpio_chip *gc) -{ - return container_of(gc, struct airoha_gpio_ctrl, gc); -} - static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio, int val, int out) { - struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc); + struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc); u32 dir = ioread32(ctrl->dir[gpio / 16]); u32 output = ioread32(ctrl->output); u32 mask = BIT((gpio % 16) * 2); @@ -50,7 +46,7 @@ static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio, iowrite32(dir, ctrl->dir[gpio / 16]); if (out) - gc->set(gc, gpio, val); + gpio_generic_chip_set(&ctrl->gen_gc, gpio, val); iowrite32(output, ctrl->output); @@ -70,7 +66,7 @@ static int airoha_dir_in(struct gpio_chip *gc, unsigned int gpio) static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio) { - struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc); + struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc); u32 dir = ioread32(ctrl->dir[gpio / 16]); u32 mask = BIT((gpio % 16) * 2); @@ -79,6 +75,7 @@ static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio) static int airoha_gpio_probe(struct platform_device *pdev) { + struct gpio_generic_chip_config config = { }; struct device *dev = &pdev->dev; struct airoha_gpio_ctrl *ctrl; int err; @@ -103,18 +100,21 @@ static int airoha_gpio_probe(struct platform_device *pdev) if (IS_ERR(ctrl->output)) return PTR_ERR(ctrl->output); - err = bgpio_init(&ctrl->gc, dev, 4, ctrl->data, NULL, - NULL, NULL, NULL, 0); + config.dev = dev; + config.sz = 4; + config.dat = ctrl->data; + + err = gpio_generic_chip_init(&ctrl->gen_gc, &config); if (err) return dev_err_probe(dev, err, "unable to init generic GPIO"); - ctrl->gc.ngpio = AIROHA_GPIO_MAX; - ctrl->gc.owner = THIS_MODULE; - ctrl->gc.direction_output = airoha_dir_out; - ctrl->gc.direction_input = airoha_dir_in; - ctrl->gc.get_direction = airoha_get_dir; + ctrl->gen_gc.gc.ngpio = AIROHA_GPIO_MAX; + ctrl->gen_gc.gc.owner = THIS_MODULE; + ctrl->gen_gc.gc.direction_output = airoha_dir_out; + ctrl->gen_gc.gc.direction_input = airoha_dir_in; + ctrl->gen_gc.gc.get_direction = airoha_get_dir; - return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl); + return devm_gpiochip_add_data(dev, &ctrl->gen_gc.gc, ctrl); } static const struct of_device_id airoha_gpio_of_match[] = { diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index d38a2d9854ca..f3f8bab62f94 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -402,9 +402,8 @@ static int grgpio_probe(struct platform_device *ofdev) return -EINVAL; } - priv->domain = irq_domain_create_linear(of_fwnode_handle(np), gc->ngpio, - &grgpio_irq_domain_ops, - priv); + priv->domain = irq_domain_create_linear(dev_fwnode(&ofdev->dev), gc->ngpio, + &grgpio_irq_domain_ops, priv); if (!priv->domain) { dev_err(dev, "Could not add irq domain\n"); return -EINVAL; diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c index 70a01c5b8ad1..add09971d26a 100644 --- a/drivers/gpio/gpio-loongson-64bit.c +++ b/drivers/gpio/gpio-loongson-64bit.c @@ -222,6 +222,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data0 = { .conf_offset = 0x0, .in_offset = 0xc, .out_offset = 0x8, + .inten_offset = 0x14, }; static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = { @@ -230,6 +231,7 @@ static const struct 
loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = { .conf_offset = 0x0, .in_offset = 0x20, .out_offset = 0x10, + .inten_offset = 0x30, }; static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = { @@ -246,6 +248,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = { .conf_offset = 0x0, .in_offset = 0xc, .out_offset = 0x8, + .inten_offset = 0x14, }; static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = { @@ -254,6 +257,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = { .conf_offset = 0x800, .in_offset = 0xa00, .out_offset = 0x900, + .inten_offset = 0xb00, }; /* LS7A2000 chipset GPIO */ @@ -263,6 +267,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = { .conf_offset = 0x800, .in_offset = 0xa00, .out_offset = 0x900, + .inten_offset = 0xb00, }; /* LS7A2000 ACPI GPIO */ @@ -281,6 +286,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls3a6000_data = { .conf_offset = 0x0, .in_offset = 0xc, .out_offset = 0x8, + .inten_offset = 0x14, }; static const struct of_device_id loongson_gpio_of_match[] = { diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c index b0a8da5c058d..2dbfbf90176c 100644 --- a/drivers/gpio/gpio-lpc18xx.c +++ b/drivers/gpio/gpio-lpc18xx.c @@ -249,8 +249,8 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc) raw_spin_lock_init(&ic->lock); ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS, - of_fwnode_handle(dev->of_node), - &lpc18xx_gpio_pin_ic_domain_ops, ic); + dev_fwnode(dev), &lpc18xx_gpio_pin_ic_domain_ops, + ic); if (!ic->domain) { pr_err("unable to add irq domain\n"); ret = -ENODEV; diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c new file mode 100644 index 000000000000..7570d9e89adf --- /dev/null +++ b/drivers/gpio/gpio-macsmc.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC GPIO driver + * Copyright The Asahi Linux Contributors + * + * This driver implements basic SMC PMU GPIO support that can read inputs + * and write outputs. Mode changes and IRQ config are not yet implemented. + */ + +#include <linux/bitmap.h> +#include <linux/device.h> +#include <linux/gpio/driver.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> + +#define MAX_GPIO 64 + +/* + * Commands 0-6 are, presumably, the intended API. + * Command 0xff lets you get/set the pin configuration in detail directly, + * but the bit meanings seem not to be stable between devices/PMU hardware + * versions. + * + * We're going to try to make do with the low commands for now. + * We don't implement pin mode changes at this time. + */ + +#define CMD_ACTION (0 << 24) +#define CMD_OUTPUT (1 << 24) +#define CMD_INPUT (2 << 24) +#define CMD_PINMODE (3 << 24) +#define CMD_IRQ_ENABLE (4 << 24) +#define CMD_IRQ_ACK (5 << 24) +#define CMD_IRQ_MODE (6 << 24) +#define CMD_CONFIG (0xff << 24) + +#define MODE_INPUT 0 +#define MODE_OUTPUT 1 +#define MODE_VALUE_0 0 +#define MODE_VALUE_1 2 + +#define IRQ_MODE_HIGH 0 +#define IRQ_MODE_LOW 1 +#define IRQ_MODE_RISING 2 +#define IRQ_MODE_FALLING 3 +#define IRQ_MODE_BOTH 4 + +#define CONFIG_MASK GENMASK(23, 16) +#define CONFIG_VAL GENMASK(7, 0) + +#define CONFIG_OUTMODE GENMASK(7, 6) +#define CONFIG_IRQMODE GENMASK(5, 3) +#define CONFIG_PULLDOWN BIT(2) +#define CONFIG_PULLUP BIT(1) +#define CONFIG_OUTVAL BIT(0) + +/* + * Output modes seem to differ depending on the PMU in use... ? 
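For orientation before the helpers below: each PMU GPIO line is addressed by a four-character SMC key, 'gP' followed by two lowercase hex digits, and macsmc_gpio_key()/macsmc_gpio_nr() simply convert between the line number and that key. A self-contained illustration of the arithmetic, assuming the usual big-endian four-character-code packing of SMC keys:

#include <stdint.h>
#include <stdio.h>

/* Pack 'g','P',hi,lo into one 32-bit key, mirroring macsmc_gpio_key(). */
static uint32_t example_gpio_key(unsigned int offset)
{
	static const char hex[] = "0123456789abcdef";

	return ((uint32_t)'g' << 24) | ((uint32_t)'P' << 16) |
	       ((uint32_t)hex[(offset >> 4) & 0xf] << 8) |
	       (uint32_t)hex[offset & 0xf];
}

int main(void)
{
	/* GPIO 0 -> "gP00", GPIO 18 -> "gP12", GPIO 63 -> "gP3f" */
	printf("%08x %08x %08x\n",
	       example_gpio_key(0), example_gpio_key(18), example_gpio_key(63));
	return 0;
}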
+ * j274 / M1 (Sera PMU): + * 0 = input + * 1 = output + * 2 = open drain + * 3 = disable + * j314 / M1Pro (Maverick PMU): + * 0 = input + * 1 = open drain + * 2 = output + * 3 = ? + */ + +struct macsmc_gpio { + struct device *dev; + struct apple_smc *smc; + struct gpio_chip gc; + + int first_index; +}; + +static int macsmc_gpio_nr(smc_key key) +{ + int low = hex_to_bin(key & 0xff); + int high = hex_to_bin((key >> 8) & 0xff); + + if (low < 0 || high < 0) + return -1; + + return low | (high << 4); +} + +static int macsmc_gpio_key(unsigned int offset) +{ + return _SMC_KEY("gP\0\0") | hex_asc_hi(offset) << 8 | hex_asc_lo(offset); +} + +static int macsmc_gpio_find_first_gpio_index(struct macsmc_gpio *smcgp) +{ + struct apple_smc *smc = smcgp->smc; + smc_key key = macsmc_gpio_key(0); + smc_key first_key, last_key; + int start, count, ret; + + /* Return early if the key is out of bounds */ + ret = apple_smc_get_key_by_index(smc, 0, &first_key); + if (ret) + return ret; + if (key <= first_key) + return -ENODEV; + + ret = apple_smc_get_key_by_index(smc, smc->key_count - 1, &last_key); + if (ret) + return ret; + if (key > last_key) + return -ENODEV; + + /* Binary search to find index of first SMC key bigger or equal to key */ + start = 0; + count = smc->key_count; + while (count > 1) { + smc_key pkey; + int pivot = start + ((count - 1) >> 1); + + ret = apple_smc_get_key_by_index(smc, pivot, &pkey); + if (ret < 0) + return ret; + + if (pkey == key) + return pivot; + + pivot++; + + if (pkey < key) { + count -= pivot - start; + start = pivot; + } else { + count = pivot - start; + } + } + + return start; +} + +static int macsmc_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + smc_key key = macsmc_gpio_key(offset); + u32 val; + int ret; + + /* First try reading the explicit pin mode register */ + ret = apple_smc_rw_u32(smcgp->smc, key, CMD_PINMODE, &val); + if (!ret) + return (val & MODE_OUTPUT) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; + + /* + * Less common IRQ configs cause CMD_PINMODE to fail, and so does open drain mode. + * Fall back to reading IRQ mode, which will only succeed for inputs. + */ + ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val); + return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; +} + +static int macsmc_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + smc_key key = macsmc_gpio_key(offset); + u32 cmd, val; + int ret; + + ret = macsmc_gpio_get_direction(gc, offset); + if (ret < 0) + return ret; + + if (ret == GPIO_LINE_DIRECTION_OUT) + cmd = CMD_OUTPUT; + else + cmd = CMD_INPUT; + + ret = apple_smc_rw_u32(smcgp->smc, key, cmd, &val); + if (ret < 0) + return ret; + + return val ? 
1 : 0; +} + +static int macsmc_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + smc_key key = macsmc_gpio_key(offset); + int ret; + + value |= CMD_OUTPUT; + ret = apple_smc_write_u32(smcgp->smc, key, CMD_OUTPUT | value); + if (ret < 0) + dev_err(smcgp->dev, "GPIO set failed %p4ch = 0x%x\n", + &key, value); + + return ret; +} + +static int macsmc_gpio_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, unsigned int ngpios) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + int count; + int i; + + count = min(smcgp->smc->key_count, MAX_GPIO); + + bitmap_zero(valid_mask, ngpios); + + for (i = 0; i < count; i++) { + int ret, gpio_nr; + smc_key key; + + ret = apple_smc_get_key_by_index(smcgp->smc, smcgp->first_index + i, &key); + if (ret < 0) + return ret; + + if (key > SMC_KEY(gPff)) + break; + + gpio_nr = macsmc_gpio_nr(key); + if (gpio_nr < 0 || gpio_nr > MAX_GPIO) { + dev_err(smcgp->dev, "Bad GPIO key %p4ch\n", &key); + continue; + } + + set_bit(gpio_nr, valid_mask); + } + + return 0; +} + +static int macsmc_gpio_probe(struct platform_device *pdev) +{ + struct macsmc_gpio *smcgp; + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + smc_key key; + int ret; + + smcgp = devm_kzalloc(&pdev->dev, sizeof(*smcgp), GFP_KERNEL); + if (!smcgp) + return -ENOMEM; + + smcgp->dev = &pdev->dev; + smcgp->smc = smc; + + smcgp->first_index = macsmc_gpio_find_first_gpio_index(smcgp); + if (smcgp->first_index < 0) + return smcgp->first_index; + + ret = apple_smc_get_key_by_index(smc, smcgp->first_index, &key); + if (ret < 0) + return ret; + + if (key > macsmc_gpio_key(MAX_GPIO - 1)) + return -ENODEV; + + dev_info(smcgp->dev, "First GPIO key: %p4ch\n", &key); + + smcgp->gc.label = "macsmc-pmu-gpio"; + smcgp->gc.owner = THIS_MODULE; + smcgp->gc.get = macsmc_gpio_get; + smcgp->gc.set_rv = macsmc_gpio_set; + smcgp->gc.get_direction = macsmc_gpio_get_direction; + smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask; + smcgp->gc.can_sleep = true; + smcgp->gc.ngpio = MAX_GPIO; + smcgp->gc.base = -1; + smcgp->gc.parent = &pdev->dev; + + return devm_gpiochip_add_data(&pdev->dev, &smcgp->gc, smcgp); +} + +static const struct of_device_id macsmc_gpio_of_table[] = { + { .compatible = "apple,smc-gpio", }, + {} +}; +MODULE_DEVICE_TABLE(of, macsmc_gpio_of_table); + +static struct platform_driver macsmc_gpio_driver = { + .driver = { + .name = "macsmc-gpio", + .of_match_table = macsmc_gpio_of_table, + }, + .probe = macsmc_gpio_probe, +}; +module_platform_driver(macsmc_gpio_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC GPIO driver"); diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c index 14ae25783438..897a1e004681 100644 --- a/drivers/gpio/gpio-mm-lantiq.c +++ b/drivers/gpio/gpio-mm-lantiq.c @@ -55,9 +55,9 @@ static void ltq_mm_apply(struct ltq_mm *chip) * @gpio: GPIO signal number. * @val: Value to be written to specified signal. * - * Set the shadow value and call ltq_mm_apply. + * Set the shadow value and call ltq_mm_apply. Always returns 0. 
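The ltq_mm hunk above is one instance of the conversion that runs through the rest of this diff: gpio_chip::set (returning void) is replaced by gpio_chip::set_rv (returning int), so drivers sitting on fallible buses can report write errors to gpiolib instead of swallowing them. A minimal sketch of the new-style callback for a hypothetical regmap-backed expander:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define EX_OUT_REG	0x04	/* illustrative register */

struct example_gpio {
	struct regmap *map;
};

static int example_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct example_gpio *priv = gpiochip_get_data(gc);

	/* The regmap error code now reaches the gpiolib caller. */
	return regmap_update_bits(priv->map, EX_OUT_REG, BIT(offset),
				  value ? BIT(offset) : 0);
}

static void example_gpio_hook_up(struct gpio_chip *gc)
{
	gc->set_rv = example_gpio_set;	/* instead of gc->set */
}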
*/ -static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value) +static int ltq_mm_set(struct gpio_chip *gc, unsigned int offset, int value) { struct ltq_mm *chip = gpiochip_get_data(gc); @@ -66,6 +66,8 @@ static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value) else chip->shadow &= ~(1 << offset); ltq_mm_apply(chip); + + return 0; } /** @@ -78,9 +80,7 @@ static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value) */ static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value) { - ltq_mm_set(gc, offset, value); - - return 0; + return ltq_mm_set(gc, offset, value); } /** @@ -111,7 +111,7 @@ static int ltq_mm_probe(struct platform_device *pdev) chip->mmchip.gc.ngpio = 16; chip->mmchip.gc.direction_output = ltq_mm_dir_out; - chip->mmchip.gc.set = ltq_mm_set; + chip->mmchip.gc.set_rv = ltq_mm_set; chip->mmchip.save_regs = ltq_mm_save_regs; /* store the shadow value if one was passed by the devicetree */ diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c index 4841e4ebe7a6..cf878c2ea6bf 100644 --- a/drivers/gpio/gpio-mmio.c +++ b/drivers/gpio/gpio-mmio.c @@ -211,11 +211,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask, return 0; } -static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val) +static int bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val) { + return 0; } -static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long mask = bgpio_line2mask(gc, gpio); unsigned long flags; @@ -230,10 +231,12 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) gc->write_reg(gc->reg_dat, gc->bgpio_data); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; } -static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio, - int val) +static int bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio, + int val) { unsigned long mask = bgpio_line2mask(gc, gpio); @@ -241,9 +244,11 @@ static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio, gc->write_reg(gc->reg_set, mask); else gc->write_reg(gc->reg_clr, mask); + + return 0; } -static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long mask = bgpio_line2mask(gc, gpio); unsigned long flags; @@ -258,6 +263,8 @@ static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) gc->write_reg(gc->reg_set, gc->bgpio_data); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; } static void bgpio_multiple_get_masks(struct gpio_chip *gc, @@ -298,21 +305,25 @@ static void bgpio_set_multiple_single_reg(struct gpio_chip *gc, raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } -static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, +static int bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat); + + return 0; } -static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask, - unsigned long *bits) +static int bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) { bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set); + + return 0; } -static void bgpio_set_multiple_with_clear(struct gpio_chip *gc, - unsigned long *mask, - unsigned long *bits) +static int 
bgpio_set_multiple_with_clear(struct gpio_chip *gc, + unsigned long *mask, + unsigned long *bits) { unsigned long set_mask, clear_mask; @@ -322,6 +333,8 @@ static void bgpio_set_multiple_with_clear(struct gpio_chip *gc, gc->write_reg(gc->reg_set, set_mask); if (clear_mask) gc->write_reg(gc->reg_clr, clear_mask); + + return 0; } static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out) @@ -335,6 +348,11 @@ static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_ou return pinctrl_gpio_direction_input(gc, gpio); } +static int bgpio_dir_in_err(struct gpio_chip *gc, unsigned int gpio) +{ + return -EINVAL; +} + static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio) { return bgpio_dir_return(gc, gpio, false); @@ -349,7 +367,7 @@ static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio, static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - gc->set(gc, gpio, val); + gc->set_rv(gc, gpio, val); return bgpio_dir_return(gc, gpio, true); } @@ -414,14 +432,14 @@ static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio, int val) { bgpio_dir_out(gc, gpio, val); - gc->set(gc, gpio, val); + gc->set_rv(gc, gpio, val); return bgpio_dir_return(gc, gpio, true); } static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio, int val) { - gc->set(gc, gpio, val); + gc->set_rv(gc, gpio, val); bgpio_dir_out(gc, gpio, val); return bgpio_dir_return(gc, gpio, true); } @@ -510,18 +528,18 @@ static int bgpio_setup_io(struct gpio_chip *gc, if (set && clr) { gc->reg_set = set; gc->reg_clr = clr; - gc->set = bgpio_set_with_clear; - gc->set_multiple = bgpio_set_multiple_with_clear; + gc->set_rv = bgpio_set_with_clear; + gc->set_multiple_rv = bgpio_set_multiple_with_clear; } else if (set && !clr) { gc->reg_set = set; - gc->set = bgpio_set_set; - gc->set_multiple = bgpio_set_multiple_set; + gc->set_rv = bgpio_set_set; + gc->set_multiple_rv = bgpio_set_multiple_set; } else if (flags & BGPIOF_NO_OUTPUT) { - gc->set = bgpio_set_none; - gc->set_multiple = NULL; + gc->set_rv = bgpio_set_none; + gc->set_multiple_rv = NULL; } else { - gc->set = bgpio_set; - gc->set_multiple = bgpio_set_multiple; + gc->set_rv = bgpio_set; + gc->set_multiple_rv = bgpio_set_multiple; } if (!(flags & BGPIOF_UNREADABLE_REG_SET) && @@ -566,7 +584,11 @@ static int bgpio_setup_direction(struct gpio_chip *gc, gc->direction_output = bgpio_dir_out_err; else gc->direction_output = bgpio_simple_dir_out; - gc->direction_input = bgpio_simple_dir_in; + + if (flags & BGPIOF_NO_INPUT) + gc->direction_input = bgpio_dir_in_err; + else + gc->direction_input = bgpio_simple_dir_in; } return 0; @@ -654,7 +676,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, } gc->bgpio_data = gc->read_reg(gc->reg_dat); - if (gc->set == bgpio_set_set && + if (gc->set_rv == bgpio_set_set && !(flags & BGPIOF_UNREADABLE_REG_SET)) gc->bgpio_data = gc->read_reg(gc->reg_set); @@ -712,28 +734,6 @@ static const struct of_device_id bgpio_of_match[] = { }; MODULE_DEVICE_TABLE(of, bgpio_of_match); -static struct bgpio_pdata *bgpio_parse_fw(struct device *dev, unsigned long *flags) -{ - struct bgpio_pdata *pdata; - - if (!dev_fwnode(dev)) - return NULL; - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->base = -1; - - if (device_is_big_endian(dev)) - *flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER; - - if (device_property_read_bool(dev, "no-output")) - *flags |= BGPIOF_NO_OUTPUT; - - return pdata; -} - 
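The gpio-mmio hunk above introduces BGPIOF_NO_INPUT as the mirror image of BGPIOF_NO_OUTPUT: when no direction registers are supplied, direction_input is wired to a stub returning -EINVAL instead of the pinctrl-backed default. Under that assumption, an output-only block would be set up roughly like this (register pointers are placeholders):

#include <linux/gpio/driver.h>

static int example_output_only_init(struct gpio_chip *gc, struct device *dev,
				    void __iomem *dat, void __iomem *set)
{
	/* 32-bit data/set registers, no clr and no direction registers */
	return bgpio_init(gc, dev, 4, dat, set, NULL, NULL, NULL,
			  BGPIOF_NO_INPUT);
}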
static int bgpio_pdev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -745,18 +745,10 @@ static int bgpio_pdev_probe(struct platform_device *pdev) void __iomem *dirin; unsigned long sz; unsigned long flags = 0; + unsigned int base; int err; struct gpio_chip *gc; - struct bgpio_pdata *pdata; - - pdata = bgpio_parse_fw(dev, &flags); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); - - if (!pdata) { - pdata = dev_get_platdata(dev); - flags = pdev->id_entry->driver_data; - } + const char *label; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat"); if (!r) @@ -788,17 +780,27 @@ static int bgpio_pdev_probe(struct platform_device *pdev) if (!gc) return -ENOMEM; + if (device_is_big_endian(dev)) + flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER; + + if (device_property_read_bool(dev, "no-output")) + flags |= BGPIOF_NO_OUTPUT; + err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags); if (err) return err; - if (pdata) { - if (pdata->label) - gc->label = pdata->label; - gc->base = pdata->base; - if (pdata->ngpio > 0) - gc->ngpio = pdata->ngpio; - } + err = device_property_read_string(dev, "label", &label); + if (!err) + gc->label = label; + + /* + * This property *must not* be used in device-tree sources, it's only + * meant to be passed to the driver from board files and MFD core. + */ + err = device_property_read_u32(dev, "gpio-mmio,base", &base); + if (!err && base <= INT_MAX) + gc->base = base; platform_set_drvdata(pdev, gc); @@ -809,9 +811,6 @@ static const struct platform_device_id bgpio_id_table[] = { { .name = "basic-mmio-gpio", .driver_data = 0, - }, { - .name = "basic-mmio-gpio-be", - .driver_data = BGPIOF_BIG_ENDIAN, }, { } }; diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c index 61f9efd6c64f..27dd9c3e7b77 100644 --- a/drivers/gpio/gpio-moxtet.c +++ b/drivers/gpio/gpio-moxtet.c @@ -52,15 +52,15 @@ static int moxtet_gpio_get_value(struct gpio_chip *gc, unsigned int offset) return !!(ret & BIT(offset)); } -static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset, - int val) +static int moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset, + int val) { struct moxtet_gpio_chip *chip = gpiochip_get_data(gc); int state; state = moxtet_device_written(chip->dev); if (state < 0) - return; + return state; offset -= MOXTET_GPIO_INPUTS; @@ -69,7 +69,7 @@ static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset, else state &= ~BIT(offset); - moxtet_device_write(chip->dev, state); + return moxtet_device_write(chip->dev, state); } static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) @@ -104,13 +104,11 @@ static int moxtet_gpio_direction_output(struct gpio_chip *gc, struct moxtet_gpio_chip *chip = gpiochip_get_data(gc); if (chip->desc->out_mask & BIT(offset)) - moxtet_gpio_set_value(gc, offset, val); + return moxtet_gpio_set_value(gc, offset, val); else if (chip->desc->in_mask & BIT(offset)) return -ENOTSUPP; - else - return -EINVAL; - return 0; + return -EINVAL; } static int moxtet_gpio_probe(struct device *dev) @@ -142,7 +140,7 @@ static int moxtet_gpio_probe(struct device *dev) chip->gpio_chip.direction_input = moxtet_gpio_direction_input; chip->gpio_chip.direction_output = moxtet_gpio_direction_output; chip->gpio_chip.get = moxtet_gpio_get_value; - chip->gpio_chip.set = moxtet_gpio_set_value; + chip->gpio_chip.set_rv = moxtet_gpio_set_value; chip->gpio_chip.base = -1; chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS; diff --git a/drivers/gpio/gpio-mpc5200.c 
b/drivers/gpio/gpio-mpc5200.c index 091d96f2d682..40d587176a75 100644 --- a/drivers/gpio/gpio-mpc5200.c +++ b/drivers/gpio/gpio-mpc5200.c @@ -69,7 +69,7 @@ __mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) out_8(®s->wkup_dvo, chip->shadow_dvo); } -static void +static int mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; @@ -81,6 +81,8 @@ mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); + + return 0; } static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -151,7 +153,7 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev) gc->direction_input = mpc52xx_wkup_gpio_dir_in; gc->direction_output = mpc52xx_wkup_gpio_dir_out; gc->get = mpc52xx_wkup_gpio_get; - gc->set = mpc52xx_wkup_gpio_set; + gc->set_rv = mpc52xx_wkup_gpio_set; ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip); if (ret) @@ -228,7 +230,7 @@ __mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) out_be32(®s->simple_dvo, chip->shadow_dvo); } -static void +static int mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; @@ -240,6 +242,8 @@ mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); + + return 0; } static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -311,7 +315,7 @@ static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) gc->direction_input = mpc52xx_simple_gpio_dir_in; gc->direction_output = mpc52xx_simple_gpio_dir_out; gc->get = mpc52xx_simple_gpio_get; - gc->set = mpc52xx_simple_gpio_set; + gc->set_rv = mpc52xx_simple_gpio_set; ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip); if (ret) diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c index 561a961c97a6..3415cb7ebb0f 100644 --- a/drivers/gpio/gpio-mpfs.c +++ b/drivers/gpio/gpio-mpfs.c @@ -99,16 +99,19 @@ static int mpfs_gpio_get(struct gpio_chip *gc, unsigned int gpio_index) return regmap_test_bits(mpfs_gpio->regs, mpfs_gpio->offsets->inp, BIT(gpio_index)); } -static void mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value) +static int mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value) { struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc); + int ret; mpfs_gpio_get(gc, gpio_index); - regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index), - value << gpio_index); + ret = regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, + BIT(gpio_index), value << gpio_index); mpfs_gpio_get(gc, gpio_index); + + return ret; } static int mpfs_gpio_probe(struct platform_device *pdev) @@ -147,7 +150,7 @@ static int mpfs_gpio_probe(struct platform_device *pdev) mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output; mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction; mpfs_gpio->gc.get = mpfs_gpio_get; - mpfs_gpio->gc.set = mpfs_gpio_set; + mpfs_gpio->gc.set_rv = mpfs_gpio_set; mpfs_gpio->gc.base = -1; mpfs_gpio->gc.ngpio = ngpios; mpfs_gpio->gc.label = dev_name(dev); diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c index 3ea32c5e33d1..b17de08e9e03 100644 --- a/drivers/gpio/gpio-mpsse.c +++ b/drivers/gpio/gpio-mpsse.c @@ -160,8 +160,8 @@ static int gpio_mpsse_get_bank(struct 
mpsse_priv *priv, u8 bank) return buf; } -static void gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask, - unsigned long *bits) +static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) { unsigned long i, bank, bank_mask, bank_bits; int ret; @@ -180,11 +180,11 @@ static void gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask, ret = gpio_mpsse_set_bank(priv, bank); if (ret) - dev_err(&priv->intf->dev, - "Couldn't set values for bank %ld!", - bank); + return ret; } } + + return 0; } static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -227,7 +227,7 @@ static int gpio_mpsse_gpio_get(struct gpio_chip *chip, unsigned int offset) return 0; } -static void gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset, +static int gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { unsigned long mask = 0, bits = 0; @@ -236,7 +236,7 @@ static void gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset, if (value) __set_bit(offset, &bits); - gpio_mpsse_set_multiple(chip, &mask, &bits); + return gpio_mpsse_set_multiple(chip, &mask, &bits); } static int gpio_mpsse_direction_output(struct gpio_chip *chip, @@ -249,9 +249,7 @@ static int gpio_mpsse_direction_output(struct gpio_chip *chip, scoped_guard(mutex, &priv->io_mutex) priv->gpio_dir[bank] |= BIT(bank_offset); - gpio_mpsse_gpio_set(chip, offset, value); - - return 0; + return gpio_mpsse_gpio_set(chip, offset, value); } static int gpio_mpsse_direction_input(struct gpio_chip *chip, @@ -450,9 +448,9 @@ static int gpio_mpsse_probe(struct usb_interface *interface, priv->gpio.direction_input = gpio_mpsse_direction_input; priv->gpio.direction_output = gpio_mpsse_direction_output; priv->gpio.get = gpio_mpsse_gpio_get; - priv->gpio.set = gpio_mpsse_gpio_set; + priv->gpio.set_rv = gpio_mpsse_gpio_set; priv->gpio.get_multiple = gpio_mpsse_get_multiple; - priv->gpio.set_multiple = gpio_mpsse_set_multiple; + priv->gpio.set_multiple_rv = gpio_mpsse_set_multiple; priv->gpio.base = -1; priv->gpio.ngpio = 16; priv->gpio.offset = priv->intf_id * priv->gpio.ngpio; diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c index 6db9e469e0dc..992339a89d19 100644 --- a/drivers/gpio/gpio-msc313.c +++ b/drivers/gpio/gpio-msc313.c @@ -486,7 +486,7 @@ struct msc313_gpio { u8 *saved; }; -static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +static int msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct msc313_gpio *gpio = gpiochip_get_data(chip); u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]); @@ -497,6 +497,8 @@ static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int val gpioreg &= ~MSC313_GPIO_OUT; writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]); + + return 0; } static int msc313_gpio_get(struct gpio_chip *chip, unsigned int offset) @@ -656,7 +658,7 @@ static int msc313_gpio_probe(struct platform_device *pdev) gpiochip->direction_input = msc313_gpio_direction_input; gpiochip->direction_output = msc313_gpio_direction_output; gpiochip->get = msc313_gpio_get; - gpiochip->set = msc313_gpio_set; + gpiochip->set_rv = msc313_gpio_set; gpiochip->base = -1; gpiochip->ngpio = gpio->gpio_data->num; gpiochip->names = gpio->gpio_data->names; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 57633a7b4270..24792b8eb083 100644 --- a/drivers/gpio/gpio-mvebu.c +++ 
b/drivers/gpio/gpio-mvebu.c @@ -1236,8 +1236,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) if (!have_irqs) return 0; - mvchip->domain = - irq_domain_create_linear(of_fwnode_handle(np), ngpios, &irq_generic_chip_ops, NULL); + mvchip->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), ngpios, + &irq_generic_chip_ops, NULL); if (!mvchip->domain) { dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", mvchip->chip.label); diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index fae1a30f8ae6..433cbadc3a4c 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -7,6 +7,7 @@ // Authors: Daniel Mack, Juergen Beisert. // Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/init.h> @@ -22,6 +23,7 @@ #include <linux/spinlock.h> #include <linux/syscore_ops.h> #include <linux/gpio/driver.h> +#include <linux/gpio/generic.h> #include <linux/of.h> #include <linux/bug.h> @@ -64,7 +66,7 @@ struct mxc_gpio_port { int irq_high; void (*mx_irq_handler)(struct irq_desc *desc); struct irq_domain *domain; - struct gpio_chip gc; + struct gpio_generic_chip gen_gc; struct device *dev; u32 both_edges; struct mxc_gpio_reg_saved gpio_saved_reg; @@ -161,7 +163,6 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mxc_gpio_port *port = gc->private; - unsigned long flags; u32 bit, val; u32 gpio_idx = d->hwirq; int edge; @@ -179,7 +180,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) if (GPIO_EDGE_SEL >= 0) { edge = GPIO_INT_BOTH_EDGES; } else { - val = port->gc.get(&port->gc, gpio_idx); + val = port->gen_gc.gc.get(&port->gen_gc.gc, gpio_idx); if (val) { edge = GPIO_INT_LOW_LEV; pr_debug("mxc: set GPIO %d to low trigger\n", gpio_idx); @@ -200,41 +201,38 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) return -EINVAL; } - raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags); + scoped_guard(gpio_generic_lock_irqsave, &port->gen_gc) { + if (GPIO_EDGE_SEL >= 0) { + val = readl(port->base + GPIO_EDGE_SEL); + if (edge == GPIO_INT_BOTH_EDGES) + writel(val | (1 << gpio_idx), + port->base + GPIO_EDGE_SEL); + else + writel(val & ~(1 << gpio_idx), + port->base + GPIO_EDGE_SEL); + } - if (GPIO_EDGE_SEL >= 0) { - val = readl(port->base + GPIO_EDGE_SEL); - if (edge == GPIO_INT_BOTH_EDGES) - writel(val | (1 << gpio_idx), - port->base + GPIO_EDGE_SEL); - else - writel(val & ~(1 << gpio_idx), - port->base + GPIO_EDGE_SEL); - } + if (edge != GPIO_INT_BOTH_EDGES) { + reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */ + bit = gpio_idx & 0xf; + val = readl(reg) & ~(0x3 << (bit << 1)); + writel(val | (edge << (bit << 1)), reg); + } - if (edge != GPIO_INT_BOTH_EDGES) { - reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */ - bit = gpio_idx & 0xf; - val = readl(reg) & ~(0x3 << (bit << 1)); - writel(val | (edge << (bit << 1)), reg); + writel(1 << gpio_idx, port->base + GPIO_ISR); + port->pad_type[gpio_idx] = type; } - writel(1 << gpio_idx, port->base + GPIO_ISR); - port->pad_type[gpio_idx] = type; - - raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags); - - return port->gc.direction_input(&port->gc, gpio_idx); + return port->gen_gc.gc.direction_input(&port->gen_gc.gc, gpio_idx); } static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) { void __iomem *reg = port->base; - unsigned long flags; u32 bit, val; int edge; - 
raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags); + guard(gpio_generic_lock_irqsave)(&port->gen_gc); reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ bit = gpio & 0xf; @@ -250,12 +248,9 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) } else { pr_err("mxc: invalid configuration for GPIO %d: %x\n", gpio, edge); - goto unlock; + return; } writel(val | (edge << (bit << 1)), reg); - -unlock: - raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags); } /* handle 32 interrupts in one status register */ @@ -420,6 +415,7 @@ static void mxc_update_irq_chained_handler(struct mxc_gpio_port *port, bool enab static int mxc_gpio_probe(struct platform_device *pdev) { + struct gpio_generic_chip_config config = { }; struct device_node *np = pdev->dev.of_node; struct mxc_gpio_port *port; int irq_count; @@ -479,27 +475,31 @@ static int mxc_gpio_probe(struct platform_device *pdev) port->mx_irq_handler = mx3_gpio_irq_handler; mxc_update_irq_chained_handler(port, true); - err = bgpio_init(&port->gc, &pdev->dev, 4, - port->base + GPIO_PSR, - port->base + GPIO_DR, NULL, - port->base + GPIO_GDIR, NULL, - BGPIOF_READ_OUTPUT_REG_SET); + + config.dev = &pdev->dev; + config.sz = 4; + config.dat = port->base + GPIO_PSR; + config.set = port->base + GPIO_DR; + config.dirout = port->base + GPIO_GDIR; + config.flags = BGPIOF_READ_OUTPUT_REG_SET; + + err = gpio_generic_chip_init(&port->gen_gc, &config); if (err) goto out_bgio; - port->gc.request = mxc_gpio_request; - port->gc.free = mxc_gpio_free; - port->gc.to_irq = mxc_gpio_to_irq; + port->gen_gc.gc.request = mxc_gpio_request; + port->gen_gc.gc.free = mxc_gpio_free; + port->gen_gc.gc.to_irq = mxc_gpio_to_irq; /* * Driver is DT-only, so a fixed base needs only be maintained for legacy * userspace with sysfs interface. 
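The i.MX rework above uses the irqsave flavour of the same guards: scoped_guard() confines the critical section to the braced block, while guard() holds the lock to the end of the enclosing scope. Roughly, with made-up register and bit names:

#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/types.h>

static void example_toggle(struct gpio_generic_chip *gen, void __iomem *reg, u32 bit)
{
	scoped_guard(gpio_generic_lock_irqsave, gen) {
		iowrite32(ioread32(reg) ^ bit, reg);
	}

	/* lock already dropped here */
}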
*/ if (IS_ENABLED(CONFIG_GPIO_SYSFS)) - port->gc.base = of_alias_get_id(np, "gpio") * 32; + port->gen_gc.gc.base = of_alias_get_id(np, "gpio") * 32; else /* silence boot time warning */ - port->gc.base = -1; + port->gen_gc.gc.base = -1; - err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port); + err = devm_gpiochip_add_data(&pdev->dev, &port->gen_gc.gc, port); if (err) goto out_bgio; @@ -509,8 +509,8 @@ static int mxc_gpio_probe(struct platform_device *pdev) goto out_bgio; } - port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0, - &irq_domain_simple_ops, NULL); + port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0, + &irq_domain_simple_ops, NULL); if (!port->domain) { err = -ENODEV; goto out_bgio; @@ -573,7 +573,8 @@ static bool mxc_gpio_generic_config(struct mxc_gpio_port *port, if (of_device_is_compatible(np, "fsl,imx8dxl-gpio") || of_device_is_compatible(np, "fsl,imx8qxp-gpio") || of_device_is_compatible(np, "fsl,imx8qm-gpio")) - return (gpiochip_generic_config(&port->gc, offset, conf) == 0); + return (gpiochip_generic_config(&port->gen_gc.gc, + offset, conf) == 0); return false; } diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index b418fbccb26c..0ea46f3d04e1 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -303,7 +303,7 @@ static int mxs_gpio_probe(struct platform_device *pdev) goto out_iounmap; } - port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0, + port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0, &irq_domain_simple_ops, NULL); if (!port->domain) { err = -ENODEV; diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c index fa19a44943fd..296d13845b30 100644 --- a/drivers/gpio/gpio-nomadik.c +++ b/drivers/gpio/gpio-nomadik.c @@ -347,8 +347,8 @@ static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned int offset) return value; } -static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset, - int val) +static int nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset, + int val) { struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip); @@ -357,6 +357,8 @@ static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset, __nmk_gpio_set_output(nmk_chip, offset, val); clk_disable(nmk_chip->clk); + + return 0; } static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned int offset, @@ -672,7 +674,7 @@ static int nmk_gpio_probe(struct platform_device *pdev) chip->direction_input = nmk_gpio_make_input; chip->get = nmk_gpio_get_input; chip->direction_output = nmk_gpio_make_output; - chip->set = nmk_gpio_set_output; + chip->set_rv = nmk_gpio_set_output; chip->dbg_show = nmk_gpio_dbg_show; chip->can_sleep = false; chip->owner = THIS_MODULE; diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c index 260570614543..25b203a89e38 100644 --- a/drivers/gpio/gpio-npcm-sgpio.c +++ b/drivers/gpio/gpio-npcm-sgpio.c @@ -211,9 +211,7 @@ static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset) static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val) { - gc->set(gc, offset, val); - - return 0; + return gc->set_rv(gc, offset, val); } static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) @@ -226,7 +224,7 @@ static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) return GPIO_LINE_DIRECTION_IN; } -static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) +static int 
npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) { struct npcm_sgpio *gpio = gpiochip_get_data(gc); const struct npcm_sgpio_bank *bank = offset_to_bank(offset); @@ -242,6 +240,8 @@ static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) reg &= ~BIT(GPIO_BIT(offset)); iowrite8(reg, addr); + + return 0; } static int npcm_sgpio_get(struct gpio_chip *gc, unsigned int offset) @@ -546,7 +546,7 @@ static int npcm_sgpio_probe(struct platform_device *pdev) gpio->chip.direction_output = npcm_sgpio_dir_out; gpio->chip.get_direction = npcm_sgpio_get_direction; gpio->chip.get = npcm_sgpio_get; - gpio->chip.set = npcm_sgpio_set; + gpio->chip.set_rv = npcm_sgpio_set; gpio->chip.label = dev_name(&pdev->dev); gpio->chip.base = -1; diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c index afb0e8a791e5..24966161742a 100644 --- a/drivers/gpio/gpio-octeon.c +++ b/drivers/gpio/gpio-octeon.c @@ -47,12 +47,15 @@ static int octeon_gpio_dir_in(struct gpio_chip *chip, unsigned offset) return 0; } -static void octeon_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int octeon_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct octeon_gpio *gpio = gpiochip_get_data(chip); u64 mask = 1ull << offset; u64 reg = gpio->register_base + (value ? TX_SET : TX_CLEAR); cvmx_write_csr(reg, mask); + + return 0; } static int octeon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, @@ -105,7 +108,7 @@ static int octeon_gpio_probe(struct platform_device *pdev) chip->direction_input = octeon_gpio_dir_in; chip->get = octeon_gpio_get; chip->direction_output = octeon_gpio_dir_out; - chip->set = octeon_gpio_set; + chip->set_rv = octeon_gpio_set; err = devm_gpiochip_add_data(&pdev->dev, chip, gpio); if (err) return err; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 54c4bfdccf56..ed5c88a5c520 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -953,7 +953,7 @@ static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset, return ret; } -static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int omap_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpio_bank *bank; unsigned long flags; @@ -962,10 +962,12 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) raw_spin_lock_irqsave(&bank->lock, flags); bank->set_dataout(bank, offset, value); raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; } -static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, - unsigned long *bits) +static int omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) { struct gpio_bank *bank = gpiochip_get_data(chip); void __iomem *reg = bank->base + bank->regs->dataout; @@ -977,6 +979,8 @@ static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, writel_relaxed(l, reg); bank->context.dataout = l; raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; } /*---------------------------------------------------------------------*/ @@ -1042,8 +1046,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev) bank->chip.get_multiple = omap_gpio_get_multiple; bank->chip.direction_output = omap_gpio_output; bank->chip.set_config = omap_gpio_set_config; - bank->chip.set = omap_gpio_set; - bank->chip.set_multiple = omap_gpio_set_multiple; + bank->chip.set_rv = omap_gpio_set; + bank->chip.set_multiple_rv = 
omap_gpio_set_multiple; if (bank->is_mpuio) { bank->chip.label = "mpuio"; if (bank->regs->wkup_en) diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index 28dba7048509..9329d8ce8f59 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c @@ -54,12 +54,11 @@ static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset) return !!(val & BIT(offset)); } -static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset, - int value) +static int palmas_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct palmas_gpio *pg = gpiochip_get_data(gc); struct palmas *palmas = pg->palmas; - int ret; unsigned int reg; int gpio16 = (offset/8); @@ -71,9 +70,7 @@ static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset, reg = (value) ? PALMAS_GPIO_SET_DATA_OUT : PALMAS_GPIO_CLEAR_DATA_OUT; - ret = palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset)); - if (ret < 0) - dev_err(gc->parent, "Reg 0x%02x write failed, %d\n", reg, ret); + return palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset)); } static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset, @@ -89,7 +86,9 @@ static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset, reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR; /* Set the initial value */ - palmas_gpio_set(gc, offset, value); + ret = palmas_gpio_set(gc, offset, value); + if (ret) + return ret; ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg, BIT(offset), BIT(offset)); @@ -140,6 +139,7 @@ static const struct of_device_id of_palmas_gpio_match[] = { { .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,}, { }, }; +MODULE_DEVICE_TABLE(of, of_palmas_gpio_match); static int palmas_gpio_probe(struct platform_device *pdev) { @@ -166,7 +166,7 @@ static int palmas_gpio_probe(struct platform_device *pdev) palmas_gpio->gpio_chip.direction_input = palmas_gpio_input; palmas_gpio->gpio_chip.direction_output = palmas_gpio_output; palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq; - palmas_gpio->gpio_chip.set = palmas_gpio_set; + palmas_gpio->gpio_chip.set_rv = palmas_gpio_set; palmas_gpio->gpio_chip.get = palmas_gpio_get; palmas_gpio->gpio_chip.parent = &pdev->dev; @@ -197,3 +197,13 @@ static int __init palmas_gpio_init(void) return platform_driver_register(&palmas_gpio_driver); } subsys_initcall(palmas_gpio_init); + +static void __exit palmas_gpio_exit(void) +{ + platform_driver_unregister(&palmas_gpio_driver); +} +module_exit(palmas_gpio_exit); + +MODULE_DESCRIPTION("TI PALMAS series GPIO driver"); +MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index e80a96f39788..69906a9af7e6 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -38,6 +38,10 @@ #define PCA953X_INVERT 0x02 #define PCA953X_DIRECTION 0x03 +#define TCA6418_INPUT 0x14 +#define TCA6418_OUTPUT 0x17 +#define TCA6418_DIRECTION 0x23 + #define REG_ADDR_MASK GENMASK(5, 0) #define REG_ADDR_EXT BIT(6) #define REG_ADDR_AI BIT(7) @@ -76,7 +80,8 @@ #define PCA953X_TYPE BIT(12) #define PCA957X_TYPE BIT(13) #define PCAL653X_TYPE BIT(14) -#define PCA_TYPE_MASK GENMASK(15, 12) +#define TCA6418_TYPE BIT(16) +#define PCA_TYPE_MASK GENMASK(16, 12) #define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK) @@ -115,6 +120,7 @@ static const struct i2c_device_id pca953x_id[] = { { "pca6107", 8 | PCA953X_TYPE | PCA_INT, }, { "tca6408", 8 | PCA953X_TYPE | PCA_INT, }, { "tca6416", 16 | PCA953X_TYPE | PCA_INT, }, + { "tca6418", 
18 | TCA6418_TYPE | PCA_INT, }, { "tca6424", 24 | PCA953X_TYPE | PCA_INT, }, { "tca9538", 8 | PCA953X_TYPE | PCA_INT, }, { "tca9539", 16 | PCA953X_TYPE | PCA_INT, }, @@ -204,6 +210,13 @@ static const struct pca953x_reg_config pca957x_regs = { .invert = PCA957X_INVRT, }; +static const struct pca953x_reg_config tca6418_regs = { + .direction = TCA6418_DIRECTION, + .output = TCA6418_OUTPUT, + .input = TCA6418_INPUT, + .invert = 0xFF, /* Does not apply */ +}; + struct pca953x_chip { unsigned gpio_start; struct mutex i2c_lock; @@ -237,6 +250,22 @@ static int pca953x_bank_shift(struct pca953x_chip *chip) return fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); } +/* + * Helper function to get the correct bit mask for a given offset and chip type. + * The TCA6418's input, output, and direction banks have a peculiar bit order: + * the first byte uses reversed bit order, while the second byte uses standard order. + */ +static inline u8 pca953x_get_bit_mask(struct pca953x_chip *chip, unsigned int offset) +{ + unsigned int bit_pos_in_bank = offset % BANK_SZ; + int msb = BANK_SZ - 1; + + if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE && offset <= msb) + return BIT(msb - bit_pos_in_bank); + + return BIT(bit_pos_in_bank); +} + #define PCA953x_BANK_INPUT BIT(0) #define PCA953x_BANK_OUTPUT BIT(1) #define PCA953x_BANK_POLARITY BIT(2) @@ -353,18 +382,43 @@ static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg, return true; } +/* TCA6418 breaks the PCA953x register order rule */ +static bool tca6418_check_register(struct pca953x_chip *chip, unsigned int reg, + u32 access_type_mask) +{ + /* Valid Input Registers - BIT(0) for readable access */ + if (reg >= TCA6418_INPUT && reg < (TCA6418_INPUT + NBANK(chip))) + return (access_type_mask & BIT(0)); + + /* Valid Output Registers - BIT(1) for writeable access */ + if (reg >= TCA6418_OUTPUT && reg < (TCA6418_OUTPUT + NBANK(chip))) + return (access_type_mask & (BIT(0) | BIT(1))); + + /* Valid Direction Registers - BIT(2) for volatile access */ + if (reg >= TCA6418_DIRECTION && reg < (TCA6418_DIRECTION + NBANK(chip))) + return (access_type_mask & (BIT(0) | BIT(1))); + + return false; +} + static bool pca953x_readable_register(struct device *dev, unsigned int reg) { struct pca953x_chip *chip = dev_get_drvdata(dev); u32 bank; - if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) { + switch (PCA_CHIP_TYPE(chip->driver_data)) { + case PCA957X_TYPE: bank = PCA957x_BANK_INPUT | PCA957x_BANK_OUTPUT | PCA957x_BANK_POLARITY | PCA957x_BANK_CONFIG | PCA957x_BANK_BUSHOLD; - } else { + break; + case TCA6418_TYPE: + /* BIT(0) to indicate read access */ + return tca6418_check_register(chip, reg, BIT(0)); + default: bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG; + break; } if (chip->driver_data & PCA_PCAL) { @@ -381,12 +435,18 @@ static bool pca953x_writeable_register(struct device *dev, unsigned int reg) struct pca953x_chip *chip = dev_get_drvdata(dev); u32 bank; - if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) { + switch (PCA_CHIP_TYPE(chip->driver_data)) { + case PCA957X_TYPE: bank = PCA957x_BANK_OUTPUT | PCA957x_BANK_POLARITY | PCA957x_BANK_CONFIG | PCA957x_BANK_BUSHOLD; - } else { + break; + case TCA6418_TYPE: + /* BIT(1) for write access */ + return tca6418_check_register(chip, reg, BIT(1)); + default: bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG; + break; } if (chip->driver_data & PCA_PCAL) @@ -401,10 +461,17 @@ static bool pca953x_volatile_register(struct device 
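To make the TCA6418 bit-order comment above concrete: for lines 0-7 the mask is mirrored inside the byte, from line 8 upwards it is the plain offset within the bank. A tiny self-contained check of the same arithmetic as pca953x_get_bit_mask(), with BANK_SZ = 8:

#include <stdio.h>

#define BANK_SZ 8

/* Same arithmetic as pca953x_get_bit_mask() for the TCA6418 case. */
static unsigned int tca6418_mask(unsigned int offset)
{
	unsigned int bit = offset % BANK_SZ;

	if (offset <= BANK_SZ - 1)		/* first bank: reversed order */
		return 1u << (BANK_SZ - 1 - bit);
	return 1u << bit;			/* remaining banks: normal order */
}

int main(void)
{
	/* offset 0 -> 0x80, offset 7 -> 0x01, offset 8 -> 0x01, offset 17 -> 0x02 */
	printf("%#x %#x %#x %#x\n",
	       tca6418_mask(0), tca6418_mask(7), tca6418_mask(8), tca6418_mask(17));
	return 0;
}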
*dev, unsigned int reg) struct pca953x_chip *chip = dev_get_drvdata(dev); u32 bank; - if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) + switch (PCA_CHIP_TYPE(chip->driver_data)) { + case PCA957X_TYPE: bank = PCA957x_BANK_INPUT; - else + break; + case TCA6418_TYPE: + /* BIT(2) for volatile access */ + return tca6418_check_register(chip, reg, BIT(2)); + default: bank = PCA953x_BANK_INPUT; + break; + } if (chip->driver_data & PCA_PCAL) bank |= PCAL9xxx_BANK_IRQ_STAT; @@ -489,6 +556,16 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off) return pinctrl + addr + (off / BANK_SZ); } +static u8 tca6418_recalc_addr(struct pca953x_chip *chip, int reg_base, int offset) +{ + /* + * reg_base will be TCA6418_INPUT, TCA6418_OUTPUT, or TCA6418_DIRECTION + * offset is the global GPIO line offset (0-17) + * BANK_SZ is 8 for TCA6418 (8 bits per register bank) + */ + return reg_base + (offset / BANK_SZ); +} + static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val) { u8 regaddr = chip->recalc_addr(chip, reg, 0); @@ -529,11 +606,14 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) { struct pca953x_chip *chip = gpiochip_get_data(gc); u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off); - u8 bit = BIT(off % BANK_SZ); + u8 bit = pca953x_get_bit_mask(chip, off); guard(mutex)(&chip->i2c_lock); - return regmap_write_bits(chip->regmap, dirreg, bit, bit); + if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE) + return regmap_update_bits(chip->regmap, dirreg, bit, 0); + + return regmap_update_bits(chip->regmap, dirreg, bit, bit); } static int pca953x_gpio_direction_output(struct gpio_chip *gc, @@ -542,25 +622,31 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, struct pca953x_chip *chip = gpiochip_get_data(gc); u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off); u8 outreg = chip->recalc_addr(chip, chip->regs->output, off); - u8 bit = BIT(off % BANK_SZ); + u8 bit = pca953x_get_bit_mask(chip, off); int ret; guard(mutex)(&chip->i2c_lock); /* set output level */ - ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0); + ret = regmap_update_bits(chip->regmap, outreg, bit, val ? bit : 0); if (ret) return ret; - /* then direction */ - return regmap_write_bits(chip->regmap, dirreg, bit, 0); + /* + * then direction + * (in/out logic is inverted on TCA6418) + */ + if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE) + return regmap_update_bits(chip->regmap, dirreg, bit, bit); + + return regmap_update_bits(chip->regmap, dirreg, bit, 0); } static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) { struct pca953x_chip *chip = gpiochip_get_data(gc); u8 inreg = chip->recalc_addr(chip, chip->regs->input, off); - u8 bit = BIT(off % BANK_SZ); + u8 bit = pca953x_get_bit_mask(chip, off); u32 reg_val; int ret; @@ -577,18 +663,18 @@ static int pca953x_gpio_set_value(struct gpio_chip *gc, unsigned int off, { struct pca953x_chip *chip = gpiochip_get_data(gc); u8 outreg = chip->recalc_addr(chip, chip->regs->output, off); - u8 bit = BIT(off % BANK_SZ); + u8 bit = pca953x_get_bit_mask(chip, off); guard(mutex)(&chip->i2c_lock); - return regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0); + return regmap_update_bits(chip->regmap, outreg, bit, val ? 
bit : 0); } static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off) { struct pca953x_chip *chip = gpiochip_get_data(gc); u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off); - u8 bit = BIT(off % BANK_SZ); + u8 bit = pca953x_get_bit_mask(chip, off); u32 reg_val; int ret; @@ -597,7 +683,14 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off) if (ret < 0) return ret; - if (reg_val & bit) + /* (in/out logic is inverted on TCA6418) */ + if (reg_val & bit) { + if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE) + return GPIO_LINE_DIRECTION_OUT; + + return GPIO_LINE_DIRECTION_IN; + } + if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; @@ -658,9 +751,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, /* Configure pull-up/pull-down */ if (param == PIN_CONFIG_BIAS_PULL_UP) - ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit); + ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, bit); else if (param == PIN_CONFIG_BIAS_PULL_DOWN) - ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0); + ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, 0); else ret = 0; if (ret) @@ -668,9 +761,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, /* Disable/Enable pull-up/pull-down */ if (param == PIN_CONFIG_BIAS_DISABLE) - return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0); + return regmap_update_bits(chip->regmap, pull_en_reg, bit, 0); else - return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit); + return regmap_update_bits(chip->regmap, pull_en_reg, bit, bit); } static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset, @@ -1117,12 +1210,22 @@ static int pca953x_probe(struct i2c_client *client) regmap_config = &pca953x_i2c_regmap; } - if (PCA_CHIP_TYPE(chip->driver_data) == PCAL653X_TYPE) { + switch (PCA_CHIP_TYPE(chip->driver_data)) { + case PCAL653X_TYPE: chip->recalc_addr = pcal6534_recalc_addr; chip->check_reg = pcal6534_check_register; - } else { + break; + case TCA6418_TYPE: + chip->recalc_addr = tca6418_recalc_addr; + /* + * We don't assign chip->check_reg = tca6418_check_register directly here. + * Instead, the wrappers handle the dispatch based on PCA_CHIP_TYPE. + */ + break; + default: chip->recalc_addr = pca953x_recalc_addr; chip->check_reg = pca953x_check_register; + break; } chip->regmap = devm_regmap_init_i2c(client, regmap_config); @@ -1151,15 +1254,22 @@ static int pca953x_probe(struct i2c_client *client) lockdep_set_subclass(&chip->i2c_lock, i2c_adapter_depth(client->adapter)); - /* initialize cached registers from their original values. + /* + * initialize cached registers from their original values. * we can't share this chip with another i2c master. 
*/ - if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) { + switch (PCA_CHIP_TYPE(chip->driver_data)) { + case PCA957X_TYPE: chip->regs = &pca957x_regs; ret = device_pca957x_init(chip); - } else { + break; + case TCA6418_TYPE: + chip->regs = &tca6418_regs; + break; + default: chip->regs = &pca953x_regs; ret = device_pca95xx_init(chip); + break; } if (ret) return ret; @@ -1325,6 +1435,7 @@ static const struct of_device_id pca953x_dt_ids[] = { { .compatible = "ti,pca9536", .data = OF_953X( 4, 0), }, { .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), }, { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), }, + { .compatible = "ti,tca6418", .data = (void *)(18 | TCA6418_TYPE | PCA_INT), }, { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), }, { .compatible = "ti,tca9535", .data = OF_953X(16, PCA_INT), }, { .compatible = "ti,tca9538", .data = OF_953X( 8, PCA_INT), }, @@ -1355,7 +1466,9 @@ static int __init pca953x_init(void) { return i2c_add_driver(&pca953x_driver); } -/* register after i2c postcore initcall and before + +/* + * register after i2c postcore initcall and before * subsys initcalls that may rely on these GPIOs */ subsys_initcall(pca953x_init); diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c index d37ba4049368..a33246f20fd8 100644 --- a/drivers/gpio/gpio-pca9570.c +++ b/drivers/gpio/gpio-pca9570.c @@ -88,7 +88,7 @@ static int pca9570_get(struct gpio_chip *chip, unsigned offset) return !!(buffer & BIT(offset)); } -static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value) +static int pca9570_set(struct gpio_chip *chip, unsigned int offset, int value) { struct pca9570 *gpio = gpiochip_get_data(chip); u8 buffer; @@ -110,6 +110,7 @@ static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value) out: mutex_unlock(&gpio->lock); + return ret; } static int pca9570_probe(struct i2c_client *client) @@ -125,7 +126,7 @@ static int pca9570_probe(struct i2c_client *client) gpio->chip.owner = THIS_MODULE; gpio->chip.get_direction = pca9570_get_direction; gpio->chip.get = pca9570_get; - gpio->chip.set = pca9570_set; + gpio->chip.set_rv = pca9570_set; gpio->chip.base = -1; gpio->chip_data = device_get_match_data(&client->dev); gpio->chip.ngpio = gpio->chip_data->ngpio; diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 2e5f5d7f8865..a04203680333 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -171,21 +171,24 @@ static int pcf857x_output(struct gpio_chip *chip, unsigned int offset, int value return status; } -static void pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value) +static int pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value) { - pcf857x_output(chip, offset, value); + return pcf857x_output(chip, offset, value); } -static void pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask, - unsigned long *bits) +static int pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) { struct pcf857x *gpio = gpiochip_get_data(chip); + int status; mutex_lock(&gpio->lock); gpio->out &= ~*mask; gpio->out |= *bits & *mask; - gpio->write(gpio->client, gpio->out); + status = gpio->write(gpio->client, gpio->out); mutex_unlock(&gpio->lock); + + return status; } /*-------------------------------------------------------------------------*/ @@ -292,8 +295,8 @@ static int pcf857x_probe(struct i2c_client *client) gpio->chip.owner = THIS_MODULE; gpio->chip.get = pcf857x_get; gpio->chip.get_multiple = 
pcf857x_get_multiple; - gpio->chip.set = pcf857x_set; - gpio->chip.set_multiple = pcf857x_set_multiple; + gpio->chip.set_rv = pcf857x_set; + gpio->chip.set_multiple_rv = pcf857x_set_multiple; gpio->chip.direction_input = pcf857x_input; gpio->chip.direction_output = pcf857x_output; gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client); diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index 63f25c72eac2..c6f313342ba0 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -99,7 +99,7 @@ struct pch_gpio { spinlock_t spinlock; }; -static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val) +static int pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val) { u32 reg_val; struct pch_gpio *chip = gpiochip_get_data(gpio); @@ -114,6 +114,8 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val) iowrite32(reg_val, &chip->reg->po); spin_unlock_irqrestore(&chip->spinlock, flags); + + return 0; } static int pch_gpio_get(struct gpio_chip *gpio, unsigned int nr) @@ -217,7 +219,7 @@ static void pch_gpio_setup(struct pch_gpio *chip) gpio->direction_input = pch_gpio_direction_input; gpio->get = pch_gpio_get; gpio->direction_output = pch_gpio_direction_output; - gpio->set = pch_gpio_set; + gpio->set_rv = pch_gpio_set; gpio->base = -1; gpio->ngpio = gpio_pins[chip->ioh]; gpio->can_sleep = false; diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c index e3013e778e15..a69b74866a13 100644 --- a/drivers/gpio/gpio-pisosr.c +++ b/drivers/gpio/gpio-pisosr.c @@ -67,13 +67,6 @@ static int pisosr_gpio_direction_input(struct gpio_chip *chip, return 0; } -static int pisosr_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - /* This device is input only */ - return -EINVAL; -} - static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset) { struct pisosr_gpio *gpio = gpiochip_get_data(chip); @@ -108,7 +101,6 @@ static const struct gpio_chip template_chip = { .owner = THIS_MODULE, .get_direction = pisosr_gpio_get_direction, .direction_input = pisosr_gpio_direction_input, - .direction_output = pisosr_gpio_direction_output, .get = pisosr_gpio_get, .get_multiple = pisosr_gpio_get_multiple, .base = -1, diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 1c273727ffa3..98cfac4eac85 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -115,11 +115,13 @@ static int pl061_get_value(struct gpio_chip *gc, unsigned offset) return !!readb(pl061->base + (BIT(offset + 2))); } -static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value) +static int pl061_set_value(struct gpio_chip *gc, unsigned int offset, int value) { struct pl061 *pl061 = gpiochip_get_data(gc); writeb(!!value << offset, pl061->base + (BIT(offset + 2))); + + return 0; } static int pl061_irq_type(struct irq_data *d, unsigned trigger) @@ -328,7 +330,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) pl061->gc.direction_input = pl061_direction_input; pl061->gc.direction_output = pl061_direction_output; pl061->gc.get = pl061_get_value; - pl061->gc.set = pl061_set_value; + pl061->gc.set_rv = pl061_set_value; pl061->gc.ngpio = PL061_GPIO_NR; pl061->gc.label = dev_name(dev); pl061->gc.parent = dev; diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c index d9b228bea42e..cb015fb5c946 100644 --- a/drivers/gpio/gpio-pmic-eic-sprd.c +++ b/drivers/gpio/gpio-pmic-eic-sprd.c @@ -109,12 +109,6 @@ static int 
sprd_pmic_eic_direction_input(struct gpio_chip *chip, return 0; } -static void sprd_pmic_eic_set(struct gpio_chip *chip, unsigned int offset, - int value) -{ - /* EICs are always input, nothing need to do here. */ -} - static int sprd_pmic_eic_set_debounce(struct gpio_chip *chip, unsigned int offset, unsigned int debounce) @@ -351,7 +345,6 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev) pmic_eic->chip.request = sprd_pmic_eic_request; pmic_eic->chip.free = sprd_pmic_eic_free; pmic_eic->chip.set_config = sprd_pmic_eic_set_config; - pmic_eic->chip.set = sprd_pmic_eic_set; pmic_eic->chip.get = sprd_pmic_eic_get; pmic_eic->chip.can_sleep = true; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index aead35ea090e..13f7da2a9486 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -315,12 +315,14 @@ static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset) return !!(gplr & GPIO_bit(offset)); } -static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int pxa_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { void __iomem *base = gpio_bank_base(chip, offset); writel_relaxed(GPIO_bit(offset), base + (value ? GPSR_OFFSET : GPCR_OFFSET)); + + return 0; } #ifdef CONFIG_OF_GPIO @@ -353,7 +355,7 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, void __iom pchip->chip.direction_input = pxa_gpio_direction_input; pchip->chip.direction_output = pxa_gpio_direction_output; pchip->chip.get = pxa_gpio_get; - pchip->chip.set = pxa_gpio_set; + pchip->chip.set_rv = pxa_gpio_set; pchip->chip.to_irq = pxa_gpio_to_irq; pchip->chip.ngpio = ngpio; pchip->chip.request = gpiochip_generic_request; @@ -642,9 +644,8 @@ static int pxa_gpio_probe(struct platform_device *pdev) if (!pxa_last_gpio) return -EINVAL; - pchip->irqdomain = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), - pxa_last_gpio + 1, irq_base, 0, - &pxa_irq_domain_ops, pchip); + pchip->irqdomain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), pxa_last_gpio + 1, + irq_base, 0, &pxa_irq_domain_ops, pchip); if (!pchip->irqdomain) return -ENOMEM; diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c index 9d1b95e429f1..b4b607515a04 100644 --- a/drivers/gpio/gpio-raspberrypi-exp.c +++ b/drivers/gpio/gpio-raspberrypi-exp.c @@ -175,7 +175,7 @@ static int rpi_exp_gpio_get(struct gpio_chip *gc, unsigned int off) return !!get.state; } -static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val) +static int rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val) { struct rpi_exp_gpio *gpio; struct gpio_get_set_state set; @@ -188,10 +188,14 @@ static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val) ret = rpi_firmware_property(gpio->fw, RPI_FIRMWARE_SET_GPIO_STATE, &set, sizeof(set)); - if (ret || set.gpio != 0) + if (ret || set.gpio != 0) { dev_err(gc->parent, "Failed to set GPIO %u state (%d %x)\n", off, ret, set.gpio); + return ret ? 
ret : -EIO; + } + + return 0; } static int rpi_exp_gpio_probe(struct platform_device *pdev) @@ -228,7 +232,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev) rpi_gpio->gc.direction_output = rpi_exp_gpio_dir_out; rpi_gpio->gc.get_direction = rpi_exp_gpio_get_direction; rpi_gpio->gc.get = rpi_exp_gpio_get; - rpi_gpio->gc.set = rpi_exp_gpio_set; + rpi_gpio->gc.set_rv = rpi_exp_gpio_set; rpi_gpio->gc.can_sleep = true; return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio); diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c index c34dcadaee36..cf3e91d235df 100644 --- a/drivers/gpio/gpio-rc5t583.c +++ b/drivers/gpio/gpio-rc5t583.c @@ -35,14 +35,20 @@ static int rc5t583_gpio_get(struct gpio_chip *gc, unsigned int offset) return !!(val & BIT(offset)); } -static void rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val) +static int rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val) { struct rc5t583_gpio *rc5t583_gpio = gpiochip_get_data(gc); struct device *parent = rc5t583_gpio->rc5t583->dev; + int ret; + if (val) - rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset)); + ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, + BIT(offset)); else - rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset)); + ret = rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, + BIT(offset)); + + return ret; } static int rc5t583_gpio_dir_input(struct gpio_chip *gc, unsigned int offset) @@ -66,7 +72,10 @@ static int rc5t583_gpio_dir_output(struct gpio_chip *gc, unsigned offset, struct device *parent = rc5t583_gpio->rc5t583->dev; int ret; - rc5t583_gpio_set(gc, offset, value); + ret = rc5t583_gpio_set(gc, offset, value); + if (ret) + return ret; + ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset)); if (ret < 0) return ret; @@ -109,7 +118,7 @@ static int rc5t583_gpio_probe(struct platform_device *pdev) rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free, rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input, rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output, - rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set, + rc5t583_gpio->gpio_chip.set_rv = rc5t583_gpio_set, rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get, rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq, rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO, diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 18c965ee02c8..cd31580effa9 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -331,14 +331,11 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset) static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits) { + u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0); struct gpio_rcar_priv *p = gpiochip_get_data(chip); - u32 bankmask, outputs, m, val = 0; + u32 outputs, m, val = 0; unsigned long flags; - bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0); - if (!bankmask) - return 0; - if (p->info.has_always_in) { bits[0] = gpio_rcar_read(p, INDT) & bankmask; return 0; @@ -359,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, return 0; } -static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value) +static int gpio_rcar_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpio_rcar_priv *p = gpiochip_get_data(chip); unsigned long flags; @@ -367,18 +364,17 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value) raw_spin_lock_irqsave(&p->lock, flags); 
gpio_rcar_modify_bit(p, OUTDT, offset, value); raw_spin_unlock_irqrestore(&p->lock, flags); + + return 0; } -static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, - unsigned long *bits) +static int gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) { + u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0); struct gpio_rcar_priv *p = gpiochip_get_data(chip); unsigned long flags; - u32 val, bankmask; - - bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0); - if (!bankmask) - return; + u32 val; raw_spin_lock_irqsave(&p->lock, flags); val = gpio_rcar_read(p, OUTDT); @@ -386,6 +382,8 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, val |= (bankmask & bits[0]); gpio_rcar_write(p, OUTDT, val); raw_spin_unlock_irqrestore(&p->lock, flags); + + return 0; } static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset, @@ -537,8 +535,8 @@ static int gpio_rcar_probe(struct platform_device *pdev) gpio_chip->get = gpio_rcar_get; gpio_chip->get_multiple = gpio_rcar_get_multiple; gpio_chip->direction_output = gpio_rcar_direction_output; - gpio_chip->set = gpio_rcar_set; - gpio_chip->set_multiple = gpio_rcar_set_multiple; + gpio_chip->set_rv = gpio_rcar_set; + gpio_chip->set_multiple_rv = gpio_rcar_set_multiple; gpio_chip->label = name; gpio_chip->parent = dev; gpio_chip->owner = THIS_MODULE; @@ -594,7 +592,6 @@ static void gpio_rcar_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -#ifdef CONFIG_PM_SLEEP static int gpio_rcar_suspend(struct device *dev) { struct gpio_rcar_priv *p = dev_get_drvdata(dev); @@ -653,16 +650,16 @@ static int gpio_rcar_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM_SLEEP*/ -static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, gpio_rcar_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, + gpio_rcar_resume); static struct platform_driver gpio_rcar_device_driver = { .probe = gpio_rcar_probe, .remove = gpio_rcar_remove, .driver = { .name = "gpio_rcar", - .pm = &gpio_rcar_pm_ops, + .pm = pm_sleep_ptr(&gpio_rcar_pm_ops), .of_match_table = gpio_rcar_of_table, } }; diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c index ec7fb9220a47..a75ed8021de5 100644 --- a/drivers/gpio/gpio-rdc321x.c +++ b/drivers/gpio/gpio-rdc321x.c @@ -64,8 +64,8 @@ static void rdc_gpio_set_value_impl(struct gpio_chip *chip, } /* set GPIO pin to value */ -static void rdc_gpio_set_value(struct gpio_chip *chip, - unsigned gpio, int value) +static int rdc_gpio_set_value(struct gpio_chip *chip, unsigned int gpio, + int value) { struct rdc321x_gpio *gpch; @@ -73,6 +73,8 @@ static void rdc_gpio_set_value(struct gpio_chip *chip, spin_lock(&gpch->lock); rdc_gpio_set_value_impl(chip, gpio, value); spin_unlock(&gpch->lock); + + return 0; } static int rdc_gpio_config(struct gpio_chip *chip, @@ -157,7 +159,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; - rdc321x_gpio_dev->chip.set = rdc_gpio_set_value; + rdc321x_gpio_dev->chip.set_rv = rdc_gpio_set_value; rdc321x_gpio_dev->chip.base = 0; rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios; diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c index 73c7260d89c0..d8da99f97385 100644 --- a/drivers/gpio/gpio-reg.c +++ b/drivers/gpio/gpio-reg.c @@ -46,7 +46,7 @@ static int 
gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset, if (r->direction & BIT(offset)) return -ENOTSUPP; - gc->set(gc, offset, value); + gc->set_rv(gc, offset, value); return 0; } @@ -57,7 +57,7 @@ static int gpio_reg_direction_input(struct gpio_chip *gc, unsigned offset) return r->direction & BIT(offset) ? 0 : -ENOTSUPP; } -static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value) +static int gpio_reg_set(struct gpio_chip *gc, unsigned int offset, int value) { struct gpio_reg *r = to_gpio_reg(gc); unsigned long flags; @@ -72,6 +72,8 @@ static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value) r->out = val; writel_relaxed(val, r->reg); spin_unlock_irqrestore(&r->lock, flags); + + return 0; } static int gpio_reg_get(struct gpio_chip *gc, unsigned offset) @@ -92,8 +94,8 @@ static int gpio_reg_get(struct gpio_chip *gc, unsigned offset) return !!(val & mask); } -static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask, - unsigned long *bits) +static int gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) { struct gpio_reg *r = to_gpio_reg(gc); unsigned long flags; @@ -102,6 +104,8 @@ static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask, r->out = (r->out & ~*mask) | (*bits & *mask); writel_relaxed(r->out, r->reg); spin_unlock_irqrestore(&r->lock, flags); + + return 0; } static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset) @@ -157,9 +161,9 @@ struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg, r->gc.get_direction = gpio_reg_get_direction; r->gc.direction_input = gpio_reg_direction_input; r->gc.direction_output = gpio_reg_direction_output; - r->gc.set = gpio_reg_set; + r->gc.set_rv = gpio_reg_set; r->gc.get = gpio_reg_get; - r->gc.set_multiple = gpio_reg_set_multiple; + r->gc.set_multiple_rv = gpio_reg_set_multiple; if (irqs) r->gc.to_irq = gpio_reg_to_irq; r->gc.base = base; diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index c63352f2f1ec..ecd60ff9e1dd 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -177,8 +177,8 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip, return 0; } -static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset, - int value) +static int rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct rockchip_pin_bank *bank = gpiochip_get_data(gc); unsigned long flags; @@ -186,6 +186,8 @@ static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset, raw_spin_lock_irqsave(&bank->slock, flags); rockchip_gpio_writel_bit(bank, offset, value, bank->gpio_regs->port_dr); raw_spin_unlock_irqrestore(&bank->slock, flags); + + return 0; } static int rockchip_gpio_get(struct gpio_chip *gc, unsigned int offset) @@ -325,7 +327,7 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) static const struct gpio_chip rockchip_gpiolib_chip = { .request = gpiochip_generic_request, .free = gpiochip_generic_free, - .set = rockchip_gpio_set, + .set_rv = rockchip_gpio_set, .get = rockchip_gpio_get, .get_direction = rockchip_gpio_get_direction, .direction_input = rockchip_gpio_direction_input, @@ -521,8 +523,8 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank) struct irq_chip_generic *gc; int ret; - bank->domain = irq_domain_create_linear(of_fwnode_handle(bank->of_node), 32, - &irq_generic_chip_ops, NULL); + bank->domain = irq_domain_create_linear(dev_fwnode(bank->dev), 32, 
&irq_generic_chip_ops, + NULL); if (!bank->domain) { dev_warn(bank->dev, "could not init irq domain for bank %s\n", bank->name); diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c index bf7f008f58d7..25bbd749b019 100644 --- a/drivers/gpio/gpio-rtd.c +++ b/drivers/gpio/gpio-rtd.c @@ -275,7 +275,7 @@ static int rtd_gpio_set_config(struct gpio_chip *chip, unsigned int offset, } } -static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +static int rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct rtd_gpio *data = gpiochip_get_data(chip); u32 mask = BIT(offset % 32); @@ -292,6 +292,8 @@ static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) else val &= ~mask; writel_relaxed(val, data->base + dato_reg_offset); + + return 0; } static int rtd_gpio_get(struct gpio_chip *chip, unsigned int offset) @@ -563,7 +565,7 @@ static int rtd_gpio_probe(struct platform_device *pdev) data->gpio_chip.get_direction = rtd_gpio_get_direction; data->gpio_chip.direction_input = rtd_gpio_direction_input; data->gpio_chip.direction_output = rtd_gpio_direction_output; - data->gpio_chip.set = rtd_gpio_set; + data->gpio_chip.set_rv = rtd_gpio_set; data->gpio_chip.get = rtd_gpio_get; data->gpio_chip.set_config = rtd_gpio_set_config; data->gpio_chip.parent = dev; diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 3f3ee36bc3cb..e9d054d78ccb 100644 --- a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c @@ -43,11 +43,14 @@ static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset) BIT(offset); } -static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int sa1100_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { int reg = value ? 
R_GPSR : R_GPCR; writel_relaxed(BIT(offset), sa1100_gpio_chip(chip)->membase + reg); + + return 0; } static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset) @@ -96,7 +99,7 @@ static struct sa1100_gpio_chip sa1100_gpio_chip = { .get_direction = sa1100_get_direction, .direction_input = sa1100_direction_input, .direction_output = sa1100_direction_output, - .set = sa1100_gpio_set, + .set_rv = sa1100_gpio_set, .get = sa1100_gpio_get, .to_irq = sa1100_to_irq, .base = 0, diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c index d770a6f3d846..c31244cf5e89 100644 --- a/drivers/gpio/gpio-sama5d2-piobu.c +++ b/drivers/gpio/gpio-sama5d2-piobu.c @@ -169,15 +169,15 @@ static int sama5d2_piobu_get(struct gpio_chip *chip, unsigned int pin) /* * sama5d2_piobu_set() - gpiochip set */ -static void sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin, - int value) +static int sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin, + int value) { if (!value) value = PIOBU_LOW; else value = PIOBU_HIGH; - sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value); + return sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value); } static int sama5d2_piobu_probe(struct platform_device *pdev) @@ -196,7 +196,7 @@ static int sama5d2_piobu_probe(struct platform_device *pdev) piobu->chip.direction_input = sama5d2_piobu_direction_input; piobu->chip.direction_output = sama5d2_piobu_direction_output; piobu->chip.get = sama5d2_piobu_get; - piobu->chip.set = sama5d2_piobu_set; + piobu->chip.set_rv = sama5d2_piobu_set; piobu->chip.base = -1; piobu->chip.ngpio = PIOBU_NUM; piobu->chip.can_sleep = 0; diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index ff0341b1222f..833ffdd98d74 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -117,7 +117,7 @@ static int sch_gpio_get(struct gpio_chip *gc, unsigned int gpio_num) return sch_gpio_reg_get(sch, gpio_num, GLV); } -static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val) +static int sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val) { struct sch_gpio *sch = gpiochip_get_data(gc); unsigned long flags; @@ -125,6 +125,8 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val) spin_lock_irqsave(&sch->lock, flags); sch_gpio_reg_set(sch, gpio_num, GLV, val); spin_unlock_irqrestore(&sch->lock, flags); + + return 0; } static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num, @@ -146,8 +148,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num, * But we cannot prevent a short low pulse if direction is set to high * and an external pull-up is connected. 
*/ - sch_gpio_set(gc, gpio_num, val); - return 0; + return sch_gpio_set(gc, gpio_num, val); } static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio_num) @@ -166,7 +167,7 @@ static const struct gpio_chip sch_gpio_chip = { .direction_input = sch_gpio_direction_in, .get = sch_gpio_get, .direction_output = sch_gpio_direction_out, - .set = sch_gpio_set, + .set_rv = sch_gpio_set, .get_direction = sch_gpio_get_direction, }; diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c index ba4fccf3cc94..44fb5fc21fb8 100644 --- a/drivers/gpio/gpio-sch311x.c +++ b/drivers/gpio/gpio-sch311x.c @@ -178,14 +178,16 @@ static void __sch311x_gpio_set(struct sch311x_gpio_block *block, outb(data, block->runtime_reg + block->data_reg); } -static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset, - int value) +static int sch311x_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); spin_lock(&block->lock); __sch311x_gpio_set(block, offset, value); spin_unlock(&block->lock); + + return 0; } static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset) @@ -295,7 +297,7 @@ static int sch311x_gpio_probe(struct platform_device *pdev) block->chip.get_direction = sch311x_gpio_get_direction; block->chip.set_config = sch311x_gpio_set_config; block->chip.get = sch311x_gpio_get; - block->chip.set = sch311x_gpio_set; + block->chip.set_rv = sch311x_gpio_set; block->chip.ngpio = 8; block->chip.parent = &pdev->dev; block->chip.base = sch311x_gpio_blocks[i].base; diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index f638219a7c4f..9503296422fd 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -39,7 +39,7 @@ #include "dev-sync-probe.h" #define GPIO_SIM_NGPIO_MAX 1024 -#define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */ +#define GPIO_SIM_PROP_MAX 5 /* Max 4 properties + sentinel. */ #define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */ static DEFINE_IDA(gpio_sim_ida); @@ -629,6 +629,7 @@ struct gpio_sim_line { unsigned int offset; char *name; + bool valid; /* There can only be one hog per line. 
*/ struct gpio_sim_hog *hog; @@ -744,6 +745,36 @@ gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names) } } +static unsigned int gpio_sim_get_reserved_ranges_size(struct gpio_sim_bank *bank) +{ + struct gpio_sim_line *line; + unsigned int size = 0; + + list_for_each_entry(line, &bank->line_list, siblings) { + if (line->valid) + continue; + + size += 2; + } + + return size; +} + +static void gpio_sim_set_reserved_ranges(struct gpio_sim_bank *bank, + u32 *ranges) +{ + struct gpio_sim_line *line; + int i = 0; + + list_for_each_entry(line, &bank->line_list, siblings) { + if (line->valid) + continue; + + ranges[i++] = line->offset; + ranges[i++] = 1; + } +} + static void gpio_sim_remove_hogs(struct gpio_sim_device *dev) { struct gpiod_hog *hog; @@ -844,9 +875,10 @@ static struct fwnode_handle * gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank, struct fwnode_handle *parent) { + unsigned int prop_idx = 0, line_names_size, ranges_size; struct property_entry properties[GPIO_SIM_PROP_MAX]; - unsigned int prop_idx = 0, line_names_size; char **line_names __free(kfree) = NULL; + u32 *ranges __free(kfree) = NULL; memset(properties, 0, sizeof(properties)); @@ -870,6 +902,19 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank, line_names, line_names_size); } + ranges_size = gpio_sim_get_reserved_ranges_size(bank); + if (ranges_size) { + ranges = kcalloc(ranges_size, sizeof(u32), GFP_KERNEL); + if (!ranges) + return ERR_PTR(-ENOMEM); + + gpio_sim_set_reserved_ranges(bank, ranges); + + properties[prop_idx++] = PROPERTY_ENTRY_U32_ARRAY_LEN( + "gpio-reserved-ranges", + ranges, ranges_size); + } + return fwnode_create_software_node(properties, parent); } @@ -1189,8 +1234,41 @@ static ssize_t gpio_sim_line_config_name_store(struct config_item *item, CONFIGFS_ATTR(gpio_sim_line_config_, name); +static ssize_t +gpio_sim_line_config_valid_show(struct config_item *item, char *page) +{ + struct gpio_sim_line *line = to_gpio_sim_line(item); + struct gpio_sim_device *dev = gpio_sim_line_get_device(line); + + guard(mutex)(&dev->lock); + + return sprintf(page, "%c\n", line->valid ? 
'1' : '0'); +} + +static ssize_t gpio_sim_line_config_valid_store(struct config_item *item, + const char *page, size_t count) +{ + struct gpio_sim_line *line = to_gpio_sim_line(item); + struct gpio_sim_device *dev = gpio_sim_line_get_device(line); + bool valid; + int ret; + + ret = kstrtobool(page, &valid); + if (ret) + return ret; + + guard(mutex)(&dev->lock); + + line->valid = valid; + + return count; +} + +CONFIGFS_ATTR(gpio_sim_line_config_, valid); + static struct configfs_attribute *gpio_sim_line_config_attrs[] = { &gpio_sim_line_config_attr_name, + &gpio_sim_line_config_attr_valid, NULL }; @@ -1399,6 +1477,7 @@ gpio_sim_bank_config_make_line_group(struct config_group *group, line->parent = bank; line->offset = offset; + line->valid = true; list_add_tail(&line->siblings, &bank->line_list); return &line->group; diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c index 051bc99bdfb2..95355dda621b 100644 --- a/drivers/gpio/gpio-siox.c +++ b/drivers/gpio/gpio-siox.c @@ -160,8 +160,8 @@ static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset) return ret; } -static void gpio_siox_set(struct gpio_chip *chip, - unsigned int offset, int value) +static int gpio_siox_set(struct gpio_chip *chip, + unsigned int offset, int value) { struct gpio_siox_ddata *ddata = gpiochip_get_data(chip); u8 mask = 1 << (19 - offset); @@ -174,6 +174,8 @@ static void gpio_siox_set(struct gpio_chip *chip, ddata->setdata[0] &= ~mask; mutex_unlock(&ddata->lock); + + return 0; } static int gpio_siox_direction_input(struct gpio_chip *chip, @@ -191,8 +193,7 @@ static int gpio_siox_direction_output(struct gpio_chip *chip, if (offset < 12) return -EINVAL; - gpio_siox_set(chip, offset, value); - return 0; + return gpio_siox_set(chip, offset, value); } static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset) @@ -236,7 +237,7 @@ static int gpio_siox_probe(struct siox_device *sdevice) gc->parent = dev; gc->owner = THIS_MODULE; gc->get = gpio_siox_get; - gc->set = gpio_siox_set; + gc->set_rv = gpio_siox_set; gc->direction_input = gpio_siox_direction_input; gc->direction_output = gpio_siox_direction_output; gc->get_direction = gpio_siox_get_direction; diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c index 8cf3b171c599..969dddd3d6fa 100644 --- a/drivers/gpio/gpio-sloppy-logic-analyzer.c +++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c @@ -306,7 +306,7 @@ static void gpio_la_poll_remove(struct platform_device *pdev) } static const struct of_device_id gpio_la_poll_of_match[] = { - { .compatible = GPIO_LA_NAME }, + { .compatible = "gpio-sloppy-logic-analyzer" }, { } }; MODULE_DEVICE_TABLE(of, gpio_la_poll_of_match); diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c index 6a3c4c625138..abd13c79ace0 100644 --- a/drivers/gpio/gpio-sodaville.c +++ b/drivers/gpio/gpio-sodaville.c @@ -169,8 +169,8 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd, IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); - sd->id = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), SDV_NUM_PUB_GPIOS, - sd->irq_base, 0, &irq_domain_sdv_ops, sd); + sd->id = irq_domain_create_legacy(dev_fwnode(&pdev->dev), SDV_NUM_PUB_GPIOS, sd->irq_base, + 0, &irq_domain_sdv_ops, sd); if (!sd->id) return -ENODEV; diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c index 51539185400d..55f0e8afa291 100644 --- a/drivers/gpio/gpio-spear-spics.c +++ b/drivers/gpio/gpio-spear-spics.c @@ -51,13 
+51,8 @@ struct spear_spics { struct gpio_chip chip; }; -/* gpio framework specific routines */ -static int spics_get_value(struct gpio_chip *chip, unsigned offset) -{ - return -ENXIO; -} - -static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value) +static int spics_set_value(struct gpio_chip *chip, unsigned int offset, + int value) { struct spear_spics *spics = gpiochip_get_data(chip); u32 tmp; @@ -74,18 +69,14 @@ static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value) tmp &= ~(0x1 << spics->cs_value_bit); tmp |= value << spics->cs_value_bit; writel_relaxed(tmp, spics->base + spics->perip_cfg); -} -static int spics_direction_input(struct gpio_chip *chip, unsigned offset) -{ - return -ENXIO; + return 0; } static int spics_direction_output(struct gpio_chip *chip, unsigned offset, int value) { - spics_set_value(chip, offset, value); - return 0; + return spics_set_value(chip, offset, value); } static int spics_request(struct gpio_chip *chip, unsigned offset) @@ -148,10 +139,8 @@ static int spics_gpio_probe(struct platform_device *pdev) spics->chip.base = -1; spics->chip.request = spics_request; spics->chip.free = spics_free; - spics->chip.direction_input = spics_direction_input; spics->chip.direction_output = spics_direction_output; - spics->chip.get = spics_get_value; - spics->chip.set = spics_set_value; + spics->chip.set_rv = spics_set_value; spics->chip.label = dev_name(&pdev->dev); spics->chip.parent = &pdev->dev; spics->chip.owner = THIS_MODULE; diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index c117c11bfb29..bbd5bf51c088 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -108,10 +108,12 @@ static int sprd_gpio_get(struct gpio_chip *chip, unsigned int offset) return sprd_gpio_read(chip, offset, SPRD_GPIO_DATA); } -static void sprd_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int sprd_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { sprd_gpio_update(chip, offset, SPRD_GPIO_DATA, value); + + return 0; } static void sprd_gpio_irq_mask(struct irq_data *data) @@ -243,7 +245,7 @@ static int sprd_gpio_probe(struct platform_device *pdev) sprd_gpio->chip.request = sprd_gpio_request; sprd_gpio->chip.free = sprd_gpio_free; sprd_gpio->chip.get = sprd_gpio_get; - sprd_gpio->chip.set = sprd_gpio_set; + sprd_gpio->chip.set_rv = sprd_gpio_set; sprd_gpio->chip.direction_input = sprd_gpio_direction_input; sprd_gpio->chip.direction_output = sprd_gpio_direction_output; diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index dce8ff322e47..0a270156e0be 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -54,7 +54,7 @@ static int stmpe_gpio_get(struct gpio_chip *chip, unsigned offset) return !!(ret & mask); } -static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +static int stmpe_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) { struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip); struct stmpe *stmpe = stmpe_gpio->stmpe; @@ -67,9 +67,9 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val) * For them we need to write 0 to clear and 1 to set. */ if (stmpe->regs[STMPE_IDX_GPSR_LSB] == stmpe->regs[STMPE_IDX_GPCR_LSB]) - stmpe_set_bits(stmpe, reg, mask, val ? mask : 0); - else - stmpe_reg_write(stmpe, reg, mask); + return stmpe_set_bits(stmpe, reg, mask, val ? 
mask : 0); + + return stmpe_reg_write(stmpe, reg, mask); } static int stmpe_gpio_get_direction(struct gpio_chip *chip, @@ -98,8 +98,11 @@ static int stmpe_gpio_direction_output(struct gpio_chip *chip, struct stmpe *stmpe = stmpe_gpio->stmpe; u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)]; u8 mask = BIT(offset % 8); + int ret; - stmpe_gpio_set(chip, offset, val); + ret = stmpe_gpio_set(chip, offset, val); + if (ret) + return ret; return stmpe_set_bits(stmpe, reg, mask, mask); } @@ -133,7 +136,7 @@ static const struct gpio_chip template_chip = { .direction_input = stmpe_gpio_direction_input, .get = stmpe_gpio_get, .direction_output = stmpe_gpio_direction_output, - .set = stmpe_gpio_set, + .set_rv = stmpe_gpio_set, .request = stmpe_gpio_request, .can_sleep = true, }; diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c index 5a6406d1f03a..fdda8de6ca36 100644 --- a/drivers/gpio/gpio-stp-xway.c +++ b/drivers/gpio/gpio-stp-xway.c @@ -113,7 +113,7 @@ static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio) * * Set the shadow value and call ltq_ebu_apply. */ -static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val) +static int xway_stp_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct xway_stp *chip = gpiochip_get_data(gc); @@ -124,6 +124,8 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val) xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0); if (!chip->reserved) xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0); + + return 0; } /** @@ -136,9 +138,7 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val) */ static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val) { - xway_stp_set(gc, gpio, val); - - return 0; + return xway_stp_set(gc, gpio, val); } /** @@ -249,7 +249,7 @@ static int xway_stp_probe(struct platform_device *pdev) chip->gc.label = "stp-xway"; chip->gc.direction_output = xway_stp_dir_out; chip->gc.get = xway_stp_get; - chip->gc.set = xway_stp_set; + chip->gc.set_rv = xway_stp_set; chip->gc.request = xway_stp_request; chip->gc.base = -1; chip->gc.owner = THIS_MODULE; diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 5ab394ec81e6..f86f78655c24 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -40,8 +40,8 @@ struct syscon_gpio_data { unsigned int bit_count; unsigned int dat_bit_offset; unsigned int dir_bit_offset; - void (*set)(struct gpio_chip *chip, - unsigned offset, int value); + int (*set)(struct gpio_chip *chip, unsigned int offset, + int value); }; struct syscon_gpio_priv { @@ -68,17 +68,17 @@ static int syscon_gpio_get(struct gpio_chip *chip, unsigned offset) return !!(val & BIT(offs % SYSCON_REG_BITS)); } -static void syscon_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +static int syscon_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) { struct syscon_gpio_priv *priv = gpiochip_get_data(chip); unsigned int offs; offs = priv->dreg_offset + priv->data->dat_bit_offset + offset; - regmap_update_bits(priv->syscon, - (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE, - BIT(offs % SYSCON_REG_BITS), - val ? BIT(offs % SYSCON_REG_BITS) : 0); + return regmap_update_bits(priv->syscon, + (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE, + BIT(offs % SYSCON_REG_BITS), + val ? 
BIT(offs % SYSCON_REG_BITS) : 0); } static int syscon_gpio_dir_in(struct gpio_chip *chip, unsigned offset) @@ -115,9 +115,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val) BIT(offs % SYSCON_REG_BITS)); } - chip->set(chip, offset, val); - - return 0; + return chip->set_rv(chip, offset, val); } static const struct syscon_gpio_data clps711x_mctrl_gpio = { @@ -127,8 +125,8 @@ static const struct syscon_gpio_data clps711x_mctrl_gpio = { .dat_bit_offset = 0x40 * 8 + 8, }; -static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset, - int val) +static int rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset, + int val) { struct syscon_gpio_priv *priv = gpiochip_get_data(chip); unsigned int offs; @@ -144,6 +142,8 @@ static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset, data); if (ret < 0) dev_err(chip->parent, "gpio write failed ret(%d)\n", ret); + + return ret; } static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = { @@ -156,7 +156,8 @@ static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = { #define KEYSTONE_LOCK_BIT BIT(0) -static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +static int keystone_gpio_set(struct gpio_chip *chip, unsigned int offset, + int val) { struct syscon_gpio_priv *priv = gpiochip_get_data(chip); unsigned int offs; @@ -165,7 +166,7 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val) offs = priv->dreg_offset + priv->data->dat_bit_offset + offset; if (!val) - return; + return 0; ret = regmap_update_bits( priv->syscon, @@ -174,6 +175,8 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val) BIT(offs % SYSCON_REG_BITS) | KEYSTONE_LOCK_BIT); if (ret < 0) dev_err(chip->parent, "gpio write failed ret(%d)\n", ret); + + return ret; } static const struct syscon_gpio_data keystone_dsp_gpio = { @@ -248,7 +251,7 @@ static int syscon_gpio_probe(struct platform_device *pdev) if (priv->data->flags & GPIO_SYSCON_FEAT_IN) priv->chip.direction_input = syscon_gpio_dir_in; if (priv->data->flags & GPIO_SYSCON_FEAT_OUT) { - priv->chip.set = priv->data->set ? : syscon_gpio_set; + priv->chip.set_rv = priv->data->set ? 
: syscon_gpio_set; priv->chip.direction_output = syscon_gpio_dir_out; } diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c index a415e6d36173..ce17b98e0623 100644 --- a/drivers/gpio/gpio-tangier.c +++ b/drivers/gpio/gpio-tangier.c @@ -90,7 +90,7 @@ static int tng_gpio_get(struct gpio_chip *chip, unsigned int offset) return !!(readl(gplr) & BIT(shift)); } -static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +static int tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct tng_gpio *priv = gpiochip_get_data(chip); void __iomem *reg; @@ -101,6 +101,8 @@ static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) guard(raw_spinlock_irqsave)(&priv->lock); writel(BIT(shift), reg); + + return 0; } static int tng_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) @@ -428,7 +430,7 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio) gpio->chip.direction_input = tng_gpio_direction_input; gpio->chip.direction_output = tng_gpio_direction_output; gpio->chip.get = tng_gpio_get; - gpio->chip.set = tng_gpio_set; + gpio->chip.set_rv = tng_gpio_set; gpio->chip.get_direction = tng_gpio_get_direction; gpio->chip.set_config = tng_gpio_set_config; gpio->chip.base = info->base; diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c index 8cf676fd0a0b..1869ee7f9423 100644 --- a/drivers/gpio/gpio-tb10x.c +++ b/drivers/gpio/gpio-tb10x.c @@ -183,9 +183,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev) if (ret != 0) return ret; - tb10x_gpio->domain = irq_domain_create_linear(of_fwnode_handle(np), - tb10x_gpio->gc.ngpio, - &irq_generic_chip_ops, NULL); + tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev), tb10x_gpio->gc.ngpio, + &irq_generic_chip_ops, NULL); if (!tb10x_gpio->domain) { return -ENOMEM; } diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index e62ee7e56908..0bd32809fd68 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -49,7 +49,7 @@ static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned int offset) return !!(ret & mask); } -static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) +static int tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) { struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; @@ -57,7 +57,7 @@ static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int va unsigned int pos = offset % 8; u8 data[] = {val ? 
BIT(pos) : 0, BIT(pos)}; - tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data); + return tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data); } static int tc3589x_gpio_direction_output(struct gpio_chip *chip, @@ -67,8 +67,11 @@ static int tc3589x_gpio_direction_output(struct gpio_chip *chip, struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; u8 reg = TC3589x_GPIODIR0 + offset / 8; unsigned int pos = offset % 8; + int ret; - tc3589x_gpio_set(chip, offset, val); + ret = tc3589x_gpio_set(chip, offset, val); + if (ret) + return ret; return tc3589x_set_bits(tc3589x, reg, BIT(pos), BIT(pos)); } @@ -146,7 +149,7 @@ static const struct gpio_chip template_chip = { .label = "tc3589x", .owner = THIS_MODULE, .get = tc3589x_gpio_get, - .set = tc3589x_gpio_set, + .set_rv = tc3589x_gpio_set, .direction_output = tc3589x_gpio_direction_output, .direction_input = tc3589x_gpio_direction_input, .get_direction = tc3589x_gpio_get_direction, diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 9ad286adf263..126fd12550aa 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -146,12 +146,14 @@ static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset) tegra_gpio_disable(tgi, offset); } -static void tegra_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int tegra_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct tegra_gpio_info *tgi = gpiochip_get_data(chip); tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value); + + return 0; } static int tegra_gpio_get(struct gpio_chip *chip, unsigned int offset) @@ -718,7 +720,7 @@ static int tegra_gpio_probe(struct platform_device *pdev) tgi->gc.direction_input = tegra_gpio_direction_input; tgi->gc.get = tegra_gpio_get; tgi->gc.direction_output = tegra_gpio_direction_output; - tgi->gc.set = tegra_gpio_set; + tgi->gc.set_rv = tegra_gpio_set; tgi->gc.get_direction = tegra_gpio_get_direction; tgi->gc.base = 0; tgi->gc.ngpio = tgi->bank_count * 32; diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index d27bfac6c9f5..f902da15c419 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -202,6 +202,28 @@ static int tegra186_init_valid_mask(struct gpio_chip *chip, return 0; } +static int tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset, + int level) +{ + struct tegra_gpio *gpio = gpiochip_get_data(chip); + void __iomem *base; + u32 value; + + base = tegra186_gpio_get_base(gpio, offset); + if (WARN_ON(base == NULL)) + return -ENODEV; + + value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE); + if (level == 0) + value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH; + else + value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH; + + writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE); + + return 0; +} + static int tegra186_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { @@ -249,9 +271,12 @@ static int tegra186_gpio_direction_output(struct gpio_chip *chip, struct tegra_gpio *gpio = gpiochip_get_data(chip); void __iomem *base; u32 value; + int ret; /* configure output level first */ - chip->set(chip, offset, level); + ret = tegra186_gpio_set(chip, offset, level); + if (ret) + return ret; base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) @@ -359,26 +384,6 @@ static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset) return value & BIT(0); } -static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset, - int level) -{ - struct tegra_gpio *gpio = 
gpiochip_get_data(chip); - void __iomem *base; - u32 value; - - base = tegra186_gpio_get_base(gpio, offset); - if (WARN_ON(base == NULL)) - return; - - value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE); - if (level == 0) - value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH; - else - value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH; - - writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE); -} - static int tegra186_gpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) @@ -886,7 +891,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev) gpio->gpio.direction_input = tegra186_gpio_direction_input; gpio->gpio.direction_output = tegra186_gpio_direction_output; gpio->gpio.get = tegra186_gpio_get; - gpio->gpio.set = tegra186_gpio_set; + gpio->gpio.set_rv = tegra186_gpio_set; gpio->gpio.set_config = tegra186_gpio_set_config; gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges; gpio->gpio.init_valid_mask = tegra186_init_valid_mask; diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index 5b851e904c11..eb6a1f0279c0 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -116,8 +116,8 @@ static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line) return 0; } -static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line, - int value) +static int thunderx_gpio_set(struct gpio_chip *chip, unsigned int line, + int value) { struct thunderx_gpio *txgpio = gpiochip_get_data(chip); int bank = line / 64; @@ -127,6 +127,8 @@ static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line, (bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR); writeq(BIT_ULL(bank_bit), reg); + + return 0; } static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line, @@ -269,9 +271,9 @@ static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line) return masked_bits != 0; } -static void thunderx_gpio_set_multiple(struct gpio_chip *chip, - unsigned long *mask, - unsigned long *bits) +static int thunderx_gpio_set_multiple(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits) { int bank; u64 set_bits, clear_bits; @@ -283,6 +285,8 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip, writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET); writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR); } + + return 0; } static void thunderx_gpio_irq_ack(struct irq_data *d) @@ -529,8 +533,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, chip->direction_input = thunderx_gpio_dir_in; chip->get = thunderx_gpio_get; chip->direction_output = thunderx_gpio_dir_out; - chip->set = thunderx_gpio_set; - chip->set_multiple = thunderx_gpio_set_multiple; + chip->set_rv = thunderx_gpio_set; + chip->set_multiple_rv = thunderx_gpio_set_multiple; chip->set_config = thunderx_gpio_set_config; girq = &chip->irq; gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip); diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index cb303a26f4d3..fbb883089189 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -80,10 +80,9 @@ static int timbgpio_gpio_direction_output(struct gpio_chip *gpio, return timbgpio_update_bit(gpio, nr, TGPIODIR, false); } -static void timbgpio_gpio_set(struct gpio_chip *gpio, - unsigned nr, int val) +static int timbgpio_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val) { - timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0); + return timbgpio_update_bit(gpio, nr, 
TGPIOVAL, val != 0); } static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset) @@ -254,7 +253,7 @@ static int timbgpio_probe(struct platform_device *pdev) gc->direction_input = timbgpio_gpio_direction_input; gc->get = timbgpio_gpio_get; gc->direction_output = timbgpio_gpio_direction_output; - gc->set = timbgpio_gpio_set; + gc->set_rv = timbgpio_gpio_set; gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL; gc->dbg_show = NULL; gc->base = pdata->gpio_base; diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c index effb7b8ff81f..d5b8568ab061 100644 --- a/drivers/gpio/gpio-tpic2810.c +++ b/drivers/gpio/gpio-tpic2810.c @@ -25,7 +25,7 @@ struct tpic2810 { struct mutex lock; }; -static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value); +static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value); static int tpic2810_get_direction(struct gpio_chip *chip, unsigned offset) @@ -34,19 +34,11 @@ static int tpic2810_get_direction(struct gpio_chip *chip, return GPIO_LINE_DIRECTION_OUT; } -static int tpic2810_direction_input(struct gpio_chip *chip, - unsigned offset) -{ - /* This device is output only */ - return -EINVAL; -} - static int tpic2810_direction_output(struct gpio_chip *chip, unsigned offset, int value) { /* This device always output */ - tpic2810_set(chip, offset, value); - return 0; + return tpic2810_set(chip, offset, value); } static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits) @@ -68,25 +60,28 @@ static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits) mutex_unlock(&gpio->lock); } -static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value) +static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value) { tpic2810_set_mask_bits(chip, BIT(offset), value ? BIT(offset) : 0); + + return 0; } -static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask, - unsigned long *bits) +static int tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) { tpic2810_set_mask_bits(chip, *mask, *bits); + + return 0; } static const struct gpio_chip template_chip = { .label = "tpic2810", .owner = THIS_MODULE, .get_direction = tpic2810_get_direction, - .direction_input = tpic2810_direction_input, .direction_output = tpic2810_direction_output, - .set = tpic2810_set, - .set_multiple = tpic2810_set_multiple, + .set_rv = tpic2810_set, + .set_multiple_rv = tpic2810_set_multiple, .base = -1, .ngpio = 8, .can_sleep = true, diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c index 8f5827554e1e..08fa061b73ef 100644 --- a/drivers/gpio/gpio-tps65086.c +++ b/drivers/gpio/gpio-tps65086.c @@ -37,10 +37,8 @@ static int tps65086_gpio_direction_output(struct gpio_chip *chip, struct tps65086_gpio *gpio = gpiochip_get_data(chip); /* Set the initial value */ - regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL, - BIT(4 + offset), value ? BIT(4 + offset) : 0); - - return 0; + return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL, + BIT(4 + offset), value ? 
BIT(4 + offset) : 0); } static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset) @@ -55,13 +53,13 @@ static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset) return val & BIT(4 + offset); } -static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset, - int value) +static int tps65086_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct tps65086_gpio *gpio = gpiochip_get_data(chip); - regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL, - BIT(4 + offset), value ? BIT(4 + offset) : 0); + return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL, + BIT(4 + offset), value ? BIT(4 + offset) : 0); } static const struct gpio_chip template_chip = { @@ -71,7 +69,7 @@ static const struct gpio_chip template_chip = { .direction_input = tps65086_gpio_direction_input, .direction_output = tps65086_gpio_direction_output, .get = tps65086_gpio_get, - .set = tps65086_gpio_set, + .set_rv = tps65086_gpio_set, .base = -1, .ngpio = 4, .can_sleep = true, diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c index d7d9d50dcddf..49cd7754ed05 100644 --- a/drivers/gpio/gpio-tps65218.c +++ b/drivers/gpio/gpio-tps65218.c @@ -34,34 +34,28 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset) return !!(val & (TPS65218_ENABLE2_GPIO1 << offset)); } -static void tps65218_gpio_set(struct gpio_chip *gc, unsigned offset, - int value) +static int tps65218_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc); struct tps65218 *tps65218 = tps65218_gpio->tps65218; if (value) - tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2, - TPS65218_ENABLE2_GPIO1 << offset, - TPS65218_ENABLE2_GPIO1 << offset, - TPS65218_PROTECT_L1); - else - tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2, - TPS65218_ENABLE2_GPIO1 << offset, - TPS65218_PROTECT_L1); + return tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2, + TPS65218_ENABLE2_GPIO1 << offset, + TPS65218_ENABLE2_GPIO1 << offset, + TPS65218_PROTECT_L1); + + return tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2, + TPS65218_ENABLE2_GPIO1 << offset, + TPS65218_PROTECT_L1); } static int tps65218_gpio_output(struct gpio_chip *gc, unsigned offset, int value) { /* Only drives GPOs */ - tps65218_gpio_set(gc, offset, value); - return 0; -} - -static int tps65218_gpio_input(struct gpio_chip *gc, unsigned offset) -{ - return -EPERM; + return tps65218_gpio_set(gc, offset, value); } static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset) @@ -174,9 +168,8 @@ static const struct gpio_chip template_chip = { .owner = THIS_MODULE, .request = tps65218_gpio_request, .direction_output = tps65218_gpio_output, - .direction_input = tps65218_gpio_input, .get = tps65218_gpio_get, - .set = tps65218_gpio_set, + .set_rv = tps65218_gpio_set, .set_config = tps65218_gpio_set_config, .can_sleep = true, .ngpio = 3, diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c index 526640c39a11..c0177088c54c 100644 --- a/drivers/gpio/gpio-tps65219.c +++ b/drivers/gpio/gpio-tps65219.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * GPIO driver for TI TPS65219 PMICs + * GPIO driver for TI TPS65214/TPS65215/TPS65219 PMICs * - * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2022, 2025 Texas Instruments Incorporated - http://www.ti.com/ */ #include <linux/bits.h> @@ -13,20 +13,48 @@ #include <linux/regmap.h> #define TPS65219_GPIO0_DIR_MASK BIT(3) -#define 
TPS65219_GPIO0_OFFSET 2 -#define TPS65219_GPIO0_IDX 0 +#define TPS65214_GPIO0_DIR_MASK BIT(1) +#define TPS6521X_GPIO0_OFFSET 2 +#define TPS6521X_GPIO0_IDX 0 + +/* + * TPS65214 GPIO mapping + * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2 + * Linux gpio offset 1 -> GPO1 (pin9 ) -> bit_offset 0 + * + * TPS65215 & TPS65219 GPIO mapping + * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2 + * Linux gpio offset 1 -> GPO1 (pin8 ) -> bit_offset 0 + * Linux gpio offset 2 -> GPO2 (pin17) -> bit_offset 1 + */ struct tps65219_gpio { + int (*change_dir)(struct gpio_chip *gc, unsigned int offset, unsigned int dir); struct gpio_chip gpio_chip; struct tps65219 *tps; }; +static int tps65214_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct tps65219_gpio *gpio = gpiochip_get_data(gc); + int ret, val; + + if (offset != TPS6521X_GPIO0_IDX) + return GPIO_LINE_DIRECTION_OUT; + + ret = regmap_read(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, &val); + if (ret) + return ret; + + return !(val & TPS65214_GPIO0_DIR_MASK); +} + static int tps65219_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) { struct tps65219_gpio *gpio = gpiochip_get_data(gc); int ret, val; - if (offset != TPS65219_GPIO0_IDX) + if (offset != TPS6521X_GPIO0_IDX) return GPIO_LINE_DIRECTION_OUT; ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val); @@ -42,7 +70,7 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset) struct device *dev = gpio->tps->dev; int ret, val; - if (offset != TPS65219_GPIO0_IDX) { + if (offset != TPS6521X_GPIO0_IDX) { dev_err(dev, "GPIO%d is output only, cannot get\n", offset); return -ENOTSUPP; } @@ -65,19 +93,18 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset) return ret; } -static void tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +static int tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) { struct tps65219_gpio *gpio = gpiochip_get_data(gc); - struct device *dev = gpio->tps->dev; int v, mask, bit; - bit = (offset == TPS65219_GPIO0_IDX) ? TPS65219_GPIO0_OFFSET : offset - 1; + bit = (offset == TPS6521X_GPIO0_IDX) ? TPS6521X_GPIO0_OFFSET : offset - 1; mask = BIT(bit); v = value ? 
mask : 0; - if (regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, mask, v)) - dev_err(dev, "GPIO%d, set to value %d failed.\n", offset, value); + return regmap_update_bits(gpio->tps->regmap, + TPS65219_REG_GENERAL_CONFIG, mask, v); } static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int offset, @@ -112,12 +139,39 @@ static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int off return -ENOTSUPP; } +static int tps65214_gpio_change_direction(struct gpio_chip *gc, unsigned int offset, + unsigned int direction) +{ + struct tps65219_gpio *gpio = gpiochip_get_data(gc); + struct device *dev = gpio->tps->dev; + int val, ret; + + /** + * Verified if GPIO or GPO in parent function + * Masked value: 0 = GPIO, 1 = VSEL + */ + ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val); + if (ret) + return ret; + + ret = !!(val & BIT(TPS65219_GPIO0_DIR_MASK)); + if (ret) + dev_err(dev, "GPIO%d configured as VSEL, not GPIO\n", offset); + + ret = regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, + TPS65214_GPIO0_DIR_MASK, direction); + if (ret) + dev_err(dev, "Fail to change direction to %u for GPIO%d.\n", direction, offset); + + return ret; +} + static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { struct tps65219_gpio *gpio = gpiochip_get_data(gc); struct device *dev = gpio->tps->dev; - if (offset != TPS65219_GPIO0_IDX) { + if (offset != TPS6521X_GPIO0_IDX) { dev_err(dev, "GPIO%d is output only, cannot change to input\n", offset); return -ENOTSUPP; } @@ -125,21 +179,36 @@ static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offs if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_IN) return 0; - return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_IN); + return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_IN); } static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) { + struct tps65219_gpio *gpio = gpiochip_get_data(gc); + tps65219_gpio_set(gc, offset, value); - if (offset != TPS65219_GPIO0_IDX) + if (offset != TPS6521X_GPIO0_IDX) return 0; if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT) return 0; - return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_OUT); + return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_OUT); } +static const struct gpio_chip tps65214_template_chip = { + .label = "tps65214-gpio", + .owner = THIS_MODULE, + .get_direction = tps65214_gpio_get_direction, + .direction_input = tps65219_gpio_direction_input, + .direction_output = tps65219_gpio_direction_output, + .get = tps65219_gpio_get, + .set_rv = tps65219_gpio_set, + .base = -1, + .ngpio = 2, + .can_sleep = true, +}; + static const struct gpio_chip tps65219_template_chip = { .label = "tps65219-gpio", .owner = THIS_MODULE, @@ -147,7 +216,7 @@ static const struct gpio_chip tps65219_template_chip = { .direction_input = tps65219_gpio_direction_input, .direction_output = tps65219_gpio_direction_output, .get = tps65219_gpio_get, - .set = tps65219_gpio_set, + .set_rv = tps65219_gpio_set, .base = -1, .ngpio = 3, .can_sleep = true, @@ -155,6 +224,7 @@ static const struct gpio_chip tps65219_template_chip = { static int tps65219_gpio_probe(struct platform_device *pdev) { + enum pmic_id chip = platform_get_device_id(pdev)->driver_data; struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent); struct tps65219_gpio *gpio; @@ -162,22 +232,38 @@ static int tps65219_gpio_probe(struct 
platform_device *pdev) if (!gpio) return -ENOMEM; + if (chip == TPS65214) { + gpio->gpio_chip = tps65214_template_chip; + gpio->change_dir = tps65214_gpio_change_direction; + } else if (chip == TPS65219) { + gpio->gpio_chip = tps65219_template_chip; + gpio->change_dir = tps65219_gpio_change_direction; + } else { + return -ENODATA; + } + gpio->tps = tps; - gpio->gpio_chip = tps65219_template_chip; gpio->gpio_chip.parent = tps->dev; return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip, gpio); } +static const struct platform_device_id tps6521x_gpio_id_table[] = { + { "tps65214-gpio", TPS65214 }, + { "tps65219-gpio", TPS65219 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, tps6521x_gpio_id_table); + static struct platform_driver tps65219_gpio_driver = { .driver = { .name = "tps65219-gpio", }, .probe = tps65219_gpio_probe, + .id_table = tps6521x_gpio_id_table, }; module_platform_driver(tps65219_gpio_driver); -MODULE_ALIAS("platform:tps65219-gpio"); MODULE_AUTHOR("Jonathan Cormier <jcormier@criticallink.com>"); -MODULE_DESCRIPTION("TPS65219 GPIO driver"); +MODULE_DESCRIPTION("TPS65214/TPS65215/TPS65219 GPIO driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c index d277aa951143..f1ced092f38a 100644 --- a/drivers/gpio/gpio-tps6586x.c +++ b/drivers/gpio/gpio-tps6586x.c @@ -40,13 +40,13 @@ static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset) return !!(val & (1 << offset)); } -static void tps6586x_gpio_set(struct gpio_chip *gc, unsigned offset, - int value) +static int tps6586x_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc); - tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2, - value << offset, 1 << offset); + return tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2, + value << offset, 1 << offset); } static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset, @@ -54,8 +54,11 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset, { struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc); uint8_t val, mask; + int ret; - tps6586x_gpio_set(gc, offset, value); + ret = tps6586x_gpio_set(gc, offset, value); + if (ret) + return ret; val = 0x1 << (offset * 2); mask = 0x3 << (offset * 2); @@ -95,7 +98,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev) /* FIXME: add handling of GPIOs as dedicated inputs */ tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output; - tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set; + tps6586x_gpio->gpio_chip.set_rv = tps6586x_gpio_set; tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get; tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq; diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c index 187d21580573..3204f55394cf 100644 --- a/drivers/gpio/gpio-tps65910.c +++ b/drivers/gpio/gpio-tps65910.c @@ -36,18 +36,18 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset) return 0; } -static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset, - int value) +static int tps65910_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc); struct tps65910 *tps65910 = tps65910_gpio->tps65910; if (value) - regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset, - GPIO_SET_MASK); - else - regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset, - GPIO_SET_MASK); + return regmap_set_bits(tps65910->regmap, + TPS65910_GPIO0 + offset, 
GPIO_SET_MASK); + + return regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset, + GPIO_SET_MASK); } static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset, @@ -55,9 +55,12 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset, { struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc); struct tps65910 *tps65910 = tps65910_gpio->tps65910; + int ret; /* Set the initial value */ - tps65910_gpio_set(gc, offset, value); + ret = tps65910_gpio_set(gc, offset, value); + if (ret) + return ret; return regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset, GPIO_CFG_MASK); @@ -136,7 +139,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev) tps65910_gpio->gpio_chip.can_sleep = true; tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input; tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output; - tps65910_gpio->gpio_chip.set = tps65910_gpio_set; + tps65910_gpio->gpio_chip.set_rv = tps65910_gpio_set; tps65910_gpio->gpio_chip.get = tps65910_gpio_get; tps65910_gpio->gpio_chip.parent = &pdev->dev; diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c index fab771cb6a87..d586ccfbfc56 100644 --- a/drivers/gpio/gpio-tps65912.c +++ b/drivers/gpio/gpio-tps65912.c @@ -49,10 +49,13 @@ static int tps65912_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int value) { struct tps65912_gpio *gpio = gpiochip_get_data(gc); + int ret; /* Set the initial value */ - regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, - GPIO_SET_MASK, value ? GPIO_SET_MASK : 0); + ret = regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, + GPIO_SET_MASK, value ? GPIO_SET_MASK : 0); + if (ret) + return ret; return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, GPIO_CFG_MASK, GPIO_CFG_MASK); @@ -73,13 +76,13 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) return 0; } -static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, - int value) +static int tps65912_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct tps65912_gpio *gpio = gpiochip_get_data(gc); - regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, - GPIO_SET_MASK, value ? GPIO_SET_MASK : 0); + return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, + GPIO_SET_MASK, value ? GPIO_SET_MASK : 0); } static const struct gpio_chip template_chip = { @@ -89,7 +92,7 @@ static const struct gpio_chip template_chip = { .direction_input = tps65912_gpio_direction_input, .direction_output = tps65912_gpio_direction_output, .get = tps65912_gpio_get, - .set = tps65912_gpio_set, + .set_rv = tps65912_gpio_set, .base = -1, .ngpio = 5, .can_sleep = true, diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c index 532deaddfd4e..3b8805c854f7 100644 --- a/drivers/gpio/gpio-tps68470.c +++ b/drivers/gpio/gpio-tps68470.c @@ -70,8 +70,8 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc, GPIO_LINE_DIRECTION_IN; } -static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset, - int value) +static int tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc); struct regmap *regmap = tps68470_gpio->tps68470_regmap; @@ -82,7 +82,8 @@ static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset, offset -= TPS68470_N_REGULAR_GPIO; } - regmap_update_bits(regmap, reg, BIT(offset), value ? 
BIT(offset) : 0); + return regmap_update_bits(regmap, reg, BIT(offset), + value ? BIT(offset) : 0); } static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset, @@ -90,9 +91,12 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset, { struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc); struct regmap *regmap = tps68470_gpio->tps68470_regmap; + int ret; /* Set the initial value */ - tps68470_gpio_set(gc, offset, value); + ret = tps68470_gpio_set(gc, offset, value); + if (ret) + return ret; /* rest are always outputs */ if (offset >= TPS68470_N_REGULAR_GPIO) @@ -138,7 +142,7 @@ static int tps68470_gpio_probe(struct platform_device *pdev) tps68470_gpio->gc.direction_output = tps68470_gpio_output; tps68470_gpio->gc.get = tps68470_gpio_get; tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction; - tps68470_gpio->gc.set = tps68470_gpio_set; + tps68470_gpio->gc.set_rv = tps68470_gpio_set; tps68470_gpio->gc.can_sleep = true; tps68470_gpio->gc.names = tps68470_names; tps68470_gpio->gc.ngpio = TPS68470_N_GPIO; diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index 18f523a15b3c..056799ecce6a 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -93,14 +93,16 @@ static void _tqmx86_gpio_set(struct tqmx86_gpio_data *gpio, unsigned int offset, tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD); } -static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); guard(raw_spinlock_irqsave)(&gpio->spinlock); _tqmx86_gpio_set(gpio, offset, value); + + return 0; } static int tqmx86_gpio_direction_input(struct gpio_chip *chip, @@ -368,7 +370,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) chip->direction_output = tqmx86_gpio_direction_output; chip->get_direction = tqmx86_gpio_get_direction; chip->get = tqmx86_gpio_get; - chip->set = tqmx86_gpio_set; + chip->set_rv = tqmx86_gpio_set; chip->ngpio = TQMX86_NGPIO; chip->parent = pdev->dev.parent; diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c index 5c806140fdf0..35dd2d09b4d4 100644 --- a/drivers/gpio/gpio-ts4900.c +++ b/drivers/gpio/gpio-ts4900.c @@ -95,16 +95,16 @@ static int ts4900_gpio_get(struct gpio_chip *chip, unsigned int offset) return !!(reg & priv->input_bit); } -static void ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); if (value) - regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, - TS4900_GPIO_OUT); - else - regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0); + return regmap_update_bits(priv->regmap, offset, + TS4900_GPIO_OUT, TS4900_GPIO_OUT); + + return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0); } static const struct regmap_config ts4900_regmap_config = { @@ -119,7 +119,7 @@ static const struct gpio_chip template_chip = { .direction_input = ts4900_gpio_direction_input, .direction_output = ts4900_gpio_direction_output, .get = ts4900_gpio_get, - .set = ts4900_gpio_set, + .set_rv = ts4900_gpio_set, .base = -1, .can_sleep = true, }; diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c index 61cbec5c06a7..bb432ed73698 100644 --- a/drivers/gpio/gpio-ts5500.c +++ b/drivers/gpio/gpio-ts5500.c @@ -244,7 +244,7 @@ 
static int ts5500_gpio_output(struct gpio_chip *chip, unsigned offset, int val) return 0; } -static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +static int ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val) { struct ts5500_priv *priv = gpiochip_get_data(chip); const struct ts5500_dio line = priv->pinout[offset]; @@ -256,6 +256,8 @@ static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val) else ts5500_clear_mask(line.value_mask, line.value_addr); spin_unlock_irqrestore(&priv->lock, flags); + + return 0; } static int ts5500_gpio_to_irq(struct gpio_chip *chip, unsigned offset) @@ -338,7 +340,7 @@ static int ts5500_dio_probe(struct platform_device *pdev) priv->gpio_chip.direction_input = ts5500_gpio_input; priv->gpio_chip.direction_output = ts5500_gpio_output; priv->gpio_chip.get = ts5500_gpio_get; - priv->gpio_chip.set = ts5500_gpio_set; + priv->gpio_chip.set_rv = ts5500_gpio_set; priv->gpio_chip.to_irq = ts5500_gpio_to_irq; priv->gpio_chip.base = -1; diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index 0d17985a5fdc..e39e39e3ef85 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c @@ -120,7 +120,7 @@ static u8 cached_leden; * external pullup is needed. We could also expose the integrated PWM * as a LED brightness control; we initialize it as "always on". */ -static void twl4030_led_set_value(int led, int value) +static int twl4030_led_set_value(int led, int value) { u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM; @@ -132,8 +132,8 @@ static void twl4030_led_set_value(int led, int value) else cached_leden |= mask; - WARN_ON_ONCE(twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden, - TWL4030_LED_LEDEN_REG)); + return twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden, + TWL4030_LED_LEDEN_REG); } static int twl4030_set_gpio_direction(int gpio, int is_input) @@ -278,7 +278,7 @@ static void twl_free(struct gpio_chip *chip, unsigned offset) mutex_lock(&priv->mutex); if (offset >= TWL4030_GPIO_MAX) { - twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1); + WARN_ON_ONCE(twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1)); goto out; } @@ -334,15 +334,16 @@ out: return ret; } -static void twl_set(struct gpio_chip *chip, unsigned offset, int value) +static int twl_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); + int ret; mutex_lock(&priv->mutex); if (offset < TWL4030_GPIO_MAX) - twl4030_set_gpio_dataout(offset, value); + ret = twl4030_set_gpio_dataout(offset, value); else - twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value); + ret = twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value); if (value) priv->out_state |= BIT(offset); @@ -350,6 +351,8 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value) priv->out_state &= ~BIT(offset); mutex_unlock(&priv->mutex); + + return ret; } static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) @@ -373,9 +376,7 @@ static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) priv->direction |= BIT(offset); mutex_unlock(&priv->mutex); - twl_set(chip, offset, value); - - return ret; + return twl_set(chip, offset, value); } static int twl_get_direction(struct gpio_chip *chip, unsigned offset) @@ -418,7 +419,7 @@ static const struct gpio_chip template_chip = { .direction_output = twl_direction_out, .get_direction = twl_get_direction, .get = twl_get, - .set = twl_set, + .set_rv = twl_set, .to_irq = twl_to_irq, .can_sleep = true, }; 
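/*
 * Editor's note: the hunks in this part of the series repeatedly apply the
 * same conversion -- the void gpio_chip .set()/.set_multiple() callbacks are
 * replaced by the int-returning .set_rv()/.set_multiple_rv() variants (the
 * transitional fields used throughout these diffs) so that register-access
 * errors propagate to gpiolib instead of being silently dropped.  Below is a
 * minimal sketch of that pattern for a hypothetical regmap-backed "foo"
 * expander; the foo_* names, the FOO_OUT register and the foo_gpio struct are
 * illustrative assumptions only and are not part of any driver touched here.
 */
#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/regmap.h>

#define FOO_OUT	0x00	/* hypothetical output-level register */

struct foo_gpio {
	struct regmap *regmap;
	struct gpio_chip chip;
};

/* New-style setter: returns 0 or the negative errno from regmap. */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	return regmap_update_bits(foo->regmap, FOO_OUT, BIT(offset),
				  value ? BIT(offset) : 0);
}

/*
 * direction_output() can now simply propagate the setter's return value
 * instead of ignoring it, mirroring e.g. the tps65086 and tps65218 hunks.
 */
static int foo_gpio_direction_output(struct gpio_chip *gc,
				     unsigned int offset, int value)
{
	return foo_gpio_set(gc, offset, value);
}

static const struct gpio_chip foo_template_chip = {
	.label			= "foo-gpio",
	.owner			= THIS_MODULE,
	.direction_output	= foo_gpio_direction_output,
	.set_rv			= foo_gpio_set,	/* was: .set = foo_gpio_set */
	.base			= -1,
	.ngpio			= 8,
	.can_sleep		= true,
};

/*
 * For controllers whose register writes cannot fail (plain MMIO, as in the
 * tegra186, tqmx86 or xgene hunks), the converted callback keeps its body
 * unchanged and simply ends with "return 0;".
 */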
@@ -523,7 +524,7 @@ static int gpio_twl4030_probe(struct platform_device *pdev) return irq_base; } - irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), TWL4030_GPIO_MAX, irq_base, 0, + irq_domain_create_legacy(dev_fwnode(&pdev->dev), TWL4030_GPIO_MAX, irq_base, 0, &irq_domain_simple_ops, NULL); ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base); diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c index b9171bf66168..b2196b62b528 100644 --- a/drivers/gpio/gpio-twl6040.c +++ b/drivers/gpio/gpio-twl6040.c @@ -37,14 +37,8 @@ static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset) return GPIO_LINE_DIRECTION_OUT; } -static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset, - int value) -{ - /* This only drives GPOs, and can't change direction */ - return 0; -} - -static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value) +static int twl6040gpo_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct twl6040 *twl6040 = gpiochip_get_data(chip); int ret; @@ -52,14 +46,21 @@ static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value) ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL); if (ret < 0) - return; + return ret; if (value) gpoctl = ret | BIT(offset); else gpoctl = ret & ~BIT(offset); - twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl); + return twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl); +} + +static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned int offset, + int value) +{ + /* This only drives GPOs, and can't change direction */ + return twl6040gpo_set(chip, offset, value); } static struct gpio_chip twl6040gpo_chip = { @@ -68,7 +69,7 @@ static struct gpio_chip twl6040gpo_chip = { .get = twl6040gpo_get, .direction_output = twl6040gpo_direction_out, .get_direction = twl6040gpo_get_direction, - .set = twl6040gpo_set, + .set_rv = twl6040gpo_set, .can_sleep = true, }; diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c index d738da8718f9..8939556f42b6 100644 --- a/drivers/gpio/gpio-uniphier.c +++ b/drivers/gpio/gpio-uniphier.c @@ -138,14 +138,16 @@ static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset) return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA); } -static void uniphier_gpio_set(struct gpio_chip *chip, - unsigned int offset, int val) +static int uniphier_gpio_set(struct gpio_chip *chip, + unsigned int offset, int val) { uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val); + + return 0; } -static void uniphier_gpio_set_multiple(struct gpio_chip *chip, - unsigned long *mask, unsigned long *bits) +static int uniphier_gpio_set_multiple(struct gpio_chip *chip, + unsigned long *mask, unsigned long *bits) { unsigned long i, bank, bank_mask, bank_bits; @@ -156,6 +158,8 @@ static void uniphier_gpio_set_multiple(struct gpio_chip *chip, uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA, bank_mask, bank_bits); } + + return 0; } static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) @@ -382,8 +386,8 @@ static int uniphier_gpio_probe(struct platform_device *pdev) chip->direction_input = uniphier_gpio_direction_input; chip->direction_output = uniphier_gpio_direction_output; chip->get = uniphier_gpio_get; - chip->set = uniphier_gpio_set; - chip->set_multiple = uniphier_gpio_set_multiple; + chip->set_rv = uniphier_gpio_set; + chip->set_multiple_rv = uniphier_gpio_set_multiple; chip->to_irq = uniphier_gpio_to_irq; chip->base 
= -1; chip->ngpio = ngpios; diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c index e55d28a8a66f..e8e906b54d51 100644 --- a/drivers/gpio/gpio-viperboard.c +++ b/drivers/gpio/gpio-viperboard.c @@ -128,45 +128,50 @@ static int vprbrd_gpioa_get(struct gpio_chip *chip, return answer; } -static void vprbrd_gpioa_set(struct gpio_chip *chip, - unsigned int offset, int value) +static int vprbrd_gpioa_set(struct gpio_chip *chip, unsigned int offset, + int value) { - int ret; + int ret = 0; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf; - if (gpio->gpioa_out & (1 << offset)) { - if (value) - gpio->gpioa_val |= (1 << offset); - else - gpio->gpioa_val &= ~(1 << offset); - - mutex_lock(&vb->lock); - - gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT; - gamsg->clk = 0x00; - gamsg->offset = offset; - gamsg->t1 = 0x00; - gamsg->t2 = 0x00; - gamsg->invert = 0x00; - gamsg->pwmlevel = 0x00; - gamsg->outval = value; - gamsg->risefall = 0x00; - gamsg->answer = 0x00; - gamsg->__fill = 0x00; - - ret = usb_control_msg(vb->usb_dev, - usb_sndctrlpipe(vb->usb_dev, 0), - VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, - 0x0000, 0x0000, gamsg, - sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS); - - mutex_unlock(&vb->lock); - - if (ret != sizeof(struct vprbrd_gpioa_msg)) - dev_err(chip->parent, "usb error setting pin value\n"); + if (!(gpio->gpioa_out & (1 << offset))) + return 0; + + if (value) + gpio->gpioa_val |= (1 << offset); + else + gpio->gpioa_val &= ~(1 << offset); + + mutex_lock(&vb->lock); + + gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT; + gamsg->clk = 0x00; + gamsg->offset = offset; + gamsg->t1 = 0x00; + gamsg->t2 = 0x00; + gamsg->invert = 0x00; + gamsg->pwmlevel = 0x00; + gamsg->outval = value; + gamsg->risefall = 0x00; + gamsg->answer = 0x00; + gamsg->__fill = 0x00; + + ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), + VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, + 0x0000, 0x0000, gamsg, + sizeof(struct vprbrd_gpioa_msg), + VPRBRD_USB_TIMEOUT_MS); + + mutex_unlock(&vb->lock); + + if (ret != sizeof(struct vprbrd_gpioa_msg)) { + dev_err(chip->parent, "usb error setting pin value\n"); + return -EREMOTEIO; } + + return 0; } static int vprbrd_gpioa_direction_input(struct gpio_chip *chip, @@ -304,37 +309,42 @@ static int vprbrd_gpiob_get(struct gpio_chip *chip, return (gpio->gpiob_val >> offset) & 0x1; } -static void vprbrd_gpiob_set(struct gpio_chip *chip, - unsigned int offset, int value) +static int vprbrd_gpiob_set(struct gpio_chip *chip, unsigned int offset, + int value) { int ret; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf; - if (gpio->gpiob_out & (1 << offset)) { - if (value) - gpio->gpiob_val |= (1 << offset); - else - gpio->gpiob_val &= ~(1 << offset); + if (!(gpio->gpiob_out & (1 << offset))) + return 0; + + if (value) + gpio->gpiob_val |= (1 << offset); + else + gpio->gpiob_val &= ~(1 << offset); - mutex_lock(&vb->lock); + mutex_lock(&vb->lock); - gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL; - gbmsg->val = cpu_to_be16(value << offset); - gbmsg->mask = cpu_to_be16(0x0001 << offset); + gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL; + gbmsg->val = cpu_to_be16(value << offset); + gbmsg->mask = cpu_to_be16(0x0001 << offset); - ret = usb_control_msg(vb->usb_dev, - usb_sndctrlpipe(vb->usb_dev, 0), - VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, - 0x0000, 0x0000, gbmsg, - 
sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS); + ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), + VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, + 0x0000, 0x0000, gbmsg, + sizeof(struct vprbrd_gpiob_msg), + VPRBRD_USB_TIMEOUT_MS); - mutex_unlock(&vb->lock); + mutex_unlock(&vb->lock); - if (ret != sizeof(struct vprbrd_gpiob_msg)) - dev_err(chip->parent, "usb error setting pin value\n"); + if (ret != sizeof(struct vprbrd_gpiob_msg)) { + dev_err(chip->parent, "usb error setting pin value\n"); + return -EREMOTEIO; } + + return 0; } static int vprbrd_gpiob_direction_input(struct gpio_chip *chip, @@ -368,16 +378,14 @@ static int vprbrd_gpiob_direction_output(struct gpio_chip *chip, gpio->gpiob_out |= (1 << offset); mutex_lock(&vb->lock); - ret = vprbrd_gpiob_setdir(vb, offset, 1); - if (ret) - dev_err(chip->parent, "usb error setting pin to output\n"); - mutex_unlock(&vb->lock); + if (ret) { + dev_err(chip->parent, "usb error setting pin to output\n"); + return ret; + } - vprbrd_gpiob_set(chip, offset, value); - - return ret; + return vprbrd_gpiob_set(chip, offset, value); } /* ----- end of gpio b chip ---------------------------------------------- */ @@ -400,7 +408,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev) vb_gpio->gpioa.base = -1; vb_gpio->gpioa.ngpio = 16; vb_gpio->gpioa.can_sleep = true; - vb_gpio->gpioa.set = vprbrd_gpioa_set; + vb_gpio->gpioa.set_rv = vprbrd_gpioa_set; vb_gpio->gpioa.get = vprbrd_gpioa_get; vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input; vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output; @@ -416,7 +424,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev) vb_gpio->gpiob.base = -1; vb_gpio->gpiob.ngpio = 16; vb_gpio->gpiob.can_sleep = true; - vb_gpio->gpiob.set = vprbrd_gpiob_set; + vb_gpio->gpiob.set_rv = vprbrd_gpiob_set; vb_gpio->gpiob.get = vprbrd_gpiob_get; vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input; vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output; diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c index ac39da17a29b..07552611da98 100644 --- a/drivers/gpio/gpio-virtio.c +++ b/drivers/gpio/gpio-virtio.c @@ -194,11 +194,12 @@ static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio) return ret ? 
ret : value; } -static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) { struct virtio_gpio *vgpio = gpiochip_get_data(gc); - virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL); + return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, + NULL); } /* Interrupt handling */ @@ -526,7 +527,6 @@ static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio, static int virtio_gpio_probe(struct virtio_device *vdev) { - struct virtio_gpio_config config; struct device *dev = &vdev->dev; struct virtio_gpio *vgpio; struct irq_chip *gpio_irq_chip; @@ -539,9 +539,11 @@ static int virtio_gpio_probe(struct virtio_device *vdev) return -ENOMEM; /* Read configuration */ - virtio_cread_bytes(vdev, 0, &config, sizeof(config)); - gpio_names_size = le32_to_cpu(config.gpio_names_size); - ngpio = le16_to_cpu(config.ngpio); + gpio_names_size = + virtio_cread32(vdev, offsetof(struct virtio_gpio_config, + gpio_names_size)); + ngpio = virtio_cread16(vdev, offsetof(struct virtio_gpio_config, + ngpio)); if (!ngpio) { dev_err(dev, "Number of GPIOs can't be zero\n"); return -EINVAL; @@ -565,7 +567,7 @@ static int virtio_gpio_probe(struct virtio_device *vdev) vgpio->gc.direction_input = virtio_gpio_direction_input; vgpio->gc.direction_output = virtio_gpio_direction_output; vgpio->gc.get = virtio_gpio_get; - vgpio->gc.set = virtio_gpio_set; + vgpio->gc.set_rv = virtio_gpio_set; vgpio->gc.ngpio = ngpio; vgpio->gc.base = -1; /* Allocate base dynamically */ vgpio->gc.label = dev_name(dev); diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c index eab6726953b4..a10eab7d2617 100644 --- a/drivers/gpio/gpio-virtuser.c +++ b/drivers/gpio/gpio-virtuser.c @@ -215,9 +215,7 @@ static int gpio_virtuser_set_array_value(struct gpio_descs *descs, struct gpio_virtuser_irq_work_context ctx; if (!atomic) - return gpiod_set_array_value_cansleep(descs->ndescs, - descs->desc, - descs->info, values); + return gpiod_multi_set_value_cansleep(descs, values); gpio_virtuser_init_irq_work_context(&ctx); ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_array_atomic); diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c index 8fd6c3913d69..a3bceac7854c 100644 --- a/drivers/gpio/gpio-vx855.c +++ b/drivers/gpio/gpio-vx855.c @@ -127,8 +127,7 @@ static int vx855gpio_get(struct gpio_chip *gpio, unsigned int nr) return ret; } -static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr, - int val) +static int vx855gpio_set(struct gpio_chip *gpio, unsigned int nr, int val) { struct vx855_gpio *vg = gpiochip_get_data(gpio); unsigned long flags; @@ -136,7 +135,7 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr, /* True GPI cannot be switched to output mode */ if (nr < NR_VX855_GPI) - return; + return -EPERM; spin_lock_irqsave(&vg->lock, flags); reg_out = inl(vg->io_gpo); @@ -153,6 +152,8 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr, } outl(reg_out, vg->io_gpo); spin_unlock_irqrestore(&vg->lock, flags); + + return 0; } static int vx855gpio_direction_output(struct gpio_chip *gpio, @@ -215,7 +216,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg) c->direction_input = vx855gpio_direction_input; c->direction_output = vx855gpio_direction_output; c->get = vx855gpio_get; - c->set = vx855gpio_set; + c->set_rv = vx855gpio_set; c->set_config = vx855gpio_set_config; c->dbg_show = NULL; c->base = 0; diff --git 
a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c index 2bba27b13947..c89da9a22016 100644 --- a/drivers/gpio/gpio-wcd934x.c +++ b/drivers/gpio/gpio-wcd934x.c @@ -46,9 +46,12 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin, int val) { struct wcd_gpio_data *data = gpiochip_get_data(chip); + int ret; - regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET, - WCD_PIN_MASK(pin), WCD_PIN_MASK(pin)); + ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET, + WCD_PIN_MASK(pin), WCD_PIN_MASK(pin)); + if (ret) + return ret; return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET, WCD_PIN_MASK(pin), @@ -65,12 +68,13 @@ static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin) return !!(value & WCD_PIN_MASK(pin)); } -static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val) +static int wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val) { struct wcd_gpio_data *data = gpiochip_get_data(chip); - regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET, - WCD_PIN_MASK(pin), val ? WCD_PIN_MASK(pin) : 0); + return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET, + WCD_PIN_MASK(pin), + val ? WCD_PIN_MASK(pin) : 0); } static int wcd_gpio_probe(struct platform_device *pdev) @@ -94,7 +98,7 @@ static int wcd_gpio_probe(struct platform_device *pdev) chip->direction_output = wcd_gpio_direction_output; chip->get_direction = wcd_gpio_get_direction; chip->get = wcd_gpio_get; - chip->set = wcd_gpio_set; + chip->set_rv = wcd_gpio_set; chip->parent = dev; chip->base = -1; chip->ngpio = WCD934X_NPINS; diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c index 1ec24f6f9300..f7df3d5fc71c 100644 --- a/drivers/gpio/gpio-wcove.c +++ b/drivers/gpio/gpio-wcove.c @@ -200,18 +200,15 @@ static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio) return val & 0x1; } -static void wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) +static int wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct wcove_gpio *wg = gpiochip_get_data(chip); int reg = to_reg(gpio, CTRL_OUT); if (reg < 0) - return; + return 0; - if (value) - regmap_set_bits(wg->regmap, reg, 1); - else - regmap_clear_bits(wg->regmap, reg, 1); + return regmap_assign_bits(wg->regmap, reg, 1, value); } static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio, @@ -442,7 +439,7 @@ static int wcove_gpio_probe(struct platform_device *pdev) wg->chip.direction_output = wcove_gpio_dir_out; wg->chip.get_direction = wcove_gpio_get_direction; wg->chip.get = wcove_gpio_get; - wg->chip.set = wcove_gpio_set; + wg->chip.set_rv = wcove_gpio_set; wg->chip.set_config = wcove_gpio_set_config; wg->chip.base = -1; wg->chip.ngpio = WCOVE_VGPIO_NUM; diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c index 4b61d975cc0e..421655b5d4c2 100644 --- a/drivers/gpio/gpio-winbond.c +++ b/drivers/gpio/gpio-winbond.c @@ -458,17 +458,19 @@ static int winbond_gpio_direction_out(struct gpio_chip *gc, return 0; } -static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset, - int val) +static int winbond_gpio_set(struct gpio_chip *gc, unsigned int offset, + int val) { unsigned long *base = gpiochip_get_data(gc); const struct winbond_gpio_info *info; + int ret; if (!winbond_gpio_get_info(&offset, &info)) - return; + return -EACCES; - if (winbond_sio_enter(*base) != 0) - return; + ret = winbond_sio_enter(*base); + if (ret) + return ret; winbond_sio_select_logical(*base, info->dev); @@ -481,6 +483,8 @@ static void 
winbond_gpio_set(struct gpio_chip *gc, unsigned int offset, winbond_sio_reg_bclear(*base, info->datareg, offset); winbond_sio_leave(*base); + + return 0; } static struct gpio_chip winbond_gpio_chip = { @@ -490,7 +494,7 @@ static struct gpio_chip winbond_gpio_chip = { .can_sleep = true, .get = winbond_gpio_get, .direction_input = winbond_gpio_direction_in, - .set = winbond_gpio_set, + .set_rv = winbond_gpio_set, .direction_output = winbond_gpio_direction_out, }; diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c index 61bb83a1e8ae..ab58aa7c0b99 100644 --- a/drivers/gpio/gpio-wm831x.c +++ b/drivers/gpio/gpio-wm831x.c @@ -58,13 +58,14 @@ static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset) return 0; } -static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int wm831x_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; - wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset, - value << offset); + return wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset, + value << offset); } static int wm831x_gpio_direction_out(struct gpio_chip *chip, @@ -85,9 +86,7 @@ static int wm831x_gpio_direction_out(struct gpio_chip *chip, return ret; /* Can only set GPIO state once it's in output mode */ - wm831x_gpio_set(chip, offset, value); - - return 0; + return wm831x_gpio_set(chip, offset, value); } static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) @@ -254,7 +253,7 @@ static const struct gpio_chip template_chip = { .direction_input = wm831x_gpio_direction_in, .get = wm831x_gpio_get, .direction_output = wm831x_gpio_direction_out, - .set = wm831x_gpio_set, + .set_rv = wm831x_gpio_set, .to_irq = wm831x_gpio_to_irq, .set_config = wm831x_set_config, .dbg_show = wm831x_gpio_dbg_show, diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c index 2421cf606ed6..9a7677f841fc 100644 --- a/drivers/gpio/gpio-wm8350.c +++ b/drivers/gpio/gpio-wm8350.c @@ -48,15 +48,16 @@ static int wm8350_gpio_get(struct gpio_chip *chip, unsigned offset) return 0; } -static void wm8350_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int wm8350_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip); struct wm8350 *wm8350 = wm8350_gpio->wm8350; if (value) - wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset); - else - wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset); + return wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset); + + return wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset); } static int wm8350_gpio_direction_out(struct gpio_chip *chip, @@ -72,9 +73,7 @@ static int wm8350_gpio_direction_out(struct gpio_chip *chip, return ret; /* Don't have an atomic direction/value setup */ - wm8350_gpio_set(chip, offset, value); - - return 0; + return wm8350_gpio_set(chip, offset, value); } static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset) @@ -94,7 +93,7 @@ static const struct gpio_chip template_chip = { .direction_input = wm8350_gpio_direction_in, .get = wm8350_gpio_get, .direction_output = wm8350_gpio_direction_out, - .set = wm8350_gpio_set, + .set_rv = wm8350_gpio_set, .to_irq = wm8350_gpio_to_irq, .can_sleep = true, }; diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c index bf05c9b5882b..ccc005628dd2 100644 --- a/drivers/gpio/gpio-wm8994.c +++ 
b/drivers/gpio/gpio-wm8994.c @@ -89,7 +89,8 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip, WM8994_GPN_DIR | WM8994_GPN_LVL, value); } -static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int wm8994_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip); struct wm8994 *wm8994 = wm8994_gpio->wm8994; @@ -97,7 +98,8 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value) if (value) value = WM8994_GPN_LVL; - wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value); + return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, + value); } static int wm8994_gpio_set_config(struct gpio_chip *chip, unsigned int offset, @@ -254,7 +256,7 @@ static const struct gpio_chip template_chip = { .direction_input = wm8994_gpio_direction_in, .get = wm8994_gpio_get, .direction_output = wm8994_gpio_direction_out, - .set = wm8994_gpio_set, + .set_rv = wm8994_gpio_set, .set_config = wm8994_gpio_set_config, .to_irq = wm8994_gpio_to_irq, .dbg_show = wm8994_gpio_dbg_show, diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c index fb4b0c67aeef..28f794e5eb26 100644 --- a/drivers/gpio/gpio-xgene.c +++ b/drivers/gpio/gpio-xgene.c @@ -62,7 +62,7 @@ static void __xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val) iowrite32(setval, chip->base + bank_offset); } -static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val) +static int xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val) { struct xgene_gpio *chip = gpiochip_get_data(gc); unsigned long flags; @@ -70,6 +70,8 @@ static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val) spin_lock_irqsave(&chip->lock, flags); __xgene_gpio_set(gc, offset, val); spin_unlock_irqrestore(&chip->lock, flags); + + return 0; } static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) @@ -176,7 +178,7 @@ static int xgene_gpio_probe(struct platform_device *pdev) gpio->chip.direction_input = xgene_gpio_dir_in; gpio->chip.direction_output = xgene_gpio_dir_out; gpio->chip.get = xgene_gpio_get; - gpio->chip.set = xgene_gpio_set; + gpio->chip.set_rv = xgene_gpio_set; gpio->chip.label = dev_name(&pdev->dev); gpio->chip.base = -1; diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index c58a7e1349b4..36d91cacc2d9 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -148,7 +148,7 @@ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio) * This function writes the specified value in to the specified signal of the * GPIO device. */ -static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; struct xgpio_instance *chip = gpiochip_get_data(gc); @@ -162,6 +162,8 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state); raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + + return 0; } /** @@ -173,8 +175,8 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) * This function writes the specified values into the specified signals of the * GPIO devices. 
*/ -static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, - unsigned long *bits) +static int xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) { DECLARE_BITMAP(hw_mask, 64); DECLARE_BITMAP(hw_bits, 64); @@ -194,6 +196,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, bitmap_copy(chip->state, state, 64); raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + + return 0; } /** @@ -600,10 +604,10 @@ static int xgpio_probe(struct platform_device *pdev) chip->gc.direction_input = xgpio_dir_in; chip->gc.direction_output = xgpio_dir_out; chip->gc.get = xgpio_get; - chip->gc.set = xgpio_set; + chip->gc.set_rv = xgpio_set; chip->gc.request = xgpio_request; chip->gc.free = xgpio_free; - chip->gc.set_multiple = xgpio_set_multiple; + chip->gc.set_multiple_rv = xgpio_set_multiple; chip->gc.label = dev_name(dev); diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c index b4b52213bcd9..bcd2dfec462d 100644 --- a/drivers/gpio/gpio-xlp.c +++ b/drivers/gpio/gpio-xlp.c @@ -206,7 +206,6 @@ static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); - BUG_ON(gpio >= gc->ngpio); xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x1); return 0; @@ -216,7 +215,6 @@ static int xlp_gpio_dir_input(struct gpio_chip *gc, unsigned gpio) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); - BUG_ON(gpio >= gc->ngpio); xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x0); return 0; @@ -226,16 +224,16 @@ static int xlp_gpio_get(struct gpio_chip *gc, unsigned gpio) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); - BUG_ON(gpio >= gc->ngpio); return xlp_gpio_get_reg(priv->gpio_paddrv, gpio); } -static void xlp_gpio_set(struct gpio_chip *gc, unsigned gpio, int state) +static int xlp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int state) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); - BUG_ON(gpio >= gc->ngpio); xlp_gpio_set_reg(priv->gpio_paddrv, gpio, state); + + return 0; } static int xlp_gpio_probe(struct platform_device *pdev) @@ -276,7 +274,7 @@ static int xlp_gpio_probe(struct platform_device *pdev) gc->ngpio = 70; gc->direction_output = xlp_gpio_dir_output; gc->direction_input = xlp_gpio_dir_input; - gc->set = xlp_gpio_set; + gc->set_rv = xlp_gpio_set; gc->get = xlp_gpio_get; spin_lock_init(&priv->lock); diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c index 842cf875bb92..70402c6b5407 100644 --- a/drivers/gpio/gpio-xra1403.c +++ b/drivers/gpio/gpio-xra1403.c @@ -102,16 +102,13 @@ static int xra1403_get(struct gpio_chip *chip, unsigned int offset) return !!(val & BIT(offset % 8)); } -static void xra1403_set(struct gpio_chip *chip, unsigned int offset, int value) +static int xra1403_set(struct gpio_chip *chip, unsigned int offset, int value) { - int ret; struct xra1403 *xra = gpiochip_get_data(chip); - ret = regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset), - BIT(offset % 8), value ? BIT(offset % 8) : 0); - if (ret) - dev_err(chip->parent, "Failed to set pin: %d, ret: %d\n", - offset, ret); + return regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset), + BIT(offset % 8), + value ? 
BIT(offset % 8) : 0); } #ifdef CONFIG_DEBUG_FS @@ -167,7 +164,7 @@ static int xra1403_probe(struct spi_device *spi) xra->chip.direction_output = xra1403_direction_output; xra->chip.get_direction = xra1403_get_direction; xra->chip.get = xra1403_get; - xra->chip.set = xra1403_set; + xra->chip.set_rv = xra1403_set; xra->chip.dbg_show = xra1403_dbg_show; diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c index c8af34a6368f..e7ff3c60324d 100644 --- a/drivers/gpio/gpio-xtensa.c +++ b/drivers/gpio/gpio-xtensa.c @@ -86,12 +86,6 @@ static int xtensa_impwire_get_value(struct gpio_chip *gc, unsigned offset) return !!(impwire & BIT(offset)); } -static void xtensa_impwire_set_value(struct gpio_chip *gc, unsigned offset, - int value) -{ - BUG(); /* output only; should never be called */ -} - static int xtensa_expstate_get_direction(struct gpio_chip *gc, unsigned offset) { return GPIO_LINE_DIRECTION_OUT; /* output only */ @@ -109,7 +103,7 @@ static int xtensa_expstate_get_value(struct gpio_chip *gc, unsigned offset) return !!(expstate & BIT(offset)); } -static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset, +static int xtensa_expstate_set_value(struct gpio_chip *gc, unsigned int offset, int value) { unsigned long flags, saved_cpenable; @@ -120,6 +114,8 @@ static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset, __asm__ __volatile__("wrmsk_expstate %0, %1" :: "a" (val), "a" (mask)); disable_cp(flags, saved_cpenable); + + return 0; } static struct gpio_chip impwire_chip = { @@ -128,7 +124,6 @@ static struct gpio_chip impwire_chip = { .ngpio = 32, .get_direction = xtensa_impwire_get_direction, .get = xtensa_impwire_get_value, - .set = xtensa_impwire_set_value, }; static struct gpio_chip expstate_chip = { @@ -137,7 +132,7 @@ static struct gpio_chip expstate_chip = { .ngpio = 32, .get_direction = xtensa_expstate_get_direction, .get = xtensa_expstate_get_value, - .set = xtensa_expstate_set_value, + .set_rv = xtensa_expstate_set_value, }; static int xtensa_gpio_probe(struct platform_device *pdev) diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c index d7230fd83f5d..0799f7976710 100644 --- a/drivers/gpio/gpio-zevio.c +++ b/drivers/gpio/gpio-zevio.c @@ -91,7 +91,7 @@ static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin) return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1; } -static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value) +static int zevio_gpio_set(struct gpio_chip *chip, unsigned int pin, int value) { struct zevio_gpio *controller = gpiochip_get_data(chip); u32 val; @@ -105,6 +105,8 @@ static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value) zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val); spin_unlock(&controller->lock); + + return 0; } static int zevio_gpio_direction_input(struct gpio_chip *chip, unsigned pin) @@ -159,7 +161,7 @@ static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin) static const struct gpio_chip zevio_gpio_chip = { .direction_input = zevio_gpio_direction_input, .direction_output = zevio_gpio_direction_output, - .set = zevio_gpio_set, + .set_rv = zevio_gpio_set, .get = zevio_gpio_get, .to_irq = zevio_gpio_to_irq, .base = 0, diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 3dae63f3ea21..b22b4e25c68d 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -265,8 +265,8 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin) * upper 16 bits) based on the given pin number and sets the state of a 
* gpio pin to the specified value. The state is either 0 or non-zero. */ -static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin, - int state) +static int zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin, + int state) { unsigned int reg_offset, bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(chip); @@ -290,6 +290,8 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin, ((state << bank_pin_num) | ZYNQ_GPIO_UPPER_MASK); writel_relaxed(state, gpio->base_addr + reg_offset); + + return 0; } /** @@ -930,7 +932,7 @@ static int zynq_gpio_probe(struct platform_device *pdev) chip->owner = THIS_MODULE; chip->parent = &pdev->dev; chip->get = zynq_gpio_get_value; - chip->set = zynq_gpio_set_value; + chip->set_rv = zynq_gpio_set_value; chip->request = zynq_gpio_request; chip->free = zynq_gpio_free; chip->direction_input = zynq_gpio_dir_in; diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c index 2f3c9ebfa78d..6dc5d7acb89c 100644 --- a/drivers/gpio/gpio-zynqmp-modepin.c +++ b/drivers/gpio/gpio-zynqmp-modepin.c @@ -57,8 +57,8 @@ static int modepin_gpio_get_value(struct gpio_chip *chip, unsigned int pin) * * Return: None. */ -static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin, - int state) +static int modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin, + int state) { u32 bootpin_val = 0; int ret; @@ -77,6 +77,8 @@ static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin, ret = zynqmp_pm_bootmode_write(bootpin_val); if (ret) pr_err("modepin: set value error %d for pin %d\n", ret, pin); + + return ret; } /** @@ -102,7 +104,7 @@ static int modepin_gpio_dir_in(struct gpio_chip *chip, unsigned int pin) static int modepin_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int state) { - return 0; + return modepin_gpio_set_value(chip, pin, state); } /** @@ -128,7 +130,7 @@ static int modepin_gpio_probe(struct platform_device *pdev) chip->owner = THIS_MODULE; chip->parent = &pdev->dev; chip->get = modepin_gpio_get_value; - chip->set = modepin_gpio_set_value; + chip->set_rv = modepin_gpio_set_value; chip->direction_input = modepin_gpio_dir_in; chip->direction_output = modepin_gpio_dir_out; chip->label = dev_name(&pdev->dev); diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c index aeae6df8bec9..3bc93ccadb5b 100644 --- a/drivers/gpio/gpiolib-legacy.c +++ b/drivers/gpio/gpiolib-legacy.c @@ -86,44 +86,6 @@ static void devm_gpio_release(struct device *dev, void *res) } /** - * devm_gpio_request - request a GPIO for a managed device - * @dev: device to request the GPIO for - * @gpio: GPIO to allocate - * @label: the name of the requested GPIO - * - * Except for the extra @dev argument, this function takes the - * same arguments and performs the same function as gpio_request(). - * GPIOs requested with this function will be automatically freed - * on driver detach. - * - * **DEPRECATED** This function is deprecated and must not be used in new code. - * - * Returns: - * 0 on success, or negative errno on failure. 
- */ -int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) -{ - unsigned *dr; - int rc; - - dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); - if (!dr) - return -ENOMEM; - - rc = gpio_request(gpio, label); - if (rc) { - devres_free(dr); - return rc; - } - - *dr = gpio; - devres_add(dev, dr); - - return 0; -} -EXPORT_SYMBOL_GPL(devm_gpio_request); - -/** * devm_gpio_request_one - request a single GPIO with initial setup * @dev: device to request for * @gpio: the GPIO number diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h index 3eebfac290c5..2257f7a498a1 100644 --- a/drivers/gpio/gpiolib-of.h +++ b/drivers/gpio/gpiolib-of.h @@ -8,7 +8,7 @@ #include <linux/notifier.h> -struct device; +struct device_node; struct fwnode_handle; struct gpio_chip; diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 4a3aa09dad9d..b64106f1cb7b 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -3,7 +3,6 @@ #include <linux/bitops.h> #include <linux/cleanup.h> #include <linux/device.h> -#include <linux/idr.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kdev_t.h> @@ -12,7 +11,6 @@ #include <linux/mutex.h> #include <linux/printk.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/string.h> #include <linux/srcu.h> #include <linux/sysfs.h> @@ -26,6 +24,8 @@ #include "gpiolib.h" #include "gpiolib-sysfs.h" +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + struct kernfs_node; #define GPIO_IRQF_TRIGGER_NONE 0 @@ -34,15 +34,64 @@ struct kernfs_node; #define GPIO_IRQF_TRIGGER_BOTH (GPIO_IRQF_TRIGGER_FALLING | \ GPIO_IRQF_TRIGGER_RISING) +enum { + GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION = 0, + GPIO_SYSFS_LINE_CLASS_ATTR_VALUE, + GPIO_SYSFS_LINE_CLASS_ATTR_EDGE, + GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW, + GPIO_SYSFS_LINE_CLASS_ATTR_SENTINEL, + GPIO_SYSFS_LINE_CLASS_ATTR_SIZE, +}; + +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + +enum { + GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION = 0, + GPIO_SYSFS_LINE_CHIP_ATTR_VALUE, + GPIO_SYSFS_LINE_CHIP_ATTR_SENTINEL, + GPIO_SYSFS_LINE_CHIP_ATTR_SIZE, +}; + struct gpiod_data { + struct list_head list; + struct gpio_desc *desc; + struct device *dev; struct mutex mutex; +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) struct kernfs_node *value_kn; int irq; unsigned char irq_flags; +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ bool direction_can_change; + + struct kobject *parent; + struct device_attribute dir_attr; + struct device_attribute val_attr; + +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + struct device_attribute edge_attr; + struct device_attribute active_low_attr; + + struct attribute *class_attrs[GPIO_SYSFS_LINE_CLASS_ATTR_SIZE]; + struct attribute_group class_attr_group; + const struct attribute_group *class_attr_groups[2]; +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + + struct attribute *chip_attrs[GPIO_SYSFS_LINE_CHIP_ATTR_SIZE]; + struct attribute_group chip_attr_group; + const struct attribute_group *chip_attr_groups[2]; +}; + +struct gpiodev_data { + struct list_head exported_lines; + struct gpio_device *gdev; + struct device *cdev_id; /* Class device by GPIO device ID */ +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + struct device *cdev_base; /* Class device by GPIO base */ +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ }; /* @@ -73,9 +122,10 @@ static DEFINE_MUTEX(sysfs_lock); */ static ssize_t direction_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { - struct gpiod_data *data = dev_get_drvdata(dev); + 
struct gpiod_data *data = container_of(attr, struct gpiod_data, + dir_attr); struct gpio_desc *desc = data->desc; int value; @@ -88,11 +138,13 @@ static ssize_t direction_show(struct device *dev, } static ssize_t direction_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) + struct device_attribute *attr, const char *buf, + size_t size) { - struct gpiod_data *data = dev_get_drvdata(dev); + struct gpiod_data *data = container_of(attr, struct gpiod_data, + dir_attr); struct gpio_desc *desc = data->desc; - ssize_t status; + ssize_t status; guard(mutex)(&data->mutex); @@ -107,14 +159,14 @@ static ssize_t direction_store(struct device *dev, return status ? : size; } -static DEVICE_ATTR_RW(direction); -static ssize_t value_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t value_show(struct device *dev, struct device_attribute *attr, + char *buf) { - struct gpiod_data *data = dev_get_drvdata(dev); + struct gpiod_data *data = container_of(attr, struct gpiod_data, + val_attr); struct gpio_desc *desc = data->desc; - ssize_t status; + ssize_t status; scoped_guard(mutex, &data->mutex) status = gpiod_get_value_cansleep(desc); @@ -125,10 +177,11 @@ static ssize_t value_show(struct device *dev, return sysfs_emit(buf, "%zd\n", status); } -static ssize_t value_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t value_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) { - struct gpiod_data *data = dev_get_drvdata(dev); + struct gpiod_data *data = container_of(attr, struct gpiod_data, + val_attr); struct gpio_desc *desc = data->desc; ssize_t status; long value; @@ -145,8 +198,8 @@ static ssize_t value_store(struct device *dev, return size; } -static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store); +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) static irqreturn_t gpio_sysfs_irq(int irq, void *priv) { struct gpiod_data *data = priv; @@ -157,9 +210,8 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv) } /* Caller holds gpiod-data mutex. */ -static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags) +static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags) { - struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; unsigned long irq_flags; int ret; @@ -172,33 +224,29 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags) if (data->irq < 0) return -EIO; - data->value_kn = sysfs_get_dirent(dev->kobj.sd, "value"); - if (!data->value_kn) - return -ENODEV; - irq_flags = IRQF_SHARED; if (flags & GPIO_IRQF_TRIGGER_FALLING) { irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? - IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; + IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; set_bit(FLAG_EDGE_FALLING, &desc->flags); } if (flags & GPIO_IRQF_TRIGGER_RISING) { irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? - IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; + IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; set_bit(FLAG_EDGE_RISING, &desc->flags); } /* * FIXME: This should be done in the irq_request_resources callback - * when the irq is requested, but a few drivers currently fail - * to do so. + * when the irq is requested, but a few drivers currently fail to do + * so. * - * Remove this redundant call (along with the corresponding - * unlock) when those drivers have been fixed. 
+ * Remove this redundant call (along with the corresponding unlock) + * when those drivers have been fixed. */ ret = gpiochip_lock_as_irq(guard.gc, gpio_chip_hwgpio(desc)); if (ret < 0) - goto err_put_kn; + goto err_clr_bits; ret = request_any_context_irq(data->irq, gpio_sysfs_irq, irq_flags, "gpiolib", data); @@ -211,10 +259,9 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags) err_unlock: gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc)); -err_put_kn: +err_clr_bits: clear_bit(FLAG_EDGE_RISING, &desc->flags); clear_bit(FLAG_EDGE_FALLING, &desc->flags); - sysfs_put(data->value_kn); return ret; } @@ -223,9 +270,8 @@ err_put_kn: * Caller holds gpiod-data mutex (unless called after class-device * deregistration). */ -static void gpio_sysfs_free_irq(struct device *dev) +static void gpio_sysfs_free_irq(struct gpiod_data *data) { - struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; CLASS(gpio_chip_guard, guard)(desc); @@ -237,20 +283,20 @@ static void gpio_sysfs_free_irq(struct device *dev) gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc)); clear_bit(FLAG_EDGE_RISING, &desc->flags); clear_bit(FLAG_EDGE_FALLING, &desc->flags); - sysfs_put(data->value_kn); } -static const char * const trigger_names[] = { +static const char *const trigger_names[] = { [GPIO_IRQF_TRIGGER_NONE] = "none", [GPIO_IRQF_TRIGGER_FALLING] = "falling", [GPIO_IRQF_TRIGGER_RISING] = "rising", [GPIO_IRQF_TRIGGER_BOTH] = "both", }; -static ssize_t edge_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t edge_show(struct device *dev, struct device_attribute *attr, + char *buf) { - struct gpiod_data *data = dev_get_drvdata(dev); + struct gpiod_data *data = container_of(attr, struct gpiod_data, + edge_attr); int flags; scoped_guard(mutex, &data->mutex) @@ -262,10 +308,11 @@ static ssize_t edge_show(struct device *dev, return sysfs_emit(buf, "%s\n", trigger_names[flags]); } -static ssize_t edge_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t edge_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) { - struct gpiod_data *data = dev_get_drvdata(dev); + struct gpiod_data *data = container_of(attr, struct gpiod_data, + edge_attr); ssize_t status = size; int flags; @@ -279,12 +326,12 @@ static ssize_t edge_store(struct device *dev, return size; if (data->irq_flags) - gpio_sysfs_free_irq(dev); + gpio_sysfs_free_irq(data); if (!flags) return size; - status = gpio_sysfs_request_irq(dev, flags); + status = gpio_sysfs_request_irq(data, flags); if (status) return status; @@ -292,17 +339,14 @@ static ssize_t edge_store(struct device *dev, return size; } -static DEVICE_ATTR_RW(edge); /* Caller holds gpiod-data mutex. 
*/ -static int gpio_sysfs_set_active_low(struct device *dev, int value) +static int gpio_sysfs_set_active_low(struct gpiod_data *data, int value) { - struct gpiod_data *data = dev_get_drvdata(dev); unsigned int flags = data->irq_flags; struct gpio_desc *desc = data->desc; int status = 0; - if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) return 0; @@ -310,9 +354,9 @@ static int gpio_sysfs_set_active_low(struct device *dev, int value) /* reconfigure poll(2) support if enabled on one edge only */ if (flags == GPIO_IRQF_TRIGGER_FALLING || - flags == GPIO_IRQF_TRIGGER_RISING) { - gpio_sysfs_free_irq(dev); - status = gpio_sysfs_request_irq(dev, flags); + flags == GPIO_IRQF_TRIGGER_RISING) { + gpio_sysfs_free_irq(data); + status = gpio_sysfs_request_irq(data, flags); } gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG); @@ -321,9 +365,10 @@ static int gpio_sysfs_set_active_low(struct device *dev, int value) } static ssize_t active_low_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { - struct gpiod_data *data = dev_get_drvdata(dev); + struct gpiod_data *data = container_of(attr, struct gpiod_data, + active_low_attr); struct gpio_desc *desc = data->desc; int value; @@ -334,9 +379,11 @@ static ssize_t active_low_show(struct device *dev, } static ssize_t active_low_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) + struct device_attribute *attr, + const char *buf, size_t size) { - struct gpiod_data *data = dev_get_drvdata(dev); + struct gpiod_data *data = container_of(attr, struct gpiod_data, + active_low_attr); ssize_t status; long value; @@ -346,84 +393,189 @@ static ssize_t active_low_store(struct device *dev, guard(mutex)(&data->mutex); - return gpio_sysfs_set_active_low(dev, value) ?: size; + return gpio_sysfs_set_active_low(data, value) ?: size; } -static DEVICE_ATTR_RW(active_low); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = kobj_to_dev(kobj); - struct gpiod_data *data = dev_get_drvdata(dev); - struct gpio_desc *desc = data->desc; + struct device_attribute *dev_attr = container_of(attr, + struct device_attribute, attr); umode_t mode = attr->mode; - bool show_direction = data->direction_can_change; + struct gpiod_data *data; + + if (strcmp(attr->name, "direction") == 0) { + data = container_of(dev_attr, struct gpiod_data, dir_attr); - if (attr == &dev_attr_direction.attr) { - if (!show_direction) + if (!data->direction_can_change) mode = 0; - } else if (attr == &dev_attr_edge.attr) { - if (gpiod_to_irq(desc) < 0) +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + } else if (strcmp(attr->name, "edge") == 0) { + data = container_of(dev_attr, struct gpiod_data, edge_attr); + + if (gpiod_to_irq(data->desc) < 0) mode = 0; - if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags)) + + if (!data->direction_can_change && + test_bit(FLAG_IS_OUT, &data->desc->flags)) mode = 0; +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ } return mode; } -static struct attribute *gpio_attrs[] = { - &dev_attr_direction.attr, - &dev_attr_edge.attr, - &dev_attr_value.attr, - &dev_attr_active_low.attr, - NULL, -}; - -static const struct attribute_group gpio_group = { - .attrs = gpio_attrs, - .is_visible = gpio_is_visible, -}; - -static const struct attribute_group *gpio_groups[] = { - &gpio_group, - NULL -}; - /* * /sys/class/gpio/gpiochipN/ * /base ... matching gpio_chip.base (N) * /label ... 
matching gpio_chip.label * /ngpio ... matching gpio_chip.ngpio + * + * AND + * + * /sys/class/gpio/chipX/ + * /export ... export GPIO at given offset + * /unexport ... unexport GPIO at given offset + * /label ... matching gpio_chip.label + * /ngpio ... matching gpio_chip.ngpio */ -static ssize_t base_show(struct device *dev, - struct device_attribute *attr, char *buf) +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) +static ssize_t base_show(struct device *dev, struct device_attribute *attr, + char *buf) { - const struct gpio_device *gdev = dev_get_drvdata(dev); + const struct gpiodev_data *data = dev_get_drvdata(dev); - return sysfs_emit(buf, "%u\n", gdev->base); + return sysfs_emit(buf, "%u\n", data->gdev->base); } static DEVICE_ATTR_RO(base); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ -static ssize_t label_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t label_show(struct device *dev, struct device_attribute *attr, + char *buf) { - const struct gpio_device *gdev = dev_get_drvdata(dev); + const struct gpiodev_data *data = dev_get_drvdata(dev); - return sysfs_emit(buf, "%s\n", gdev->label); + return sysfs_emit(buf, "%s\n", data->gdev->label); } static DEVICE_ATTR_RO(label); -static ssize_t ngpio_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t ngpio_show(struct device *dev, struct device_attribute *attr, + char *buf) { - const struct gpio_device *gdev = dev_get_drvdata(dev); + const struct gpiodev_data *data = dev_get_drvdata(dev); - return sysfs_emit(buf, "%u\n", gdev->ngpio); + return sysfs_emit(buf, "%u\n", data->gdev->ngpio); } static DEVICE_ATTR_RO(ngpio); +static int export_gpio_desc(struct gpio_desc *desc) +{ + int offset, ret; + + CLASS(gpio_chip_guard, guard)(desc); + if (!guard.gc) + return -ENODEV; + + offset = gpio_chip_hwgpio(desc); + if (!gpiochip_line_is_valid(guard.gc, offset)) { + pr_debug_ratelimited("%s: GPIO %d masked\n", __func__, + gpio_chip_hwgpio(desc)); + return -EINVAL; + } + + /* + * No extra locking here; FLAG_SYSFS just signifies that the + * request and export were done by on behalf of userspace, so + * they may be undone on its behalf too. + */ + + ret = gpiod_request_user(desc, "sysfs"); + if (ret) + return ret; + + ret = gpiod_set_transitory(desc, false); + if (ret) { + gpiod_free(desc); + return ret; + } + + ret = gpiod_export(desc, true); + if (ret < 0) { + gpiod_free(desc); + } else { + set_bit(FLAG_SYSFS, &desc->flags); + gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); + } + + return ret; +} + +static int unexport_gpio_desc(struct gpio_desc *desc) +{ + /* + * No extra locking here; FLAG_SYSFS just signifies that the + * request and export were done by on behalf of userspace, so + * they may be undone on its behalf too. 
+ */ + if (!test_and_clear_bit(FLAG_SYSFS, &desc->flags)) + return -EINVAL; + + gpiod_unexport(desc); + gpiod_free(desc); + + return 0; +} + +static ssize_t do_chip_export_store(struct device *dev, + struct device_attribute *attr, + const char *buf, ssize_t size, + int (*handler)(struct gpio_desc *desc)) +{ + struct gpiodev_data *data = dev_get_drvdata(dev); + struct gpio_device *gdev = data->gdev; + struct gpio_desc *desc; + unsigned int gpio; + int ret; + + ret = kstrtouint(buf, 0, &gpio); + if (ret) + return ret; + + desc = gpio_device_get_desc(gdev, gpio); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + ret = handler(desc); + if (ret) + return ret; + + return size; +} + +static ssize_t chip_export_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return do_chip_export_store(dev, attr, buf, size, export_gpio_desc); +} + +static struct device_attribute dev_attr_export = __ATTR(export, 0200, NULL, + chip_export_store); + +static ssize_t chip_unexport_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return do_chip_export_store(dev, attr, buf, size, unexport_gpio_desc); +} + +static struct device_attribute dev_attr_unexport = __ATTR(unexport, 0200, + NULL, + chip_unexport_store); + +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) static struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, @@ -431,7 +583,18 @@ static struct attribute *gpiochip_attrs[] = { NULL, }; ATTRIBUTE_GROUPS(gpiochip); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ +static struct attribute *gpiochip_ext_attrs[] = { + &dev_attr_label.attr, + &dev_attr_ngpio.attr, + &dev_attr_export.attr, + &dev_attr_unexport.attr, + NULL +}; +ATTRIBUTE_GROUPS(gpiochip_ext); + +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) /* * /sys/class/gpio/export ... write-only * integer N ... number of GPIO to export (full access) @@ -439,11 +602,11 @@ ATTRIBUTE_GROUPS(gpiochip); * integer N ... number of GPIO to unexport */ static ssize_t export_store(const struct class *class, - const struct class_attribute *attr, - const char *buf, size_t len) + const struct class_attribute *attr, + const char *buf, size_t len) { struct gpio_desc *desc; - int status, offset; + int status; long gpio; status = kstrtol(buf, 0, &gpio); @@ -457,40 +620,7 @@ static ssize_t export_store(const struct class *class, return -EINVAL; } - CLASS(gpio_chip_guard, guard)(desc); - if (!guard.gc) - return -ENODEV; - - offset = gpio_chip_hwgpio(desc); - if (!gpiochip_line_is_valid(guard.gc, offset)) { - pr_debug_ratelimited("%s: GPIO %ld masked\n", __func__, gpio); - return -EINVAL; - } - - /* No extra locking here; FLAG_SYSFS just signifies that the - * request and export were done by on behalf of userspace, so - * they may be undone on its behalf too. - */ - - status = gpiod_request_user(desc, "sysfs"); - if (status) - goto done; - - status = gpiod_set_transitory(desc, false); - if (status) { - gpiod_free(desc); - goto done; - } - - status = gpiod_export(desc, true); - if (status < 0) { - gpiod_free(desc); - } else { - set_bit(FLAG_SYSFS, &desc->flags); - gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); - } - -done: + status = export_gpio_desc(desc); if (status) pr_debug("%s: status %d\n", __func__, status); return status ? 
: len; @@ -498,8 +628,8 @@ done: static CLASS_ATTR_WO(export); static ssize_t unexport_store(const struct class *class, - const struct class_attribute *attr, - const char *buf, size_t len) + const struct class_attribute *attr, + const char *buf, size_t len) { struct gpio_desc *desc; int status; @@ -507,7 +637,7 @@ static ssize_t unexport_store(const struct class *class, status = kstrtol(buf, 0, &gpio); if (status < 0) - goto done; + return status; desc = gpio_to_desc(gpio); /* reject bogus commands (gpiod_unexport() ignores them) */ @@ -516,18 +646,7 @@ static ssize_t unexport_store(const struct class *class, return -EINVAL; } - status = -EINVAL; - - /* No extra locking here; FLAG_SYSFS just signifies that the - * request and export were done by on behalf of userspace, so - * they may be undone on its behalf too. - */ - if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { - gpiod_unexport(desc); - gpiod_free(desc); - status = 0; - } -done: + status = unexport_gpio_desc(desc); if (status) pr_debug("%s: status %d\n", __func__, status); return status ? : len; @@ -540,12 +659,55 @@ static struct attribute *gpio_class_attrs[] = { NULL, }; ATTRIBUTE_GROUPS(gpio_class); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ static const struct class gpio_class = { .name = "gpio", +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) .class_groups = gpio_class_groups, +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ +}; + +static int match_gdev(struct device *dev, const void *desc) +{ + struct gpiodev_data *data = dev_get_drvdata(dev); + const struct gpio_device *gdev = desc; + + return data && data->gdev == gdev; +} + +static struct gpiodev_data * +gdev_get_data(struct gpio_device *gdev) __must_hold(&sysfs_lock) +{ + /* + * Find the first device in GPIO class that matches. Whether that's + * the one indexed by GPIO base or device ID doesn't matter, it has + * the same address set as driver data. + */ + struct device *cdev __free(put_device) = class_find_device(&gpio_class, + NULL, gdev, + match_gdev); + if (!cdev) + return NULL; + + return dev_get_drvdata(cdev); }; +static void gpiod_attr_init(struct device_attribute *dev_attr, const char *name, + ssize_t (*show)(struct device *dev, + struct device_attribute *attr, + char *buf), + ssize_t (*store)(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count)) +{ + sysfs_attr_init(&dev_attr->attr); + dev_attr->attr.name = name; + dev_attr->attr.mode = 0644; + dev_attr->show = show; + dev_attr->store = store; +} + /** * gpiod_export - export a GPIO through sysfs * @desc: GPIO to make available, already requested @@ -564,9 +726,11 @@ static const struct class gpio_class = { */ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { + char *path __free(kfree) = NULL; + struct gpiodev_data *gdev_data; + struct gpiod_data *desc_data; struct gpio_device *gdev; - struct gpiod_data *data; - struct device *dev; + struct attribute **attrs; int status; /* can't export until sysfs is available ... 
*/ @@ -591,43 +755,116 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) guard(mutex)(&sysfs_lock); - /* check if chip is being removed */ - if (!gdev->mockdev) { - status = -ENODEV; - goto err_clear_bit; - } - if (!test_bit(FLAG_REQUESTED, &desc->flags)) { gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__); status = -EPERM; goto err_clear_bit; } - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) { + desc_data = kzalloc(sizeof(*desc_data), GFP_KERNEL); + if (!desc_data) { status = -ENOMEM; goto err_clear_bit; } - data->desc = desc; - mutex_init(&data->mutex); + desc_data->desc = desc; + mutex_init(&desc_data->mutex); if (guard.gc->direction_input && guard.gc->direction_output) - data->direction_can_change = direction_may_change; + desc_data->direction_can_change = direction_may_change; else - data->direction_can_change = false; + desc_data->direction_can_change = false; + + gpiod_attr_init(&desc_data->dir_attr, "direction", + direction_show, direction_store); + gpiod_attr_init(&desc_data->val_attr, "value", value_show, value_store); - dev = device_create_with_groups(&gpio_class, &gdev->dev, - MKDEV(0, 0), data, gpio_groups, - "gpio%u", desc_to_gpio(desc)); - if (IS_ERR(dev)) { - status = PTR_ERR(dev); +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + gpiod_attr_init(&desc_data->edge_attr, "edge", edge_show, edge_store); + gpiod_attr_init(&desc_data->active_low_attr, "active_low", + active_low_show, active_low_store); + + attrs = desc_data->class_attrs; + desc_data->class_attr_group.is_visible = gpio_is_visible; + attrs[GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION] = &desc_data->dir_attr.attr; + attrs[GPIO_SYSFS_LINE_CLASS_ATTR_VALUE] = &desc_data->val_attr.attr; + attrs[GPIO_SYSFS_LINE_CLASS_ATTR_EDGE] = &desc_data->edge_attr.attr; + attrs[GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW] = &desc_data->active_low_attr.attr; + + desc_data->class_attr_group.attrs = desc_data->class_attrs; + desc_data->class_attr_groups[0] = &desc_data->class_attr_group; + + /* + * Note: we need to continue passing desc_data here as there's still + * at least one known user of gpiod_export_link() in the tree. This + * function still uses class_find_device() internally. 
+ */ + desc_data->dev = device_create_with_groups(&gpio_class, &gdev->dev, + MKDEV(0, 0), desc_data, + desc_data->class_attr_groups, + "gpio%u", + desc_to_gpio(desc)); + if (IS_ERR(desc_data->dev)) { + status = PTR_ERR(desc_data->dev); goto err_free_data; } + desc_data->value_kn = sysfs_get_dirent(desc_data->dev->kobj.sd, + "value"); + if (!desc_data->value_kn) { + status = -ENODEV; + goto err_unregister_device; + } +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + + gdev_data = gdev_get_data(gdev); + if (!gdev_data) { + status = -ENODEV; + goto err_put_dirent; + } + + desc_data->chip_attr_group.name = kasprintf(GFP_KERNEL, "gpio%u", + gpio_chip_hwgpio(desc)); + if (!desc_data->chip_attr_group.name) { + status = -ENOMEM; + goto err_put_dirent; + } + + attrs = desc_data->chip_attrs; + desc_data->chip_attr_group.is_visible = gpio_is_visible; + attrs[GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION] = &desc_data->dir_attr.attr; + attrs[GPIO_SYSFS_LINE_CHIP_ATTR_VALUE] = &desc_data->val_attr.attr; + + desc_data->chip_attr_group.attrs = attrs; + desc_data->chip_attr_groups[0] = &desc_data->chip_attr_group; + + desc_data->parent = &gdev_data->cdev_id->kobj; + status = sysfs_create_groups(desc_data->parent, + desc_data->chip_attr_groups); + if (status) + goto err_free_name; + + path = kasprintf(GFP_KERNEL, "gpio%u/value", gpio_chip_hwgpio(desc)); + if (!path) { + status = -ENOMEM; + goto err_remove_groups; + } + + list_add(&desc_data->list, &gdev_data->exported_lines); + return 0; +err_remove_groups: + sysfs_remove_groups(desc_data->parent, desc_data->chip_attr_groups); +err_free_name: + kfree(desc_data->chip_attr_group.name); +err_put_dirent: +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + sysfs_put(desc_data->value_kn); +err_unregister_device: + device_unregister(desc_data->dev); err_free_data: - kfree(data); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + kfree(desc_data); err_clear_bit: clear_bit(FLAG_EXPORT, &desc->flags); gpiod_dbg(desc, "%s: status %d\n", __func__, status); @@ -635,12 +872,14 @@ err_clear_bit: } EXPORT_SYMBOL_GPL(gpiod_export); +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) static int match_export(struct device *dev, const void *desc) { struct gpiod_data *data = dev_get_drvdata(dev); - return data->desc == desc; + return gpiod_is_equal(data->desc, desc); } +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ /** * gpiod_export_link - create a sysfs link to an exported GPIO node @@ -657,6 +896,7 @@ static int match_export(struct device *dev, const void *desc) int gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) { +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) struct device *cdev; int ret; @@ -673,6 +913,9 @@ int gpiod_export_link(struct device *dev, const char *name, put_device(cdev); return ret; +#else + return -EOPNOTSUPP; +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ } EXPORT_SYMBOL_GPL(gpiod_export_link); @@ -684,8 +927,9 @@ EXPORT_SYMBOL_GPL(gpiod_export_link); */ void gpiod_unexport(struct gpio_desc *desc) { - struct gpiod_data *data; - struct device *dev; + struct gpiod_data *tmp, *desc_data = NULL; + struct gpiodev_data *gdev_data; + struct gpio_device *gdev; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); @@ -696,32 +940,50 @@ void gpiod_unexport(struct gpio_desc *desc) if (!test_bit(FLAG_EXPORT, &desc->flags)) return; - dev = class_find_device(&gpio_class, NULL, desc, match_export); - if (!dev) + gdev = gpiod_to_gpio_device(desc); + gdev_data = gdev_get_data(gdev); + if (!gdev_data) + return; + + list_for_each_entry(tmp, &gdev_data->exported_lines, list) { + if (gpiod_is_equal(desc, 
tmp->desc)) { + desc_data = tmp; + break; + } + } + + if (!desc_data) return; - data = dev_get_drvdata(dev); + list_del(&desc_data->list); clear_bit(FLAG_EXPORT, &desc->flags); - device_unregister(dev); +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + sysfs_put(desc_data->value_kn); + device_unregister(desc_data->dev); /* * Release irq after deregistration to prevent race with * edge_store. */ - if (data->irq_flags) - gpio_sysfs_free_irq(dev); + if (desc_data->irq_flags) + gpio_sysfs_free_irq(desc_data); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + + sysfs_remove_groups(desc_data->parent, + desc_data->chip_attr_groups); } - put_device(dev); - kfree(data); + mutex_destroy(&desc_data->mutex); + kfree(desc_data); } EXPORT_SYMBOL_GPL(gpiod_unexport); int gpiochip_sysfs_register(struct gpio_device *gdev) { + struct gpiodev_data *data; struct gpio_chip *chip; struct device *parent; - struct device *dev; + int err; /* * Many systems add gpio chips for SOC support very early, @@ -747,32 +1009,61 @@ int gpiochip_sysfs_register(struct gpio_device *gdev) else parent = &gdev->dev; - /* use chip->base for the ID; it's already known to be unique */ - dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), gdev, - gpiochip_groups, GPIOCHIP_NAME "%d", - chip->base); - if (IS_ERR(dev)) - return PTR_ERR(dev); + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->gdev = gdev; + INIT_LIST_HEAD(&data->exported_lines); guard(mutex)(&sysfs_lock); - gdev->mockdev = dev; + +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + /* use chip->base for the ID; it's already known to be unique */ + data->cdev_base = device_create_with_groups(&gpio_class, parent, + MKDEV(0, 0), data, + gpiochip_groups, + GPIOCHIP_NAME "%d", + chip->base); + if (IS_ERR(data->cdev_base)) { + err = PTR_ERR(data->cdev_base); + kfree(data); + return err; + } +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + + data->cdev_id = device_create_with_groups(&gpio_class, parent, + MKDEV(0, 0), data, + gpiochip_ext_groups, + "chip%d", gdev->id); + if (IS_ERR(data->cdev_id)) { +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + device_unregister(data->cdev_base); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + err = PTR_ERR(data->cdev_id); + kfree(data); + return err; + } return 0; } void gpiochip_sysfs_unregister(struct gpio_device *gdev) { + struct gpiodev_data *data; struct gpio_desc *desc; struct gpio_chip *chip; scoped_guard(mutex, &sysfs_lock) { - if (!gdev->mockdev) + data = gdev_get_data(gdev); + if (!data) return; - device_unregister(gdev->mockdev); - - /* prevent further gpiod exports */ - gdev->mockdev = NULL; +#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY) + device_unregister(data->cdev_base); +#endif /* CONFIG_GPIO_SYSFS_LEGACY */ + device_unregister(data->cdev_id); + kfree(data); } guard(srcu)(&gdev->srcu); @@ -798,9 +1089,6 @@ static int gpiofind_sysfs_register(struct gpio_chip *gc, const void *data) struct gpio_device *gdev = gc->gpiodev; int ret; - if (gdev->mockdev) - return 0; - ret = gpiochip_sysfs_register(gdev); if (ret) chip_err(gc, "failed to register the sysfs entry: %d\n", ret); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 3a3eca5b4c40..a93d2a9355e2 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -75,6 +75,19 @@ static const struct bus_type gpio_bus_type = { }; /* + * At the end we want all GPIOs to be dynamically allocated from 0. + * However, some legacy drivers still perform fixed allocation. + * Until they are all fixed, leave 0-512 space for them. 
+ */ +#define GPIO_DYNAMIC_BASE 512 +/* + * Define the maximum of the possible GPIO in the global numberspace. + * While the GPIO base and numbers are positive, we limit it with signed + * maximum as a lot of code is using negative values for special cases. + */ +#define GPIO_DYNAMIC_MAX INT_MAX + +/* * Number of GPIOs to use for the fast path in set array */ #define FASTPATH_NGPIO CONFIG_GPIOLIB_FASTPATH_LIMIT @@ -266,20 +279,6 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc) EXPORT_SYMBOL_GPL(gpiod_to_gpio_device); /** - * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin. - * @desc: Descriptor to compare. - * @other: The second descriptor to compare against. - * - * Returns: - * True if the descriptors refer to the same physical pin. False otherwise. - */ -bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other) -{ - return desc == other; -} -EXPORT_SYMBOL_GPL(gpiod_is_equal); - -/** * gpio_device_get_base() - Get the base GPIO number allocated by this device * @gdev: GPIO device * @@ -387,6 +386,21 @@ static int validate_desc(const struct gpio_desc *desc, const char *func) return; \ } while (0) +/** + * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin. + * @desc: Descriptor to compare. + * @other: The second descriptor to compare against. + * + * Returns: + * True if the descriptors refer to the same physical pin. False otherwise. + */ +bool gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other) +{ + return validate_desc(desc, __func__) > 0 && + !IS_ERR_OR_NULL(other) && desc == other; +} +EXPORT_SYMBOL_GPL(gpiod_is_equal); + static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset) { int ret; @@ -5221,8 +5235,8 @@ core_initcall(gpiolib_dev_init); static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) { bool active_low, is_irq, is_out; - unsigned int gpio = gdev->base; struct gpio_desc *desc; + unsigned int gpio = 0; struct gpio_chip *gc; unsigned long flags; int value; @@ -5326,8 +5340,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v) return 0; } - seq_printf(s, "%s: GPIOs %u-%u", dev_name(&gdev->dev), gdev->base, - gdev->base + gdev->ngpio - 1); + seq_printf(s, "%s: %u GPIOs", dev_name(&gdev->dev), gdev->ngpio); parent = gc->parent; if (parent) seq_printf(s, ", parent: %s/%s", diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 58f64056de77..9b74738a9ca5 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -27,8 +27,6 @@ * @dev: the GPIO device struct * @chrdev: character device for the GPIO device * @id: numerical ID number for the GPIO chip - * @mockdev: class device used by the deprecated sysfs interface (may be - * NULL) * @owner: helps prevent removal of modules exporting active GPIOs * @chip: pointer to the corresponding gpiochip, holding static * data for this device @@ -65,7 +63,6 @@ struct gpio_device { struct device dev; struct cdev chrdev; int id; - struct device *mockdev; struct module *owner; struct gpio_chip __rcu *chip; struct gpio_desc *descs; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 78f8755996f0..aa32df7e2fb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5193,6 +5193,8 @@ exit: dev->dev->power.disable_depth--; #endif } + + amdgpu_vram_mgr_clear_reset_blocks(adev); adev->in_suspend = false; if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 208b7d1d8a27..450e4bf093b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size); int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev); bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, struct ttm_resource *res); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index abdc52b0895a..07c936e90d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -783,6 +783,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) } /** + * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks + * + * @adev: amdgpu device pointer + * + * Reset the cleared drm buddy blocks. + */ +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct drm_buddy *mm = &mgr->mm; + + mutex_lock(&mgr->lock); + drm_buddy_reset_clear(mm, false); + mutex_unlock(&mgr->lock); +} + +/** * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection * * @man: TTM memory type manager diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index de9c23537465..834b42a4d31f 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1373,7 +1373,7 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, 0); mutex_unlock(&pdata->comms_mutex); - }; + } drm_bridge_add(&pdata->bridge); diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c index ec7eac6b595f..718c9122bc3a 100644 --- a/drivers/gpu/drm/display/drm_dp_aux_bus.c +++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c @@ -57,7 +57,7 @@ static int dp_aux_ep_probe(struct device *dev) container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep); int ret; - ret = dev_pm_domain_attach(dev, true); + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (ret) return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n"); diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index 241c855f891f..66aff35f8647 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -405,6 +405,49 @@ drm_get_buddy(struct drm_buddy_block *block) EXPORT_SYMBOL(drm_get_buddy); /** + * drm_buddy_reset_clear - reset blocks clear state + * + * @mm: DRM buddy manager + * @is_clear: blocks clear state + * + * Reset the clear state based on @is_clear value for each block + * in the freelist. 
+ */ +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) +{ + u64 root_size, size, start; + unsigned int order; + int i; + + size = mm->size; + for (i = 0; i < mm->n_roots; ++i) { + order = ilog2(size) - ilog2(mm->chunk_size); + start = drm_buddy_block_offset(mm->roots[i]); + __force_merge(mm, start, start + size, order); + + root_size = mm->chunk_size << order; + size -= root_size; + } + + for (i = 0; i <= mm->max_order; ++i) { + struct drm_buddy_block *block; + + list_for_each_entry_reverse(block, &mm->free_list[i], link) { + if (is_clear != drm_buddy_block_is_clear(block)) { + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); + } + } + } + } +} +EXPORT_SYMBOL(drm_buddy_reset_clear); + +/** * drm_buddy_free_block - free a block * * @mm: DRM buddy manager diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index b7f033d4352a..4f0320df858f 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj) if (drm_gem_is_imported(gem_obj)) { if (dma_obj->vaddr) - dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map); + dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, dma_obj->sgt); } else if (dma_obj->vaddr) { if (dma_obj->map_noncoherent) diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 6f72e7a0f427..6ff22e04029e 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -419,6 +419,7 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap); static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir, unsigned int num_planes) { + struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; int ret; @@ -427,9 +428,10 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat obj = drm_gem_fb_get_obj(fb, num_planes); if (!obj) continue; + import_attach = obj->import_attach; if (!drm_gem_is_imported(obj)) continue; - ret = dma_buf_end_cpu_access(obj->dma_buf, dir); + ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); if (ret) drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n", ret, num_planes, dir); @@ -452,6 +454,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat */ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) { + struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; unsigned int i; int ret; @@ -462,9 +465,10 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct ret = -EINVAL; goto err___drm_gem_fb_end_cpu_access; } + import_attach = obj->import_attach; if (!drm_gem_is_imported(obj)) continue; - ret = dma_buf_begin_cpu_access(obj->dma_buf, dir); + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); if (ret) goto err___drm_gem_fb_end_cpu_access; } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index aa43265f4f4f..a5dbee6974ab 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -349,7 +349,7 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, int ret = 0; if (drm_gem_is_imported(obj)) { - ret = dma_buf_vmap(obj->dma_buf, map); + ret = 
dma_buf_vmap(obj->import_attach->dmabuf, map); } else { pgprot_t prot = PAGE_KERNEL; @@ -409,7 +409,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, struct drm_gem_object *obj = &shmem->base; if (drm_gem_is_imported(obj)) { - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { dma_resv_assert_held(shmem->base.resv); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index d828502268b8..a0a5d725eab0 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -453,7 +453,13 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, } mutex_lock(&dev->object_name_lock); - /* re-export the original imported/exported object */ + /* re-export the original imported object */ + if (obj->import_attach) { + dmabuf = obj->import_attach->dmabuf; + get_dma_buf(dmabuf); + goto out_have_obj; + } + if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 917ad527c961..40a50c60dfff 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr); if (etnaviv_obj->vaddr) - dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map); + dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map); /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: @@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) lockdep_assert_held(&etnaviv_obj->lock); - ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map); + ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); if (ret) return NULL; return map.vaddr; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 6f0a0bc71b06..43aa1f97378b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7061,7 +7061,8 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat struct drm_i915_private *i915 = to_i915(intel_state->base.dev); struct drm_plane *plane; struct drm_plane_state *new_plane_state; - int ret, i; + long ret; + int i; for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { if (new_plane_state->fence) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 640c43bf62d4..724de7ed3c04 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1604,6 +1604,12 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select) { + struct intel_display *display = to_intel_display(intel_dp); + + /* FIXME g4x can't generate an exact 2.7GHz with the 96MHz non-SSC refclk */ + if (display->platform.g4x && port_clock == 268800) + port_clock = 270000; + /* eDP 1.4 rate select method. 
*/ if (intel_dp->use_rate_select) { *link_bw = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 05e440643aa2..f4f1c979d1b9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -105,7 +105,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * if (!obj->base.filp) return -ENODEV; - ret = call_mmap(obj->base.filp, vma); + ret = vfs_mmap(obj->base.filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 19a3eb82dc6a..9cbb0f68a5bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -6,6 +6,7 @@ #include <linux/pagevec.h> #include <linux/shmem_fs.h> #include <linux/swap.h> +#include <linux/uio.h> #include <drm/drm_cache.h> @@ -400,12 +401,12 @@ static int shmem_pwrite(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg) { - struct address_space *mapping = obj->base.filp->f_mapping; - const struct address_space_operations *aops = mapping->a_ops; char __user *user_data = u64_to_user_ptr(arg->data_ptr); - u64 remain; - loff_t pos; - unsigned int pg; + struct file *file = obj->base.filp; + struct kiocb kiocb; + struct iov_iter iter; + ssize_t written; + u64 size = arg->size; /* Caller already validated user args */ GEM_BUG_ON(!access_ok(user_data, arg->size)); @@ -428,63 +429,24 @@ shmem_pwrite(struct drm_i915_gem_object *obj, if (obj->mm.madv != I915_MADV_WILLNEED) return -EFAULT; - /* - * Before the pages are instantiated the object is treated as being - * in the CPU domain. The pages will be clflushed as required before - * use, and we can freely write into the pages directly. If userspace - * races pwrite with any other operation; corruption will ensue - - * that is userspace's prerogative! 
- */ + if (size > MAX_RW_COUNT) + return -EFBIG; - remain = arg->size; - pos = arg->offset; - pg = offset_in_page(pos); + if (!file->f_op->write_iter) + return -EINVAL; - do { - unsigned int len, unwritten; - struct folio *folio; - void *data, *vaddr; - int err; - char __maybe_unused c; - - len = PAGE_SIZE - pg; - if (len > remain) - len = remain; - - /* Prefault the user page to reduce potential recursion */ - err = __get_user(c, user_data); - if (err) - return err; - - err = __get_user(c, user_data + len - 1); - if (err) - return err; - - err = aops->write_begin(obj->base.filp, mapping, pos, len, - &folio, &data); - if (err < 0) - return err; - - vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos)); - pagefault_disable(); - unwritten = __copy_from_user_inatomic(vaddr, user_data, len); - pagefault_enable(); - kunmap_local(vaddr); - - err = aops->write_end(obj->base.filp, mapping, pos, len, - len - unwritten, folio, data); - if (err < 0) - return err; - - /* We don't handle -EFAULT, leave it to the caller to check */ - if (unwritten) - return -ENODEV; - - remain -= len; - user_data += len; - pos += len; - pg = 0; - } while (remain); + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = arg->offset; + iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)user_data, size); + + written = file->f_op->write_iter(&kiocb, &iter); + BUG_ON(written == -EIOCBQUEUED); + + if (written != size) + return -EIO; + + if (written < 0) + return written; return 0; } @@ -637,9 +599,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, { struct drm_i915_gem_object *obj; struct file *file; - const struct address_space_operations *aops; - loff_t pos; - int err; + loff_t pos = 0; + ssize_t err; GEM_WARN_ON(IS_DGFX(i915)); obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE)); @@ -649,29 +610,15 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); file = obj->base.filp; - aops = file->f_mapping->a_ops; - pos = 0; - do { - unsigned int len = min_t(typeof(size), size, PAGE_SIZE); - struct folio *folio; - void *fsdata; - - err = aops->write_begin(file, file->f_mapping, pos, len, - &folio, &fsdata); - if (err < 0) - goto fail; + err = kernel_write(file, data, size, &pos); - memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len); + if (err < 0) + goto fail; - err = aops->write_end(file, file->f_mapping, pos, len, len, - folio, fsdata); - if (err < 0) - goto fail; - - size -= len; - data += len; - pos += len; - } while (size); + if (err != size) { + err = -EIO; + goto fail; + } return obj; diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c index 65d84a93c525..a09e2eb47175 100644 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.c +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c @@ -5,16 +5,23 @@ #include <linux/fs.h> #include <linux/mount.h> +#include <linux/fs_context.h> #include "i915_drv.h" #include "i915_gemfs.h" #include "i915_utils.h" +static int add_param(struct fs_context *fc, const char *key, const char *val) +{ + return vfs_parse_fs_string(fc, key, val, strlen(val)); +} + void i915_gemfs_init(struct drm_i915_private *i915) { - char huge_opt[] = "huge=within_size"; /* r/w */ struct file_system_type *type; + struct fs_context *fc; struct vfsmount *gemfs; + int ret; /* * By creating our own shmemfs mountpoint, we can pass in @@ -38,8 +45,16 @@ void i915_gemfs_init(struct drm_i915_private *i915) if (!type) goto err; - gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, 
huge_opt); - if (IS_ERR(gemfs)) + fc = fs_context_for_mount(type, SB_KERNMOUNT); + if (IS_ERR(fc)) + goto err; + ret = add_param(fc, "source", "tmpfs"); + if (!ret) + ret = add_param(fc, "huge", "within_size"); + if (!ret) + gemfs = fc_mount_longterm(fc); + put_fs_context(fc); + if (ret) goto err; i915->mm.gemfs = gemfs; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 7bb64fcdd497..1527b801f013 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1284,9 +1284,6 @@ nouveau_ioctls[] = { DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW), }; -#define DRM_IOCTL_NOUVEAU_NVIF _IOC(_IOC_READ | _IOC_WRITE, DRM_IOCTL_BASE, \ - DRM_COMMAND_BASE + DRM_NOUVEAU_NVIF, 0) - long nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -1300,10 +1297,14 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } - if ((cmd & ~IOCSIZE_MASK) == DRM_IOCTL_NOUVEAU_NVIF) + switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { + case DRM_NOUVEAU_NVIF: ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd)); - else + break; + default: ret = drm_ioctl(file, cmd, arg); + break; + } pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c index baa10227d51a..80c01017d642 100644 --- a/drivers/gpu/drm/nouveau/nvif/chan.c +++ b/drivers/gpu/drm/nouveau/nvif/chan.c @@ -39,6 +39,9 @@ nvif_chan_gpfifo_post(struct nvif_chan *chan) const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size; const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max; + if (!chan->func->gpfifo.post) + return 0; + return chan->func->gpfifo.post(chan, gpptr, pbptr); } diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index e671aa241720..ac678de7fe5e 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -355,17 +355,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_destroy); -/* drm_sched_entity_clear_dep - callback to clear the entities dependency */ -static void drm_sched_entity_clear_dep(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - - entity->dependency = NULL; - dma_fence_put(f); -} - /* * drm_sched_entity_wakeup - callback to clear the entity's dependency and * wake up the scheduler @@ -376,7 +365,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct drm_sched_entity *entity = container_of(cb, struct drm_sched_entity, cb); - drm_sched_entity_clear_dep(f, cb); + entity->dependency = NULL; + dma_fence_put(f); drm_sched_wakeup(entity->rq->sched); } @@ -429,13 +419,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(entity->dependency); entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - drm_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; } if (!dma_fence_add_callback(entity->dependency, &entity->cb, diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c index 4c5e18590a5c..8ec6ed82b3d9 100644 --- a/drivers/gpu/drm/v3d/v3d_gemfs.c +++ b/drivers/gpu/drm/v3d/v3d_gemfs.c @@ -3,14 +3,21 @@ 
#include <linux/fs.h> #include <linux/mount.h> +#include <linux/fs_context.h> #include "v3d_drv.h" +static int add_param(struct fs_context *fc, const char *key, const char *val) +{ + return vfs_parse_fs_string(fc, key, val, strlen(val)); +} + void v3d_gemfs_init(struct v3d_dev *v3d) { - char huge_opt[] = "huge=within_size"; struct file_system_type *type; + struct fs_context *fc; struct vfsmount *gemfs; + int ret; /* * By creating our own shmemfs mountpoint, we can pass in @@ -28,8 +35,16 @@ void v3d_gemfs_init(struct v3d_dev *v3d) if (!type) goto err; - gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt); - if (IS_ERR(gemfs)) + fc = fs_context_for_mount(type, SB_KERNMOUNT); + if (IS_ERR(fc)) + goto err; + ret = add_param(fc, "source", "tmpfs"); + if (!ret) + ret = add_param(fc, "huge", "within_size"); + if (!ret) + gemfs = fc_mount_longterm(fc); + put_fs_context(fc); + if (ret) goto err; v3d->gemfs = gemfs; diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 1118a0250279..ce49282198cb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -204,15 +204,16 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct dma_buf_attachment *attach = obj->import_attach; if (drm_gem_is_imported(obj)) { - struct dma_buf *dmabuf = obj->dma_buf; + struct dma_buf *dmabuf = attach->dmabuf; dma_resv_lock(dmabuf->resv, NULL); virtgpu_dma_buf_unmap(bo); dma_resv_unlock(dmabuf->resv); - dma_buf_detach(dmabuf, obj->import_attach); + dma_buf_detach(dmabuf, attach); dma_buf_put(dmabuf); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index c55382167c1b..e417921af584 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -85,10 +85,10 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) int ret; if (drm_gem_is_imported(obj)) { - ret = dma_buf_vmap(obj->dma_buf, map); + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); if (!ret) { if (drm_WARN_ON(obj->dev, map->is_iomem)) { - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); return -EIO; } } @@ -102,7 +102,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { if (drm_gem_is_imported(obj)) - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); else drm_gem_ttm_vunmap(obj, map); } diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index 187fa6490eaf..6357325f3939 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -24,7 +24,7 @@ extern struct fault_attr gt_reset_failure; static inline bool xe_fault_inject_gt_reset(void) { - return should_fail(>_reset_failure, 1); + return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(>_reset_failure, 1); } struct xe_gt *xe_gt_alloc(struct xe_tile *tile); diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c index b378848d3b7b..56930ad42962 100644 --- a/drivers/gpu/drm/xe/xe_vsec.c +++ b/drivers/gpu/drm/xe/xe_vsec.c @@ -24,6 +24,7 @@ #define BMG_DEVICE_ID 0xE2F8 static struct intel_vsec_header bmg_telemetry = { + .rev = 1, .length = 0x10, .id = VSEC_ID_TELEMETRY, .num_entries = 2, @@ -32,28 +33,19 @@ static struct intel_vsec_header bmg_telemetry = { .offset = 
BMG_DISCOVERY_OFFSET, }; -static struct intel_vsec_header bmg_punit_crashlog = { +static struct intel_vsec_header bmg_crashlog = { + .rev = 1, .length = 0x10, .id = VSEC_ID_CRASHLOG, - .num_entries = 1, - .entry_size = 4, + .num_entries = 2, + .entry_size = 6, .tbir = 0, .offset = BMG_DISCOVERY_OFFSET + 0x60, }; -static struct intel_vsec_header bmg_oobmsm_crashlog = { - .length = 0x10, - .id = VSEC_ID_CRASHLOG, - .num_entries = 1, - .entry_size = 4, - .tbir = 0, - .offset = BMG_DISCOVERY_OFFSET + 0x78, -}; - static struct intel_vsec_header *bmg_capabilities[] = { &bmg_telemetry, - &bmg_punit_crashlog, - &bmg_oobmsm_crashlog, + &bmg_crashlog, NULL }; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index b31b8a2fd540..768f4df623c4 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2303,6 +2303,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) case BUS_I2C: bus = "I2C"; break; + case BUS_SDW: + bus = "SOUNDWIRE"; + break; case BUS_VIRTUAL: bus = "VIRTUAL"; break; diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c index 8dd22be2ca0b..48c514da34eb 100644 --- a/drivers/hv/mshv_eventfd.c +++ b/drivers/hv/mshv_eventfd.c @@ -377,10 +377,11 @@ static int mshv_irqfd_assign(struct mshv_partition *pt, struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL; struct mshv_irqfd *irqfd, *tmp; unsigned int events; - struct fd f; int ret; int idx; + CLASS(fd, f)(args->fd); + irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); if (!irqfd) return -ENOMEM; @@ -390,8 +391,7 @@ static int mshv_irqfd_assign(struct mshv_partition *pt, INIT_WORK(&irqfd->irqfd_shutdown, mshv_irqfd_shutdown); seqcount_spinlock_init(&irqfd->irqfd_irqe_sc, &pt->pt_irqfds_lock); - f = fdget(args->fd); - if (!fd_file(f)) { + if (fd_empty(f)) { ret = -EBADF; goto out; } @@ -496,12 +496,6 @@ static int mshv_irqfd_assign(struct mshv_partition *pt, mshv_assert_irq_slow(irqfd); srcu_read_unlock(&pt->pt_irq_srcu, idx); - /* - * do not drop the file until the irqfd is fully initialized, otherwise - * we might race against the POLLHUP - */ - fdput(f); - return 0; fail: @@ -514,8 +508,6 @@ fail: if (eventfd && !IS_ERR(eventfd)) eventfd_ctx_put(eventfd); - fdput(f); - out: kfree(irqfd); return ret; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 079620dd4286..9d28fcf7cd2a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -700,6 +700,16 @@ config SENSORS_MC13783_ADC help Support for the A/D converter on MC13783 and MC13892 PMIC. +config SENSORS_MC33XS2410 + tristate "MC33XS2410 HWMON support" + depends on PWM_MC33XS2410 + help + If you say yes here you get hardware monitoring support for + MC33XS2410. + + This driver can also be built as a module. If so, the module + will be called mc33xs2410_hwmon. 
+ config SENSORS_FSCHMD tristate "Fujitsu Siemens Computers sensor chips" depends on (X86 || COMPILE_TEST) && I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 48e5866c0c9a..cd8bc4752b4d 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -165,6 +165,7 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o obj-$(CONFIG_MAX31827) += max31827.o obj-$(CONFIG_SENSORS_MAX77705) += max77705-hwmon.o obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o +obj-$(CONFIG_SENSORS_MC33XS2410) += mc33xs2410_hwmon.o obj-$(CONFIG_SENSORS_MC34VR500) += mc34vr500.o obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o obj-$(CONFIG_SENSORS_TC654) += tc654.o diff --git a/drivers/hwmon/mc33xs2410_hwmon.c b/drivers/hwmon/mc33xs2410_hwmon.c new file mode 100644 index 000000000000..23eb90e33709 --- /dev/null +++ b/drivers/hwmon/mc33xs2410_hwmon.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025 Liebherr-Electronics and Drives GmbH + */ + +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/hwmon.h> +#include <linux/mc33xs2410.h> +#include <linux/module.h> + +/* ctrl registers */ + +#define MC33XS2410_TEMP_WT 0x29 +#define MC33XS2410_TEMP_WT_MASK GENMASK(7, 0) + +/* diag registers */ + +/* chan in { 1 ... 4 } */ +#define MC33XS2410_OUT_STA(chan) (0x02 + (chan) - 1) +#define MC33XS2410_OUT_STA_OTW BIT(8) + +#define MC33XS2410_TS_TEMP_DIE 0x26 +#define MC33XS2410_TS_TEMP_MASK GENMASK(9, 0) + +/* chan in { 1 ... 4 } */ +#define MC33XS2410_TS_TEMP(chan) (0x2f + (chan) - 1) + +static const struct hwmon_channel_info * const mc33xs2410_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_LABEL | HWMON_T_INPUT, + HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_ALARM, + HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_ALARM, + HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_ALARM, + HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_ALARM), + NULL, +}; + +static umode_t mc33xs2410_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_alarm: + case hwmon_temp_label: + return 0444; + case hwmon_temp_max: + return 0644; + default: + return 0; + } +} + +static int mc33xs2410_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct spi_device *spi = dev_get_drvdata(dev); + u16 reg_val; + int ret; + u8 reg; + + switch (attr) { + case hwmon_temp_input: + reg = (channel == 0) ? 
MC33XS2410_TS_TEMP_DIE : + MC33XS2410_TS_TEMP(channel); + ret = mc33xs2410_read_reg_diag(spi, reg, &reg_val); + if (ret < 0) + return ret; + + /* LSB is 0.25 degree celsius */ + *val = FIELD_GET(MC33XS2410_TS_TEMP_MASK, reg_val) * 250 - 40000; + return 0; + case hwmon_temp_alarm: + ret = mc33xs2410_read_reg_diag(spi, MC33XS2410_OUT_STA(channel), + &reg_val); + if (ret < 0) + return ret; + + *val = FIELD_GET(MC33XS2410_OUT_STA_OTW, reg_val); + return 0; + case hwmon_temp_max: + ret = mc33xs2410_read_reg_ctrl(spi, MC33XS2410_TEMP_WT, &reg_val); + if (ret < 0) + return ret; + + /* LSB is 1 degree celsius */ + *val = FIELD_GET(MC33XS2410_TEMP_WT_MASK, reg_val) * 1000 - 40000; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int mc33xs2410_hwmon_write(struct device *dev, + enum hwmon_sensor_types type, u32 attr, + int channel, long val) +{ + struct spi_device *spi = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_temp_max: + val = clamp_val(val, -40000, 215000); + + /* LSB is 1 degree celsius */ + val = (val / 1000) + 40; + return mc33xs2410_modify_reg(spi, MC33XS2410_TEMP_WT, + MC33XS2410_TEMP_WT_MASK, val); + default: + return -EOPNOTSUPP; + } +} + +static const char *const mc33xs2410_temp_label[] = { + "Central die temperature", + "Channel 1 temperature", + "Channel 2 temperature", + "Channel 3 temperature", + "Channel 4 temperature", +}; + +static int mc33xs2410_read_string(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + *str = mc33xs2410_temp_label[channel]; + + return 0; +} + +static const struct hwmon_ops mc33xs2410_hwmon_hwmon_ops = { + .is_visible = mc33xs2410_hwmon_is_visible, + .read = mc33xs2410_hwmon_read, + .read_string = mc33xs2410_read_string, + .write = mc33xs2410_hwmon_write, +}; + +static const struct hwmon_chip_info mc33xs2410_hwmon_chip_info = { + .ops = &mc33xs2410_hwmon_hwmon_ops, + .info = mc33xs2410_hwmon_info, +}; + +static int mc33xs2410_hwmon_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct device *dev = &adev->dev; + struct spi_device *spi = container_of(dev->parent, struct spi_device, dev); + struct device *hwmon; + + hwmon = devm_hwmon_device_register_with_info(dev, NULL, spi, + &mc33xs2410_hwmon_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hwmon); +} + +static const struct auxiliary_device_id mc33xs2410_hwmon_ids[] = { + { + .name = "pwm_mc33xs2410.hwmon", + }, + { } +}; +MODULE_DEVICE_TABLE(auxiliary, mc33xs2410_hwmon_ids); + +static struct auxiliary_driver mc33xs2410_hwmon_driver = { + .probe = mc33xs2410_hwmon_probe, + .id_table = mc33xs2410_hwmon_ids, +}; +module_auxiliary_driver(mc33xs2410_hwmon_driver); + +MODULE_DESCRIPTION("NXP MC33XS2410 hwmon driver"); +MODULE_AUTHOR("Dimitri Fedrau <dimitri.fedrau@liebherr.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 6059f585843e..fc348924d522 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -452,8 +452,10 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len) if (!(status & I2C_STATUS_BUS_ACTIVE)) break; - if (time_after(jiffies, timeout)) + if (time_after(jiffies, timeout)) { ret = -ETIMEDOUT; + break; + } usleep_range(len, len * 2); } diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 0862b98007f5..6316360475e7 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -607,7 +607,6 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev
*i2c_dev) static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) { u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode; - acpi_handle handle = ACPI_HANDLE(i2c_dev->dev); struct i2c_timings *t = &i2c_dev->timings; int err; @@ -619,11 +618,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) * emit a noisy warning on error, which won't stay unnoticed and * won't hose machine entirely. */ - if (handle) - err = acpi_evaluate_object(handle, "_RST", NULL, NULL); - else - err = reset_control_reset(i2c_dev->rst); - + err = device_reset(i2c_dev->dev); WARN_ON_ONCE(err); if (IS_DVC(i2c_dev)) @@ -1666,19 +1661,6 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev) i2c_dev->is_vi = true; } -static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev) -{ - if (ACPI_HANDLE(i2c_dev->dev)) - return 0; - - i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c"); - if (IS_ERR(i2c_dev->rst)) - return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst), - "failed to get reset control\n"); - - return 0; -} - static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev) { int err; @@ -1788,10 +1770,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) tegra_i2c_parse_dt(i2c_dev); - err = tegra_i2c_init_reset(i2c_dev); - if (err) - return err; - err = tegra_i2c_init_clocks(i2c_dev); if (err) return err; diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index 9b05ff53d3d7..af1381949f50 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -116,15 +116,16 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq, for (i = 0; i < num; i++) { struct virtio_i2c_req *req = &reqs[i]; - wait_for_completion(&req->completion); - - if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK) - failed = true; + if (!failed) { + if (wait_for_completion_interruptible(&req->completion)) + failed = true; + else if (req->in_hdr.status != VIRTIO_I2C_MSG_OK) + failed = true; + else + j++; + } i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed); - - if (!failed) - j++; } return j; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 2ad2b1838f0f..38eabf1173da 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -573,7 +573,7 @@ static int i2c_device_probe(struct device *dev) goto err_clear_wakeup_irq; do_power_on = !i2c_acpi_waive_d0_probe(dev); - status = dev_pm_domain_attach(&client->dev, do_power_on); + status = dev_pm_domain_attach(&client->dev, do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0); if (status) goto err_clear_wakeup_irq; diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 721ab69e84ac..7c4f309a4cb6 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -37,6 +37,17 @@ config KEYBOARD_ADP5520 To compile this driver as a module, choose M here: the module will be called adp5520-keys. +config KEYBOARD_ADP5585 + tristate "ADP558x keypad support" + depends on MFD_ADP5585 + select INPUT_MATRIXKMAP + help + This option enables support for the KEYPAD function found in the Analog + Devices ADP5585 and similar devices. + + To compile this driver as a module, choose M here: the + module will be called adp5585-keys. + config KEYBOARD_ADP5588 tristate "ADP5588/87 I2C QWERTY Keypad and IO Expander" depends on I2C @@ -50,16 +61,6 @@ config KEYBOARD_ADP5588 To compile this driver as a module, choose M here: the module will be called adp5588-keys. 
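The i2c-qup hunk above turns the timeout check into a proper loop exit: previously the loop recorded -ETIMEDOUT but kept polling until the bus-active bit happened to clear. A rough sketch of the corrected shape, with the register access and the one-second budget as stand-ins rather than driver code:

/* Hypothetical helper showing the poll-until-idle-or-timeout pattern. */
static int wait_for_bus_idle(void __iomem *status_reg, int len)
{
	unsigned long timeout = jiffies + HZ;	/* assumed 1 s budget */
	u32 status;
	int ret = 0;

	for (;;) {
		status = readl(status_reg);
		if (!(status & I2C_STATUS_BUS_ACTIVE))
			break;			/* bus went idle */

		if (time_after(jiffies, timeout)) {
			ret = -ETIMEDOUT;
			break;			/* the fix: stop polling once timed out */
		}

		usleep_range(len, len * 2);	/* back off before re-reading */
	}

	return ret;
}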
-config KEYBOARD_ADP5589 - tristate "ADP5585/ADP5589 I2C QWERTY Keypad and IO Expander" - depends on I2C - help - Say Y here if you want to use a ADP5585/ADP5589 attached to your - system I2C bus. - - To compile this driver as a module, choose M here: the - module will be called adp5589-keys. - config KEYBOARD_AMIGA tristate "Amiga keyboard" depends on AMIGA diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 1e0721c30709..8bc20ab2b103 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -7,8 +7,8 @@ obj-$(CONFIG_KEYBOARD_ADC) += adc-keys.o obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o +obj-$(CONFIG_KEYBOARD_ADP5585) += adp5585-keys.o obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o -obj-$(CONFIG_KEYBOARD_ADP5589) += adp5589-keys.o obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o obj-$(CONFIG_KEYBOARD_APPLESPI) += applespi.o obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o diff --git a/drivers/input/keyboard/adp5585-keys.c b/drivers/input/keyboard/adp5585-keys.c new file mode 100644 index 000000000000..4208229e1356 --- /dev/null +++ b/drivers/input/keyboard/adp5585-keys.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Analog Devices ADP5585 Keys driver + * + * Copyright (C) 2025 Analog Devices, Inc. + */ + +#include <linux/bitmap.h> +#include <linux/container_of.h> +#include <linux/device.h> +#include <linux/find.h> +#include <linux/input.h> +#include <linux/input/matrix_keypad.h> +#include <linux/mfd/adp5585.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/types.h> + +/* As needed for the matrix parsing code */ +#define ADP5589_MAX_KEYMAPSIZE 123 + +struct adp5585_kpad_chip { + u8 key_ev_min; + u8 key_ev_max; + u8 max_rows; + u8 max_cols; +}; + +struct adp5585_kpad { + const struct adp5585_kpad_chip *info; + struct notifier_block nb; + struct input_dev *input; + unsigned short keycode[ADP5589_MAX_KEYMAPSIZE]; + struct device *dev; + unsigned long keypad; + int row_shift; +}; + +static int adp5585_keys_validate_events(const struct adp5585_kpad *kpad, + const u32 *events, u32 n_events) +{ + unsigned int ev; + u32 row, col; + + for (ev = 0; ev < n_events; ev++) { + if (events[ev] < kpad->info->key_ev_min || + events[ev] > kpad->info->key_ev_max) + continue; + + /* + * if the event is to be generated by the keymap, we need to make + * sure that the pins are part of it! 
+ */ + row = (events[ev] - 1) / kpad->info->max_cols; + col = (events[ev] - 1) % kpad->info->max_cols; + + if (test_bit(row, &kpad->keypad) && + test_bit(col + kpad->info->max_rows, &kpad->keypad)) + continue; + + return dev_err_probe(kpad->dev, -EINVAL, + "Invalid unlock/reset event(%u) not used in the keypad\n", + events[ev]); + } + + return 0; +} + +static int adp5585_keys_check_special_events(const struct adp5585_dev *adp5585, + const struct adp5585_kpad *kpad) +{ + int error; + + error = adp5585_keys_validate_events(kpad, adp5585->unlock_keys, + adp5585->nkeys_unlock); + if (error) + return error; + + error = adp5585_keys_validate_events(kpad, adp5585->reset1_keys, + adp5585->nkeys_reset1); + if (error) + return error; + + return adp5585_keys_validate_events(kpad, adp5585->reset2_keys, + adp5585->nkeys_reset2); +} + +static void adp5585_keys_pins_free(void *data) +{ + struct adp5585_kpad *kpad = data; + struct adp5585_dev *adp5585 = dev_get_drvdata(kpad->dev->parent); + unsigned int pin; + + for_each_set_bit(pin, &kpad->keypad, adp5585->n_pins) + clear_bit(pin, adp5585->pin_usage); +} + +static int adp5585_keys_parse_fw(const struct adp5585_dev *adp5585, + struct adp5585_kpad *kpad) +{ + struct device *dev = kpad->dev; + u32 cols = 0, rows = 0, pin; + int error, n_pins; + + /* + * We do not check for errors (or no value) since the input device is + * only added if this property is present in the first place. + */ + n_pins = device_property_count_u32(dev, "adi,keypad-pins"); + if (n_pins > adp5585->n_pins) + return dev_err_probe(dev, -EINVAL, + "Too many keypad pins (%d) defined (max=%d)\n", + n_pins, adp5585->n_pins); + + unsigned int *keypad_pins __free(kfree) = kcalloc(n_pins, sizeof(*keypad_pins), + GFP_KERNEL); + if (!keypad_pins) + return -ENOMEM; + + error = device_property_read_u32_array(dev, "adi,keypad-pins", + keypad_pins, n_pins); + if (error) + return error; + + /* + * We can add the action here since it makes the code easier and nothing + * "bad" will happen out of it. Worst case, it will be a no-op and no + * bit will set. + */ + error = devm_add_action_or_reset(dev, adp5585_keys_pins_free, kpad); + if (error) + return error; + + for (pin = 0; pin < n_pins; pin++) { + if (keypad_pins[pin] >= adp5585->n_pins) + return dev_err_probe(dev, -EINVAL, + "Invalid keypad pin(%u) defined\n", + keypad_pins[pin]); + + if (test_and_set_bit(keypad_pins[pin], adp5585->pin_usage)) + return dev_err_probe(dev, -EBUSY, + "Keypad pin(%u) already used\n", + keypad_pins[pin]); + + __set_bit(keypad_pins[pin], &kpad->keypad); + } + + /* + * Note that given that we get a mask (and the HW allows it), we + * can have holes in our keypad (eg: row0, row1 and row7 enabled). + * However, for the matrix parsing functions we need to pass the + * number of rows/cols as the maximum row/col used plus 1. This + * pretty much means we will also have holes in our SW keypad. 
+ */ + + rows = find_last_bit(&kpad->keypad, kpad->info->max_rows) + 1; + if (rows == kpad->info->max_rows + 1) + return dev_err_probe(dev, -EINVAL, + "No rows defined in the keypad!\n"); + + cols = find_last_bit(&kpad->keypad, kpad->info->max_cols + kpad->info->max_rows); + if (cols < kpad->info->max_rows) + return dev_err_probe(dev, -EINVAL, + "No columns defined in the keypad!\n"); + + cols = cols + 1 - kpad->info->max_rows; + + error = matrix_keypad_build_keymap(NULL, NULL, rows, cols, + kpad->keycode, kpad->input); + if (error) + return error; + + kpad->row_shift = get_count_order(cols); + + if (device_property_read_bool(kpad->dev, "autorepeat")) + __set_bit(EV_REP, kpad->input->evbit); + + error = adp5585_keys_check_special_events(adp5585, kpad); + if (error) + return error; + + return 0; +} + +static int adp5585_keys_setup(const struct adp5585_dev *adp5585, + struct adp5585_kpad *kpad) +{ + unsigned long keys_bits, start = 0, nbits = kpad->info->max_rows; + const struct adp5585_regs *regs = adp5585->regs; + unsigned int i = 0, max_cols = kpad->info->max_cols; + int error; + + /* + * Take care as the below assumes max_rows is always less than or equal + * to 8, which is true for the supported devices. If we happen to add + * another device we need to make sure this still holds true. Although + * adding a new device is very unlikely. + */ + do { + keys_bits = bitmap_read(&kpad->keypad, start, nbits); + if (keys_bits) { + error = regmap_write(adp5585->regmap, regs->pin_cfg_a + i, + keys_bits); + if (error) + return error; + } + + start += nbits; + if (max_cols > 8) { + nbits = 8; + max_cols -= nbits; + } else { + nbits = max_cols; + } + + i++; + } while (start < kpad->info->max_rows + kpad->info->max_cols); + + return 0; +} + +static int adp5585_keys_ev_handle(struct notifier_block *nb, unsigned long key, + void *data) +{ + struct adp5585_kpad *kpad = container_of(nb, struct adp5585_kpad, nb); + unsigned long key_press = (unsigned long)data; + unsigned int row, col, code; + + /* make sure the event is for us */ + if (key < kpad->info->key_ev_min || key > kpad->info->key_ev_max) + return NOTIFY_DONE; + + /* + * Unlikely, but let's be on the safe side! We do not return any error + * because the event was indeed for us but with some weird value. So, + * we still want the caller to know that the right handler was called.
+ */ + if (!key) + return NOTIFY_BAD; + + row = (key - 1) / (kpad->info->max_cols); + col = (key - 1) % (kpad->info->max_cols); + code = MATRIX_SCAN_CODE(row, col, kpad->row_shift); + + dev_dbg_ratelimited(kpad->dev, "report key(%lu) r(%d) c(%d) code(%d)\n", + key, row, col, kpad->keycode[code]); + + input_report_key(kpad->input, kpad->keycode[code], key_press); + input_sync(kpad->input); + + return NOTIFY_STOP; +} + +static void adp5585_keys_unreg_notifier(void *data) +{ + struct adp5585_kpad *kpad = data; + struct adp5585_dev *adp5585 = dev_get_drvdata(kpad->dev->parent); + + blocking_notifier_chain_unregister(&adp5585->event_notifier, + &kpad->nb); +} + +static int adp5585_keys_probe(struct platform_device *pdev) +{ + const struct platform_device_id *id = platform_get_device_id(pdev); + struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent); + struct device *dev = &pdev->dev; + struct adp5585_kpad *kpad; + unsigned int revid; + const char *phys; + int error; + + kpad = devm_kzalloc(dev, sizeof(*kpad), GFP_KERNEL); + if (!kpad) + return -ENOMEM; + + if (!adp5585->irq) + return dev_err_probe(dev, -EINVAL, + "IRQ is mandatory for the keypad\n"); + + kpad->dev = dev; + + kpad->input = devm_input_allocate_device(dev); + if (!kpad->input) + return -ENOMEM; + + kpad->info = (const struct adp5585_kpad_chip *)id->driver_data; + if (!kpad->info) + return -ENODEV; + + error = regmap_read(adp5585->regmap, ADP5585_ID, &revid); + if (error) + return dev_err_probe(dev, error, "Failed to read device ID\n"); + + phys = devm_kasprintf(dev, GFP_KERNEL, "%s/input0", pdev->name); + if (!phys) + return -ENOMEM; + + kpad->input->name = pdev->name; + kpad->input->phys = phys; + + kpad->input->id.bustype = BUS_I2C; + kpad->input->id.vendor = 0x0001; + kpad->input->id.product = 0x0001; + kpad->input->id.version = revid & ADP5585_REV_ID_MASK; + + device_set_of_node_from_dev(dev, dev->parent); + + error = adp5585_keys_parse_fw(adp5585, kpad); + if (error) + return error; + + error = adp5585_keys_setup(adp5585, kpad); + if (error) + return error; + + kpad->nb.notifier_call = adp5585_keys_ev_handle; + error = blocking_notifier_chain_register(&adp5585->event_notifier, + &kpad->nb); + if (error) + return error; + + error = devm_add_action_or_reset(dev, adp5585_keys_unreg_notifier, kpad); + if (error) + return error; + + error = input_register_device(kpad->input); + if (error) + return dev_err_probe(dev, error, + "Failed to register input device\n"); + + return 0; +} + +static const struct adp5585_kpad_chip adp5585_kpad_chip_info = { + .max_rows = 6, + .max_cols = 5, + .key_ev_min = ADP5585_ROW5_KEY_EVENT_START, + .key_ev_max = ADP5585_ROW5_KEY_EVENT_END, +}; + +static const struct adp5585_kpad_chip adp5589_kpad_chip_info = { + .max_rows = 8, + .max_cols = 11, + .key_ev_min = ADP5589_KEY_EVENT_START, + .key_ev_max = ADP5589_KEY_EVENT_END, +}; + +static const struct platform_device_id adp5585_keys_id_table[] = { + { "adp5585-keys", (kernel_ulong_t)&adp5585_kpad_chip_info }, + { "adp5589-keys", (kernel_ulong_t)&adp5589_kpad_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(platform, adp5585_keys_id_table); + +static struct platform_driver adp5585_keys_driver = { + .driver = { + .name = "adp5585-keys", + }, + .probe = adp5585_keys_probe, + .id_table = adp5585_keys_id_table, +}; +module_platform_driver(adp5585_keys_driver); + +MODULE_AUTHOR("Nuno Sá <nuno.sa@analog.com>"); +MODULE_DESCRIPTION("ADP5585 Keys Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/keyboard/adp5589-keys.c 
b/drivers/input/keyboard/adp5589-keys.c deleted file mode 100644 index 81d0876ee358..000000000000 --- a/drivers/input/keyboard/adp5589-keys.c +++ /dev/null @@ -1,1066 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Description: keypad driver for ADP5589, ADP5585 - * I2C QWERTY Keypad and IO Expander - * Bugs: Enter bugs at http://blackfin.uclinux.org/ - * - * Copyright (C) 2010-2011 Analog Devices Inc. - */ - -#include <linux/bitops.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/workqueue.h> -#include <linux/errno.h> -#include <linux/pm.h> -#include <linux/pm_wakeirq.h> -#include <linux/platform_device.h> -#include <linux/input.h> -#include <linux/i2c.h> -#include <linux/gpio/driver.h> -#include <linux/slab.h> - -#include <linux/input/adp5589.h> - -/* ADP5589/ADP5585 Common Registers */ -#define ADP5589_5_ID 0x00 -#define ADP5589_5_INT_STATUS 0x01 -#define ADP5589_5_STATUS 0x02 -#define ADP5589_5_FIFO_1 0x03 -#define ADP5589_5_FIFO_2 0x04 -#define ADP5589_5_FIFO_3 0x05 -#define ADP5589_5_FIFO_4 0x06 -#define ADP5589_5_FIFO_5 0x07 -#define ADP5589_5_FIFO_6 0x08 -#define ADP5589_5_FIFO_7 0x09 -#define ADP5589_5_FIFO_8 0x0A -#define ADP5589_5_FIFO_9 0x0B -#define ADP5589_5_FIFO_10 0x0C -#define ADP5589_5_FIFO_11 0x0D -#define ADP5589_5_FIFO_12 0x0E -#define ADP5589_5_FIFO_13 0x0F -#define ADP5589_5_FIFO_14 0x10 -#define ADP5589_5_FIFO_15 0x11 -#define ADP5589_5_FIFO_16 0x12 -#define ADP5589_5_GPI_INT_STAT_A 0x13 -#define ADP5589_5_GPI_INT_STAT_B 0x14 - -/* ADP5589 Registers */ -#define ADP5589_GPI_INT_STAT_C 0x15 -#define ADP5589_GPI_STATUS_A 0x16 -#define ADP5589_GPI_STATUS_B 0x17 -#define ADP5589_GPI_STATUS_C 0x18 -#define ADP5589_RPULL_CONFIG_A 0x19 -#define ADP5589_RPULL_CONFIG_B 0x1A -#define ADP5589_RPULL_CONFIG_C 0x1B -#define ADP5589_RPULL_CONFIG_D 0x1C -#define ADP5589_RPULL_CONFIG_E 0x1D -#define ADP5589_GPI_INT_LEVEL_A 0x1E -#define ADP5589_GPI_INT_LEVEL_B 0x1F -#define ADP5589_GPI_INT_LEVEL_C 0x20 -#define ADP5589_GPI_EVENT_EN_A 0x21 -#define ADP5589_GPI_EVENT_EN_B 0x22 -#define ADP5589_GPI_EVENT_EN_C 0x23 -#define ADP5589_GPI_INTERRUPT_EN_A 0x24 -#define ADP5589_GPI_INTERRUPT_EN_B 0x25 -#define ADP5589_GPI_INTERRUPT_EN_C 0x26 -#define ADP5589_DEBOUNCE_DIS_A 0x27 -#define ADP5589_DEBOUNCE_DIS_B 0x28 -#define ADP5589_DEBOUNCE_DIS_C 0x29 -#define ADP5589_GPO_DATA_OUT_A 0x2A -#define ADP5589_GPO_DATA_OUT_B 0x2B -#define ADP5589_GPO_DATA_OUT_C 0x2C -#define ADP5589_GPO_OUT_MODE_A 0x2D -#define ADP5589_GPO_OUT_MODE_B 0x2E -#define ADP5589_GPO_OUT_MODE_C 0x2F -#define ADP5589_GPIO_DIRECTION_A 0x30 -#define ADP5589_GPIO_DIRECTION_B 0x31 -#define ADP5589_GPIO_DIRECTION_C 0x32 -#define ADP5589_UNLOCK1 0x33 -#define ADP5589_UNLOCK2 0x34 -#define ADP5589_EXT_LOCK_EVENT 0x35 -#define ADP5589_UNLOCK_TIMERS 0x36 -#define ADP5589_LOCK_CFG 0x37 -#define ADP5589_RESET1_EVENT_A 0x38 -#define ADP5589_RESET1_EVENT_B 0x39 -#define ADP5589_RESET1_EVENT_C 0x3A -#define ADP5589_RESET2_EVENT_A 0x3B -#define ADP5589_RESET2_EVENT_B 0x3C -#define ADP5589_RESET_CFG 0x3D -#define ADP5589_PWM_OFFT_LOW 0x3E -#define ADP5589_PWM_OFFT_HIGH 0x3F -#define ADP5589_PWM_ONT_LOW 0x40 -#define ADP5589_PWM_ONT_HIGH 0x41 -#define ADP5589_PWM_CFG 0x42 -#define ADP5589_CLOCK_DIV_CFG 0x43 -#define ADP5589_LOGIC_1_CFG 0x44 -#define ADP5589_LOGIC_2_CFG 0x45 -#define ADP5589_LOGIC_FF_CFG 0x46 -#define ADP5589_LOGIC_INT_EVENT_EN 0x47 -#define ADP5589_POLL_PTIME_CFG 0x48 -#define ADP5589_PIN_CONFIG_A 0x49 -#define ADP5589_PIN_CONFIG_B 0x4A -#define 
ADP5589_PIN_CONFIG_C 0x4B -#define ADP5589_PIN_CONFIG_D 0x4C -#define ADP5589_GENERAL_CFG 0x4D -#define ADP5589_INT_EN 0x4E - -/* ADP5585 Registers */ -#define ADP5585_GPI_STATUS_A 0x15 -#define ADP5585_GPI_STATUS_B 0x16 -#define ADP5585_RPULL_CONFIG_A 0x17 -#define ADP5585_RPULL_CONFIG_B 0x18 -#define ADP5585_RPULL_CONFIG_C 0x19 -#define ADP5585_RPULL_CONFIG_D 0x1A -#define ADP5585_GPI_INT_LEVEL_A 0x1B -#define ADP5585_GPI_INT_LEVEL_B 0x1C -#define ADP5585_GPI_EVENT_EN_A 0x1D -#define ADP5585_GPI_EVENT_EN_B 0x1E -#define ADP5585_GPI_INTERRUPT_EN_A 0x1F -#define ADP5585_GPI_INTERRUPT_EN_B 0x20 -#define ADP5585_DEBOUNCE_DIS_A 0x21 -#define ADP5585_DEBOUNCE_DIS_B 0x22 -#define ADP5585_GPO_DATA_OUT_A 0x23 -#define ADP5585_GPO_DATA_OUT_B 0x24 -#define ADP5585_GPO_OUT_MODE_A 0x25 -#define ADP5585_GPO_OUT_MODE_B 0x26 -#define ADP5585_GPIO_DIRECTION_A 0x27 -#define ADP5585_GPIO_DIRECTION_B 0x28 -#define ADP5585_RESET1_EVENT_A 0x29 -#define ADP5585_RESET1_EVENT_B 0x2A -#define ADP5585_RESET1_EVENT_C 0x2B -#define ADP5585_RESET2_EVENT_A 0x2C -#define ADP5585_RESET2_EVENT_B 0x2D -#define ADP5585_RESET_CFG 0x2E -#define ADP5585_PWM_OFFT_LOW 0x2F -#define ADP5585_PWM_OFFT_HIGH 0x30 -#define ADP5585_PWM_ONT_LOW 0x31 -#define ADP5585_PWM_ONT_HIGH 0x32 -#define ADP5585_PWM_CFG 0x33 -#define ADP5585_LOGIC_CFG 0x34 -#define ADP5585_LOGIC_FF_CFG 0x35 -#define ADP5585_LOGIC_INT_EVENT_EN 0x36 -#define ADP5585_POLL_PTIME_CFG 0x37 -#define ADP5585_PIN_CONFIG_A 0x38 -#define ADP5585_PIN_CONFIG_B 0x39 -#define ADP5585_PIN_CONFIG_D 0x3A -#define ADP5585_GENERAL_CFG 0x3B -#define ADP5585_INT_EN 0x3C - -/* ID Register */ -#define ADP5589_5_DEVICE_ID_MASK 0xF -#define ADP5589_5_MAN_ID_MASK 0xF -#define ADP5589_5_MAN_ID_SHIFT 4 -#define ADP5589_5_MAN_ID 0x02 - -/* GENERAL_CFG Register */ -#define OSC_EN BIT(7) -#define CORE_CLK(x) (((x) & 0x3) << 5) -#define LCK_TRK_LOGIC BIT(4) /* ADP5589 only */ -#define LCK_TRK_GPI BIT(3) /* ADP5589 only */ -#define INT_CFG BIT(1) -#define RST_CFG BIT(0) - -/* INT_EN Register */ -#define LOGIC2_IEN BIT(5) /* ADP5589 only */ -#define LOGIC1_IEN BIT(4) -#define LOCK_IEN BIT(3) /* ADP5589 only */ -#define OVRFLOW_IEN BIT(2) -#define GPI_IEN BIT(1) -#define EVENT_IEN BIT(0) - -/* Interrupt Status Register */ -#define LOGIC2_INT BIT(5) /* ADP5589 only */ -#define LOGIC1_INT BIT(4) -#define LOCK_INT BIT(3) /* ADP5589 only */ -#define OVRFLOW_INT BIT(2) -#define GPI_INT BIT(1) -#define EVENT_INT BIT(0) - -/* STATUS Register */ -#define LOGIC2_STAT BIT(7) /* ADP5589 only */ -#define LOGIC1_STAT BIT(6) -#define LOCK_STAT BIT(5) /* ADP5589 only */ -#define KEC 0x1F - -/* PIN_CONFIG_D Register */ -#define C4_EXTEND_CFG BIT(6) /* RESET2 */ -#define R4_EXTEND_CFG BIT(5) /* RESET1 */ - -/* LOCK_CFG */ -#define LOCK_EN BIT(0) - -#define PTIME_MASK 0x3 -#define LTIME_MASK 0x3 /* ADP5589 only */ - -/* Key Event Register xy */ -#define KEY_EV_PRESSED BIT(7) -#define KEY_EV_MASK 0x7F - -#define KEYP_MAX_EVENT 16 -#define ADP5589_MAXGPIO 19 -#define ADP5585_MAXGPIO 11 /* 10 on the ADP5585-01, 11 on ADP5585-02 */ - -enum { - ADP5589, - ADP5585_01, - ADP5585_02 -}; - -struct adp_constants { - u8 maxgpio; - u8 keymapsize; - u8 gpi_pin_row_base; - u8 gpi_pin_row_end; - u8 gpi_pin_col_base; - u8 gpi_pin_base; - u8 gpi_pin_end; - u8 gpimapsize_max; - u8 max_row_num; - u8 max_col_num; - u8 row_mask; - u8 col_mask; - u8 col_shift; - u8 c4_extend_cfg; - u8 (*bank) (u8 offset); - u8 (*bit) (u8 offset); - u8 (*reg) (u8 reg); -}; - -struct adp5589_kpad { - struct i2c_client *client; - struct input_dev *input; - 
const struct adp_constants *var; - unsigned short keycode[ADP5589_KEYMAPSIZE]; - const struct adp5589_gpi_map *gpimap; - unsigned short gpimapsize; - unsigned extend_cfg; - bool is_adp5585; - bool support_row5; -#ifdef CONFIG_GPIOLIB - unsigned char gpiomap[ADP5589_MAXGPIO]; - struct gpio_chip gc; - struct mutex gpio_lock; /* Protect cached dir, dat_out */ - u8 dat_out[3]; - u8 dir[3]; -#endif -}; - -/* - * ADP5589 / ADP5585 derivative / variant handling - */ - - -/* ADP5589 */ - -static unsigned char adp5589_bank(unsigned char offset) -{ - return offset >> 3; -} - -static unsigned char adp5589_bit(unsigned char offset) -{ - return 1u << (offset & 0x7); -} - -static unsigned char adp5589_reg(unsigned char reg) -{ - return reg; -} - -static const struct adp_constants const_adp5589 = { - .maxgpio = ADP5589_MAXGPIO, - .keymapsize = ADP5589_KEYMAPSIZE, - .gpi_pin_row_base = ADP5589_GPI_PIN_ROW_BASE, - .gpi_pin_row_end = ADP5589_GPI_PIN_ROW_END, - .gpi_pin_col_base = ADP5589_GPI_PIN_COL_BASE, - .gpi_pin_base = ADP5589_GPI_PIN_BASE, - .gpi_pin_end = ADP5589_GPI_PIN_END, - .gpimapsize_max = ADP5589_GPIMAPSIZE_MAX, - .c4_extend_cfg = 12, - .max_row_num = ADP5589_MAX_ROW_NUM, - .max_col_num = ADP5589_MAX_COL_NUM, - .row_mask = ADP5589_ROW_MASK, - .col_mask = ADP5589_COL_MASK, - .col_shift = ADP5589_COL_SHIFT, - .bank = adp5589_bank, - .bit = adp5589_bit, - .reg = adp5589_reg, -}; - -/* ADP5585 */ - -static unsigned char adp5585_bank(unsigned char offset) -{ - return offset > ADP5585_MAX_ROW_NUM; -} - -static unsigned char adp5585_bit(unsigned char offset) -{ - return (offset > ADP5585_MAX_ROW_NUM) ? - 1u << (offset - ADP5585_COL_SHIFT) : 1u << offset; -} - -static const unsigned char adp5585_reg_lut[] = { - [ADP5589_GPI_STATUS_A] = ADP5585_GPI_STATUS_A, - [ADP5589_GPI_STATUS_B] = ADP5585_GPI_STATUS_B, - [ADP5589_RPULL_CONFIG_A] = ADP5585_RPULL_CONFIG_A, - [ADP5589_RPULL_CONFIG_B] = ADP5585_RPULL_CONFIG_B, - [ADP5589_RPULL_CONFIG_C] = ADP5585_RPULL_CONFIG_C, - [ADP5589_RPULL_CONFIG_D] = ADP5585_RPULL_CONFIG_D, - [ADP5589_GPI_INT_LEVEL_A] = ADP5585_GPI_INT_LEVEL_A, - [ADP5589_GPI_INT_LEVEL_B] = ADP5585_GPI_INT_LEVEL_B, - [ADP5589_GPI_EVENT_EN_A] = ADP5585_GPI_EVENT_EN_A, - [ADP5589_GPI_EVENT_EN_B] = ADP5585_GPI_EVENT_EN_B, - [ADP5589_GPI_INTERRUPT_EN_A] = ADP5585_GPI_INTERRUPT_EN_A, - [ADP5589_GPI_INTERRUPT_EN_B] = ADP5585_GPI_INTERRUPT_EN_B, - [ADP5589_DEBOUNCE_DIS_A] = ADP5585_DEBOUNCE_DIS_A, - [ADP5589_DEBOUNCE_DIS_B] = ADP5585_DEBOUNCE_DIS_B, - [ADP5589_GPO_DATA_OUT_A] = ADP5585_GPO_DATA_OUT_A, - [ADP5589_GPO_DATA_OUT_B] = ADP5585_GPO_DATA_OUT_B, - [ADP5589_GPO_OUT_MODE_A] = ADP5585_GPO_OUT_MODE_A, - [ADP5589_GPO_OUT_MODE_B] = ADP5585_GPO_OUT_MODE_B, - [ADP5589_GPIO_DIRECTION_A] = ADP5585_GPIO_DIRECTION_A, - [ADP5589_GPIO_DIRECTION_B] = ADP5585_GPIO_DIRECTION_B, - [ADP5589_RESET1_EVENT_A] = ADP5585_RESET1_EVENT_A, - [ADP5589_RESET1_EVENT_B] = ADP5585_RESET1_EVENT_B, - [ADP5589_RESET1_EVENT_C] = ADP5585_RESET1_EVENT_C, - [ADP5589_RESET2_EVENT_A] = ADP5585_RESET2_EVENT_A, - [ADP5589_RESET2_EVENT_B] = ADP5585_RESET2_EVENT_B, - [ADP5589_RESET_CFG] = ADP5585_RESET_CFG, - [ADP5589_PWM_OFFT_LOW] = ADP5585_PWM_OFFT_LOW, - [ADP5589_PWM_OFFT_HIGH] = ADP5585_PWM_OFFT_HIGH, - [ADP5589_PWM_ONT_LOW] = ADP5585_PWM_ONT_LOW, - [ADP5589_PWM_ONT_HIGH] = ADP5585_PWM_ONT_HIGH, - [ADP5589_PWM_CFG] = ADP5585_PWM_CFG, - [ADP5589_LOGIC_1_CFG] = ADP5585_LOGIC_CFG, - [ADP5589_LOGIC_FF_CFG] = ADP5585_LOGIC_FF_CFG, - [ADP5589_LOGIC_INT_EVENT_EN] = ADP5585_LOGIC_INT_EVENT_EN, - [ADP5589_POLL_PTIME_CFG] = 
ADP5585_POLL_PTIME_CFG, - [ADP5589_PIN_CONFIG_A] = ADP5585_PIN_CONFIG_A, - [ADP5589_PIN_CONFIG_B] = ADP5585_PIN_CONFIG_B, - [ADP5589_PIN_CONFIG_D] = ADP5585_PIN_CONFIG_D, - [ADP5589_GENERAL_CFG] = ADP5585_GENERAL_CFG, - [ADP5589_INT_EN] = ADP5585_INT_EN, -}; - -static unsigned char adp5585_reg(unsigned char reg) -{ - return adp5585_reg_lut[reg]; -} - -static const struct adp_constants const_adp5585 = { - .maxgpio = ADP5585_MAXGPIO, - .keymapsize = ADP5585_KEYMAPSIZE, - .gpi_pin_row_base = ADP5585_GPI_PIN_ROW_BASE, - .gpi_pin_row_end = ADP5585_GPI_PIN_ROW_END, - .gpi_pin_col_base = ADP5585_GPI_PIN_COL_BASE, - .gpi_pin_base = ADP5585_GPI_PIN_BASE, - .gpi_pin_end = ADP5585_GPI_PIN_END, - .gpimapsize_max = ADP5585_GPIMAPSIZE_MAX, - .c4_extend_cfg = 10, - .max_row_num = ADP5585_MAX_ROW_NUM, - .max_col_num = ADP5585_MAX_COL_NUM, - .row_mask = ADP5585_ROW_MASK, - .col_mask = ADP5585_COL_MASK, - .col_shift = ADP5585_COL_SHIFT, - .bank = adp5585_bank, - .bit = adp5585_bit, - .reg = adp5585_reg, -}; - -static int adp5589_read(struct i2c_client *client, u8 reg) -{ - int ret = i2c_smbus_read_byte_data(client, reg); - - if (ret < 0) - dev_err(&client->dev, "Read Error\n"); - - return ret; -} - -static int adp5589_write(struct i2c_client *client, u8 reg, u8 val) -{ - return i2c_smbus_write_byte_data(client, reg, val); -} - -#ifdef CONFIG_GPIOLIB -static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off) -{ - struct adp5589_kpad *kpad = gpiochip_get_data(chip); - unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); - unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); - int val; - - mutex_lock(&kpad->gpio_lock); - if (kpad->dir[bank] & bit) - val = kpad->dat_out[bank]; - else - val = adp5589_read(kpad->client, - kpad->var->reg(ADP5589_GPI_STATUS_A) + bank); - mutex_unlock(&kpad->gpio_lock); - - return !!(val & bit); -} - -static void adp5589_gpio_set_value(struct gpio_chip *chip, - unsigned off, int val) -{ - struct adp5589_kpad *kpad = gpiochip_get_data(chip); - unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); - unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); - - guard(mutex)(&kpad->gpio_lock); - - if (val) - kpad->dat_out[bank] |= bit; - else - kpad->dat_out[bank] &= ~bit; - - adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A) + - bank, kpad->dat_out[bank]); -} - -static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off) -{ - struct adp5589_kpad *kpad = gpiochip_get_data(chip); - unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); - unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); - - guard(mutex)(&kpad->gpio_lock); - - kpad->dir[bank] &= ~bit; - return adp5589_write(kpad->client, - kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, - kpad->dir[bank]); -} - -static int adp5589_gpio_direction_output(struct gpio_chip *chip, - unsigned off, int val) -{ - struct adp5589_kpad *kpad = gpiochip_get_data(chip); - unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); - unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); - int error; - - guard(mutex)(&kpad->gpio_lock); - - kpad->dir[bank] |= bit; - - if (val) - kpad->dat_out[bank] |= bit; - else - kpad->dat_out[bank] &= ~bit; - - error = adp5589_write(kpad->client, - kpad->var->reg(ADP5589_GPO_DATA_OUT_A) + bank, - kpad->dat_out[bank]); - if (error) - return error; - - error = adp5589_write(kpad->client, - kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, - kpad->dir[bank]); - if (error) - return error; - - return 0; -} - -static int adp5589_build_gpiomap(struct adp5589_kpad 
*kpad, - const struct adp5589_kpad_platform_data *pdata) -{ - bool pin_used[ADP5589_MAXGPIO]; - int n_unused = 0; - int i; - - memset(pin_used, false, sizeof(pin_used)); - - for (i = 0; i < kpad->var->maxgpio; i++) - if (pdata->keypad_en_mask & BIT(i)) - pin_used[i] = true; - - for (i = 0; i < kpad->gpimapsize; i++) - pin_used[kpad->gpimap[i].pin - kpad->var->gpi_pin_base] = true; - - if (kpad->extend_cfg & R4_EXTEND_CFG) - pin_used[4] = true; - - if (kpad->extend_cfg & C4_EXTEND_CFG) - pin_used[kpad->var->c4_extend_cfg] = true; - - if (!kpad->support_row5) - pin_used[5] = true; - - for (i = 0; i < kpad->var->maxgpio; i++) - if (!pin_used[i]) - kpad->gpiomap[n_unused++] = i; - - return n_unused; -} - -static int adp5589_gpio_add(struct adp5589_kpad *kpad) -{ - struct device *dev = &kpad->client->dev; - const struct adp5589_kpad_platform_data *pdata = dev_get_platdata(dev); - const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data; - int i, error; - - if (!gpio_data) - return 0; - - kpad->gc.parent = dev; - kpad->gc.ngpio = adp5589_build_gpiomap(kpad, pdata); - if (kpad->gc.ngpio == 0) { - dev_info(dev, "No unused gpios left to export\n"); - return 0; - } - - kpad->gc.direction_input = adp5589_gpio_direction_input; - kpad->gc.direction_output = adp5589_gpio_direction_output; - kpad->gc.get = adp5589_gpio_get_value; - kpad->gc.set = adp5589_gpio_set_value; - kpad->gc.can_sleep = 1; - - kpad->gc.base = gpio_data->gpio_start; - kpad->gc.label = kpad->client->name; - kpad->gc.owner = THIS_MODULE; - - mutex_init(&kpad->gpio_lock); - - error = devm_gpiochip_add_data(dev, &kpad->gc, kpad); - if (error) - return error; - - for (i = 0; i <= kpad->var->bank(kpad->var->maxgpio); i++) { - kpad->dat_out[i] = adp5589_read(kpad->client, kpad->var->reg( - ADP5589_GPO_DATA_OUT_A) + i); - kpad->dir[i] = adp5589_read(kpad->client, kpad->var->reg( - ADP5589_GPIO_DIRECTION_A) + i); - } - - return 0; -} -#else -static inline int adp5589_gpio_add(struct adp5589_kpad *kpad) -{ - return 0; -} -#endif - -static void adp5589_report_switches(struct adp5589_kpad *kpad, - int key, int key_val) -{ - int i; - - for (i = 0; i < kpad->gpimapsize; i++) { - if (key_val == kpad->gpimap[i].pin) { - input_report_switch(kpad->input, - kpad->gpimap[i].sw_evt, - key & KEY_EV_PRESSED); - break; - } - } -} - -static void adp5589_report_events(struct adp5589_kpad *kpad, int ev_cnt) -{ - int i; - - for (i = 0; i < ev_cnt; i++) { - int key = adp5589_read(kpad->client, ADP5589_5_FIFO_1 + i); - int key_val = key & KEY_EV_MASK; - - if (key_val >= kpad->var->gpi_pin_base && - key_val <= kpad->var->gpi_pin_end) { - adp5589_report_switches(kpad, key, key_val); - } else { - input_report_key(kpad->input, - kpad->keycode[key_val - 1], - key & KEY_EV_PRESSED); - } - } -} - -static irqreturn_t adp5589_irq(int irq, void *handle) -{ - struct adp5589_kpad *kpad = handle; - struct i2c_client *client = kpad->client; - int status, ev_cnt; - - status = adp5589_read(client, ADP5589_5_INT_STATUS); - - if (status & OVRFLOW_INT) /* Unlikely and should never happen */ - dev_err(&client->dev, "Event Overflow Error\n"); - - if (status & EVENT_INT) { - ev_cnt = adp5589_read(client, ADP5589_5_STATUS) & KEC; - if (ev_cnt) { - adp5589_report_events(kpad, ev_cnt); - input_sync(kpad->input); - } - } - - adp5589_write(client, ADP5589_5_INT_STATUS, status); /* Status is W1C */ - - return IRQ_HANDLED; -} - -static int adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key) -{ - int i; - - for (i = 0; i < kpad->var->keymapsize; i++) - if (key == 
kpad->keycode[i]) - return (i + 1) | KEY_EV_PRESSED; - - dev_err(&kpad->client->dev, "RESET/UNLOCK key not in keycode map\n"); - - return -EINVAL; -} - -static int adp5589_setup(struct adp5589_kpad *kpad) -{ - struct i2c_client *client = kpad->client; - const struct adp5589_kpad_platform_data *pdata = - dev_get_platdata(&client->dev); - u8 (*reg) (u8) = kpad->var->reg; - unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0; - unsigned char pull_mask = 0; - int i, ret; - - ret = adp5589_write(client, reg(ADP5589_PIN_CONFIG_A), - pdata->keypad_en_mask & kpad->var->row_mask); - ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_B), - (pdata->keypad_en_mask >> kpad->var->col_shift) & - kpad->var->col_mask); - - if (!kpad->is_adp5585) - ret |= adp5589_write(client, ADP5589_PIN_CONFIG_C, - (pdata->keypad_en_mask >> 16) & 0xFF); - - if (!kpad->is_adp5585 && pdata->en_keylock) { - ret |= adp5589_write(client, ADP5589_UNLOCK1, - pdata->unlock_key1); - ret |= adp5589_write(client, ADP5589_UNLOCK2, - pdata->unlock_key2); - ret |= adp5589_write(client, ADP5589_UNLOCK_TIMERS, - pdata->unlock_timer & LTIME_MASK); - ret |= adp5589_write(client, ADP5589_LOCK_CFG, LOCK_EN); - } - - for (i = 0; i < KEYP_MAX_EVENT; i++) - ret |= adp5589_read(client, ADP5589_5_FIFO_1 + i); - - for (i = 0; i < pdata->gpimapsize; i++) { - unsigned short pin = pdata->gpimap[i].pin; - - if (pin <= kpad->var->gpi_pin_row_end) { - evt_mode1 |= BIT(pin - kpad->var->gpi_pin_row_base); - } else { - evt_mode2 |= - BIT(pin - kpad->var->gpi_pin_col_base) & 0xFF; - if (!kpad->is_adp5585) - evt_mode3 |= - BIT(pin - kpad->var->gpi_pin_col_base) >> 8; - } - } - - if (pdata->gpimapsize) { - ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_A), - evt_mode1); - ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_B), - evt_mode2); - if (!kpad->is_adp5585) - ret |= adp5589_write(client, - reg(ADP5589_GPI_EVENT_EN_C), - evt_mode3); - } - - if (pdata->pull_dis_mask & pdata->pullup_en_100k & - pdata->pullup_en_300k & pdata->pulldown_en_300k) - dev_warn(&client->dev, "Conflicting pull resistor config\n"); - - for (i = 0; i <= kpad->var->max_row_num; i++) { - unsigned int val = 0, bit = BIT(i); - if (pdata->pullup_en_300k & bit) - val = 0; - else if (pdata->pulldown_en_300k & bit) - val = 1; - else if (pdata->pullup_en_100k & bit) - val = 2; - else if (pdata->pull_dis_mask & bit) - val = 3; - - pull_mask |= val << (2 * (i & 0x3)); - - if (i % 4 == 3 || i == kpad->var->max_row_num) { - ret |= adp5589_write(client, reg(ADP5585_RPULL_CONFIG_A) - + (i >> 2), pull_mask); - pull_mask = 0; - } - } - - for (i = 0; i <= kpad->var->max_col_num; i++) { - unsigned int val = 0, bit = BIT(i + kpad->var->col_shift); - if (pdata->pullup_en_300k & bit) - val = 0; - else if (pdata->pulldown_en_300k & bit) - val = 1; - else if (pdata->pullup_en_100k & bit) - val = 2; - else if (pdata->pull_dis_mask & bit) - val = 3; - - pull_mask |= val << (2 * (i & 0x3)); - - if (i % 4 == 3 || i == kpad->var->max_col_num) { - ret |= adp5589_write(client, - reg(ADP5585_RPULL_CONFIG_C) + - (i >> 2), pull_mask); - pull_mask = 0; - } - } - - if (pdata->reset1_key_1 && pdata->reset1_key_2 && pdata->reset1_key_3) { - ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_A), - adp5589_get_evcode(kpad, - pdata->reset1_key_1)); - ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_B), - adp5589_get_evcode(kpad, - pdata->reset1_key_2)); - ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_C), - adp5589_get_evcode(kpad, - pdata->reset1_key_3)); - kpad->extend_cfg |= 
R4_EXTEND_CFG; - } - - if (pdata->reset2_key_1 && pdata->reset2_key_2) { - ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_A), - adp5589_get_evcode(kpad, - pdata->reset2_key_1)); - ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_B), - adp5589_get_evcode(kpad, - pdata->reset2_key_2)); - kpad->extend_cfg |= C4_EXTEND_CFG; - } - - if (kpad->extend_cfg) { - ret |= adp5589_write(client, reg(ADP5589_RESET_CFG), - pdata->reset_cfg); - ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_D), - kpad->extend_cfg); - } - - ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_A), - pdata->debounce_dis_mask & kpad->var->row_mask); - - ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_B), - (pdata->debounce_dis_mask >> kpad->var->col_shift) - & kpad->var->col_mask); - - if (!kpad->is_adp5585) - ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_C), - (pdata->debounce_dis_mask >> 16) & 0xFF); - - ret |= adp5589_write(client, reg(ADP5589_POLL_PTIME_CFG), - pdata->scan_cycle_time & PTIME_MASK); - ret |= adp5589_write(client, ADP5589_5_INT_STATUS, - (kpad->is_adp5585 ? 0 : LOGIC2_INT) | - LOGIC1_INT | OVRFLOW_INT | - (kpad->is_adp5585 ? 0 : LOCK_INT) | - GPI_INT | EVENT_INT); /* Status is W1C */ - - ret |= adp5589_write(client, reg(ADP5589_GENERAL_CFG), - INT_CFG | OSC_EN | CORE_CLK(3)); - ret |= adp5589_write(client, reg(ADP5589_INT_EN), - OVRFLOW_IEN | GPI_IEN | EVENT_IEN); - - if (ret < 0) { - dev_err(&client->dev, "Write Error\n"); - return ret; - } - - return 0; -} - -static void adp5589_report_switch_state(struct adp5589_kpad *kpad) -{ - int gpi_stat_tmp, pin_loc; - int i; - int gpi_stat1 = adp5589_read(kpad->client, - kpad->var->reg(ADP5589_GPI_STATUS_A)); - int gpi_stat2 = adp5589_read(kpad->client, - kpad->var->reg(ADP5589_GPI_STATUS_B)); - int gpi_stat3 = !kpad->is_adp5585 ? 
- adp5589_read(kpad->client, ADP5589_GPI_STATUS_C) : 0; - - for (i = 0; i < kpad->gpimapsize; i++) { - unsigned short pin = kpad->gpimap[i].pin; - - if (pin <= kpad->var->gpi_pin_row_end) { - gpi_stat_tmp = gpi_stat1; - pin_loc = pin - kpad->var->gpi_pin_row_base; - } else if ((pin - kpad->var->gpi_pin_col_base) < 8) { - gpi_stat_tmp = gpi_stat2; - pin_loc = pin - kpad->var->gpi_pin_col_base; - } else { - gpi_stat_tmp = gpi_stat3; - pin_loc = pin - kpad->var->gpi_pin_col_base - 8; - } - - if (gpi_stat_tmp < 0) { - dev_err(&kpad->client->dev, - "Can't read GPIO_DAT_STAT switch %d, default to OFF\n", - pin); - gpi_stat_tmp = 0; - } - - input_report_switch(kpad->input, - kpad->gpimap[i].sw_evt, - !(gpi_stat_tmp & BIT(pin_loc))); - } - - input_sync(kpad->input); -} - -static int adp5589_keypad_add(struct adp5589_kpad *kpad, unsigned int revid) -{ - struct i2c_client *client = kpad->client; - const struct adp5589_kpad_platform_data *pdata = - dev_get_platdata(&client->dev); - struct input_dev *input; - unsigned int i; - int error; - - if (!((pdata->keypad_en_mask & kpad->var->row_mask) && - (pdata->keypad_en_mask >> kpad->var->col_shift)) || - !pdata->keymap) { - dev_err(&client->dev, "no rows, cols or keymap from pdata\n"); - return -EINVAL; - } - - if (pdata->keymapsize != kpad->var->keymapsize) { - dev_err(&client->dev, "invalid keymapsize\n"); - return -EINVAL; - } - - if (!pdata->gpimap && pdata->gpimapsize) { - dev_err(&client->dev, "invalid gpimap from pdata\n"); - return -EINVAL; - } - - if (pdata->gpimapsize > kpad->var->gpimapsize_max) { - dev_err(&client->dev, "invalid gpimapsize\n"); - return -EINVAL; - } - - for (i = 0; i < pdata->gpimapsize; i++) { - unsigned short pin = pdata->gpimap[i].pin; - - if (pin < kpad->var->gpi_pin_base || - pin > kpad->var->gpi_pin_end) { - dev_err(&client->dev, "invalid gpi pin data\n"); - return -EINVAL; - } - - if (BIT(pin - kpad->var->gpi_pin_row_base) & - pdata->keypad_en_mask) { - dev_err(&client->dev, "invalid gpi row/col data\n"); - return -EINVAL; - } - } - - if (!client->irq) { - dev_err(&client->dev, "no IRQ?\n"); - return -EINVAL; - } - - input = devm_input_allocate_device(&client->dev); - if (!input) - return -ENOMEM; - - kpad->input = input; - - input->name = client->name; - input->phys = "adp5589-keys/input0"; - input->dev.parent = &client->dev; - - input_set_drvdata(input, kpad); - - input->id.bustype = BUS_I2C; - input->id.vendor = 0x0001; - input->id.product = 0x0001; - input->id.version = revid; - - input->keycodesize = sizeof(kpad->keycode[0]); - input->keycodemax = pdata->keymapsize; - input->keycode = kpad->keycode; - - memcpy(kpad->keycode, pdata->keymap, - pdata->keymapsize * input->keycodesize); - - kpad->gpimap = pdata->gpimap; - kpad->gpimapsize = pdata->gpimapsize; - - /* setup input device */ - __set_bit(EV_KEY, input->evbit); - - if (pdata->repeat) - __set_bit(EV_REP, input->evbit); - - for (i = 0; i < input->keycodemax; i++) - if (kpad->keycode[i] <= KEY_MAX) - __set_bit(kpad->keycode[i], input->keybit); - __clear_bit(KEY_RESERVED, input->keybit); - - if (kpad->gpimapsize) - __set_bit(EV_SW, input->evbit); - for (i = 0; i < kpad->gpimapsize; i++) - __set_bit(kpad->gpimap[i].sw_evt, input->swbit); - - error = input_register_device(input); - if (error) { - dev_err(&client->dev, "unable to register input device\n"); - return error; - } - - error = devm_request_threaded_irq(&client->dev, client->irq, - NULL, adp5589_irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - client->dev.driver->name, kpad); - if (error) { - 
dev_err(&client->dev, "unable to request irq %d\n", client->irq); - return error; - } - - return 0; -} - -static void adp5589_clear_config(void *data) -{ - struct adp5589_kpad *kpad = data; - - adp5589_write(kpad->client, kpad->var->reg(ADP5589_GENERAL_CFG), 0); -} - -static int adp5589_probe(struct i2c_client *client) -{ - const struct i2c_device_id *id = i2c_client_get_device_id(client); - struct adp5589_kpad *kpad; - const struct adp5589_kpad_platform_data *pdata = - dev_get_platdata(&client->dev); - unsigned int revid; - int error, ret; - - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); - return -EIO; - } - - if (!pdata) { - dev_err(&client->dev, "no platform data?\n"); - return -EINVAL; - } - - kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL); - if (!kpad) - return -ENOMEM; - - kpad->client = client; - - switch (id->driver_data) { - case ADP5585_02: - kpad->support_row5 = true; - fallthrough; - case ADP5585_01: - kpad->is_adp5585 = true; - kpad->var = &const_adp5585; - break; - case ADP5589: - kpad->support_row5 = true; - kpad->var = &const_adp5589; - break; - } - - error = devm_add_action_or_reset(&client->dev, adp5589_clear_config, - kpad); - if (error) - return error; - - ret = adp5589_read(client, ADP5589_5_ID); - if (ret < 0) - return ret; - - revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK; - - if (pdata->keymapsize) { - error = adp5589_keypad_add(kpad, revid); - if (error) - return error; - } - - error = adp5589_setup(kpad); - if (error) - return error; - - if (kpad->gpimapsize) - adp5589_report_switch_state(kpad); - - error = adp5589_gpio_add(kpad); - if (error) - return error; - - dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq); - return 0; -} - -static int adp5589_suspend(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct adp5589_kpad *kpad = i2c_get_clientdata(client); - - if (kpad->input) - disable_irq(client->irq); - - return 0; -} - -static int adp5589_resume(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct adp5589_kpad *kpad = i2c_get_clientdata(client); - - if (kpad->input) - enable_irq(client->irq); - - return 0; -} - -static DEFINE_SIMPLE_DEV_PM_OPS(adp5589_dev_pm_ops, adp5589_suspend, adp5589_resume); - -static const struct i2c_device_id adp5589_id[] = { - {"adp5589-keys", ADP5589}, - {"adp5585-keys", ADP5585_01}, - {"adp5585-02-keys", ADP5585_02}, /* Adds ROW5 to ADP5585 */ - {} -}; - -MODULE_DEVICE_TABLE(i2c, adp5589_id); - -static struct i2c_driver adp5589_driver = { - .driver = { - .name = KBUILD_MODNAME, - .pm = pm_sleep_ptr(&adp5589_dev_pm_ops), - }, - .probe = adp5589_probe, - .id_table = adp5589_id, -}; - -module_i2c_driver(adp5589_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); -MODULE_DESCRIPTION("ADP5589/ADP5585 Keypad driver"); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2ea490b9d370..1492c8552255 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -168,14 +168,14 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, { const char *err; struct cache_sb_disk *s; - struct page *page; + struct folio *folio; unsigned int i; - page = read_cache_page_gfp(bdev->bd_mapping, - SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); - if (IS_ERR(page)) + folio = mapping_read_folio_gfp(bdev->bd_mapping, + SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); + if (IS_ERR(folio)) return 
"IO error"; - s = page_address(page) + offset_in_page(SB_OFFSET); + s = folio_address(folio) + offset_in_folio(folio, SB_OFFSET); sb->offset = le64_to_cpu(s->offset); sb->version = le64_to_cpu(s->version); @@ -272,7 +272,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, *res = s; return NULL; err: - put_page(page); + folio_put(folio); return err; } @@ -1366,7 +1366,7 @@ static CLOSURE_CALLBACK(cached_dev_free) mutex_unlock(&bch_register_lock); if (dc->sb_disk) - put_page(virt_to_page(dc->sb_disk)); + folio_put(virt_to_folio(dc->sb_disk)); if (dc->bdev_file) fput(dc->bdev_file); @@ -2216,7 +2216,7 @@ void bch_cache_release(struct kobject *kobj) free_fifo(&ca->free[i]); if (ca->sb_disk) - put_page(virt_to_page(ca->sb_disk)); + folio_put(virt_to_folio(ca->sb_disk)); if (ca->bdev_file) fput(ca->bdev_file); @@ -2593,7 +2593,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!holder) { ret = -ENOMEM; err = "cannot allocate memory"; - goto out_put_sb_page; + goto out_put_sb_folio; } /* Now reopen in exclusive mode with proper holder */ @@ -2667,8 +2667,8 @@ async_done: out_free_holder: kfree(holder); -out_put_sb_page: - put_page(virt_to_page(sb_disk)); +out_put_sb_folio: + folio_put(virt_to_folio(sb_disk)); out_blkdev_put: if (bdev_file) fput(bdev_file); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 17157c4216a5..5ef43231fe77 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -253,17 +253,35 @@ MODULE_PARM_DESC(max_read_size, "Maximum size of a read request"); static unsigned int max_write_size = 0; module_param(max_write_size, uint, 0644); MODULE_PARM_DESC(max_write_size, "Maximum size of a write request"); -static unsigned get_max_request_size(struct crypt_config *cc, bool wrt) + +static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio) { + struct crypt_config *cc = ti->private; unsigned val, sector_align; - val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size); - if (likely(!val)) - val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE; - if (wrt || cc->used_tag_size) { - if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT)) - val = BIO_MAX_VECS << PAGE_SHIFT; - } - sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size); + bool wrt = op_is_write(bio_op(bio)); + + if (wrt) { + /* + * For zoned devices, splitting write operations creates the + * risk of deadlocking queue freeze operations with zone write + * plugging BIO work when the reminder of a split BIO is + * issued. So always allow the entire BIO to proceed. 
+ */ + if (ti->emulate_zone_append) + return bio_sectors(bio); + + val = min_not_zero(READ_ONCE(max_write_size), + DM_CRYPT_DEFAULT_MAX_WRITE_SIZE); + } else { + val = min_not_zero(READ_ONCE(max_read_size), + DM_CRYPT_DEFAULT_MAX_READ_SIZE); + } + + if (wrt || cc->used_tag_size) + val = min(val, BIO_MAX_VECS << PAGE_SHIFT); + + sector_align = max(bdev_logical_block_size(cc->dev->bdev), + (unsigned)cc->sector_size); val = round_down(val, sector_align); if (unlikely(!val)) val = sector_align; @@ -1192,11 +1210,11 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) return -EINVAL; } - if (bi->tuple_size < cc->used_tag_size) { + if (bi->metadata_size < cc->used_tag_size) { ti->error = "Integrity profile tag size mismatch."; return -EINVAL; } - cc->tuple_size = bi->tuple_size; + cc->tuple_size = bi->metadata_size; if (1 << bi->interval_exp != cc->sector_size) { ti->error = "Integrity profile sector size mismatch."; return -EINVAL; @@ -3496,7 +3514,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) /* * Check if bio is too large, split as needed. */ - max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE); + max_sectors = get_max_request_sectors(ti, bio); if (unlikely(bio_sectors(bio) > max_sectors)) dm_accept_partial_bio(bio, max_sectors); @@ -3733,6 +3751,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) max_t(unsigned int, limits->physical_block_size, cc->sector_size); limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size); limits->dma_alignment = limits->logical_block_size - 1; + + /* + * For zoned dm-crypt targets, there will be no internal splitting of + * write BIOs to avoid exceeding BIO_MAX_VECS vectors per BIO. But + * without respecting this limit, crypt_alloc_buffer() will trigger a + * BUG(). Avoid this by forcing DM core to split write BIOs to this + * limit. 
+ */ + if (ti->emulate_zone_append) + limits->max_hw_sectors = min(limits->max_hw_sectors, + BIO_MAX_VECS << PAGE_SECTORS_SHIFT); } static struct target_type crypt_target = { diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 4395657fa583..efeee0a873c0 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3906,8 +3906,8 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim struct blk_integrity *bi = &limits->integrity; memset(bi, 0, sizeof(*bi)); - bi->tuple_size = ic->tag_size; - bi->tag_size = bi->tuple_size; + bi->metadata_size = ic->tag_size; + bi->tag_size = bi->metadata_size; bi->interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; } @@ -4746,18 +4746,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv ti->error = "Integrity profile not supported"; goto bad; } - /*printk("tag_size: %u, tuple_size: %u\n", bi->tag_size, bi->tuple_size);*/ - if (bi->tuple_size < ic->tag_size) { + /*printk("tag_size: %u, metadata_size: %u\n", bi->tag_size, bi->metadata_size);*/ + if (bi->metadata_size < ic->tag_size) { r = -EINVAL; ti->error = "The integrity profile is smaller than tag size"; goto bad; } - if ((unsigned long)bi->tuple_size > PAGE_SIZE / 2) { + if ((unsigned long)bi->metadata_size > PAGE_SIZE / 2) { r = -EINVAL; ti->error = "Too big tuple size"; goto bad; } - ic->tuple_size = bi->tuple_size; + ic->tuple_size = bi->metadata_size; if (1 << bi->interval_exp != ic->sectors_per_block << SECTOR_SHIFT) { r = -EINVAL; ti->error = "Integrity profile sector size mismatch"; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index a7dc04bd55e5..5bbbdf8fc1bd 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -458,6 +458,7 @@ static void stripe_io_hints(struct dm_target *ti, struct stripe_c *sc = ti->private; unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT; + limits->chunk_sectors = sc->chunk_size; limits->io_min = chunk_size; limits->io_opt = chunk_size * sc->stripes; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 24a857ff6d0b..d9d5e6aa5707 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -2065,8 +2065,10 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, limits->discard_alignment = 0; } - if (!dm_table_supports_write_zeroes(t)) + if (!dm_table_supports_write_zeroes(t)) { limits->max_write_zeroes_sectors = 0; + limits->max_hw_wzeroes_unmap_sectors = 0; + } if (!dm_table_supports_secure_erase(t)) limits->max_secure_erase_sectors = 0; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1726f0f828cc..abfe0392b5a4 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1293,8 +1293,9 @@ out: /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management - * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by - * __send_duplicate_bios(). + * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated + * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced + * by __send_duplicate_bios(). 
* * dm_accept_partial_bio informs the dm that the target only wants to process * additional n_sectors sectors of the bio and the rest of the data should be @@ -1327,11 +1328,19 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) unsigned int bio_sectors = bio_sectors(bio); BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); - BUG_ON(op_is_zone_mgmt(bio_op(bio))); - BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND); BUG_ON(bio_sectors > *tio->len_ptr); BUG_ON(n_sectors > bio_sectors); + if (static_branch_unlikely(&zoned_enabled) && + unlikely(bdev_is_zoned(bio->bi_bdev))) { + enum req_op op = bio_op(bio); + + BUG_ON(op_is_zone_mgmt(op)); + BUG_ON(op == REQ_OP_WRITE); + BUG_ON(op == REQ_OP_WRITE_ZEROES); + BUG_ON(op == REQ_OP_ZONE_APPEND); + } + *tio->len_ptr -= bio_sectors - n_sectors; bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; @@ -1776,19 +1785,35 @@ static void init_clone_info(struct clone_info *ci, struct dm_io *io, } #ifdef CONFIG_BLK_DEV_ZONED -static inline bool dm_zone_bio_needs_split(struct mapped_device *md, - struct bio *bio) +static inline bool dm_zone_bio_needs_split(struct bio *bio) { /* - * For mapped device that need zone append emulation, we must - * split any large BIO that straddles zone boundaries. + * Special case the zone operations that cannot or should not be split. */ - return dm_emulate_zone_append(md) && bio_straddles_zones(bio) && - !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING); + switch (bio_op(bio)) { + case REQ_OP_ZONE_APPEND: + case REQ_OP_ZONE_FINISH: + case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_RESET_ALL: + return false; + default: + break; + } + + /* + * When mapped devices use the block layer zone write plugging, we must + * split any large BIO to the mapped device limits to not submit BIOs + * that span zone boundaries and to avoid potential deadlocks with + * queue freeze operations. + */ + return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio); } + static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) { - return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0); + if (!bio_needs_zone_write_plugging(bio)) + return false; + return blk_zone_plug_bio(bio, 0); } static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci, @@ -1904,8 +1929,7 @@ static blk_status_t __send_zone_reset_all(struct clone_info *ci) } #else -static inline bool dm_zone_bio_needs_split(struct mapped_device *md, - struct bio *bio) +static inline bool dm_zone_bio_needs_split(struct bio *bio) { return false; } @@ -1932,9 +1956,7 @@ static void dm_split_and_process_bio(struct mapped_device *md, is_abnormal = is_abnormal_io(bio); if (static_branch_unlikely(&zoned_enabled)) { - /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */ - need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) && - (is_abnormal || dm_zone_bio_needs_split(md, bio)); + need_split = is_abnormal || dm_zone_bio_needs_split(bio); } else { need_split = is_abnormal; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 0f03b21e66e4..046fe85c76fe 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -636,9 +636,6 @@ static void __mddev_put(struct mddev *mddev) mddev->ctime || mddev->hold_active) return; - /* Array is not configured at all, and not held active, so destroy it */ - set_bit(MD_DELETED, &mddev->flags); - /* * Call queue_work inside the spinlock so that flush_workqueue() after * mddev_find will succeed in waiting for the work to be done. 
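For scale, the cap that the dm-crypt io_hints change above asks DM core to enforce on zoned targets works out to 1 MiB per write BIO on 4 KiB pages; a small worked sketch (the helper name is made up, the page size is an assumption):

/*
 * BIO_MAX_VECS is 256 bio_vecs and PAGE_SECTORS_SHIFT is
 * PAGE_SHIFT - SECTOR_SHIFT = 12 - 9 = 3 with 4 KiB pages, so
 * 256 << 3 = 2048 sectors, i.e. 2048 * 512 B = 1 MiB per BIO.
 */
static inline unsigned int example_zoned_write_cap_sectors(void)
{
	return BIO_MAX_VECS << PAGE_SECTORS_SHIFT;
}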
@@ -873,6 +870,16 @@ void mddev_unlock(struct mddev *mddev) kobject_del(&rdev->kobj); export_rdev(rdev, mddev); } + + /* Call del_gendisk after releasing reconfig_mutex to avoid a + * deadlock (e.g. calling del_gendisk under the lock while an + * access to a sysfs file waits for the lock). + * Also, MD_DELETED is only used by md raid and is set in + * do_md_stop; dm raid only uses md_stop to stop, so dm raid + * doesn't need to check MD_DELETED when taking the reconfig lock. + */ + if (test_bit(MD_DELETED, &mddev->flags)) + del_gendisk(mddev->gendisk); } EXPORT_SYMBOL_GPL(mddev_unlock); @@ -5774,19 +5781,30 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); struct mddev *mddev = container_of(kobj, struct mddev, kobj); ssize_t rv; + struct kernfs_node *kn = NULL; if (!entry->store) return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + + if (entry->store == array_state_store && cmd_match(page, "clear")) + kn = sysfs_break_active_protection(kobj, attr); + spin_lock(&all_mddevs_lock); if (!mddev_get(mddev)) { spin_unlock(&all_mddevs_lock); + if (kn) + sysfs_unbreak_active_protection(kn); return -EBUSY; } spin_unlock(&all_mddevs_lock); rv = entry->store(mddev, page, length); mddev_put(mddev); + + if (kn) + sysfs_unbreak_active_protection(kn); + return rv; } @@ -5794,12 +5812,6 @@ static void md_kobj_release(struct kobject *ko) { struct mddev *mddev = container_of(ko, struct mddev, kobj); - if (mddev->sysfs_state) - sysfs_put(mddev->sysfs_state); - if (mddev->sysfs_level) - sysfs_put(mddev->sysfs_level); - - del_gendisk(mddev->gendisk); put_disk(mddev->gendisk); } @@ -6413,15 +6425,10 @@ static void md_clean(struct mddev *mddev) mddev->persistent = 0; mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; - /* - * Don't clear MD_CLOSING, or mddev can be opened again. - * 'hold_active != 0' means mddev is still in the creation - * process and will be used later. - */ - if (mddev->hold_active) - mddev->flags = 0; - else - mddev->flags &= BIT_ULL_MASK(MD_CLOSING); + /* if UNTIL_STOP is set, it's cleared here */ + mddev->hold_active = 0; + /* Don't clear MD_CLOSING, or mddev can be opened again.
*/ + mddev->flags &= BIT_ULL_MASK(MD_CLOSING); mddev->sb_flags = 0; mddev->ro = MD_RDWR; mddev->metadata_type[0] = 0; @@ -6516,8 +6523,6 @@ static void __md_stop(struct mddev *mddev) if (mddev->private) pers->free(mddev, mddev->private); mddev->private = NULL; - if (pers->sync_request && mddev->to_remove == NULL) - mddev->to_remove = &md_redundancy_group; put_pers(pers); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); @@ -6646,10 +6651,8 @@ static int do_md_stop(struct mddev *mddev, int mode) mddev->bitmap_info.offset = 0; export_array(mddev); - md_clean(mddev); - if (mddev->hold_active == UNTIL_STOP) - mddev->hold_active = 0; + set_bit(MD_DELETED, &mddev->flags); } md_new_event(); sysfs_notify_dirent_safe(mddev->sysfs_state); @@ -9456,17 +9459,11 @@ static bool md_spares_need_change(struct mddev *mddev) return false; } -static int remove_and_add_spares(struct mddev *mddev, - struct md_rdev *this) +static int remove_spares(struct mddev *mddev, struct md_rdev *this) { struct md_rdev *rdev; - int spares = 0; int removed = 0; - if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) - /* Mustn't remove devices when resync thread is running */ - return 0; - rdev_for_each(rdev, mddev) { if ((this == NULL || rdev == this) && rdev_removeable(rdev) && !mddev->pers->hot_remove_disk(mddev, rdev)) { @@ -9480,6 +9477,21 @@ static int remove_and_add_spares(struct mddev *mddev, if (removed && mddev->kobj.sd) sysfs_notify_dirent_safe(mddev->sysfs_degraded); + return removed; +} + +static int remove_and_add_spares(struct mddev *mddev, + struct md_rdev *this) +{ + struct md_rdev *rdev; + int spares = 0; + int removed = 0; + + if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + /* Mustn't remove devices when resync thread is running */ + return 0; + + removed = remove_spares(mddev, this); if (this && removed) goto no_add; @@ -9522,6 +9534,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares) /* Check if resync is in progress. */ if (mddev->recovery_cp < MaxSector) { + remove_spares(mddev, NULL); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); return true; diff --git a/drivers/md/md.h b/drivers/md/md.h index d45a9e6ead80..67b365621507 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -700,11 +700,26 @@ static inline bool reshape_interrupted(struct mddev *mddev) static inline int __must_check mddev_lock(struct mddev *mddev) { - return mutex_lock_interruptible(&mddev->reconfig_mutex); + int ret; + + ret = mutex_lock_interruptible(&mddev->reconfig_mutex); + + /* MD_DELETED is set in do_md_stop() with reconfig_mutex held, + * so check it here. + */ + if (!ret && test_bit(MD_DELETED, &mddev->flags)) { + ret = -ENODEV; + mutex_unlock(&mddev->reconfig_mutex); + } + + return ret; } /* Sometimes we need to take the lock in a situation where * failure due to interrupts is not acceptable. + * There is no need to check MD_DELETED here: the owner that + * holds the lock can't be stopped, and no path calls this + * function after do_md_stop.
*/ static inline void mddev_lock_nointr(struct mddev *mddev) { @@ -713,7 +728,14 @@ static inline void mddev_lock_nointr(struct mddev *mddev) static inline int mddev_trylock(struct mddev *mddev) { - return mutex_trylock(&mddev->reconfig_mutex); + int ret; + + ret = mutex_trylock(&mddev->reconfig_mutex); + if (!ret && test_bit(MD_DELETED, &mddev->flags)) { + ret = -ENODEV; + mutex_unlock(&mddev->reconfig_mutex); + } + return ret; } extern void mddev_unlock(struct mddev *mddev); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index d8f639f4ae12..cbe2a9054cb9 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -384,6 +384,7 @@ static int raid0_set_limits(struct mddev *mddev) lim.max_write_zeroes_sectors = mddev->chunk_sectors; lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * mddev->raid_disks; + lim.chunk_sectors = mddev->chunk_sectors; lim.features |= BLK_FEAT_ATOMIC_WRITES; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); if (err) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c9bd2005bfd0..95dc354a86a0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2446,15 +2446,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) * that are active */ for (i = 0; i < conf->copies; i++) { - int d; - tbio = r10_bio->devs[i].repl_bio; if (!tbio || !tbio->bi_end_io) continue; if (r10_bio->devs[i].bio->bi_end_io != end_sync_write && r10_bio->devs[i].bio != fbio) bio_copy_data(tbio, fbio); - d = r10_bio->devs[i].devnum; atomic_inc(&r10_bio->remaining); submit_bio_noacct(tbio); } @@ -4012,6 +4009,7 @@ static int raid10_set_queue_limits(struct mddev *mddev) md_init_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; lim.io_min = mddev->chunk_sectors << 9; + lim.chunk_sectors = mddev->chunk_sectors; lim.io_opt = lim.io_min * raid10_nr_stripes(conf); lim.features |= BLK_FEAT_ATOMIC_WRITES; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ca5b0e8ba707..7ec61ee7b218 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -9040,7 +9040,7 @@ static int __init raid5_init(void) int ret; raid5_wq = alloc_workqueue("raid5wq", - WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); + WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_SYSFS, 0); if (!raid5_wq) return -ENOMEM; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 6fb3768e3d71..c6cc42360887 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -285,6 +285,24 @@ config MFD_CS42L43_SDW Select this to support the Cirrus Logic CS42L43 PC CODEC with headphone and class D speaker drivers over SoundWire. +config MFD_MACSMC + tristate "Apple Silicon System Management Controller (SMC)" + depends on ARCH_APPLE || COMPILE_TEST + depends on OF + depends on APPLE_RTKIT + select MFD_CORE + help + The System Management Controller (SMC) on Apple Silicon machines is a + piece of hardware that exposes various functionalities such as + temperature sensors, voltage/power meters, shutdown/reboot handling, + GPIOs and more. + + Communication happens via a shared mailbox using the RTKit protocol + which is also used for other co-processors. The SMC protocol then + allows reading and writing many different keys which implement the + various features. The MFD core device handles this protocol and + exposes it to the sub-devices. 
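As an illustrative aside (not part of the patch): the key protocol summarized in the help text above can be exercised from a sub-device roughly as in the sketch below. It assumes the apple_smc_read_u32() and SMC_KEY() helpers from <linux/mfd/macsmc.h> that the new driver added later in this diff relies on; the wrapper name is invented for the example.

#include <linux/mfd/macsmc.h>
#include <linux/types.h>

/* Sketch: read the "#KEY" key, which reports how many keys the SMC firmware exposes. */
static int example_smc_key_count(struct apple_smc *smc)
{
	u32 count;
	int ret;

	ret = apple_smc_read_u32(smc, SMC_KEY(#KEY), &count);
	if (ret)
		return ret;

	/* SMC data is big-endian, as in the driver's probe path below. */
	return be32_to_cpu(count);
}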
+ config MFD_MADERA tristate "Cirrus Logic Madera codecs" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 79495f9f3457..f7bdedd5a66d 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_MFD_CS42L43_SDW) += cs42l43-sdw.o obj-$(CONFIG_MFD_ENE_KB3930) += ene-kb3930.o obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o obj-$(CONFIG_MFD_GATEWORKS_GSC) += gateworks-gsc.o +obj-$(CONFIG_MFD_MACSMC) += macsmc.o obj-$(CONFIG_MFD_TI_LP873X) += lp873x.o obj-$(CONFIG_MFD_TI_LP87565) += lp87565.o diff --git a/drivers/mfd/adp5585.c b/drivers/mfd/adp5585.c index 160e0b38106a..58f7cebe2ea4 100644 --- a/drivers/mfd/adp5585.c +++ b/drivers/mfd/adp5585.c @@ -4,22 +4,40 @@ * * Copyright 2022 NXP * Copyright 2024 Ideas on Board Oy + * Copyright 2025 Analog Devices Inc. */ #include <linux/array_size.h> +#include <linux/bitfield.h> #include <linux/device.h> #include <linux/err.h> #include <linux/i2c.h> +#include <linux/gpio/consumer.h> #include <linux/mfd/adp5585.h> #include <linux/mfd/core.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <linux/types.h> -static const struct mfd_cell adp5585_devs[] = { - { .name = "adp5585-gpio", }, - { .name = "adp5585-pwm", }, +enum { + ADP5585_DEV_GPIO, + ADP5585_DEV_PWM, + ADP5585_DEV_INPUT, + ADP5585_DEV_MAX +}; + +static const struct mfd_cell adp5585_devs[ADP5585_DEV_MAX] = { + MFD_CELL_NAME("adp5585-gpio"), + MFD_CELL_NAME("adp5585-pwm"), + MFD_CELL_NAME("adp5585-keys"), +}; + +static const struct mfd_cell adp5589_devs[] = { + MFD_CELL_NAME("adp5589-gpio"), + MFD_CELL_NAME("adp5589-pwm"), + MFD_CELL_NAME("adp5589-keys"), }; static const struct regmap_range adp5585_volatile_ranges[] = { @@ -31,6 +49,15 @@ static const struct regmap_access_table adp5585_volatile_regs = { .n_yes_ranges = ARRAY_SIZE(adp5585_volatile_ranges), }; +static const struct regmap_range adp5589_volatile_ranges[] = { + regmap_reg_range(ADP5585_ID, ADP5589_GPI_STATUS_C), +}; + +static const struct regmap_access_table adp5589_volatile_regs = { + .yes_ranges = adp5589_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(adp5589_volatile_ranges), +}; + /* * Chip variants differ in the default configuration of pull-up and pull-down * resistors, and therefore have different default register values: @@ -74,46 +101,597 @@ static const u8 adp5585_regmap_defaults_04[ADP5585_MAX_REG + 1] = { /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, }; -enum adp5585_regmap_type { - ADP5585_REGMAP_00, - ADP5585_REGMAP_02, - ADP5585_REGMAP_04, +static const u8 adp5589_regmap_defaults_00[ADP5589_MAX_REG + 1] = { + /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x48 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; -static const struct regmap_config adp5585_regmap_configs[] = { - [ADP5585_REGMAP_00] = { - .reg_bits = 8, - .val_bits = 8, - .max_register = ADP5585_MAX_REG, - .volatile_table = &adp5585_volatile_regs, - .cache_type = REGCACHE_MAPLE, - .reg_defaults_raw = adp5585_regmap_defaults_00, - 
.num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_00), - }, - [ADP5585_REGMAP_02] = { - .reg_bits = 8, - .val_bits = 8, - .max_register = ADP5585_MAX_REG, - .volatile_table = &adp5585_volatile_regs, - .cache_type = REGCACHE_MAPLE, - .reg_defaults_raw = adp5585_regmap_defaults_02, - .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_02), - }, - [ADP5585_REGMAP_04] = { - .reg_bits = 8, - .val_bits = 8, - .max_register = ADP5585_MAX_REG, - .volatile_table = &adp5585_volatile_regs, - .cache_type = REGCACHE_MAPLE, - .reg_defaults_raw = adp5585_regmap_defaults_04, - .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_04), - }, +static const u8 adp5589_regmap_defaults_01[ADP5589_MAX_REG + 1] = { + /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x48 */ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, }; +static const u8 adp5589_regmap_defaults_02[ADP5589_MAX_REG + 1] = { + /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x18 */ 0x00, 0x41, 0x01, 0x00, 0x11, 0x04, 0x00, 0x00, + /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x48 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 *adp5585_regmap_defaults[ADP5585_MAX] = { + [ADP5585_00] = adp5585_regmap_defaults_00, + [ADP5585_01] = adp5585_regmap_defaults_00, + [ADP5585_02] = adp5585_regmap_defaults_02, + [ADP5585_03] = adp5585_regmap_defaults_00, + [ADP5585_04] = adp5585_regmap_defaults_04, + [ADP5589_00] = adp5589_regmap_defaults_00, + [ADP5589_01] = adp5589_regmap_defaults_01, + [ADP5589_02] = adp5589_regmap_defaults_02, +}; + +static const struct regmap_config adp5585_regmap_config_template = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ADP5585_MAX_REG, + .volatile_table = &adp5585_volatile_regs, + .cache_type = REGCACHE_MAPLE, + .num_reg_defaults_raw = ADP5585_MAX_REG + 1, +}; + +static const struct regmap_config adp5589_regmap_config_template = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ADP5589_MAX_REG, + .volatile_table = &adp5589_volatile_regs, + .cache_type = REGCACHE_MAPLE, + .num_reg_defaults_raw = ADP5589_MAX_REG + 1, +}; + +static const struct adp5585_regs adp5585_regs = { + .ext_cfg = ADP5585_PIN_CONFIG_C, + .int_en = ADP5585_INT_EN, + .gen_cfg = ADP5585_GENERAL_CFG, + .poll_ptime_cfg = ADP5585_POLL_PTIME_CFG, + .reset_cfg = ADP5585_RESET_CFG, + .reset1_event_a = ADP5585_RESET1_EVENT_A, + .reset2_event_a = ADP5585_RESET2_EVENT_A, + .pin_cfg_a = ADP5585_PIN_CONFIG_A, +}; + +static const struct adp5585_regs adp5589_regs = { + .ext_cfg = ADP5589_PIN_CONFIG_D, + .int_en = ADP5589_INT_EN, + .gen_cfg = ADP5589_GENERAL_CFG, + .poll_ptime_cfg = ADP5589_POLL_PTIME_CFG, + .reset_cfg = ADP5589_RESET_CFG, + 
.reset1_event_a = ADP5589_RESET1_EVENT_A, + .reset2_event_a = ADP5589_RESET2_EVENT_A, + .pin_cfg_a = ADP5589_PIN_CONFIG_A, +}; + +static int adp5585_validate_event(const struct adp5585_dev *adp5585, unsigned int ev) +{ + if (adp5585->has_pin6) { + if (ev >= ADP5585_ROW5_KEY_EVENT_START && ev <= ADP5585_ROW5_KEY_EVENT_END) + return 0; + if (ev >= ADP5585_GPI_EVENT_START && ev <= ADP5585_GPI_EVENT_END) + return 0; + + return dev_err_probe(adp5585->dev, -EINVAL, + "Invalid unlock/reset event(%u) for this device\n", ev); + } + + if (ev >= ADP5585_KEY_EVENT_START && ev <= ADP5585_KEY_EVENT_END) + return 0; + if (ev >= ADP5585_GPI_EVENT_START && ev <= ADP5585_GPI_EVENT_END) { + /* + * Some variants of the adp5585 do not have the Row 5 + * (meaning pin 6 or GPIO 6) available. Instead that pin serves + * as a reset pin. So, we need to make sure no event is + * configured for it. + */ + if (ev == (ADP5585_GPI_EVENT_START + 5)) + return dev_err_probe(adp5585->dev, -EINVAL, + "Invalid unlock/reset event(%u). R5 not available\n", + ev); + return 0; + } + + return dev_err_probe(adp5585->dev, -EINVAL, + "Invalid unlock/reset event(%u) for this device\n", ev); +} + +static int adp5589_validate_event(const struct adp5585_dev *adp5585, unsigned int ev) +{ + if (ev >= ADP5589_KEY_EVENT_START && ev <= ADP5589_KEY_EVENT_END) + return 0; + if (ev >= ADP5589_GPI_EVENT_START && ev <= ADP5589_GPI_EVENT_END) + return 0; + + return dev_err_probe(adp5585->dev, -EINVAL, + "Invalid unlock/reset event(%u) for this device\n", ev); +} + +static struct regmap_config *adp5585_fill_variant_config(struct adp5585_dev *adp5585) +{ + struct regmap_config *regmap_config; + + switch (adp5585->variant) { + case ADP5585_00: + case ADP5585_01: + case ADP5585_02: + case ADP5585_03: + case ADP5585_04: + adp5585->id = ADP5585_MAN_ID_VALUE; + adp5585->regs = &adp5585_regs; + adp5585->n_pins = ADP5585_PIN_MAX; + adp5585->reset2_out = ADP5585_RESET2_OUT; + if (adp5585->variant == ADP5585_01) + adp5585->has_pin6 = true; + regmap_config = devm_kmemdup(adp5585->dev, &adp5585_regmap_config_template, + sizeof(*regmap_config), GFP_KERNEL); + break; + case ADP5589_00: + case ADP5589_01: + case ADP5589_02: + adp5585->id = ADP5589_MAN_ID_VALUE; + adp5585->regs = &adp5589_regs; + adp5585->has_unlock = true; + adp5585->has_pin6 = true; + adp5585->n_pins = ADP5589_PIN_MAX; + adp5585->reset2_out = ADP5589_RESET2_OUT; + regmap_config = devm_kmemdup(adp5585->dev, &adp5589_regmap_config_template, + sizeof(*regmap_config), GFP_KERNEL); + break; + default: + return ERR_PTR(-ENODEV); + } + + if (!regmap_config) + return ERR_PTR(-ENOMEM); + + regmap_config->reg_defaults_raw = adp5585_regmap_defaults[adp5585->variant]; + + return regmap_config; +} + +static int adp5585_parse_ev_array(const struct adp5585_dev *adp5585, const char *prop, u32 *events, + u32 *n_events, u32 max_evs, bool reset_ev) +{ + struct device *dev = adp5585->dev; + unsigned int ev; + int ret; + + /* + * The device has the capability of handling special events through GPIs or a Keypad: + * unlock events: Unlock the keymap until one of the configured events is detected. + * reset events: Generate a reset pulse when one of the configured events is detected. 
+ */ + ret = device_property_count_u32(dev, prop); + if (ret < 0) + return 0; + + *n_events = ret; + + if (!adp5585->has_unlock && !reset_ev) + return dev_err_probe(dev, -EOPNOTSUPP, "Unlock keys not supported\n"); + + if (*n_events > max_evs) + return dev_err_probe(dev, -EINVAL, + "Invalid number of keys(%u > %u) for %s\n", + *n_events, max_evs, prop); + + ret = device_property_read_u32_array(dev, prop, events, *n_events); + if (ret) + return ret; + + for (ev = 0; ev < *n_events; ev++) { + if (!reset_ev && events[ev] == ADP5589_UNLOCK_WILDCARD) + continue; + + if (adp5585->id == ADP5585_MAN_ID_VALUE) + ret = adp5585_validate_event(adp5585, events[ev]); + else + ret = adp5589_validate_event(adp5585, events[ev]); + if (ret) + return ret; + } + + return 0; +} + +static int adp5585_unlock_ev_parse(struct adp5585_dev *adp5585) +{ + struct device *dev = adp5585->dev; + int ret; + + ret = adp5585_parse_ev_array(adp5585, "adi,unlock-events", adp5585->unlock_keys, + &adp5585->nkeys_unlock, ARRAY_SIZE(adp5585->unlock_keys), + false); + if (ret) + return ret; + if (!adp5585->nkeys_unlock) + return 0; + + ret = device_property_read_u32(dev, "adi,unlock-trigger-sec", &adp5585->unlock_time); + if (!ret) { + if (adp5585->unlock_time > ADP5585_MAX_UNLOCK_TIME_SEC) + return dev_err_probe(dev, -EINVAL, + "Invalid unlock time(%u > %d)\n", + adp5585->unlock_time, + ADP5585_MAX_UNLOCK_TIME_SEC); + } + + return 0; +} + +static int adp5585_reset_ev_parse(struct adp5585_dev *adp5585) +{ + struct device *dev = adp5585->dev; + u32 prop_val; + int ret; + + ret = adp5585_parse_ev_array(adp5585, "adi,reset1-events", adp5585->reset1_keys, + &adp5585->nkeys_reset1, + ARRAY_SIZE(adp5585->reset1_keys), true); + if (ret) + return ret; + + ret = adp5585_parse_ev_array(adp5585, "adi,reset2-events", + adp5585->reset2_keys, + &adp5585->nkeys_reset2, + ARRAY_SIZE(adp5585->reset2_keys), true); + if (ret) + return ret; + + if (!adp5585->nkeys_reset1 && !adp5585->nkeys_reset2) + return 0; + + if (adp5585->nkeys_reset1 && device_property_read_bool(dev, "adi,reset1-active-high")) + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET1_POL, 1); + + if (adp5585->nkeys_reset2 && device_property_read_bool(dev, "adi,reset2-active-high")) + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET2_POL, 1); + + if (device_property_read_bool(dev, "adi,rst-passthrough-enable")) + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RST_PASSTHRU_EN, 1); + + ret = device_property_read_u32(dev, "adi,reset-trigger-ms", &prop_val); + if (!ret) { + switch (prop_val) { + case 0: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 0); + break; + case 1000: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 1); + break; + case 1500: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 2); + break; + case 2000: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 3); + break; + case 2500: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 4); + break; + case 3000: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 5); + break; + case 3500: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 6); + break; + case 4000: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 7); + break; + default: + return dev_err_probe(dev, -EINVAL, + "Invalid value(%u) for adi,reset-trigger-ms\n", + prop_val); + } + } + + ret = device_property_read_u32(dev, "adi,reset-pulse-width-us", &prop_val); + if (!ret) { + switch (prop_val) { + case 500: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 0); + break; + case 1000: + 
adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 1); + break; + case 2000: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 2); + break; + case 10000: + adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 3); + break; + default: + return dev_err_probe(dev, -EINVAL, + "Invalid value(%u) for adi,reset-pulse-width-us\n", + prop_val); + } + return ret; + } + + return 0; +} + +static int adp5585_add_devices(const struct adp5585_dev *adp5585) +{ + struct device *dev = adp5585->dev; + const struct mfd_cell *cells; + int ret; + + if (adp5585->id == ADP5585_MAN_ID_VALUE) + cells = adp5585_devs; + else + cells = adp5589_devs; + + if (device_property_present(dev, "#pwm-cells")) { + /* Make sure the PWM output pin is not used by the GPIO or INPUT devices */ + __set_bit(ADP5585_PWM_OUT, adp5585->pin_usage); + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, + &cells[ADP5585_DEV_PWM], 1, NULL, 0, NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to add PWM device\n"); + } + + if (device_property_present(dev, "#gpio-cells")) { + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, + &cells[ADP5585_DEV_GPIO], 1, NULL, 0, NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to add GPIO device\n"); + } + + if (device_property_present(adp5585->dev, "adi,keypad-pins")) { + ret = devm_mfd_add_devices(adp5585->dev, PLATFORM_DEVID_AUTO, + &cells[ADP5585_DEV_INPUT], 1, NULL, 0, NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to add input device\n"); + } + + return 0; +} + +static void adp5585_osc_disable(void *data) +{ + const struct adp5585_dev *adp5585 = data; + + regmap_write(adp5585->regmap, ADP5585_GENERAL_CFG, 0); +} + +static void adp5585_report_events(struct adp5585_dev *adp5585, int ev_cnt) +{ + unsigned int i; + + for (i = 0; i < ev_cnt; i++) { + unsigned long key_val, key_press; + unsigned int key; + int ret; + + ret = regmap_read(adp5585->regmap, ADP5585_FIFO_1 + i, &key); + if (ret) + return; + + key_val = FIELD_GET(ADP5585_KEY_EVENT_MASK, key); + key_press = FIELD_GET(ADP5585_KEV_EV_PRESS_MASK, key); + + blocking_notifier_call_chain(&adp5585->event_notifier, key_val, (void *)key_press); + } +} + +static irqreturn_t adp5585_irq(int irq, void *data) +{ + struct adp5585_dev *adp5585 = data; + unsigned int status, ev_cnt; + int ret; + + ret = regmap_read(adp5585->regmap, ADP5585_INT_STATUS, &status); + if (ret) + return IRQ_HANDLED; + + if (status & ADP5585_OVRFLOW_INT) + dev_err_ratelimited(adp5585->dev, "Event overflow error\n"); + + if (!(status & ADP5585_EVENT_INT)) + goto out_irq; + + ret = regmap_read(adp5585->regmap, ADP5585_STATUS, &ev_cnt); + if (ret) + goto out_irq; + + ev_cnt = FIELD_GET(ADP5585_EC_MASK, ev_cnt); + if (!ev_cnt) + goto out_irq; + + adp5585_report_events(adp5585, ev_cnt); +out_irq: + regmap_write(adp5585->regmap, ADP5585_INT_STATUS, status); + return IRQ_HANDLED; +} + +static int adp5585_setup(struct adp5585_dev *adp5585) +{ + const struct adp5585_regs *regs = adp5585->regs; + unsigned int reg_val = 0, i; + int ret; + + /* If pin_6 (ROW5/GPI6) is not available, make sure to mark it as "busy" */ + if (!adp5585->has_pin6) + __set_bit(ADP5585_ROW5, adp5585->pin_usage); + + /* Configure the device with reset and unlock events */ + for (i = 0; i < adp5585->nkeys_unlock; i++) { + ret = regmap_write(adp5585->regmap, ADP5589_UNLOCK1 + i, + adp5585->unlock_keys[i] | ADP5589_UNLOCK_EV_PRESS); + if (ret) + return ret; + } + + if (adp5585->nkeys_unlock) { + ret = regmap_update_bits(adp5585->regmap, ADP5589_UNLOCK_TIMERS, + ADP5589_UNLOCK_TIMER, 
adp5585->unlock_time); + if (ret) + return ret; + + ret = regmap_set_bits(adp5585->regmap, ADP5589_LOCK_CFG, ADP5589_LOCK_EN); + if (ret) + return ret; + } + + for (i = 0; i < adp5585->nkeys_reset1; i++) { + ret = regmap_write(adp5585->regmap, regs->reset1_event_a + i, + adp5585->reset1_keys[i] | ADP5585_RESET_EV_PRESS); + if (ret) + return ret; + + /* Mark that pin as not usable for the INPUT and GPIO devices. */ + __set_bit(ADP5585_RESET1_OUT, adp5585->pin_usage); + } + + for (i = 0; i < adp5585->nkeys_reset2; i++) { + ret = regmap_write(adp5585->regmap, regs->reset2_event_a + i, + adp5585->reset2_keys[i] | ADP5585_RESET_EV_PRESS); + if (ret) + return ret; + + __set_bit(adp5585->reset2_out, adp5585->pin_usage); + } + + if (adp5585->nkeys_reset1 || adp5585->nkeys_reset2) { + ret = regmap_write(adp5585->regmap, regs->reset_cfg, adp5585->reset_cfg); + if (ret) + return ret; + + /* If there's a reset1 event, then R4 is used as an output for the reset signal */ + if (adp5585->nkeys_reset1) + reg_val = ADP5585_R4_EXTEND_CFG_RESET1; + /* If there's a reset2 event, then C4 is used as an output for the reset signal */ + if (adp5585->nkeys_reset2) + reg_val |= ADP5585_C4_EXTEND_CFG_RESET2; + + ret = regmap_update_bits(adp5585->regmap, regs->ext_cfg, + ADP5585_C4_EXTEND_CFG_MASK | ADP5585_R4_EXTEND_CFG_MASK, + reg_val); + if (ret) + return ret; + } + + /* Clear any possible event by reading all the FIFO entries */ + for (i = 0; i < ADP5585_EV_MAX; i++) { + ret = regmap_read(adp5585->regmap, ADP5585_FIFO_1 + i, &reg_val); + if (ret) + return ret; + } + + ret = regmap_write(adp5585->regmap, regs->poll_ptime_cfg, adp5585->ev_poll_time); + if (ret) + return ret; + + /* + * Enable the internal oscillator, as it's shared between multiple + * functions. + */ + ret = regmap_write(adp5585->regmap, regs->gen_cfg, + ADP5585_OSC_FREQ_500KHZ | ADP5585_INT_CFG | ADP5585_OSC_EN); + if (ret) + return ret; + + return devm_add_action_or_reset(adp5585->dev, adp5585_osc_disable, adp5585); +} + +static int adp5585_parse_fw(struct adp5585_dev *adp5585) +{ + unsigned int prop_val; + int ret; + + ret = device_property_read_u32(adp5585->dev, "poll-interval", &prop_val); + if (!ret) { + adp5585->ev_poll_time = prop_val / 10 - 1; + /* + * ev_poll_time is the raw value to be written to the register and 0 to 3 are the + * valid values. + */ + if (adp5585->ev_poll_time > 3) + return dev_err_probe(adp5585->dev, -EINVAL, + "Invalid value(%u) for poll-interval\n", prop_val); + } + + ret = adp5585_unlock_ev_parse(adp5585); + if (ret) + return ret; + + return adp5585_reset_ev_parse(adp5585); +} + +static void adp5585_irq_disable(void *data) +{ + struct adp5585_dev *adp5585 = data; + + regmap_write(adp5585->regmap, adp5585->regs->int_en, 0); +} + +static int adp5585_irq_enable(struct i2c_client *i2c, + struct adp5585_dev *adp5585) +{ + const struct adp5585_regs *regs = adp5585->regs; + unsigned int stat; + int ret; + + if (i2c->irq <= 0) + return 0; + + ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL, adp5585_irq, + IRQF_ONESHOT, i2c->name, adp5585); + if (ret) + return ret; + + /* + * Clear any possible outstanding interrupts before enabling them. We do that by reading + * the status register and writing back the same value.
+ */ + ret = regmap_read(adp5585->regmap, ADP5585_INT_STATUS, &stat); + if (ret) + return ret; + + ret = regmap_write(adp5585->regmap, ADP5585_INT_STATUS, stat); + if (ret) + return ret; + + ret = regmap_write(adp5585->regmap, regs->int_en, ADP5585_OVRFLOW_IEN | ADP5585_EVENT_IEN); + if (ret) + return ret; + + return devm_add_action_or_reset(&i2c->dev, adp5585_irq_disable, adp5585); +} + static int adp5585_i2c_probe(struct i2c_client *i2c) { - const struct regmap_config *regmap_config; + struct regmap_config *regmap_config; struct adp5585_dev *adp5585; + struct gpio_desc *gpio; unsigned int id; int ret; @@ -122,8 +700,36 @@ static int adp5585_i2c_probe(struct i2c_client *i2c) return -ENOMEM; i2c_set_clientdata(i2c, adp5585); + adp5585->dev = &i2c->dev; + adp5585->irq = i2c->irq; + BLOCKING_INIT_NOTIFIER_HEAD(&adp5585->event_notifier); + + adp5585->variant = (enum adp5585_variant)(uintptr_t)i2c_get_match_data(i2c); + if (!adp5585->variant) + return -ENODEV; + + regmap_config = adp5585_fill_variant_config(adp5585); + if (IS_ERR(regmap_config)) + return PTR_ERR(regmap_config); + + ret = devm_regulator_get_enable(&i2c->dev, "vdd"); + if (ret) + return ret; + + gpio = devm_gpiod_get_optional(&i2c->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + /* + * Note the timings are not documented anywhere in the datasheet. They are just + * reasonable values that work. + */ + if (gpio) { + fsleep(30); + gpiod_set_value_cansleep(gpio, 0); + fsleep(60); + } - regmap_config = i2c_get_match_data(i2c); adp5585->regmap = devm_regmap_init_i2c(i2c, regmap_config); if (IS_ERR(adp5585->regmap)) return dev_err_probe(&i2c->dev, PTR_ERR(adp5585->regmap), @@ -134,24 +740,37 @@ static int adp5585_i2c_probe(struct i2c_client *i2c) return dev_err_probe(&i2c->dev, ret, "Failed to read device ID\n"); - if ((id & ADP5585_MAN_ID_MASK) != ADP5585_MAN_ID_VALUE) + id &= ADP5585_MAN_ID_MASK; + if (id != adp5585->id) return dev_err_probe(&i2c->dev, -ENODEV, "Invalid device ID 0x%02x\n", id); - ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, - adp5585_devs, ARRAY_SIZE(adp5585_devs), - NULL, 0, NULL); + adp5585->pin_usage = devm_bitmap_zalloc(&i2c->dev, adp5585->n_pins, GFP_KERNEL); + if (!adp5585->pin_usage) + return -ENOMEM; + + ret = adp5585_parse_fw(adp5585); if (ret) - return dev_err_probe(&i2c->dev, ret, - "Failed to add child devices\n"); + return ret; - return 0; + ret = adp5585_setup(adp5585); + if (ret) + return ret; + + ret = adp5585_add_devices(adp5585); + if (ret) + return ret; + + return adp5585_irq_enable(i2c, adp5585); } static int adp5585_suspend(struct device *dev) { struct adp5585_dev *adp5585 = dev_get_drvdata(dev); + if (adp5585->irq) + disable_irq(adp5585->irq); + regcache_cache_only(adp5585->regmap, true); return 0; @@ -160,11 +779,19 @@ static int adp5585_suspend(struct device *dev) static int adp5585_resume(struct device *dev) { struct adp5585_dev *adp5585 = dev_get_drvdata(dev); + int ret; regcache_cache_only(adp5585->regmap, false); regcache_mark_dirty(adp5585->regmap); - return regcache_sync(adp5585->regmap); + ret = regcache_sync(adp5585->regmap); + if (ret) + return ret; + + if (adp5585->irq) + enable_irq(adp5585->irq); + + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(adp5585_pm, adp5585_suspend, adp5585_resume); @@ -172,19 +799,31 @@ static DEFINE_SIMPLE_DEV_PM_OPS(adp5585_pm, adp5585_suspend, adp5585_resume); static const struct of_device_id adp5585_of_match[] = { { .compatible = "adi,adp5585-00", - .data = &adp5585_regmap_configs[ADP5585_REGMAP_00], + .data = 
(void *)ADP5585_00, }, { .compatible = "adi,adp5585-01", - .data = &adp5585_regmap_configs[ADP5585_REGMAP_00], + .data = (void *)ADP5585_01, }, { .compatible = "adi,adp5585-02", - .data = &adp5585_regmap_configs[ADP5585_REGMAP_02], + .data = (void *)ADP5585_02, }, { .compatible = "adi,adp5585-03", - .data = &adp5585_regmap_configs[ADP5585_REGMAP_00], + .data = (void *)ADP5585_03, }, { .compatible = "adi,adp5585-04", - .data = &adp5585_regmap_configs[ADP5585_REGMAP_04], + .data = (void *)ADP5585_04, + }, { + .compatible = "adi,adp5589-00", + .data = (void *)ADP5589_00, + }, { + .compatible = "adi,adp5589-01", + .data = (void *)ADP5589_01, + }, { + .compatible = "adi,adp5589-02", + .data = (void *)ADP5589_02, + }, { + .compatible = "adi,adp5589", + .data = (void *)ADP5589_00, }, { /* sentinel */ } }; diff --git a/drivers/mfd/macsmc.c b/drivers/mfd/macsmc.c new file mode 100644 index 000000000000..870c8b2028a8 --- /dev/null +++ b/drivers/mfd/macsmc.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC (System Management Controller) MFD driver + * + * Copyright The Asahi Linux Contributors + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/math.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/overflow.h> +#include <linux/platform_device.h> +#include <linux/soc/apple/rtkit.h> +#include <linux/unaligned.h> + +#define SMC_ENDPOINT 0x20 + +/* We don't actually know the true size here but this seem reasonable */ +#define SMC_SHMEM_SIZE 0x1000 +#define SMC_MAX_SIZE 255 + +#define SMC_MSG_READ_KEY 0x10 +#define SMC_MSG_WRITE_KEY 0x11 +#define SMC_MSG_GET_KEY_BY_INDEX 0x12 +#define SMC_MSG_GET_KEY_INFO 0x13 +#define SMC_MSG_INITIALIZE 0x17 +#define SMC_MSG_NOTIFICATION 0x18 +#define SMC_MSG_RW_KEY 0x20 + +#define SMC_DATA GENMASK_ULL(63, 32) +#define SMC_WSIZE GENMASK_ULL(31, 24) +#define SMC_SIZE GENMASK_ULL(23, 16) +#define SMC_ID GENMASK_ULL(15, 12) +#define SMC_MSG GENMASK_ULL(7, 0) +#define SMC_RESULT SMC_MSG + +#define SMC_TIMEOUT_MS 500 + +static const struct mfd_cell apple_smc_devs[] = { + MFD_CELL_OF("macsmc-gpio", NULL, NULL, 0, 0, "apple,smc-gpio"), + MFD_CELL_OF("macsmc-reboot", NULL, NULL, 0, 0, "apple,smc-reboot"), +}; + +static int apple_smc_cmd_locked(struct apple_smc *smc, u64 cmd, u64 arg, + u64 size, u64 wsize, u32 *ret_data) +{ + u8 result; + int ret; + u64 msg; + + lockdep_assert_held(&smc->mutex); + + if (smc->boot_stage != APPLE_SMC_INITIALIZED) + return -EIO; + if (smc->atomic_mode) + return -EIO; + + reinit_completion(&smc->cmd_done); + + smc->msg_id = (smc->msg_id + 1) & 0xf; + msg = (FIELD_PREP(SMC_MSG, cmd) | + FIELD_PREP(SMC_SIZE, size) | + FIELD_PREP(SMC_WSIZE, wsize) | + FIELD_PREP(SMC_ID, smc->msg_id) | + FIELD_PREP(SMC_DATA, arg)); + + ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, false); + if (ret) { + dev_err(smc->dev, "Failed to send command\n"); + return ret; + } + + if (wait_for_completion_timeout(&smc->cmd_done, msecs_to_jiffies(SMC_TIMEOUT_MS)) <= 0) { + dev_err(smc->dev, "Command timed out (%llx)", msg); + return -ETIMEDOUT; + } + + if (FIELD_GET(SMC_ID, smc->cmd_ret) != smc->msg_id) { + dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n", + smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret)); + return -EIO; + } + + result = FIELD_GET(SMC_RESULT, smc->cmd_ret); + if 
(result) + return -EIO; + + if (ret_data) + *ret_data = FIELD_GET(SMC_DATA, smc->cmd_ret); + + return FIELD_GET(SMC_SIZE, smc->cmd_ret); +} + +static int apple_smc_cmd(struct apple_smc *smc, u64 cmd, u64 arg, + u64 size, u64 wsize, u32 *ret_data) +{ + guard(mutex)(&smc->mutex); + + return apple_smc_cmd_locked(smc, cmd, arg, size, wsize, ret_data); +} + +static int apple_smc_rw_locked(struct apple_smc *smc, smc_key key, + const void *wbuf, size_t wsize, + void *rbuf, size_t rsize) +{ + u64 smc_size, smc_wsize; + u32 rdata; + int ret; + u64 cmd; + + lockdep_assert_held(&smc->mutex); + + if (rsize > SMC_MAX_SIZE) + return -EINVAL; + if (wsize > SMC_MAX_SIZE) + return -EINVAL; + + if (rsize && wsize) { + cmd = SMC_MSG_RW_KEY; + memcpy_toio(smc->shmem.iomem, wbuf, wsize); + smc_size = rsize; + smc_wsize = wsize; + } else if (wsize && !rsize) { + cmd = SMC_MSG_WRITE_KEY; + memcpy_toio(smc->shmem.iomem, wbuf, wsize); + /* + * Setting size to the length we want to write and wsize to 0 + * looks silly but that's how the SMC protocol works ¯\_(ツ)_/¯ + */ + smc_size = wsize; + smc_wsize = 0; + } else if (!wsize && rsize) { + cmd = SMC_MSG_READ_KEY; + smc_size = rsize; + smc_wsize = 0; + } else { + return -EINVAL; + } + + ret = apple_smc_cmd_locked(smc, cmd, key, smc_size, smc_wsize, &rdata); + if (ret < 0) + return ret; + + if (rsize) { + /* + * Small data <= 4 bytes is returned as part of the reply + * message which is sent over the mailbox FIFO. Everything + * bigger has to be copied from SRAM which is mapped as + * Device memory. + */ + if (rsize <= 4) + memcpy(rbuf, &rdata, rsize); + else + memcpy_fromio(rbuf, smc->shmem.iomem, rsize); + } + + return ret; +} + +int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size) +{ + guard(mutex)(&smc->mutex); + + return apple_smc_rw_locked(smc, key, NULL, 0, buf, size); +} +EXPORT_SYMBOL(apple_smc_read); + +int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size) +{ + guard(mutex)(&smc->mutex); + + return apple_smc_rw_locked(smc, key, buf, size, NULL, 0); +} +EXPORT_SYMBOL(apple_smc_write); + +int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize, + void *rbuf, size_t rsize) +{ + guard(mutex)(&smc->mutex); + + return apple_smc_rw_locked(smc, key, wbuf, wsize, rbuf, rsize); +} +EXPORT_SYMBOL(apple_smc_rw); + +int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key) +{ + int ret; + + ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_BY_INDEX, index, 0, 0, key); + + *key = swab32(*key); + return ret; +} +EXPORT_SYMBOL(apple_smc_get_key_by_index); + +int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info) +{ + u8 key_info[6]; + int ret; + + ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_INFO, key, 0, 0, NULL); + if (ret >= 0 && info) { + memcpy_fromio(key_info, smc->shmem.iomem, sizeof(key_info)); + info->size = key_info[0]; + info->type_code = get_unaligned_be32(&key_info[1]); + info->flags = key_info[5]; + } + return ret; +} +EXPORT_SYMBOL(apple_smc_get_key_info); + +int apple_smc_enter_atomic(struct apple_smc *smc) +{ + guard(mutex)(&smc->mutex); + + /* + * Disable notifications since this is called before shutdown and no + * notification handler will be able to handle the notification + * using atomic operations only. Also ignore any failure here + * because we're about to shut down or reboot anyway. + * We can't use apple_smc_write_flag here since that would try to lock + * smc->mutex again. 
+ */ + const u8 flag = 0; + + apple_smc_rw_locked(smc, SMC_KEY(NTAP), &flag, sizeof(flag), NULL, 0); + + smc->atomic_mode = true; + + return 0; +} +EXPORT_SYMBOL(apple_smc_enter_atomic); + +int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size) +{ + guard(spinlock_irqsave)(&smc->lock); + u8 result; + int ret; + u64 msg; + + if (size > SMC_MAX_SIZE || size == 0) + return -EINVAL; + + if (smc->boot_stage != APPLE_SMC_INITIALIZED) + return -EIO; + if (!smc->atomic_mode) + return -EIO; + + memcpy_toio(smc->shmem.iomem, buf, size); + smc->msg_id = (smc->msg_id + 1) & 0xf; + msg = (FIELD_PREP(SMC_MSG, SMC_MSG_WRITE_KEY) | + FIELD_PREP(SMC_SIZE, size) | + FIELD_PREP(SMC_ID, smc->msg_id) | + FIELD_PREP(SMC_DATA, key)); + smc->atomic_pending = true; + + ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, true); + if (ret < 0) { + dev_err(smc->dev, "Failed to send command (%d)\n", ret); + return ret; + } + + while (smc->atomic_pending) { + ret = apple_rtkit_poll(smc->rtk); + if (ret < 0) { + dev_err(smc->dev, "RTKit poll failed (%llx)", msg); + return ret; + } + udelay(100); + } + + if (FIELD_GET(SMC_ID, smc->cmd_ret) != smc->msg_id) { + dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n", + smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret)); + return -EIO; + } + + result = FIELD_GET(SMC_RESULT, smc->cmd_ret); + if (result) + return -EIO; + + return FIELD_GET(SMC_SIZE, smc->cmd_ret); +} +EXPORT_SYMBOL(apple_smc_write_atomic); + +static void apple_smc_rtkit_crashed(void *cookie, const void *bfr, size_t bfr_len) +{ + struct apple_smc *smc = cookie; + + smc->boot_stage = APPLE_SMC_ERROR_CRASHED; + dev_err(smc->dev, "SMC crashed! Your system will reboot in a few seconds...\n"); +} + +static int apple_smc_rtkit_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr) +{ + struct apple_smc *smc = cookie; + size_t bfr_end; + + if (!bfr->iova) { + dev_err(smc->dev, "RTKit wants a RAM buffer\n"); + return -EIO; + } + + if (check_add_overflow(bfr->iova, bfr->size - 1, &bfr_end)) + return -EFAULT; + + if (bfr->iova < smc->sram->start || bfr->iova > smc->sram->end || + bfr_end > smc->sram->end) { + dev_err(smc->dev, "RTKit buffer request outside SRAM region: [0x%llx, 0x%llx]\n", + (unsigned long long)bfr->iova, + (unsigned long long)bfr_end); + return -EFAULT; + } + + bfr->iomem = smc->sram_base + (bfr->iova - smc->sram->start); + bfr->is_mapped = true; + + return 0; +} + +static bool apple_smc_rtkit_recv_early(void *cookie, u8 endpoint, u64 message) +{ + struct apple_smc *smc = cookie; + + if (endpoint != SMC_ENDPOINT) { + dev_warn(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint); + return false; + } + + if (smc->boot_stage == APPLE_SMC_BOOTING) { + int ret; + + smc->shmem.iova = message; + smc->shmem.size = SMC_SHMEM_SIZE; + ret = apple_smc_rtkit_shmem_setup(smc, &smc->shmem); + if (ret < 0) { + smc->boot_stage = APPLE_SMC_ERROR_NO_SHMEM; + dev_err(smc->dev, "Failed to initialize shared memory (%d)\n", ret); + } else { + smc->boot_stage = APPLE_SMC_INITIALIZED; + } + complete(&smc->init_done); + } else if (FIELD_GET(SMC_MSG, message) == SMC_MSG_NOTIFICATION) { + /* Handle these in the RTKit worker thread */ + return false; + } else { + smc->cmd_ret = message; + if (smc->atomic_pending) + smc->atomic_pending = false; + else + complete(&smc->cmd_done); + } + + return true; +} + +static void apple_smc_rtkit_recv(void *cookie, u8 endpoint, u64 message) +{ + struct apple_smc *smc = cookie; + + if (endpoint != SMC_ENDPOINT) 
{ + dev_warn(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint); + return; + } + + if (FIELD_GET(SMC_MSG, message) != SMC_MSG_NOTIFICATION) { + dev_warn(smc->dev, "Received unknown message from worker: 0x%llx\n", message); + return; + } + + blocking_notifier_call_chain(&smc->event_handlers, FIELD_GET(SMC_DATA, message), NULL); +} + +static const struct apple_rtkit_ops apple_smc_rtkit_ops = { + .crashed = apple_smc_rtkit_crashed, + .recv_message = apple_smc_rtkit_recv, + .recv_message_early = apple_smc_rtkit_recv_early, + .shmem_setup = apple_smc_rtkit_shmem_setup, +}; + +static void apple_smc_rtkit_shutdown(void *data) +{ + struct apple_smc *smc = data; + + /* Shut down SMC firmware, if it's not completely wedged */ + if (apple_rtkit_is_running(smc->rtk)) + apple_rtkit_quiesce(smc->rtk); +} + +static void apple_smc_disable_notifications(void *data) +{ + struct apple_smc *smc = data; + + apple_smc_write_flag(smc, SMC_KEY(NTAP), false); +} + +static int apple_smc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_smc *smc; + u32 count; + int ret; + + smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL); + if (!smc) + return -ENOMEM; + + smc->dev = &pdev->dev; + smc->sram_base = devm_platform_get_and_ioremap_resource(pdev, 1, &smc->sram); + if (IS_ERR(smc->sram_base)) + return dev_err_probe(dev, PTR_ERR(smc->sram_base), "Failed to map SRAM region"); + + smc->rtk = devm_apple_rtkit_init(dev, smc, NULL, 0, &apple_smc_rtkit_ops); + if (IS_ERR(smc->rtk)) + return dev_err_probe(dev, PTR_ERR(smc->rtk), "Failed to initialize RTKit"); + + smc->boot_stage = APPLE_SMC_BOOTING; + ret = apple_rtkit_wake(smc->rtk); + if (ret) + return dev_err_probe(dev, ret, "Failed to wake up SMC"); + + ret = devm_add_action_or_reset(dev, apple_smc_rtkit_shutdown, smc); + if (ret) + return dev_err_probe(dev, ret, "Failed to register rtkit shutdown action"); + + ret = apple_rtkit_start_ep(smc->rtk, SMC_ENDPOINT); + if (ret) + return dev_err_probe(dev, ret, "Failed to start SMC endpoint"); + + init_completion(&smc->init_done); + init_completion(&smc->cmd_done); + + ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, + FIELD_PREP(SMC_MSG, SMC_MSG_INITIALIZE), NULL, false); + if (ret) + return dev_err_probe(dev, ret, "Failed to send init message"); + + if (wait_for_completion_timeout(&smc->init_done, msecs_to_jiffies(SMC_TIMEOUT_MS)) == 0) { + dev_err(dev, "Timed out initializing SMC"); + return -ETIMEDOUT; + } + + if (smc->boot_stage != APPLE_SMC_INITIALIZED) { + dev_err(dev, "SMC failed to boot successfully, boot stage=%d\n", smc->boot_stage); + return -EIO; + } + + dev_set_drvdata(&pdev->dev, smc); + BLOCKING_INIT_NOTIFIER_HEAD(&smc->event_handlers); + + ret = apple_smc_read_u32(smc, SMC_KEY(#KEY), &count); + if (ret) + return dev_err_probe(smc->dev, ret, "Failed to get key count"); + smc->key_count = be32_to_cpu(count); + + /* Enable notifications */ + apple_smc_write_flag(smc, SMC_KEY(NTAP), true); + ret = devm_add_action_or_reset(dev, apple_smc_disable_notifications, smc); + if (ret) + return dev_err_probe(dev, ret, "Failed to register notification disable action"); + + ret = devm_mfd_add_devices(smc->dev, PLATFORM_DEVID_NONE, + apple_smc_devs, ARRAY_SIZE(apple_smc_devs), + NULL, 0, NULL); + if (ret) + return dev_err_probe(smc->dev, ret, "Failed to register sub-devices"); + + + return 0; +} + +static const struct of_device_id apple_smc_of_match[] = { + { .compatible = "apple,smc" }, + {}, +}; +MODULE_DEVICE_TABLE(of, apple_smc_of_match); + +static struct 
platform_driver apple_smc_driver = { + .driver = { + .name = "macsmc", + .of_match_table = apple_smc_of_match, + }, + .probe = apple_smc_probe, +}; +module_platform_driver(apple_smc_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_AUTHOR("Sven Peter <sven@kernel.org>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC driver"); diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c index a7223e873cd1..c16c37e36617 100644 --- a/drivers/mfd/tps6594-core.c +++ b/drivers/mfd/tps6594-core.c @@ -1,6 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Core functions for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs + * Core functions for following TI PMICs: + * - LP8764 + * - TPS65224 + * - TPS652G1 + * - TPS6593 + * - TPS6594 * * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/ */ @@ -414,6 +419,61 @@ static const unsigned int tps65224_irq_reg[] = { TPS6594_REG_INT_FSM_ERR, }; +/* TPS652G1 Resources */ + +static const struct mfd_cell tps652g1_common_cells[] = { + MFD_CELL_RES("tps6594-pfsm", tps65224_pfsm_resources), + MFD_CELL_RES("tps6594-pinctrl", tps65224_pinctrl_resources), + MFD_CELL_NAME("tps6594-regulator"), +}; + +static const struct regmap_irq tps652g1_irqs[] = { + /* INT_GPIO register */ + REGMAP_IRQ_REG(TPS65224_IRQ_GPIO1, 2, TPS65224_BIT_GPIO1_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_GPIO2, 2, TPS65224_BIT_GPIO2_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_GPIO3, 2, TPS65224_BIT_GPIO3_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_GPIO4, 2, TPS65224_BIT_GPIO4_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_GPIO5, 2, TPS65224_BIT_GPIO5_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_GPIO6, 2, TPS65224_BIT_GPIO6_INT), + + /* INT_STARTUP register */ + REGMAP_IRQ_REG(TPS65224_IRQ_VSENSE, 3, TPS65224_BIT_VSENSE_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_ENABLE, 3, TPS6594_BIT_ENABLE_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_PB_SHORT, 3, TPS65224_BIT_PB_SHORT_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_FSD, 3, TPS6594_BIT_FSD_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_SOFT_REBOOT, 3, TPS6594_BIT_SOFT_REBOOT_INT), + + /* INT_MISC register */ + REGMAP_IRQ_REG(TPS65224_IRQ_BIST_PASS, 4, TPS6594_BIT_BIST_PASS_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_EXT_CLK, 4, TPS6594_BIT_EXT_CLK_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_REG_UNLOCK, 4, TPS65224_BIT_REG_UNLOCK_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_TWARN, 4, TPS6594_BIT_TWARN_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_PB_LONG, 4, TPS65224_BIT_PB_LONG_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_PB_FALL, 4, TPS65224_BIT_PB_FALL_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_PB_RISE, 4, TPS65224_BIT_PB_RISE_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_ADC_CONV_READY, 4, TPS65224_BIT_ADC_CONV_READY_INT), + + /* INT_MODERATE_ERR register */ + REGMAP_IRQ_REG(TPS65224_IRQ_TSD_ORD, 5, TPS6594_BIT_TSD_ORD_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_BIST_FAIL, 5, TPS6594_BIT_BIST_FAIL_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_REG_CRC_ERR, 5, TPS6594_BIT_REG_CRC_ERR_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_RECOV_CNT, 5, TPS6594_BIT_RECOV_CNT_INT), + + /* INT_SEVERE_ERR register */ + REGMAP_IRQ_REG(TPS65224_IRQ_TSD_IMM, 6, TPS6594_BIT_TSD_IMM_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_VCCA_OVP, 6, TPS6594_BIT_VCCA_OVP_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_PFSM_ERR, 6, TPS6594_BIT_PFSM_ERR_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_BG_XMON, 6, TPS65224_BIT_BG_XMON_INT), + + /* INT_FSM_ERR register */ + REGMAP_IRQ_REG(TPS65224_IRQ_IMM_SHUTDOWN, 7, TPS6594_BIT_IMM_SHUTDOWN_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_ORD_SHUTDOWN, 7, TPS6594_BIT_ORD_SHUTDOWN_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_MCU_PWR_ERR, 7, TPS6594_BIT_MCU_PWR_ERR_INT), + 
REGMAP_IRQ_REG(TPS65224_IRQ_SOC_PWR_ERR, 7, TPS6594_BIT_SOC_PWR_ERR_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_COMM_ERR, 7, TPS6594_BIT_COMM_ERR_INT), + REGMAP_IRQ_REG(TPS65224_IRQ_I2C2_ERR, 7, TPS65224_BIT_I2C2_ERR_INT), +}; + static inline unsigned int tps6594_get_irq_reg(struct regmap_irq_chip_data *data, unsigned int base, int index) { @@ -443,7 +503,7 @@ static int tps6594_handle_post_irq(void *irq_drv_data) * a new interrupt. */ if (tps->use_crc) { - if (tps->chip_id == TPS65224) { + if (tps->chip_id == TPS65224 || tps->chip_id == TPS652G1) { regmap_reg = TPS6594_REG_INT_FSM_ERR; mask_val = TPS6594_BIT_COMM_ERR_INT; } else { @@ -481,6 +541,18 @@ static struct regmap_irq_chip tps65224_irq_chip = { .handle_post_irq = tps6594_handle_post_irq, }; +static struct regmap_irq_chip tps652g1_irq_chip = { + .ack_base = TPS6594_REG_INT_BUCK, + .ack_invert = 1, + .clear_ack = 1, + .init_ack_masked = 1, + .num_regs = ARRAY_SIZE(tps65224_irq_reg), + .irqs = tps652g1_irqs, + .num_irqs = ARRAY_SIZE(tps652g1_irqs), + .get_irq_reg = tps65224_get_irq_reg, + .handle_post_irq = tps6594_handle_post_irq, +}; + static const struct regmap_range tps6594_volatile_ranges[] = { regmap_reg_range(TPS6594_REG_INT_TOP, TPS6594_REG_STAT_READBACK_ERR), regmap_reg_range(TPS6594_REG_RTC_STATUS, TPS6594_REG_RTC_STATUS), @@ -507,7 +579,7 @@ static int tps6594_check_crc_mode(struct tps6594 *tps, bool primary_pmic) int ret; unsigned int regmap_reg, mask_val; - if (tps->chip_id == TPS65224) { + if (tps->chip_id == TPS65224 || tps->chip_id == TPS652G1) { regmap_reg = TPS6594_REG_CONFIG_2; mask_val = TPS65224_BIT_I2C1_SPI_CRC_EN; } else { @@ -537,7 +609,7 @@ static int tps6594_set_crc_feature(struct tps6594 *tps) int ret; unsigned int regmap_reg, mask_val; - if (tps->chip_id == TPS65224) { + if (tps->chip_id == TPS65224 || tps->chip_id == TPS652G1) { regmap_reg = TPS6594_REG_CONFIG_2; mask_val = TPS65224_BIT_I2C1_SPI_CRC_EN; } else { @@ -628,6 +700,10 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc) irq_chip = &tps65224_irq_chip; n_cells = ARRAY_SIZE(tps65224_common_cells); cells = tps65224_common_cells; + } else if (tps->chip_id == TPS652G1) { + irq_chip = &tps652g1_irq_chip; + n_cells = ARRAY_SIZE(tps652g1_common_cells); + cells = tps652g1_common_cells; } else { irq_chip = &tps6594_irq_chip; n_cells = ARRAY_SIZE(tps6594_common_cells); @@ -651,8 +727,8 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc) if (ret) return dev_err_probe(dev, ret, "Failed to add common child devices\n"); - /* No RTC for LP8764 and TPS65224 */ - if (tps->chip_id != LP8764 && tps->chip_id != TPS65224) { + /* No RTC for LP8764, TPS65224 and TPS652G1 */ + if (tps->chip_id != LP8764 && tps->chip_id != TPS65224 && tps->chip_id != TPS652G1) { ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, tps6594_rtc_cells, ARRAY_SIZE(tps6594_rtc_cells), NULL, 0, regmap_irq_get_domain(tps->irq_data)); diff --git a/drivers/mfd/tps6594-i2c.c b/drivers/mfd/tps6594-i2c.c index 4ab91c34d9fb..7ff7516286fd 100644 --- a/drivers/mfd/tps6594-i2c.c +++ b/drivers/mfd/tps6594-i2c.c @@ -1,6 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* - * I2C access driver for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs + * I2C access driver for the following TI PMICs: + * - LP8764 + * - TPS65224 + * - TPS652G1 + * - TPS6593 + * - TPS6594 * * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/ */ @@ -197,6 +202,7 @@ static const struct of_device_id tps6594_i2c_of_match_table[] = { { .compatible = "ti,tps6593-q1", .data = (void *)TPS6593, }, { .compatible = 
"ti,lp8764-q1", .data = (void *)LP8764, }, { .compatible = "ti,tps65224-q1", .data = (void *)TPS65224, }, + { .compatible = "ti,tps652g1", .data = (void *)TPS652G1, }, {} }; MODULE_DEVICE_TABLE(of, tps6594_i2c_of_match_table); @@ -222,7 +228,7 @@ static int tps6594_i2c_probe(struct i2c_client *client) return dev_err_probe(dev, -EINVAL, "Failed to find matching chip ID\n"); tps->chip_id = (unsigned long)match->data; - if (tps->chip_id == TPS65224) + if (tps->chip_id == TPS65224 || tps->chip_id == TPS652G1) tps6594_i2c_regmap_config.volatile_table = &tps65224_volatile_table; tps->regmap = devm_regmap_init(dev, NULL, client, &tps6594_i2c_regmap_config); diff --git a/drivers/mfd/tps6594-spi.c b/drivers/mfd/tps6594-spi.c index 6ebccb79f0cc..944b7313a1d9 100644 --- a/drivers/mfd/tps6594-spi.c +++ b/drivers/mfd/tps6594-spi.c @@ -1,6 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* - * SPI access driver for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs + * SPI access driver for the following TI PMICs: + * - LP8764 + * - TPS65224 + * - TPS652G1 + * - TPS6593 + * - TPS6594 * * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/ */ @@ -82,6 +87,7 @@ static const struct of_device_id tps6594_spi_of_match_table[] = { { .compatible = "ti,tps6593-q1", .data = (void *)TPS6593, }, { .compatible = "ti,lp8764-q1", .data = (void *)LP8764, }, { .compatible = "ti,tps65224-q1", .data = (void *)TPS65224, }, + { .compatible = "ti,tps652g1", .data = (void *)TPS652G1, }, {} }; MODULE_DEVICE_TABLE(of, tps6594_spi_of_match_table); @@ -107,7 +113,7 @@ static int tps6594_spi_probe(struct spi_device *spi) return dev_err_probe(dev, -EINVAL, "Failed to find matching chip ID\n"); tps->chip_id = (unsigned long)match->data; - if (tps->chip_id == TPS65224) + if (tps->chip_id == TPS65224 || tps->chip_id == TPS652G1) tps6594_spi_regmap_config.volatile_table = &tps65224_volatile_table; tps->regmap = devm_regmap_init(dev, NULL, spi, &tps6594_spi_regmap_config); diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index ef03d6cec9ff..fc2daffc4352 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/slab.h> #include <linux/stat.h> @@ -37,22 +38,34 @@ /* The sysreg block is just a random collection of various functions... 
*/ -static struct bgpio_pdata vexpress_sysreg_sys_led_pdata = { - .label = "sys_led", - .base = -1, - .ngpio = 8, +static const struct property_entry vexpress_sysreg_sys_led_props[] = { + PROPERTY_ENTRY_STRING("label", "sys_led"), + PROPERTY_ENTRY_U32("ngpios", 8), + { } }; -static struct bgpio_pdata vexpress_sysreg_sys_mci_pdata = { - .label = "sys_mci", - .base = -1, - .ngpio = 2, +static const struct software_node vexpress_sysreg_sys_led_swnode = { + .properties = vexpress_sysreg_sys_led_props, }; -static struct bgpio_pdata vexpress_sysreg_sys_flash_pdata = { - .label = "sys_flash", - .base = -1, - .ngpio = 1, +static const struct property_entry vexpress_sysreg_sys_mci_props[] = { + PROPERTY_ENTRY_STRING("label", "sys_mci"), + PROPERTY_ENTRY_U32("ngpios", 2), + { } +}; + +static const struct software_node vexpress_sysreg_sys_mci_swnode = { + .properties = vexpress_sysreg_sys_mci_props, +}; + +static const struct property_entry vexpress_sysreg_sys_flash_props[] = { + PROPERTY_ENTRY_STRING("label", "sys_flash"), + PROPERTY_ENTRY_U32("ngpios", 1), + { } +}; + +static const struct software_node vexpress_sysreg_sys_flash_swnode = { + .properties = vexpress_sysreg_sys_flash_props, }; static struct mfd_cell vexpress_sysreg_cells[] = { @@ -61,22 +74,19 @@ static struct mfd_cell vexpress_sysreg_cells[] = { .of_compatible = "arm,vexpress-sysreg,sys_led", .num_resources = 1, .resources = &DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"), - .platform_data = &vexpress_sysreg_sys_led_pdata, - .pdata_size = sizeof(vexpress_sysreg_sys_led_pdata), + .swnode = &vexpress_sysreg_sys_led_swnode, }, { .name = "basic-mmio-gpio", .of_compatible = "arm,vexpress-sysreg,sys_mci", .num_resources = 1, .resources = &DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"), - .platform_data = &vexpress_sysreg_sys_mci_pdata, - .pdata_size = sizeof(vexpress_sysreg_sys_mci_pdata), + .swnode = &vexpress_sysreg_sys_mci_swnode, }, { .name = "basic-mmio-gpio", .of_compatible = "arm,vexpress-sysreg,sys_flash", .num_resources = 1, .resources = &DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"), - .platform_data = &vexpress_sysreg_sys_flash_pdata, - .pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata), + .swnode = &vexpress_sysreg_sys_flash_swnode, }, { .name = "vexpress-syscfg", .num_resources = 1, diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 39468bd27b85..03ebe33185f9 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -8,7 +8,7 @@ lkdtm-$(CONFIG_LKDTM) += perms.o lkdtm-$(CONFIG_LKDTM) += refcount.o lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += usercopy.o -lkdtm-$(CONFIG_LKDTM) += stackleak.o +lkdtm-$(CONFIG_LKDTM) += kstack_erase.o lkdtm-$(CONFIG_LKDTM) += cfi.o lkdtm-$(CONFIG_LKDTM) += fortify.o lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/kstack_erase.c index f1d022160913..4fd9b0bfb874 100644 --- a/drivers/misc/lkdtm/stackleak.c +++ b/drivers/misc/lkdtm/kstack_erase.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * This code tests that the current task stack is properly erased (filled - * with STACKLEAK_POISON). + * with KSTACK_ERASE_POISON). * * Authors: * Alexander Popov <alex.popov@linux.com> @@ -9,9 +9,9 @@ */ #include "lkdtm.h" -#include <linux/stackleak.h> +#include <linux/kstack_erase.h> -#if defined(CONFIG_GCC_PLUGIN_STACKLEAK) +#if defined(CONFIG_KSTACK_ERASE) /* * Check that stackleak tracks the lowest stack pointer and erases the stack * below this as expected. 
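As a simplified illustration of the invariant this renamed test checks (the helper below is invented here and only assumes the KSTACK_ERASE_POISON definition from <linux/kstack_erase.h> introduced above; the real check in the hunks that follow walks downward from the tracked lowest stack pointer):

#include <linux/kstack_erase.h>
#include <linux/types.h>

/* True if every word in [lo, hi) still holds the erase poison value. */
static bool example_range_is_poisoned(const unsigned long *lo,
				      const unsigned long *hi)
{
	for (; lo < hi; lo++)
		if (*lo != KSTACK_ERASE_POISON)
			return false;
	return true;
}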
@@ -85,7 +85,7 @@ static void noinstr check_stackleak_irqoff(void) while (poison_low > task_stack_low) { poison_low -= sizeof(unsigned long); - if (*(unsigned long *)poison_low == STACKLEAK_POISON) + if (*(unsigned long *)poison_low == KSTACK_ERASE_POISON) continue; instrumentation_begin(); @@ -96,7 +96,7 @@ static void noinstr check_stackleak_irqoff(void) } instrumentation_begin(); - pr_info("stackleak stack usage:\n" + pr_info("kstack erase stack usage:\n" " high offset: %lu bytes\n" " current: %lu bytes\n" " lowest: %lu bytes\n" @@ -121,7 +121,7 @@ out: instrumentation_end(); } -static void lkdtm_STACKLEAK_ERASING(void) +static void lkdtm_KSTACK_ERASE(void) { unsigned long flags; @@ -129,19 +129,19 @@ static void lkdtm_STACKLEAK_ERASING(void) check_stackleak_irqoff(); local_irq_restore(flags); } -#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */ -static void lkdtm_STACKLEAK_ERASING(void) +#else /* defined(CONFIG_KSTACK_ERASE) */ +static void lkdtm_KSTACK_ERASE(void) { - if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) { - pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n"); + if (IS_ENABLED(CONFIG_HAVE_ARCH_KSTACK_ERASE)) { + pr_err("XFAIL: stackleak is not enabled (CONFIG_KSTACK_ERASE=n)\n"); } else { - pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_STACKLEAK=n)\n"); + pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_KSTACK_ERASE=n)\n"); } } -#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */ +#endif /* defined(CONFIG_KSTACK_ERASE) */ static struct crashtype crashtypes[] = { - CRASHTYPE(STACKLEAK_ERASING), + CRASHTYPE(KSTACK_ERASE), }; struct crashtype_category stackleak_crashtypes = { diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c index f7cde245ac95..f84cae50e2c9 100644 --- a/drivers/misc/ti_fpc202.c +++ b/drivers/misc/ti_fpc202.c @@ -118,20 +118,17 @@ static void fpc202_set_enable(struct fpc202_priv *priv, int enable) gpiod_set_value(priv->en_gpio, enable); } -static void fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct fpc202_priv *priv = gpiochip_get_data(chip); int ret; u8 val; - if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN) - return; - ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B_VAL); if (ret < 0) { dev_err(&priv->client->dev, "Failed to set GPIO %d value! 
err %d\n", offset, ret); - return; + return ret; } val = (u8)ret; @@ -141,7 +138,7 @@ static void fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset, else val &= ~BIT(offset - FPC202_GPIO_P0_S0_OUT_A); - fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val); + return fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val); } static int fpc202_gpio_get(struct gpio_chip *chip, unsigned int offset) @@ -336,7 +333,7 @@ static int fpc202_probe(struct i2c_client *client) priv->gpio.base = -1; priv->gpio.direction_input = fpc202_gpio_direction_input; priv->gpio.direction_output = fpc202_gpio_direction_output; - priv->gpio.set = fpc202_gpio_set; + priv->gpio.set_rv = fpc202_gpio_set; priv->gpio.get = fpc202_gpio_get; priv->gpio.ngpio = FPC202_GPIO_COUNT; priv->gpio.parent = dev; diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c index 6db1c9d48f8f..44fa81d6cec2 100644 --- a/drivers/misc/tps6594-pfsm.c +++ b/drivers/misc/tps6594-pfsm.c @@ -1,6 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* - * PFSM (Pre-configurable Finite State Machine) driver for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs + * PFSM (Pre-configurable Finite State Machine) driver for the following + * PMICs: + * - LP8764 + * - TPS65224 + * - TPS652G1 + * - TPS6594 + * - TPS6593 * * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/ */ @@ -141,7 +147,7 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a switch (cmd) { case PMIC_GOTO_STANDBY: /* Disable LP mode on TPS6594 Family PMIC */ - if (pfsm->chip_id != TPS65224) { + if (pfsm->chip_id != TPS65224 && pfsm->chip_id != TPS652G1) { ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2, TPS6594_BIT_LP_STANDBY_SEL); @@ -154,8 +160,8 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0)); break; case PMIC_GOTO_LP_STANDBY: - /* TPS65224 does not support LP STANDBY */ - if (pfsm->chip_id == TPS65224) + /* TPS65224/TPS652G1 does not support LP STANDBY */ + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) return ret; /* Enable LP mode */ @@ -179,8 +185,8 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a TPS6594_BIT_NSLEEP1B | TPS6594_BIT_NSLEEP2B); break; case PMIC_SET_MCU_ONLY_STATE: - /* TPS65224 does not support MCU_ONLY_STATE */ - if (pfsm->chip_id == TPS65224) + /* TPS65224/TPS652G1 does not support MCU_ONLY_STATE */ + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) return ret; if (copy_from_user(&state_opt, argp, sizeof(state_opt))) @@ -206,7 +212,7 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a return -EFAULT; /* Configure wake-up destination */ - if (pfsm->chip_id == TPS65224) { + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) { regmap_reg = TPS65224_REG_STARTUP_CTRL; mask = TPS65224_MASK_STARTUP_DEST; } else { @@ -230,9 +236,14 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a return ret; /* Modify NSLEEP1-2 bits */ - ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS, - pfsm->chip_id == TPS65224 ? 
- TPS6594_BIT_NSLEEP1B : TPS6594_BIT_NSLEEP2B); + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) + ret = regmap_clear_bits(pfsm->regmap, + TPS6594_REG_FSM_NSLEEP_TRIGGERS, + TPS6594_BIT_NSLEEP1B); + else + ret = regmap_clear_bits(pfsm->regmap, + TPS6594_REG_FSM_NSLEEP_TRIGGERS, + TPS6594_BIT_NSLEEP2B); break; } diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index b66b637e2d57..656601754966 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -161,7 +161,7 @@ static int sdio_bus_probe(struct device *dev) if (!id) return -ENODEV; - ret = dev_pm_domain_attach(dev, false); + ret = dev_pm_domain_attach(dev, 0); if (ret) return ret; diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 1003cf118c01..4dd6f1a4e797 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -1379,7 +1379,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); int cwperpage, bad_block_byte, ret; bool wide_bus; - int ecc_mode = 1; + int ecc_mode = ECC_MODE_8BIT; /* controller only supports 512 bytes data steps */ ecc->size = NANDC_STEP_SIZE; @@ -1400,7 +1400,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) if (ecc->strength >= 8) { /* 8 bit ECC defaults to BCH ECC on all platforms */ host->bch_enabled = true; - ecc_mode = 1; + ecc_mode = ECC_MODE_8BIT; if (wide_bus) { host->ecc_bytes_hw = 14; @@ -1420,7 +1420,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) if (nandc->props->ecc_modes & ECC_BCH_4BIT) { /* BCH */ host->bch_enabled = true; - ecc_mode = 0; + ecc_mode = ECC_MODE_4BIT; if (wide_bus) { host->ecc_bytes_hw = 8; diff --git a/drivers/mux/core.c b/drivers/mux/core.c index 02be4ba37257..a3840fe0995f 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -98,13 +98,12 @@ struct mux_chip *mux_chip_alloc(struct device *dev, if (WARN_ON(!dev || !controllers)) return ERR_PTR(-EINVAL); - mux_chip = kzalloc(sizeof(*mux_chip) + - controllers * sizeof(*mux_chip->mux) + - sizeof_priv, GFP_KERNEL); + mux_chip = kzalloc(size_add(struct_size(mux_chip, mux, controllers), + sizeof_priv), + GFP_KERNEL); if (!mux_chip) return ERR_PTR(-ENOMEM); - mux_chip->mux = (struct mux_control *)(mux_chip + 1); mux_chip->dev.class = &mux_class; mux_chip->dev.type = &mux_type; mux_chip->dev.parent = dev; diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index ea8c807af4d8..3913971125de 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -145,13 +145,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, EXPORT_SYMBOL_GPL(can_change_state); /* CAN device restart for bus-off recovery */ -static void can_restart(struct net_device *dev) +static int can_restart(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; int err; + if (!priv->do_set_mode) + return -EOPNOTSUPP; + if (netif_carrier_ok(dev)) netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); @@ -173,10 +176,14 @@ static void can_restart(struct net_device *dev) if (err) { netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err)); netif_carrier_off(dev); + + return err; } else { netdev_dbg(dev, "Restarted\n"); priv->can_stats.restarts++; } + + return 0; } static void can_restart_work(struct work_struct *work) @@ -201,9 +208,8 @@ int can_restart_now(struct net_device *dev) return -EBUSY; 
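The mux core hunk above replaces open-coded size arithmetic with the overflow-checked helpers from <linux/overflow.h>, which also lets the trailing controllers become a flexible array member instead of a hand-computed pointer. A minimal sketch of the same allocation pattern, using a hypothetical structure:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_control {
	int state;
};

struct demo_chip {
	unsigned int ncontrollers;
	struct demo_control mux[] __counted_by(ncontrollers);
};

static struct demo_chip *demo_chip_alloc(unsigned int controllers,
					 size_t sizeof_priv)
{
	struct demo_chip *chip;

	/*
	 * struct_size() covers the header plus the flexible array and
	 * size_add() folds in the private area; both saturate rather
	 * than wrap on overflow, so a huge 'controllers' value makes
	 * the allocation fail instead of under-allocating.
	 */
	chip = kzalloc(size_add(struct_size(chip, mux, controllers),
				sizeof_priv), GFP_KERNEL);
	if (!chip)
		return NULL;

	chip->ncontrollers = controllers;	/* keep __counted_by() honest */
	return chip;
}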
cancel_delayed_work_sync(&priv->restart_work); - can_restart(dev); - return 0; + return can_restart(dev); } /* CAN bus-off diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index a36842ace084..f0e3f0d538fb 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -285,6 +285,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], } if (data[IFLA_CAN_RESTART_MS]) { + if (!priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + /* Do not allow changing restart delay while running */ if (dev->flags & IFF_UP) return -EBUSY; @@ -292,6 +298,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], } if (data[IFLA_CAN_RESTART]) { + if (!priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + /* Do not allow a restart while not running */ if (!(dev->flags & IFF_UP)) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 0d61b8580d72..f832562bd7a3 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -818,6 +818,9 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf) /* Tx SPB */ tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT), TX_SPB_CTRL_XF_CTRL2); + + if (intf->parent->tx_chan_offset) + tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR); tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT); tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index b82f121cadad..0f4efd505332 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4666,12 +4666,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) return PTR_ERR(dpmac_dev); } - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR(dpmac_dev)) return 0; + if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) { + err = 0; + goto out_put_device; + } + mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL); - if (!mac) - return -ENOMEM; + if (!mac) { + err = -ENOMEM; + goto out_put_device; + } mac->mc_dev = dpmac_dev; mac->mc_io = priv->mc_io; @@ -4705,6 +4712,8 @@ err_close_mac: dpaa2_mac_close(mac); err_free_mac: kfree(mac); +out_put_device: + put_device(&dpmac_dev->dev); return err; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 147a93bf9fa9..4643a3380618 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1448,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) return PTR_ERR(dpmac_dev); - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR(dpmac_dev)) return 0; + if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) { + err = 0; + goto out_put_device; + } + mac = kzalloc(sizeof(*mac), GFP_KERNEL); - if (!mac) - return -ENOMEM; + if (!mac) { + err = -ENOMEM; + goto out_put_device; + } mac->mc_dev = dpmac_dev; mac->mc_io = port_priv->ethsw_data->mc_io; @@ -1483,6 +1490,8 @@ err_close_mac: dpaa2_mac_close(mac); err_free_mac: kfree(mac); +out_put_device: + 
put_device(&dpmac_dev->dev); return err; } diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index dc35a23ec47f..d1aeb722d48f 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1917,49 +1917,56 @@ static void gve_turnup_and_check_status(struct gve_priv *priv) gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); } -static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) +static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv, + unsigned int txqueue) { - struct gve_notify_block *block; - struct gve_tx_ring *tx = NULL; - struct gve_priv *priv; - u32 last_nic_done; - u32 current_time; u32 ntfy_idx; - netdev_info(dev, "Timeout on tx queue, %d", txqueue); - priv = netdev_priv(dev); if (txqueue > priv->tx_cfg.num_queues) - goto reset; + return NULL; ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); if (ntfy_idx >= priv->num_ntfy_blks) - goto reset; + return NULL; + + return &priv->ntfy_blocks[ntfy_idx]; +} + +static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv, + unsigned int txqueue) +{ + struct gve_notify_block *block; + u32 current_time; - block = &priv->ntfy_blocks[ntfy_idx]; - tx = block->tx; + block = gve_get_tx_notify_block(priv, txqueue); + + if (!block) + return false; current_time = jiffies_to_msecs(jiffies); - if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) - goto reset; + if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) + return false; - /* Check to see if there are missed completions, which will allow us to - * kick the queue. - */ - last_nic_done = gve_tx_load_event_counter(priv, tx); - if (last_nic_done - tx->done) { - netdev_info(dev, "Kicking queue %d", txqueue); - iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); - napi_schedule(&block->napi); - tx->last_kick_msec = current_time; - goto out; - } // Else reset. 
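Both dpaa2 hunks above fix the same reference leak: the dpmac lookup hands back a device with an elevated refcount, so every early exit taken after the lookup must drop it, not only the failure paths that already existed. A minimal sketch of that goto-based unwind, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_mac {
	struct device *peer;
};

static int demo_connect_peer(struct device *peer)
{
	struct demo_mac *mac;
	int err = 0;

	/* 'peer' arrives with a reference taken by the lookup */
	if (!peer->type)		/* not the endpoint type we expected */
		goto out_put_device;

	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
	if (!mac) {
		err = -ENOMEM;
		goto out_put_device;
	}

	mac->peer = peer;		/* reference now owned by 'mac' */
	return 0;

out_put_device:
	put_device(peer);		/* balance the lookup's get_device() */
	return err;
}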
+ netdev_info(priv->dev, "Kicking queue %d", txqueue); + napi_schedule(&block->napi); + block->tx->last_kick_msec = current_time; + return true; +} -reset: - gve_schedule_reset(priv); +static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct gve_notify_block *block; + struct gve_priv *priv; -out: - if (tx) - tx->queue_timeout++; + netdev_info(dev, "Timeout on tx queue, %d", txqueue); + priv = netdev_priv(dev); + + if (!gve_tx_timeout_try_q_kick(priv, txqueue)) + gve_schedule_reset(priv); + + block = gve_get_tx_notify_block(priv, txqueue); + if (block) + block->tx->queue_timeout++; priv->tx_timeo_cnt++; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b03b8758c777..aaa803563bd2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -11,6 +11,7 @@ #include <linux/irq.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/iommu.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/skbuff.h> @@ -1039,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) { u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_tx_spare *tx_spare; struct page *page; dma_addr_t dma; @@ -1080,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) tx_spare->buf = page_address(page); tx_spare->len = PAGE_SIZE << order; ring->tx_spare = tx_spare; + ring->tx_copybreak = priv->tx_copybreak; return; dma_mapping_error: @@ -4874,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) devm_kfree(&pdev->dev, priv->tqp_vector); } +static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv) +{ +#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024) +#define HNS3_MAX_PACKET_SIZE (64 * 1024) + + struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev); + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); + struct hnae3_handle *handle = priv->ae_handle; + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) + return; + + if (!(domain && iommu_is_dma_domain(domain))) + return; + + priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE; + priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE; + + if (priv->tx_copybreak < priv->min_tx_copybreak) + priv->tx_copybreak = priv->min_tx_copybreak; + if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size) + handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size; +} + static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, unsigned int ring_type) { @@ -5107,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) int i, j; int ret; + hns3_update_tx_spare_buf_config(priv); for (i = 0; i < ring_num; i++) { ret = hns3_alloc_ring_memory(&priv->ring[i]); if (ret) { @@ -5311,6 +5340,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->ae_handle = handle; priv->tx_timeout_count = 0; priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; + priv->min_tx_copybreak = 0; + priv->min_tx_spare_buf_size = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d36c4ed16d8d..caf7a4df8585 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -596,6 +596,8 @@ struct hns3_nic_priv { struct hns3_enet_coalesce rx_coal; u32 tx_copybreak; u32 rx_copybreak; + u32 min_tx_copybreak; + u32 min_tx_spare_buf_size; }; union l3_hdr_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a7de67699a01..30bdec1acb57 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9576,33 +9576,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) return false; } -int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport, + bool request_en) { - struct hclge_dev *hdev = vport->back; bool need_en; int ret; - mutex_lock(&hdev->vport_lock); - - vport->req_vlan_fltr_en = request_en; - need_en = hclge_need_enable_vport_vlan_filter(vport); - if (need_en == vport->cur_vlan_fltr_en) { - mutex_unlock(&hdev->vport_lock); + if (need_en == vport->cur_vlan_fltr_en) return 0; - } ret = hclge_set_vport_vlan_filter(vport, need_en); - if (ret) { - mutex_unlock(&hdev->vport_lock); + if (ret) return ret; - } vport->cur_vlan_fltr_en = need_en; + return 0; +} + +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + vport->req_vlan_fltr_en = request_en; + ret = __hclge_enable_vport_vlan_filter(vport, request_en); mutex_unlock(&hdev->vport_lock); - return 0; + return ret; } static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) @@ -10623,16 +10626,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) &vport->state)) continue; - ret = hclge_enable_vport_vlan_filter(vport, - vport->req_vlan_fltr_en); + mutex_lock(&hdev->vport_lock); + ret = __hclge_enable_vport_vlan_filter(vport, + vport->req_vlan_fltr_en); if (ret) { dev_err(&hdev->pdev->dev, "failed to sync vlan filter state for vport%u, ret = %d\n", vport->vport_id, ret); set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); + mutex_unlock(&hdev->vport_lock); return; } + mutex_unlock(&hdev->vport_lock); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index ec581d4b696f..4bd52eab3914 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -497,14 +497,14 @@ int hclge_ptp_init(struct hclge_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed to init freq, ret = %d\n", ret); - goto out; + goto out_clear_int; } ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg); if (ret) { dev_err(&hdev->pdev->dev, "failed to init ts mode, ret = %d\n", ret); - goto out; + goto out_clear_int; } ktime_get_real_ts64(&ts); @@ -512,7 +512,7 @@ int hclge_ptp_init(struct hclge_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed to init ts time, ret = %d\n", ret); - goto out; + goto out_clear_int; } set_bit(HCLGE_STATE_PTP_EN, &hdev->state); @@ -520,6 +520,9 @@ int hclge_ptp_init(struct hclge_dev *hdev) return 0; +out_clear_int: + clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags); + hclge_ptp_int_en(hdev, false); out: hclge_ptp_destroy_clock(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index c4f35e8e2177..4cf6ac04662a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -3094,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) { - struct hnae3_handle *nic = &hdev->nic; - struct hnae3_knic_private_info *kinfo = &nic->kinfo; - - return min_t(u32, hdev->rss_size_max, - hdev->num_tqps / kinfo->tc_info.num_tc); + return min(hdev->rss_size_max, hdev->num_tqps); } /** diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 8294a7c4f122..ba331899d186 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -638,6 +638,9 @@ /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA +/* Uninitialized ("empty") checksum word value */ +#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF + /* PBA (printed board assembly) number words */ #define NVM_PBA_OFFSET_0 8 #define NVM_PBA_OFFSET_1 9 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 364378133526..df4e7d781cb1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -4274,6 +4274,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) ret_val = e1000e_update_nvm_checksum(hw); if (ret_val) return ret_val; + } else if (hw->mac.type == e1000_pch_tgp) { + return 0; } } diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index e609f4df86f4..16369e6d245a 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -558,6 +558,12 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) checksum += nvm_data; } + if (hw->mac.type == e1000_pch_tgp && + nvm_data == NVM_CHECKSUM_UNINITIALIZED) { + e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n"); + return 0; + } + if (checksum != (u16)NVM_SUM) { e_dbg("NVM Checksum Invalid\n"); return -E1000_ERR_NVM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 88e6bef69342..7ccfc1191ae5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3137,10 +3137,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) const u8 *addr = al->list[i].addr; /* Allow to delete VF primary MAC only if it was not set - * administratively by PF or if VF is trusted. + * administratively by PF. 
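The e1000e hunks above let TGP platforms come up with an unprogrammed NVM image. The rule they rely on, sketched below with a hypothetical helper, is that all NVM words up to and including the checksum word must sum to NVM_SUM (0xBABA); a checksum word still at its erased value 0xFFFF (the new NVM_CHECKSUM_UNINITIALIZED) marks an image whose checksum was never written, which is now tolerated on TGP only:

#include <linux/types.h>

static bool demo_nvm_checksum_ok(const u16 *words, size_t nwords, bool is_tgp)
{
	u16 sum = 0;
	size_t i;

	/* 'words' spans word 0 through the checksum word inclusive */
	for (i = 0; i < nwords; i++)
		sum += words[i];

	if (is_tgp && words[nwords - 1] == 0xFFFF)
		return true;	/* uninitialized checksum, accepted on TGP */

	return sum == 0xBABA;	/* NVM_SUM */
}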
*/ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { - if (i40e_can_vf_change_mac(vf)) + if (!vf->pf_set_mac) was_unimac_deleted = true; else continue; @@ -5006,7 +5006,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, vf_stats->broadcast = stats->rx_broadcast; vf_stats->multicast = stats->rx_multicast; vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; - vf_stats->tx_dropped = stats->tx_discards; + vf_stats->tx_dropped = stats->tx_errors; return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 59323c019544..351824dc3c62 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2301,6 +2301,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, return ICE_DDP_PKG_ERR; buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + if (!buf_copy) + return ICE_DDP_PKG_ERR; state = ice_init_pkg(hw, buf_copy, len); if (!ice_is_init_pkg_successful(state)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index b1aeea7c4a91..e395ef5f356e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1947,8 +1947,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, pages_queue, token, force_polling); - if (callback) - return err; + if (callback && !err) + return 0; if (err > 0) /* Failed in FW, command didn't execute */ err = deliv_status_to_err(err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0e3a977d5332..bee906661282 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1182,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw, static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_core_dev *peer_dev) { + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_handle **flows; - /* total vports is the same for both e-switches */ - int nvports = esw->total_vports; struct mlx5_flow_handle *flow; + struct mlx5_vport *peer_vport; struct mlx5_flow_spec *spec; - struct mlx5_vport *vport; int err, pfindex; unsigned long i; void *misc; - if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev)) + if (!MLX5_VPORT_MANAGER(peer_dev) && + !mlx5_core_is_ecpf_esw_manager(peer_dev)) return 0; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); @@ -1203,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, peer_miss_rules_setup(esw, peer_dev, spec, &dest); - flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); + flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL); if (!flows) { err = -ENOMEM; goto alloc_flows_err; @@ -1213,10 +1213,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, - spec, MLX5_VPORT_PF); + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = 
mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, + MLX5_VPORT_PF); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1224,11 +1224,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_pf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1236,13 +1236,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_ecpf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) { esw_set_peer_miss_rule_source_port(esw, - peer_dev->priv.eswitch, - spec, vport->vport); + peer_esw, + spec, peer_vport->vport); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1250,22 +1251,22 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_vf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - if (mlx5_core_ec_sriov_enabled(esw->dev)) { - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - if (i >= mlx5_core_max_ec_vfs(peer_dev)) - break; - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, - spec, vport->vport); + if (mlx5_core_ec_sriov_enabled(peer_dev)) { + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) { + esw_set_peer_miss_rule_source_port(esw, peer_esw, + spec, + peer_vport->vport); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow)) { err = PTR_ERR(flow); goto add_ec_vf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } } @@ -1282,25 +1283,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, return 0; add_ec_vf_flow_err: - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - if (!flows[vport->index]) + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) { + if (!flows[peer_vport->index]) continue; - mlx5_del_flow_rules(flows[vport->index]); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_vf_flow_err: - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { - if (!flows[vport->index]) + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) { + if (!flows[peer_vport->index]) continue; - mlx5_del_flow_rules(flows[vport->index]); + mlx5_del_flow_rules(flows[peer_vport->index]); } - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_ecpf_flow_err: - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - mlx5_del_flow_rules(flows[vport->index]); + if 
(mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_pf_flow_err: esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); @@ -1313,37 +1316,34 @@ alloc_flows_err: static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_core_dev *peer_dev) { + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; u16 peer_index = mlx5_get_dev_index(peer_dev); struct mlx5_flow_handle **flows; - struct mlx5_vport *vport; + struct mlx5_vport *peer_vport; unsigned long i; flows = esw->fdb_table.offloads.peer_miss_rules[peer_index]; if (!flows) return; - if (mlx5_core_ec_sriov_enabled(esw->dev)) { - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - /* The flow for a particular vport could be NULL if the other ECPF - * has fewer or no VFs enabled - */ - if (!flows[vport->index]) - continue; - mlx5_del_flow_rules(flows[vport->index]); - } + if (mlx5_core_ec_sriov_enabled(peer_dev)) { + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) + mlx5_del_flow_rules(flows[peer_vport->index]); } - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) - mlx5_del_flow_rules(flows[vport->index]); + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) + mlx5_del_flow_rules(flows[peer_vport->index]); - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[peer_vport->index]); } - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[peer_vport->index]); } kvfree(flows); diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index ddfd1c02a885..da53eb04b0a4 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -288,8 +288,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac) int i; addr = lower_32_bits(prueth->msmcram.pa); - if (slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + if (slice) { + if (prueth->pdata.banked_ms_ram) + addr += MSMC_RAM_BANK_SIZE; + else + addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE; + } if (addr % SZ_64K) { dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); @@ -297,43 +301,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac) } bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; - /* workaround for f/w bug. 
bpool 0 needs to be initialized */ - for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) { + + /* Configure buffer pools for forwarding buffers + * - used by firmware to store packets to be forwarded to other port + * - 8 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) { writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); - addr += PRUETH_EMAC_BUF_POOL_SIZE; + writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len); + addr += PRUETH_SW_FWD_BUF_POOL_SIZE; } - if (!slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; - else - addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; - - for (i = PRUETH_NUM_BUF_POOLS; - i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS; - i++) { - /* The driver only uses first 4 queues per PRU so only initialize them */ - if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) { - writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len); - addr += PRUETH_SW_BUF_POOL_SIZE_HOST; + /* Configure buffer pools for Local Injection buffers + * - used by firmware to store packets received from host core + * - 16 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) { + int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; + + /* The driver only uses first 4 queues per PRU, + * so only initialize buffer for them + */ + if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE) + < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) { + writel(addr, &bpool_cfg[cfg_idx].addr); + writel(PRUETH_SW_LI_BUF_POOL_SIZE, + &bpool_cfg[cfg_idx].len); + addr += PRUETH_SW_LI_BUF_POOL_SIZE; } else { - writel(0, &bpool_cfg[i].addr); - writel(0, &bpool_cfg[i].len); + writel(0, &bpool_cfg[cfg_idx].addr); + writel(0, &bpool_cfg[cfg_idx].len); } } - if (!slice) - addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; - else - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + /* Express RX buffer queue + * - used by firmware to store express packets to be transmitted + * to the host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + + addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE; + writel(addr, &rxq_ctx->end); + /* Pre-emptible RX buffer queue + * - used by firmware to store preemptible packets to be transmitted + * to the host core + */ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; - writel(addr - SZ_2K, &rxq_ctx->end); + addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE; + writel(addr, &rxq_ctx->end); + + /* Set pointer for default dropped packet write + * - used by firmware to temporarily store packet to be dropped + */ + rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET; + writel(addr, &rxq_ctx->start[0]); return 0; } @@ -347,13 +374,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac) u32 addr; int i; - /* Layout to have 64KB aligned buffer pool - * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1| - */ - addr = lower_32_bits(prueth->msmcram.pa); - if (slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + if (slice) { + if (prueth->pdata.banked_ms_ram) + addr += MSMC_RAM_BANK_SIZE; + else + addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE; + } if (addr % SZ_64K) { dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); @@ -361,39 +388,66 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac) } bpool_cfg = 
emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; - /* workaround for f/w bug. bpool 0 needs to be initilalized */ - writel(addr, &bpool_cfg[0].addr); - writel(0, &bpool_cfg[0].len); - for (i = PRUETH_EMAC_BUF_POOL_START; - i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS; - i++) { - writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); - addr += PRUETH_EMAC_BUF_POOL_SIZE; + /* Configure buffer pools for forwarding buffers + * - in mac mode - no forwarding so initialize all pools to 0 + * - 8 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) { + writel(0, &bpool_cfg[i].addr); + writel(0, &bpool_cfg[i].len); } - if (!slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; - else - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2; + /* Configure buffer pools for Local Injection buffers + * - used by firmware to store packets received from host core + * - 16 total pools per slice + */ + bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; + for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) { + int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; + + /* In EMAC mode, only first 4 buffers are used, + * as 1 slice needs to handle only 1 port + */ + if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) { + writel(addr, &bpool_cfg[cfg_idx].addr); + writel(PRUETH_EMAC_LI_BUF_POOL_SIZE, + &bpool_cfg[cfg_idx].len); + addr += PRUETH_EMAC_LI_BUF_POOL_SIZE; + } else { + writel(0, &bpool_cfg[cfg_idx].addr); + writel(0, &bpool_cfg[cfg_idx].len); + } + } - /* Pre-emptible RX buffer queue */ - rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; + /* Express RX buffer queue + * - used by firmware to store express packets to be transmitted + * to host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE; writel(addr, &rxq_ctx->end); - /* Express RX buffer queue */ - rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; + /* Pre-emptible RX buffer queue + * - used by firmware to store preemptible packets to be transmitted + * to host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE; writel(addr, &rxq_ctx->end); + /* Set pointer for default dropped packet write + * - used by firmware to temporarily store packet to be dropped + */ + rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET; + writel(addr, &rxq_ctx->start[0]); + return 0; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index c884e9fa099e..60d69744ffae 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -26,21 +26,71 @@ struct icssg_flow_cfg { #define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */ #define PRUETH_RX_FLOW_DATA 0 -#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K -#define PRUETH_EMAC_POOLS_PER_SLICE 24 -#define PRUETH_EMAC_BUF_POOL_START 8 -#define PRUETH_NUM_BUF_POOLS 8 -#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */ -#define MSMC_RAM_SIZE \ - (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \ - PRUETH_EMAC_RX_CTX_BUF_SIZE * 2)) - -#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K -#define PRUETH_SW_NUM_BUF_POOLS_HOST 8 -#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4 -#define MSMC_RAM_SIZE_SWITCH_MODE \ - (MSMC_RAM_SIZE + 
\ - (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST)) +/* Defines for forwarding path buffer pools: + * - used by firmware to store packets to be forwarded to other port + * - 8 total pools per slice + * - only used in switch mode (as no forwarding in mac mode) + */ +#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8 +#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K) + +/* Defines for local injection path buffer pools: + * - used by firmware to store packets received from host core + * - 16 total pools per slice + * - 8 pools per port per slice and each slice handles both ports + * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG) + * - switch mode: 8 total pools used + * - mac mode: 4 total pools used + */ +#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16 +#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8 +#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K +#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8 +#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4 +#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K +#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4 +#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4 + +/* Defines for host egress path - express and preemptible buffers + * - used by firmware to store express and preemptible packets + * to be transmitted to host core + * - used by both mac/switch modes + */ +#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K +#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K) +#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE +#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + +/* Buffer used by firmware to temporarily store packet to be dropped */ +#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K +#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE + +/* Total switch mode memory usage for buffers per slice */ +#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \ + (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \ + PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \ + PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \ + PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \ + PRUETH_SW_DROP_PKT_BUF_SIZE) + +/* Total switch mode memory usage for all buffers */ +#define PRUETH_SW_TOTAL_BUF_SIZE \ + (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE) + +/* Total mac mode memory usage for buffers per slice */ +#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \ + (PRUETH_EMAC_LI_BUF_POOL_SIZE * \ + PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \ + PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \ + PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \ + PRUETH_EMAC_DROP_PKT_BUF_SIZE) + +/* Total mac mode memory usage for all buffers */ +#define PRUETH_EMAC_TOTAL_BUF_SIZE \ + (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE) + +/* Size of 1 bank of MSMC/OC_SRAM memory */ +#define MSMC_RAM_BANK_SIZE SZ_256K #define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1) diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 86fc1278127c..2f5c4335dec3 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1764,10 +1764,15 @@ static int prueth_probe(struct platform_device *pdev) goto put_mem; } - msmc_ram_size = MSMC_RAM_SIZE; prueth->is_switchmode_supported = prueth->pdata.switch_mode; - if (prueth->is_switchmode_supported) - msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE; + if (prueth->pdata.banked_ms_ram) { + /* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */ + msmc_ram_size = (2 * 
MSMC_RAM_BANK_SIZE); + } else { + msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE; + if (prueth->is_switchmode_supported) + msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE; + } /* NOTE: FW bug needs buffer base to be 64KB aligned */ prueth->msmcram.va = @@ -1924,7 +1929,8 @@ put_iep0: free_pool: gen_pool_free(prueth->sram_pool, - (unsigned long)prueth->msmcram.va, msmc_ram_size); + (unsigned long)prueth->msmcram.va, + prueth->msmcram.size); put_mem: pruss_release_mem_region(prueth->pruss, &prueth->shram); @@ -1976,8 +1982,8 @@ static void prueth_remove(struct platform_device *pdev) icss_iep_put(prueth->iep0); gen_pool_free(prueth->sram_pool, - (unsigned long)prueth->msmcram.va, - MSMC_RAM_SIZE); + (unsigned long)prueth->msmcram.va, + prueth->msmcram.size); pruss_release_mem_region(prueth->pruss, &prueth->shram); @@ -1994,12 +2000,14 @@ static const struct prueth_pdata am654_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, .quirk_10m_link_issue = 1, .switch_mode = 1, + .banked_ms_ram = 0, }; static const struct prueth_pdata am64x_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_RING, .quirk_10m_link_issue = 1, .switch_mode = 1, + .banked_ms_ram = 1, }; static const struct of_device_id prueth_dt_match[] = { diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 23c465f1ce7f..3bb9fd8c3804 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -251,11 +251,13 @@ struct prueth_emac { * @fdqring_mode: Free desc queue mode * @quirk_10m_link_issue: 10M link detect errata * @switch_mode: switch firmware support + * @banked_ms_ram: banked memory support */ struct prueth_pdata { enum k3_ring_mode fdqring_mode; u32 quirk_10m_link_issue:1; u32 switch_mode:1; + u32 banked_ms_ram:1; }; struct icssg_firmwares { diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h index 490a9cc06fb0..7e053b8af3ec 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h +++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h @@ -180,6 +180,9 @@ /* Used to notify the FW of the current link speed */ #define PORT_LINK_SPEED_OFFSET 0x00A8 +/* 2k memory pointer reserved for default writes by PRU0*/ +#define DEFAULT_MSMC_Q_OFFSET 0x00AC + /* TAS gate mask for windows list0 */ #define TAS_GATE_MASK_LIST0 0x0100 diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 423dcd190906..2a1aa32e6693 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1506,7 +1506,7 @@ static int btt_blk_init(struct btt *btt) int rc; if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { - lim.integrity.tuple_size = btt_meta_size(btt); + lim.integrity.metadata_size = btt_meta_size(btt); lim.integrity.tag_size = btt_meta_size(btt); } diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index b1fddfa33ab9..1286c31320e6 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -301,8 +301,8 @@ static void apple_nvme_submit_cmd(struct apple_nvme_queue *q, memcpy(&q->sqes[tag], cmd, sizeof(*cmd)); /* - * This lock here doesn't make much sense at a first glace but - * removing it will result in occasional missed completetion + * This lock here doesn't make much sense at a first glance but + * removing it will result in occasional missed completion * interrupts even though the commands still appear on the CQ. 
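As a cross-check of the buffer layout encoded by the new icssg_config.h macros (a back-of-the-envelope calculation, not part of the patch): one switch-mode slice needs 8 x 8K forwarding pools + 8 x 4K local-injection pools + 16K express RX + 14K preemptible RX + 2K drop buffer = 128K, i.e. 256K for both slices in the contiguous (non-banked) layout; mac mode needs 4 x 8K + 16K + 14K + 2K = 64K per slice, 128K total. If dropped next to icssg_config.h, the sums could be pinned down with compile-time checks:

#include <linux/build_bug.h>
#include <linux/sizes.h>
#include "icssg_config.h"

/* switch mode: 8 x 8K fwd + 8 x 4K LI + 16K exp RX + 14K pre RX + 2K drop */
static_assert(PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE == SZ_128K);
static_assert(PRUETH_SW_TOTAL_BUF_SIZE == SZ_256K);

/* mac mode: 4 x 8K LI + 16K exp RX + 14K pre RX + 2K drop */
static_assert(PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE == SZ_64K);
static_assert(PRUETH_EMAC_TOTAL_BUF_SIZE == SZ_128K);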
* It's unclear why this happens but our best guess is that * there is a bug in the firmware triggered when a new command diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 1a0058be5821..dc90df9e13a2 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -133,7 +133,7 @@ static const char * const nvme_statuses[] = { [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached", [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported", [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid", - [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress", + [NVME_SC_SELF_TEST_IN_PROGRESS] = "Device Self-test In Progress", [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited", [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier", [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State", @@ -145,7 +145,7 @@ static const char * const nvme_statuses[] = { [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes", [NVME_SC_INVALID_PI] = "Invalid Protection Information", [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range", - [NVME_SC_CMD_SIZE_LIM_EXCEEDED ] = "Command Size Limits Exceeded", + [NVME_SC_CMD_SIZE_LIM_EXCEEDED] = "Command Size Limits Exceeded", [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error", [NVME_SC_ZONE_FULL] = "Zone Is Full", [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only", diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 895fb163d48e..9d988f4cb87a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1870,8 +1870,11 @@ static bool nvme_init_integrity(struct nvme_ns_head *head, break; } - bi->tuple_size = head->ms; - bi->pi_offset = info->pi_offset; + bi->metadata_size = head->ms; + if (bi->csum_type) { + bi->pi_tuple_size = head->pi_size; + bi->pi_offset = info->pi_offset; + } return true; } @@ -2408,22 +2411,24 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, else lim.write_stream_granularity = 0; - ret = queue_limits_commit_update(ns->disk->queue, &lim); - if (ret) { - blk_mq_unfreeze_queue(ns->disk->queue, memflags); - goto out; - } - - set_capacity_and_notify(ns->disk, capacity); - /* * Only set the DEAC bit if the device guarantees that reads from * deallocated data return zeroes. While the DEAC bit does not * require that, it must be a no-op if reads from deallocated data * do not return zeroes. */ - if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) + if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) { ns->head->features |= NVME_NS_DEAC; + lim.max_hw_wzeroes_unmap_sectors = lim.max_write_zeroes_sectors; + } + + ret = queue_limits_commit_update(ns->disk->queue, &lim); + if (ret) { + blk_mq_unfreeze_queue(ns->disk->queue, memflags); + goto out; + } + + set_capacity_and_notify(ns->disk, capacity); set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue, memflags); @@ -4295,7 +4300,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) } /* - * If available try to use the Command Set Idependent Identify Namespace + * If available try to use the Command Set Independent Identify Namespace * data structure to find all the generic information that is needed to * set up a namespace. If not fall back to the legacy version. 
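The DEAC/write-zeroes change above keys off two DLFEAT fields from Identify Namespace; the decoding below is a hedged reading of the bit masks used in that check (helper name hypothetical):

#include <linux/bits.h>
#include <linux/types.h>

/*
 * DLFEAT interpretation assumed by the check above:
 *   bits 2:0 == 001b -> reads of deallocated blocks return zeroes
 *   bit  3   == 1    -> the Write Zeroes command honours the DEAC bit
 * Only when both hold does the driver set NVME_NS_DEAC and advertise
 * zeroing unmap support.
 */
static bool demo_ns_can_use_deac(u8 dlfeat)
{
	return (dlfeat & 0x7) == 0x1 && (dlfeat & BIT(3));
}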
*/ diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 014b387f1e8b..08a5ea3e9383 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -899,7 +899,7 @@ EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); * may crash. * * As such: - * Wrapper all the dma routines and check the dev pointer. + * Wrap all the dma routines and check the dev pointer. * * If simple mappings (return just a dma address, we'll noop them, * returning a dma address of 0. @@ -1955,8 +1955,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) } /* - * For the linux implementation, if we have an unsucceesful - * status, they blk-mq layer can typically be called with the + * For the linux implementation, if we have an unsuccessful + * status, the blk-mq layer can typically be called with the * non-zero status and the content of the cqe isn't important. */ if (status) @@ -2429,7 +2429,7 @@ static bool nvme_fc_terminate_exchange(struct request *req, void *data) /* * This routine runs through all outstanding commands on the association - * and aborts them. This routine is typically be called by the + * and aborts them. This routine is typically called by the * delete_association routine. It is also called due to an error during * reconnect. In that scenario, it is most likely a command that initializes * the controller, including fabric Connect commands on io queues, that @@ -2622,7 +2622,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, * as part of the exchange. The CQE is the last thing for the io, * which is transferred (explicitly or implicitly) with the RSP IU * sent on the exchange. After the CQE is received, the FC exchange is - * terminaed and the Exchange may be used on a different io. + * terminated and the Exchange may be used on a different io. * * The transport to LLDD api has the transport making a request for a * new fcp io request to the LLDD. The LLDD then allocates a FC exchange diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 7df2ea21851f..cfd2b5b90b91 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -69,7 +69,7 @@ enum nvme_quirks { NVME_QUIRK_IDENTIFY_CNS = (1 << 1), /* - * The controller deterministically returns O's on reads to + * The controller deterministically returns 0's on reads to * logical blocks that deallocate was called on. */ NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2), diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 320aaa41ec39..071efec25346 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -7,7 +7,7 @@ #include <linux/acpi.h> #include <linux/async.h> #include <linux/blkdev.h> -#include <linux/blk-mq.h> +#include <linux/blk-mq-dma.h> #include <linux/blk-integrity.h> #include <linux/dmi.h> #include <linux/init.h> @@ -27,7 +27,6 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/sed-opal.h> -#include <linux/pci-p2pdma.h> #include "trace.h" #include "nvme.h" @@ -39,20 +38,17 @@ #define NVME_SMALL_POOL_SIZE 256 /* - * These can be higher, but we need to ensure that any command doesn't - * require an sg allocation that needs more than a page of data. + * Arbitrary upper bound. */ -#define NVME_MAX_KB_SZ 8192 +#define NVME_MAX_BYTES SZ_8M #define NVME_MAX_NR_DESCRIPTORS 5 /* - * For data SGLs we support a single descriptors worth of SGL entries, but for - * now we also limit it to avoid an allocation larger than PAGE_SIZE for the - * scatterlist. + * For data SGLs we support a single descriptors worth of SGL entries. 
+ * For PRPs, segments don't matter at all. */ #define NVME_MAX_SEGS \ - min(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc), \ - (PAGE_SIZE / sizeof(struct scatterlist))) + (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) /* * For metadata SGLs, only the small descriptor is supported, and the first @@ -61,6 +57,21 @@ #define NVME_MAX_META_SEGS \ ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1) +/* + * The last entry is used to link to the next descriptor. + */ +#define PRPS_PER_PAGE \ + (((NVME_CTRL_PAGE_SIZE / sizeof(__le64))) - 1) + +/* + * I/O could be non-aligned both at the beginning and end. + */ +#define MAX_PRP_RANGE \ + (NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1)) + +static_assert(MAX_PRP_RANGE / NVME_CTRL_PAGE_SIZE <= + (1 /* prp1 */ + NVME_MAX_NR_DESCRIPTORS * PRPS_PER_PAGE)); + static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); @@ -97,7 +108,7 @@ static int io_queue_count_set(const char *val, const struct kernel_param *kp) int ret; ret = kstrtouint(val, 10, &n); - if (ret != 0 || n > num_possible_cpus()) + if (ret != 0 || n > blk_mq_num_possible_queues(0)) return -EINVAL; return param_set_uint(val, kp); } @@ -162,7 +173,7 @@ struct nvme_dev { bool hmb; struct sg_table *hmb_sgt; - mempool_t *iod_mempool; + mempool_t *dmavec_mempool; mempool_t *iod_meta_mempool; /* shadow doorbell buffer support: */ @@ -246,7 +257,15 @@ enum nvme_iod_flags { IOD_ABORTED = 1U << 0, /* uses the small descriptor pool */ - IOD_SMALL_DESCRIPTOR = 1U << 1, + IOD_SMALL_DESCRIPTOR = 1U << 1, + + /* single segment dma mapping */ + IOD_SINGLE_SEGMENT = 1U << 2, +}; + +struct nvme_dma_vec { + dma_addr_t addr; + unsigned int len; }; /* @@ -257,13 +276,16 @@ struct nvme_iod { struct nvme_command cmd; u8 flags; u8 nr_descriptors; - unsigned int dma_len; /* length of single DMA segment mapping */ - dma_addr_t first_dma; + + unsigned int total_len; + struct dma_iova_state dma_state; + void *descriptors[NVME_MAX_NR_DESCRIPTORS]; + struct nvme_dma_vec *dma_vecs; + unsigned int nr_dma_vecs; + dma_addr_t meta_dma; - struct sg_table sgt; struct sg_table meta_sgt; struct nvme_sgl_desc *meta_descriptor; - void *descriptors[NVME_MAX_NR_DESCRIPTORS]; }; static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) @@ -406,18 +428,6 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, return true; } -/* - * Will slightly overestimate the number of pages needed. This is OK - * as it only leads to a small amount of wasted memory for the lifetime of - * the I/O. 
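The static_assert added above can be checked by hand, assuming the usual NVME_CTRL_PAGE_SIZE of 4096 bytes (arithmetic only, nothing new beyond the patch's own macros):

/*
 *   PRPS_PER_PAGE  = 4096 / 8 - 1       = 511 data PRPs per list page
 *   MAX_PRP_RANGE  = 8 MiB + 2 * 4095   = 8396798 bytes
 *   pages covered  = 8396798 / 4096     = 2049 (worst-case alignment)
 *   PRPs available = 1 (prp1) + 5 * 511 = 2556 >= 2049
 *
 * so NVME_MAX_NR_DESCRIPTORS = 5 descriptor pages always suffice for
 * an 8 MiB transfer that is misaligned at both ends.
 */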
- */ -static __always_inline int nvme_pci_npages_prp(void) -{ - unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; - unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); - return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); -} - static struct nvme_descriptor_pools * nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node) { @@ -578,32 +588,49 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) spin_unlock(&nvmeq->sq_lock); } -static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev, - struct request *req) +enum nvme_use_sgl { + SGL_UNSUPPORTED, + SGL_SUPPORTED, + SGL_FORCED, +}; + +static inline bool nvme_pci_metadata_use_sgls(struct request *req) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; + if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl)) return false; return req->nr_integrity_segments > 1 || nvme_req(req)->flags & NVME_REQ_USERCMD; } -static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, - int nseg) +static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev, + struct request *req) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; - unsigned int avg_seg_size; - avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); + if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) { + if (nvme_req(req)->flags & NVME_REQ_USERCMD) + return SGL_FORCED; + if (req->nr_integrity_segments > 1) + return SGL_FORCED; + return SGL_SUPPORTED; + } - if (!nvme_ctrl_sgl_supported(&dev->ctrl)) - return false; - if (!nvmeq->qid) - return false; - if (nvme_pci_metadata_use_sgls(dev, req)) - return true; - if (!sgl_threshold || avg_seg_size < sgl_threshold) - return nvme_req(req)->flags & NVME_REQ_USERCMD; - return true; + return SGL_UNSUPPORTED; +} + +static unsigned int nvme_pci_avg_seg_size(struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + unsigned int nseg; + + if (blk_rq_dma_map_coalesce(&iod->dma_state)) + nseg = 1; + else + nseg = blk_rq_nr_phys_segments(req); + return DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); } static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq, @@ -614,11 +641,25 @@ static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq, return nvmeq->descriptor_pools.large; } -static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req) +static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd) +{ + return cmd->common.flags & + (NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG); +} + +static inline dma_addr_t nvme_pci_first_desc_dma_addr(struct nvme_command *cmd) { + if (nvme_pci_cmd_use_sgl(cmd)) + return le64_to_cpu(cmd->common.dptr.sgl.addr); + return le64_to_cpu(cmd->common.dptr.prp2); +} + +static void nvme_free_descriptors(struct request *req) +{ + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - dma_addr_t dma_addr = iod->first_dma; + dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd); int i; if (iod->nr_descriptors == 1) { @@ -637,68 +678,130 @@ static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req) } } -static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq, - struct request *req) +static void nvme_free_prps(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int i; - if (iod->dma_len) { - 
dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, - rq_dma_dir(req)); + for (i = 0; i < iod->nr_dma_vecs; i++) + dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr, + iod->dma_vecs[i].len, rq_dma_dir(req)); + mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool); +} + +static void nvme_free_sgls(struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct device *dma_dev = nvmeq->dev->dev; + dma_addr_t sqe_dma_addr = le64_to_cpu(iod->cmd.common.dptr.sgl.addr); + unsigned int sqe_dma_len = le32_to_cpu(iod->cmd.common.dptr.sgl.length); + struct nvme_sgl_desc *sg_list = iod->descriptors[0]; + enum dma_data_direction dir = rq_dma_dir(req); + + if (iod->nr_descriptors) { + unsigned int nr_entries = sqe_dma_len / sizeof(*sg_list), i; + + for (i = 0; i < nr_entries; i++) + dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr), + le32_to_cpu(sg_list[i].length), dir); + } else { + dma_unmap_page(dma_dev, sqe_dma_addr, sqe_dma_len, dir); + } +} + +static void nvme_unmap_data(struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct device *dma_dev = nvmeq->dev->dev; + + if (iod->flags & IOD_SINGLE_SEGMENT) { + static_assert(offsetof(union nvme_data_ptr, prp1) == + offsetof(union nvme_data_ptr, sgl.addr)); + dma_unmap_page(dma_dev, le64_to_cpu(iod->cmd.common.dptr.prp1), + iod->total_len, rq_dma_dir(req)); return; } - WARN_ON_ONCE(!iod->sgt.nents); + if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) { + if (nvme_pci_cmd_use_sgl(&iod->cmd)) + nvme_free_sgls(req); + else + nvme_free_prps(req); + } - dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); - nvme_free_descriptors(nvmeq, req); - mempool_free(iod->sgt.sgl, dev->iod_mempool); + if (iod->nr_descriptors) + nvme_free_descriptors(req); } -static void nvme_print_sgl(struct scatterlist *sgl, int nents) +static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev, + struct blk_dma_iter *iter) { - int i; - struct scatterlist *sg; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - for_each_sg(sgl, sg, nents, i) { - dma_addr_t phys = sg_phys(sg); - pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " - "dma_address:%pad dma_length:%d\n", - i, &phys, sg->offset, sg->length, &sg_dma_address(sg), - sg_dma_len(sg)); + if (iter->len) + return true; + if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter)) + return false; + if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) { + iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr; + iod->dma_vecs[iod->nr_dma_vecs].len = iter->len; + iod->nr_dma_vecs++; } + return true; } -static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq, - struct request *req, struct nvme_rw_command *cmnd) +static blk_status_t nvme_pci_setup_data_prp(struct request *req, + struct blk_dma_iter *iter) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - int length = blk_rq_payload_bytes(req); - struct scatterlist *sg = iod->sgt.sgl; - int dma_len = sg_dma_len(sg); - u64 dma_addr = sg_dma_address(sg); - int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int length = blk_rq_payload_bytes(req); + dma_addr_t prp1_dma, prp2_dma = 0; + unsigned int prp_len, i; __le64 *prp_list; - dma_addr_t prp_dma; - int i; - length -= (NVME_CTRL_PAGE_SIZE - offset); - if (length <= 0) { - iod->first_dma = 0; - goto done; + if (!dma_use_iova(&iod->dma_state) && 
dma_need_unmap(nvmeq->dev->dev)) { + iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool, + GFP_ATOMIC); + if (!iod->dma_vecs) + return BLK_STS_RESOURCE; + iod->dma_vecs[0].addr = iter->addr; + iod->dma_vecs[0].len = iter->len; + iod->nr_dma_vecs = 1; } - dma_len -= (NVME_CTRL_PAGE_SIZE - offset); - if (dma_len) { - dma_addr += (NVME_CTRL_PAGE_SIZE - offset); - } else { - sg = sg_next(sg); - dma_addr = sg_dma_address(sg); - dma_len = sg_dma_len(sg); + /* + * PRP1 always points to the start of the DMA transfers. + * + * This is the only PRP (except for the list entries) that could be + * non-aligned. + */ + prp1_dma = iter->addr; + prp_len = min(length, NVME_CTRL_PAGE_SIZE - + (iter->addr & (NVME_CTRL_PAGE_SIZE - 1))); + iod->total_len += prp_len; + iter->addr += prp_len; + iter->len -= prp_len; + length -= prp_len; + if (!length) + goto done; + + if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { + if (WARN_ON_ONCE(!iter->status)) + goto bad_sgl; + goto done; } + /* + * PRP2 is usually a list, but can point to data if all data to be + * transferred fits into PRP1 + PRP2: + */ if (length <= NVME_CTRL_PAGE_SIZE) { - iod->first_dma = dma_addr; + prp2_dma = iter->addr; + iod->total_len += length; goto done; } @@ -707,58 +810,80 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq, iod->flags |= IOD_SMALL_DESCRIPTOR; prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, - &prp_dma); - if (!prp_list) - return BLK_STS_RESOURCE; + &prp2_dma); + if (!prp_list) { + iter->status = BLK_STS_RESOURCE; + goto done; + } iod->descriptors[iod->nr_descriptors++] = prp_list; - iod->first_dma = prp_dma; + i = 0; for (;;) { + prp_list[i++] = cpu_to_le64(iter->addr); + prp_len = min(length, NVME_CTRL_PAGE_SIZE); + if (WARN_ON_ONCE(iter->len < prp_len)) + goto bad_sgl; + + iod->total_len += prp_len; + iter->addr += prp_len; + iter->len -= prp_len; + length -= prp_len; + if (!length) + break; + + if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { + if (WARN_ON_ONCE(!iter->status)) + goto bad_sgl; + goto done; + } + + /* + * If we've filled the entire descriptor, allocate a new that is + * pointed to be the last entry in the previous PRP list. To + * accommodate for that move the last actual entry to the new + * descriptor. + */ if (i == NVME_CTRL_PAGE_SIZE >> 3) { __le64 *old_prp_list = prp_list; + dma_addr_t prp_list_dma; prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large, - GFP_ATOMIC, &prp_dma); - if (!prp_list) - goto free_prps; + GFP_ATOMIC, &prp_list_dma); + if (!prp_list) { + iter->status = BLK_STS_RESOURCE; + goto done; + } iod->descriptors[iod->nr_descriptors++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; - old_prp_list[i - 1] = cpu_to_le64(prp_dma); + old_prp_list[i - 1] = cpu_to_le64(prp_list_dma); i = 1; } - prp_list[i++] = cpu_to_le64(dma_addr); - dma_len -= NVME_CTRL_PAGE_SIZE; - dma_addr += NVME_CTRL_PAGE_SIZE; - length -= NVME_CTRL_PAGE_SIZE; - if (length <= 0) - break; - if (dma_len > 0) - continue; - if (unlikely(dma_len < 0)) - goto bad_sgl; - sg = sg_next(sg); - dma_addr = sg_dma_address(sg); - dma_len = sg_dma_len(sg); } + done: - cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); - cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); - return BLK_STS_OK; -free_prps: - nvme_free_descriptors(nvmeq, req); - return BLK_STS_RESOURCE; + /* + * nvme_unmap_data uses the DPT field in the SQE to tear down the + * mapping, so initialize it even for failures. 
+ */ + iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma); + iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma); + if (unlikely(iter->status)) + nvme_unmap_data(req); + return iter->status; + bad_sgl: - WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), - "Invalid SGL for payload:%d nents:%d\n", - blk_rq_payload_bytes(req), iod->sgt.nents); + dev_err_once(nvmeq->dev->dev, + "Incorrectly formed request for payload:%d nents:%d\n", + blk_rq_payload_bytes(req), blk_rq_nr_phys_segments(req)); return BLK_STS_IOERR; } static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, - struct scatterlist *sg) + struct blk_dma_iter *iter) { - sge->addr = cpu_to_le64(sg_dma_address(sg)); - sge->length = cpu_to_le32(sg_dma_len(sg)); + sge->addr = cpu_to_le64(iter->addr); + sge->length = cpu_to_le32(iter->len); sge->type = NVME_SGL_FMT_DATA_DESC << 4; } @@ -770,21 +895,22 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } -static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq, - struct request *req, struct nvme_rw_command *cmd) +static blk_status_t nvme_pci_setup_data_sgl(struct request *req, + struct blk_dma_iter *iter) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int entries = blk_rq_nr_phys_segments(req); struct nvme_sgl_desc *sg_list; - struct scatterlist *sg = iod->sgt.sgl; - unsigned int entries = iod->sgt.nents; dma_addr_t sgl_dma; - int i = 0; + unsigned int mapped = 0; - /* setting the transfer type as SGL */ - cmd->flags = NVME_CMD_SGL_METABUF; + /* set the transfer type as SGL */ + iod->cmd.common.flags = NVME_CMD_SGL_METABUF; - if (entries == 1) { - nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); + if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) { + nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter); + iod->total_len += iter->len; return BLK_STS_OK; } @@ -796,119 +922,104 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq, if (!sg_list) return BLK_STS_RESOURCE; iod->descriptors[iod->nr_descriptors++] = sg_list; - iod->first_dma = sgl_dma; - nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); do { - nvme_pci_sgl_set_data(&sg_list[i++], sg); - sg = sg_next(sg); - } while (--entries > 0); + if (WARN_ON_ONCE(mapped == entries)) { + iter->status = BLK_STS_IOERR; + break; + } + nvme_pci_sgl_set_data(&sg_list[mapped++], iter); + iod->total_len += iter->len; + } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state, + iter)); - return BLK_STS_OK; + nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped); + if (unlikely(iter->status)) + nvme_free_sgls(req); + return iter->status; } -static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, - struct request *req, struct nvme_rw_command *cmnd, - struct bio_vec *bv) +static blk_status_t nvme_pci_setup_data_simple(struct request *req, + enum nvme_use_sgl use_sgl) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); - unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; - - iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); - if (dma_mapping_error(dev->dev, iod->first_dma)) + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct bio_vec bv = req_bvec(req); + unsigned int prp1_offset = bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1); + bool prp_possible = prp1_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2; + dma_addr_t dma_addr; + + if (!use_sgl && !prp_possible) + 
return BLK_STS_AGAIN; + if (is_pci_p2pdma_page(bv.bv_page)) + return BLK_STS_AGAIN; + + dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); + if (dma_mapping_error(nvmeq->dev->dev, dma_addr)) return BLK_STS_RESOURCE; - iod->dma_len = bv->bv_len; - - cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); - if (bv->bv_len > first_prp_len) - cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); - else - cmnd->dptr.prp2 = 0; - return BLK_STS_OK; -} - -static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, - struct request *req, struct nvme_rw_command *cmnd, - struct bio_vec *bv) -{ - struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + iod->total_len = bv.bv_len; + iod->flags |= IOD_SINGLE_SEGMENT; + + if (use_sgl == SGL_FORCED || !prp_possible) { + iod->cmd.common.flags = NVME_CMD_SGL_METABUF; + iod->cmd.common.dptr.sgl.addr = cpu_to_le64(dma_addr); + iod->cmd.common.dptr.sgl.length = cpu_to_le32(bv.bv_len); + iod->cmd.common.dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; + } else { + unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - prp1_offset; - iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); - if (dma_mapping_error(dev->dev, iod->first_dma)) - return BLK_STS_RESOURCE; - iod->dma_len = bv->bv_len; + iod->cmd.common.dptr.prp1 = cpu_to_le64(dma_addr); + iod->cmd.common.dptr.prp2 = 0; + if (bv.bv_len > first_prp_len) + iod->cmd.common.dptr.prp2 = + cpu_to_le64(dma_addr + first_prp_len); + } - cmnd->flags = NVME_CMD_SGL_METABUF; - cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); - cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); - cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; return BLK_STS_OK; } -static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, - struct nvme_command *cmnd) +static blk_status_t nvme_map_data(struct request *req) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - blk_status_t ret = BLK_STS_RESOURCE; - int rc; + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; + enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req); + struct blk_dma_iter iter; + blk_status_t ret; + /* + * Try to skip the DMA iterator for single segment requests, as that + * significantly improves performances for small I/O sizes. 
+ */ if (blk_rq_nr_phys_segments(req) == 1) { - struct bio_vec bv = req_bvec(req); - - if (!is_pci_p2pdma_page(bv.bv_page)) { - if (!nvme_pci_metadata_use_sgls(dev, req) && - (bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) + - bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) - return nvme_setup_prp_simple(dev, req, - &cmnd->rw, &bv); - - if (nvmeq->qid && sgl_threshold && - nvme_ctrl_sgl_supported(&dev->ctrl)) - return nvme_setup_sgl_simple(dev, req, - &cmnd->rw, &bv); - } + ret = nvme_pci_setup_data_simple(req, use_sgl); + if (ret != BLK_STS_AGAIN) + return ret; } - iod->dma_len = 0; - iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); - if (!iod->sgt.sgl) - return BLK_STS_RESOURCE; - sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); - iod->sgt.orig_nents = blk_rq_map_sg(req, iod->sgt.sgl); - if (!iod->sgt.orig_nents) - goto out_free_sg; + if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter)) + return iter.status; - rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), - DMA_ATTR_NO_WARN); - if (rc) { - if (rc == -EREMOTEIO) - ret = BLK_STS_TARGET; - goto out_free_sg; - } - - if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) - ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw); - else - ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw); - if (ret != BLK_STS_OK) - goto out_unmap_sg; - return BLK_STS_OK; + if (use_sgl == SGL_FORCED || + (use_sgl == SGL_SUPPORTED && + (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold))) + return nvme_pci_setup_data_sgl(req, &iter); + return nvme_pci_setup_data_prp(req, &iter); +} -out_unmap_sg: - dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); -out_free_sg: - mempool_free(iod->sgt.sgl, dev->iod_mempool); - return ret; +static void nvme_pci_sgl_set_data_sg(struct nvme_sgl_desc *sge, + struct scatterlist *sg) +{ + sge->addr = cpu_to_le64(sg_dma_address(sg)); + sge->length = cpu_to_le32(sg_dma_len(sg)); + sge->type = NVME_SGL_FMT_DATA_DESC << 4; } -static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev, - struct request *req) +static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct nvme_rw_command *cmnd = &iod->cmd.rw; struct nvme_sgl_desc *sg_list; struct scatterlist *sgl, *sg; unsigned int entries; @@ -939,19 +1050,19 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev, iod->meta_descriptor = sg_list; iod->meta_dma = sgl_dma; - cmnd->flags = NVME_CMD_SGL_METASEG; - cmnd->metadata = cpu_to_le64(sgl_dma); + iod->cmd.common.flags = NVME_CMD_SGL_METASEG; + iod->cmd.common.metadata = cpu_to_le64(sgl_dma); sgl = iod->meta_sgt.sgl; if (entries == 1) { - nvme_pci_sgl_set_data(sg_list, sgl); + nvme_pci_sgl_set_data_sg(sg_list, sgl); return BLK_STS_OK; } sgl_dma += sizeof(*sg_list); nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries); for_each_sg(sgl, sg, entries, i) - nvme_pci_sgl_set_data(&sg_list[i + 1], sg); + nvme_pci_sgl_set_data_sg(&sg_list[i + 1], sg); return BLK_STS_OK; @@ -962,38 +1073,37 @@ out_free_sg: return BLK_STS_RESOURCE; } -static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev, - struct request *req) +static blk_status_t nvme_pci_setup_meta_mptr(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct bio_vec bv = rq_integrity_vec(req); - struct nvme_command *cmnd = &iod->cmd; - iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); - 
if (dma_mapping_error(dev->dev, iod->meta_dma)) + iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); + if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma)) return BLK_STS_IOERR; - cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); + iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma); return BLK_STS_OK; } -static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req) +static blk_status_t nvme_map_metadata(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && - nvme_pci_metadata_use_sgls(dev, req)) - return nvme_pci_setup_meta_sgls(dev, req); - return nvme_pci_setup_meta_mptr(dev, req); + nvme_pci_metadata_use_sgls(req)) + return nvme_pci_setup_meta_sgls(req); + return nvme_pci_setup_meta_mptr(req); } -static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) +static blk_status_t nvme_prep_rq(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; iod->flags = 0; iod->nr_descriptors = 0; - iod->sgt.nents = 0; + iod->total_len = 0; iod->meta_sgt.nents = 0; ret = nvme_setup_cmd(req->q->queuedata, req); @@ -1001,13 +1111,13 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) return ret; if (blk_rq_nr_phys_segments(req)) { - ret = nvme_map_data(dev, req, &iod->cmd); + ret = nvme_map_data(req); if (ret) goto out_free_cmd; } if (blk_integrity_rq(req)) { - ret = nvme_map_metadata(dev, req); + ret = nvme_map_metadata(req); if (ret) goto out_unmap_data; } @@ -1016,7 +1126,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) return BLK_STS_OK; out_unmap_data: if (blk_rq_nr_phys_segments(req)) - nvme_unmap_data(dev, req->mq_hctx->driver_data, req); + nvme_unmap_data(req); out_free_cmd: nvme_cleanup_cmd(req); return ret; @@ -1041,7 +1151,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) return nvme_fail_nonready_command(&dev->ctrl, req); - ret = nvme_prep_rq(dev, req); + ret = nvme_prep_rq(req); if (unlikely(ret)) return ret; spin_lock(&nvmeq->sq_lock); @@ -1079,7 +1189,7 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) return false; - return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; + return nvme_prep_rq(req) == BLK_STS_OK; } static void nvme_queue_rqs(struct rq_list *rqlist) @@ -1105,11 +1215,11 @@ static void nvme_queue_rqs(struct rq_list *rqlist) *rqlist = requeue_list; } -static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev, - struct nvme_queue *nvmeq, - struct request *req) +static __always_inline void nvme_unmap_metadata(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; if (!iod->meta_sgt.nents) { dma_unmap_page(dev->dev, iod->meta_dma, @@ -1126,14 +1236,10 @@ static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev, static __always_inline void nvme_pci_unmap_rq(struct request *req) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; - struct nvme_dev *dev = nvmeq->dev; - if (blk_integrity_rq(req)) - nvme_unmap_metadata(dev, nvmeq, req); - + nvme_unmap_metadata(req); if (blk_rq_nr_phys_segments(req)) - nvme_unmap_data(dev, nvmeq, req); + nvme_unmap_data(req); } static void nvme_pci_complete_rq(struct request *req) @@ -1958,8 +2064,28 @@ static int 
nvme_pci_configure_admin_queue(struct nvme_dev *dev) * might be pointing at! */ result = nvme_disable_ctrl(&dev->ctrl, false); - if (result < 0) - return result; + if (result < 0) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + + /* + * The NVMe Controller Reset method did not get an expected + * CSTS.RDY transition, so something with the device appears to + * be stuck. Use the lower level and bigger hammer PCIe + * Function Level Reset to attempt restoring the device to its + * initial state, and try again. + */ + result = pcie_reset_flr(pdev, false); + if (result < 0) + return result; + + pci_restore_state(pdev); + result = nvme_disable_ctrl(&dev->ctrl, false); + if (result < 0) + return result; + + dev_info(dev->ctrl.device, + "controller reset completed after pcie flr\n"); + } result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); if (result) @@ -2331,7 +2457,7 @@ static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); - return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", + return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz : 0x%08x\n", ndev->cmbloc, ndev->cmbsz); } static DEVICE_ATTR_RO(cmb); @@ -2518,7 +2644,8 @@ static unsigned int nvme_max_io_queues(struct nvme_dev *dev) */ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) return 1; - return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; + return blk_mq_num_possible_queues(0) + dev->nr_write_queues + + dev->nr_poll_queues; } static int nvme_setup_io_queues(struct nvme_dev *dev) @@ -2913,13 +3040,13 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) { size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1); - size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; + size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS; - dev->iod_mempool = mempool_create_node(1, + dev->dmavec_mempool = mempool_create_node(1, mempool_kmalloc, mempool_kfree, (void *)alloc_size, GFP_KERNEL, dev_to_node(dev->dev)); - if (!dev->iod_mempool) + if (!dev->dmavec_mempool) return -ENOMEM; dev->iod_meta_mempool = mempool_create_node(1, @@ -2928,10 +3055,9 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) dev_to_node(dev->dev)); if (!dev->iod_meta_mempool) goto free; - return 0; free: - mempool_destroy(dev->iod_mempool); + mempool_destroy(dev->dmavec_mempool); return -ENOMEM; } @@ -3272,7 +3398,8 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, * over a single page. 
*/ dev->ctrl.max_hw_sectors = min_t(u32, - NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); + NVME_MAX_BYTES >> SECTOR_SHIFT, + dma_opt_mapping_size(&pdev->dev) >> 9); dev->ctrl.max_segments = NVME_MAX_SEGS; dev->ctrl.max_integrity_segments = 1; return dev; @@ -3380,7 +3507,7 @@ out_disable: nvme_dbbuf_dma_free(dev); nvme_free_queues(dev, 0); out_release_iod_mempool: - mempool_destroy(dev->iod_mempool); + mempool_destroy(dev->dmavec_mempool); mempool_destroy(dev->iod_meta_mempool); out_dev_unmap: nvme_dev_unmap(dev); @@ -3444,7 +3571,7 @@ static void nvme_remove(struct pci_dev *pdev) nvme_dev_remove_admin(dev); nvme_dbbuf_dma_free(dev); nvme_free_queues(dev, 0); - mempool_destroy(dev->iod_mempool); + mempool_destroy(dev->dmavec_mempool); mempool_destroy(dev->iod_meta_mempool); nvme_release_descriptor_pools(dev); nvme_dev_unmap(dev); @@ -3847,7 +3974,6 @@ static int __init nvme_init(void) BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); - BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS); return pci_register_driver(&nvme_driver); } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 9bd3646568d0..190a4cfa8a5e 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -877,7 +877,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) /* * Only start IO queues for which we have allocated the tagset - * and limitted it to the available queues. On reconnects, the + * and limited it to the available queues. On reconnects, the * queue number might have changed. */ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index d924008c3949..9233f088fac8 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1745,9 +1745,14 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl, qid, ret); tls_handshake_cancel(queue->sock->sk); } else { - dev_dbg(nctrl->device, - "queue %d: TLS handshake complete, error %d\n", - qid, queue->tls_err); + if (queue->tls_err) { + dev_err(nctrl->device, + "queue %d: TLS handshake complete, error %d\n", + qid, queue->tls_err); + } else { + dev_dbg(nctrl->device, + "queue %d: TLS handshake complete\n", qid); + } ret = queue->tls_err; } return ret; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 175c5b6d4dd5..884286f90688 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -581,8 +581,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns) if (ns->enabled) goto out_unlock; - ret = -EMFILE; - ret = nvmet_bdev_ns_enable(ns); if (ret == -ENOTBLK) ret = nvmet_file_ns_enable(ns); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index eba42df2f821..8d246b8ca604 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -46,6 +46,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) id->npda = id->npdg; /* NOWS = Namespace Optimal Write Size */ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev)); + + /* Set WZDS and DRB if device supports unmapped write zeroes */ + if (bdev_write_zeroes_unmap_sectors(bdev)) + id->dlfeat = (1 << 3) | 0x1; } void nvmet_bdev_ns_disable(struct nvmet_ns *ns) @@ -65,7 +69,7 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) return; if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) { - ns->metadata_size = 
bi->tuple_size; + ns->metadata_size = bi->metadata_size; if (bi->flags & BLK_INTEGRITY_REF_TAG) ns->pi_type = NVME_NS_DPS_PI_TYPE1; else diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index b7515c53829b..3b4b0df8f879 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -106,7 +106,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) pctrl->max_hw_sectors); /* - * nvmet_passthru_map_sg is limitted to using a single bio so limit + * nvmet_passthru_map_sg is limited to using a single bio so limit * the mdts based on BIO_MAX_VECS as well */ max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT, @@ -147,7 +147,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) * When passthru controller is setup using nvme-loop transport it will * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl() - * code path with duplicate ctr subsynqn. In order to prevent that we + * code path with duplicate ctrl subsysnqn. In order to prevent that we * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. */ memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c index a4295a5b8d28..2e78397a7373 100644 --- a/drivers/nvme/target/pci-epf.c +++ b/drivers/nvme/target/pci-epf.c @@ -1242,8 +1242,11 @@ static void nvmet_pci_epf_queue_response(struct nvmet_req *req) iod->status = le16_to_cpu(req->cqe->status) >> 1; - /* If we have no data to transfer, directly complete the command. */ - if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) { + /* + * If the command failed or we have no data to transfer, complete the + * command immediately. + */ + if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) { nvmet_pci_epf_complete_iod(iod); return; } @@ -1604,8 +1607,13 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) goto complete; } + /* + * If nvmet_req_init() fails (e.g., unsupported opcode) it will call + * __nvmet_req_complete() internally which will call + * nvmet_pci_epf_queue_response() and will complete the command directly. 
+ */ if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops)) - goto complete; + return; iod->data_len = nvmet_req_transfer_len(req); if (iod->data_len) { @@ -1643,10 +1651,11 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) wait_for_completion(&iod->done); - if (iod->status == NVME_SC_SUCCESS) { - WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE); - nvmet_pci_epf_transfer_iod_data(iod); - } + if (iod->status != NVME_SC_SUCCESS) + return; + + WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE); + nvmet_pci_epf_transfer_iod_data(iod); complete: nvmet_pci_epf_complete_iod(iod); @@ -1860,7 +1869,7 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); if (ctrl->io_cqes < sizeof(struct nvme_completion)) { dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", - ctrl->io_sqes, sizeof(struct nvme_completion)); + ctrl->io_cqes, sizeof(struct nvme_completion)); goto err; } diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 29a60fabfcc8..15a579cf528c 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -541,7 +541,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) struct bio *bio; int sg_cnt; - /* Request is completed on len mismatch in nvmet_check_transter_len() */ + /* Request is completed on len mismatch in nvmet_check_transfer_len() */ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) return; diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c index 8571aac56295..a27eeb08146f 100644 --- a/drivers/nvmem/layouts/u-boot-env.c +++ b/drivers/nvmem/layouts/u-boot-env.c @@ -148,7 +148,7 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem, crc32_data_len = dev_size - crc32_data_offset; data_len = dev_size - data_offset; - calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L; + calc = crc32_le(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L; if (calc != crc32) { dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32); err = -EINVAL; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 67db34fd10ee..b853585cb1f8 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -708,6 +708,8 @@ static int pci_pm_prepare(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + dev_pm_set_strict_midlayer(dev, true); + if (pm && pm->prepare) { int error = pm->prepare(dev); if (error < 0) @@ -749,6 +751,8 @@ static void pci_pm_complete(struct device *dev) if (pci_dev->current_state < pre_sleep_state) pm_request_resume(dev); } + + dev_pm_set_strict_midlayer(dev, false); } #else /* !CONFIG_PM_SLEEP */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4b8693ec9e4c..e6a34db77826 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2508,6 +2508,7 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, } EXPORT_SYMBOL(pci_bus_read_dev_vendor_id); +#if IS_ENABLED(CONFIG_PCI_PWRCTRL) static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn) { struct pci_host_bridge *host = pci_find_host_bridge(bus); @@ -2537,6 +2538,12 @@ static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, in return pdev; } +#else +static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn) +{ + return NULL; +} +#endif /* * Read the config data for a PCI device, sanity-check it, diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c index 54cc810f79d6..6726853110d1 100644 --- a/drivers/pinctrl/pinctrl-tps6594.c +++ b/drivers/pinctrl/pinctrl-tps6594.c @@ -226,6 +226,10 @@ static const char *const tps65224_nerr_mcu_func_group_names[] = { "GPIO5", }; +static const char *const tps652g1_cs_spi_func_group_names[] = { + "GPIO1", +}; + struct tps6594_pinctrl_function { struct pinfunction pinfunction; u8 muxval; @@ -287,6 +291,18 @@ static const struct tps6594_pinctrl_function tps65224_pinctrl_functions[] = { FUNCTION(tps65224, nerr_mcu, TPS65224_PINCTRL_NERR_MCU_FUNCTION), }; +static const struct tps6594_pinctrl_function tps652g1_pinctrl_functions[] = { + FUNCTION(tps65224, gpio, TPS6594_PINCTRL_GPIO_FUNCTION), + FUNCTION(tps65224, sda_i2c2_sdo_spi, TPS65224_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION), + FUNCTION(tps65224, nsleep2, TPS65224_PINCTRL_NSLEEP2_FUNCTION), + FUNCTION(tps65224, nint, TPS65224_PINCTRL_NINT_FUNCTION), + FUNCTION(tps652g1, cs_spi, TPS65224_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION), + FUNCTION(tps65224, nsleep1, TPS65224_PINCTRL_NSLEEP1_FUNCTION), + FUNCTION(tps65224, pb, TPS65224_PINCTRL_PB_FUNCTION), + FUNCTION(tps65224, wkup, TPS65224_PINCTRL_WKUP_FUNCTION), + FUNCTION(tps65224, syncclkin, TPS65224_PINCTRL_SYNCCLKIN_FUNCTION), +}; + struct tps6594_pinctrl { struct tps6594 *tps; struct gpio_regmap *gpio_regmap; @@ -300,6 +316,16 @@ struct tps6594_pinctrl { struct muxval_remap *remap; }; +static struct tps6594_pinctrl tps652g1_template_pinctrl = { + .funcs = tps652g1_pinctrl_functions, + .func_cnt = ARRAY_SIZE(tps652g1_pinctrl_functions), + .pins = tps65224_pins, + .num_pins = ARRAY_SIZE(tps65224_pins), + .mux_sel_mask = TPS65224_MASK_GPIO_SEL, + .remap = tps65224_muxval_remap, + .remap_cnt = ARRAY_SIZE(tps65224_muxval_remap), +}; + static struct tps6594_pinctrl tps65224_template_pinctrl = { .funcs = tps65224_pinctrl_functions, .func_cnt = ARRAY_SIZE(tps65224_pinctrl_functions), @@ -475,6 +501,15 @@ static int tps6594_pinctrl_probe(struct platform_device *pdev) return -ENOMEM; switch (tps->chip_id) { + case TPS652G1: + pctrl_desc->pins = tps65224_pins; + pctrl_desc->npins = ARRAY_SIZE(tps65224_pins); + + *pinctrl = tps652g1_template_pinctrl; + + config.ngpio = ARRAY_SIZE(tps65224_gpio_func_group_names); + config.ngpio_per_reg = TPS65224_NGPIO_PER_REG; + break; case TPS65224: pctrl_desc->pins = tps65224_pins; pctrl_desc->npins = 
ARRAY_SIZE(tps65224_pins); diff --git a/drivers/platform/arm64/huawei-gaokun-ec.c b/drivers/platform/arm64/huawei-gaokun-ec.c index 7e5aa7ca2403..7170f8eb76f7 100644 --- a/drivers/platform/arm64/huawei-gaokun-ec.c +++ b/drivers/platform/arm64/huawei-gaokun-ec.c @@ -662,6 +662,7 @@ static void gaokun_aux_release(struct device *dev) { struct auxiliary_device *adev = to_auxiliary_dev(dev); + of_node_put(dev->of_node); kfree(adev); } @@ -693,6 +694,7 @@ static int gaokun_aux_init(struct device *parent, const char *name, ret = auxiliary_device_init(adev); if (ret) { + of_node_put(adev->dev.of_node); kfree(adev); return ret; } diff --git a/drivers/platform/arm64/lenovo-yoga-c630.c b/drivers/platform/arm64/lenovo-yoga-c630.c index 1f05c9a6a89d..75060c842b24 100644 --- a/drivers/platform/arm64/lenovo-yoga-c630.c +++ b/drivers/platform/arm64/lenovo-yoga-c630.c @@ -191,50 +191,16 @@ void yoga_c630_ec_unregister_notify(struct yoga_c630_ec *ec, struct notifier_blo } EXPORT_SYMBOL_GPL(yoga_c630_ec_unregister_notify); -static void yoga_c630_aux_release(struct device *dev) -{ - struct auxiliary_device *adev = to_auxiliary_dev(dev); - - kfree(adev); -} - -static void yoga_c630_aux_remove(void *data) -{ - struct auxiliary_device *adev = data; - - auxiliary_device_delete(adev); - auxiliary_device_uninit(adev); -} - static int yoga_c630_aux_init(struct device *parent, const char *name, struct yoga_c630_ec *ec) { struct auxiliary_device *adev; - int ret; - adev = kzalloc(sizeof(*adev), GFP_KERNEL); + adev = devm_auxiliary_device_create(parent, name, ec); if (!adev) - return -ENOMEM; - - adev->name = name; - adev->id = 0; - adev->dev.parent = parent; - adev->dev.release = yoga_c630_aux_release; - adev->dev.platform_data = ec; - - ret = auxiliary_device_init(adev); - if (ret) { - kfree(adev); - return ret; - } - - ret = auxiliary_device_add(adev); - if (ret) { - auxiliary_device_uninit(adev); - return ret; - } + return -ENODEV; - return devm_add_action_or_reset(parent, yoga_c630_aux_remove, adev); + return 0; } static int yoga_c630_ec_probe(struct i2c_client *client) diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index 10941ac37305..2281d6dacc9b 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -286,7 +286,7 @@ config CROS_USBPD_NOTIFY default MFD_CROS_EC_DEV help If you say Y here, you get support for Type-C PD event notifications - from the ChromeOS EC. On ACPI platorms this driver will bind to the + from the ChromeOS EC. On ACPI platforms this driver will bind to the GOOG0003 ACPI device, and on platforms which don't have this device it will get initialized on ECs which support the feature EC_FEATURE_USB_PD. 
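Aside on the lenovo-yoga-c630 hunk above: the conversion replaces the driver's hand-rolled auxiliary-device setup (kzalloc, auxiliary_device_init/add, a custom release callback and a devm unwind action) with a single call to devm_auxiliary_device_create(). The sketch below is not the driver's code; it only illustrates the call shape the diff adopts — three arguments (parent device, cell name, platform data) and a NULL return treated as -ENODEV — with made-up "example_*" names standing in for the real ones.

/*
 * Hedged sketch, assuming the devm_auxiliary_device_create() usage shown in
 * the lenovo-yoga-c630 hunk: one devres-managed helper replaces the removed
 * init/add pair, the custom release callback and the explicit unwind action.
 * Function and cell names here are hypothetical.
 */
#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_register_aux_cell(struct device *parent, const char *name,
				     void *platform_data)
{
	struct auxiliary_device *adev;

	/*
	 * Lifetime is tied to @parent via devres, so no explicit delete or
	 * uninit path is needed on probe failure or removal; the diff maps a
	 * NULL return to -ENODEV.
	 */
	adev = devm_auxiliary_device_create(parent, name, platform_data);
	if (!adev)
		return -ENODEV;

	return 0;
}

The contrast with the neighbouring huawei-gaokun-ec hunk is the point of the change: that driver keeps the manual init/add flow and therefore has to add of_node_put() to both its release callback and its error path, whereas the devres-based helper lets yoga-c630 drop its release and cleanup code entirely.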
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 3ab668764383..0b92047265de 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -726,9 +726,9 @@ static int __init chromeos_laptop_setup_irq(struct i2c_peripheral *i2c_dev) if (irq < 0) return irq; - i2c_dev->irq_resource = (struct resource) - DEFINE_RES_NAMED(irq, 1, NULL, - IORESOURCE_IRQ | i2c_dev->irqflags); + i2c_dev->irq_resource = DEFINE_RES_IRQ(irq); + i2c_dev->irq_resource.flags |= i2c_dev->irqflags; + i2c_dev->board_info.resources = &i2c_dev->irq_resource; i2c_dev->board_info.num_resources = 1; } @@ -782,8 +782,7 @@ err_out: while (--i >= 0) { i2c_dev = &i2c_peripherals[i]; info = &i2c_dev->board_info; - if (!IS_ERR_OR_NULL(info->fwnode)) - fwnode_remove_software_node(info->fwnode); + fwnode_remove_software_node(info->fwnode); } kfree(i2c_peripherals); return error; diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index f37c0ef4af1f..a6eed99507d4 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -9,6 +9,10 @@ #include <linux/platform_device.h> #include <linux/pstore_ram.h> +static int ecc_size; +module_param(ecc_size, int, 0400); +MODULE_PARM_DESC(ecc_size, "ECC parity data size in bytes. A positive value enables ECC for the ramoops region."); + static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = { { /* @@ -117,6 +121,9 @@ static int __init chromeos_pstore_init(void) { bool acpi_dev_found; + if (ecc_size > 0) + chromeos_ramoops_data.ecc_info.ecc_size = ecc_size; + /* First check ACPI for non-hardcoded values from firmware. */ acpi_dev_found = chromeos_check_acpi(); diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index 110771a8645e..fd58781a2fb7 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -318,6 +318,9 @@ EXPORT_SYMBOL(cros_ec_register); */ void cros_ec_unregister(struct cros_ec_device *ec_dev) { + if (ec_dev->mkbp_event_supported) + blocking_notifier_chain_unregister(&ec_dev->event_notifier, + &ec_dev->notifier_ready); platform_device_unregister(ec_dev->pd); platform_device_unregister(ec_dev->ec); mutex_destroy(&ec_dev->lock); diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c index 50cdae67fa32..9bad8f72680e 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub.c +++ b/drivers/platform/chrome/cros_ec_sensorhub.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/device.h> +#include <linux/delay.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_data/cros_ec_commands.h> @@ -18,6 +19,7 @@ #include <linux/types.h> #define DRV_NAME "cros-ec-sensorhub" +#define CROS_EC_CMD_INFO_RETRIES 50 static void cros_ec_sensorhub_free_sensor(void *arg) { @@ -53,7 +55,7 @@ static int cros_ec_sensorhub_register(struct device *dev, int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 }; struct cros_ec_command *msg = sensorhub->msg; struct cros_ec_dev *ec = sensorhub->ec; - int ret, i; + int ret, i, retries; char *name; @@ -65,12 +67,25 @@ static int cros_ec_sensorhub_register(struct device *dev, sensorhub->params->cmd = MOTIONSENSE_CMD_INFO; sensorhub->params->info.sensor_num = i; - ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); + retries = CROS_EC_CMD_INFO_RETRIES; + do { + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); + if (ret == -EBUSY) { + /* 
The EC is still busy initializing sensors. */ + usleep_range(5000, 6000); + retries--; + } + } while (ret == -EBUSY && retries); + if (ret < 0) { - dev_warn(dev, "no info for EC sensor %d : %d/%d\n", - i, ret, msg->result); + dev_err(dev, "no info for EC sensor %d : %d/%d\n", + i, ret, msg->result); continue; } + if (retries < CROS_EC_CMD_INFO_RETRIES) { + dev_warn(dev, "%d retries needed to bring up sensor %d\n", + CROS_EC_CMD_INFO_RETRIES - retries, i); + } switch (sensorhub->resp->info.type) { case MOTIONSENSE_TYPE_ACCEL: diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 7678e3d05fd3..b712bcff6fb2 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -58,8 +58,91 @@ static int cros_typec_enter_usb_mode(struct typec_port *tc_port, enum usb_mode m &req, sizeof(req), NULL, 0); } +static int cros_typec_perform_role_swap(struct typec_port *tc_port, int target_role, u8 swap_type) +{ + struct cros_typec_port *port = typec_get_drvdata(tc_port); + struct cros_typec_data *data = port->typec_data; + struct ec_response_usb_pd_control_v2 resp; + struct ec_params_usb_pd_control req; + int role, ret; + + /* Must be at least v1 to support role swap. */ + if (!data->pd_ctrl_ver) + return -EOPNOTSUPP; + + /* First query the state */ + req.port = port->port_num; + req.role = USB_PD_CTRL_ROLE_NO_CHANGE; + req.mux = USB_PD_CTRL_MUX_NO_CHANGE; + req.swap = USB_PD_CTRL_SWAP_NONE; + + ret = cros_ec_cmd(data->ec, data->pd_ctrl_ver, EC_CMD_USB_PD_CONTROL, + &req, sizeof(req), &resp, sizeof(resp)); + if (ret < 0) + return ret; + + switch (swap_type) { + case USB_PD_CTRL_SWAP_DATA: + role = (resp.role & PD_CTRL_RESP_ROLE_DATA) ? TYPEC_HOST : + TYPEC_DEVICE; + break; + case USB_PD_CTRL_SWAP_POWER: + role = (resp.role & PD_CTRL_RESP_ROLE_POWER) ? TYPEC_SOURCE : + TYPEC_SINK; + break; + default: + dev_warn(data->dev, "Unsupported role swap type %d\n", swap_type); + return -EOPNOTSUPP; + } + + if (role == target_role) + return 0; + + req.swap = swap_type; + ret = cros_ec_cmd(data->ec, data->pd_ctrl_ver, EC_CMD_USB_PD_CONTROL, + &req, sizeof(req), &resp, sizeof(resp)); + if (ret < 0) + return ret; + + switch (swap_type) { + case USB_PD_CTRL_SWAP_DATA: + role = resp.role & PD_CTRL_RESP_ROLE_DATA ? TYPEC_HOST : TYPEC_DEVICE; + if (role != target_role) { + dev_err(data->dev, "Data role swap failed despite EC returning success\n"); + return -EIO; + } + typec_set_data_role(tc_port, target_role); + break; + case USB_PD_CTRL_SWAP_POWER: + role = resp.role & PD_CTRL_RESP_ROLE_POWER ? 
TYPEC_SOURCE : TYPEC_SINK; + if (role != target_role) { + dev_err(data->dev, "Power role swap failed despite EC returning success\n"); + return -EIO; + } + typec_set_pwr_role(tc_port, target_role); + break; + default: + /* Should never execute */ + break; + } + + return 0; +} + +static int cros_typec_dr_swap(struct typec_port *port, enum typec_data_role role) +{ + return cros_typec_perform_role_swap(port, role, USB_PD_CTRL_SWAP_DATA); +} + +static int cros_typec_pr_swap(struct typec_port *port, enum typec_role role) +{ + return cros_typec_perform_role_swap(port, role, USB_PD_CTRL_SWAP_POWER); +} + static const struct typec_operations cros_typec_usb_mode_ops = { - .enter_usb_mode = cros_typec_enter_usb_mode + .enter_usb_mode = cros_typec_enter_usb_mode, + .dr_set = cros_typec_dr_swap, + .pr_set = cros_typec_pr_swap, }; static int cros_typec_parse_port_props(struct typec_capability *cap, @@ -1271,9 +1354,9 @@ static int cros_typec_probe(struct platform_device *pdev) typec->dev = dev; typec->ec = dev_get_drvdata(pdev->dev.parent); - if (!typec->ec) { - dev_err(dev, "couldn't find parent EC device\n"); - return -ENODEV; + if (!typec->ec || !typec->ec->ec) { + dev_warn(dev, "couldn't find parent EC device\n"); + return -EPROBE_DEFER; } platform_set_drvdata(pdev, typec); diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c index c2df24ea8686..77184c8b42ea 100644 --- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c +++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c @@ -439,27 +439,28 @@ static int omnia_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask, return 0; } -static void omnia_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +static int omnia_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) { const struct omnia_gpio *gpio = &omnia_gpios[offset]; struct omnia_mcu *mcu = gpiochip_get_data(gc); u16 val, mask; if (!gpio->ctl_cmd) - return; + return -EINVAL; mask = BIT(gpio->ctl_bit); val = value ? 
mask : 0; - omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask); + return omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask); } -static void omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, - unsigned long *bits) +static int omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) { unsigned long ctl = 0, ctl_mask = 0, ext_ctl = 0, ext_ctl_mask = 0; struct omnia_mcu *mcu = gpiochip_get_data(gc); unsigned int i; + int err; for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) { unsigned long *field, *field_mask; @@ -488,13 +489,21 @@ static void omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, guard(mutex)(&mcu->lock); - if (ctl_mask) - omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL, - ctl, ctl_mask); + if (ctl_mask) { + err = omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL, + ctl, ctl_mask); + if (err) + return err; + } + + if (ext_ctl_mask) { + err = omnia_ctl_cmd_locked(mcu, OMNIA_CMD_EXT_CONTROL, + ext_ctl, ext_ctl_mask); + if (err) + return err; + } - if (ext_ctl_mask) - omnia_ctl_cmd_locked(mcu, OMNIA_CMD_EXT_CONTROL, - ext_ctl, ext_ctl_mask); + return 0; } static bool omnia_gpio_available(struct omnia_mcu *mcu, @@ -1015,8 +1024,8 @@ int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) mcu->gc.direction_output = omnia_gpio_direction_output; mcu->gc.get = omnia_gpio_get; mcu->gc.get_multiple = omnia_gpio_get_multiple; - mcu->gc.set = omnia_gpio_set; - mcu->gc.set_multiple = omnia_gpio_set_multiple; + mcu->gc.set_rv = omnia_gpio_set; + mcu->gc.set_multiple_rv = omnia_gpio_set_multiple; mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask; mcu->gc.can_sleep = true; mcu->gc.names = omnia_mcu_gpio_names; diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index a1c529f1ff1a..4776013e0764 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -15,6 +15,7 @@ #include <linux/hwmon.h> #include <linux/platform_device.h> #include <linux/string.h> +#include <linux/string_helpers.h> #include <uapi/linux/psci.h> #define MLXBF_PMC_WRITE_REG_32 0x82000009 @@ -1222,7 +1223,7 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt) return -ENODEV; } -/* Get the event number given the name */ +/* Get the event name given the number */ static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt) { const struct mlxbf_pmc_events *events; @@ -1784,6 +1785,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev, attr, struct mlxbf_pmc_attribute, dev_attr); unsigned int blk_num, cnt_num; bool is_l3 = false; + char *evt_name; int evt_num; int err; @@ -1791,14 +1793,23 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev, cnt_num = attr_event->index; if (isalpha(buf[0])) { + /* Remove the trailing newline character if present */ + evt_name = kstrdup_and_replace(buf, '\n', '\0', GFP_KERNEL); + if (!evt_name) + return -ENOMEM; + evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num], - buf); + evt_name); + kfree(evt_name); if (evt_num < 0) return -EINVAL; } else { err = kstrtouint(buf, 0, &evt_num); if (err < 0) return err; + + if (!mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num)) + return -EINVAL; } if (strstr(pmc->block_name[blk_num], "l3cache")) @@ -1879,13 +1890,14 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev, { struct mlxbf_pmc_attribute *attr_enable = container_of( attr, struct mlxbf_pmc_attribute, dev_attr); - unsigned int en, blk_num; + unsigned int blk_num; u32 word; int err; + bool en; blk_num = 
attr_enable->nr; - err = kstrtouint(buf, 0, &en); + err = kstrtobool(buf, &en); if (err < 0) return err; @@ -1905,14 +1917,11 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev, MLXBF_PMC_CRSPACE_PERFMON_CTL(pmc->block[blk_num].counters), MLXBF_PMC_WRITE_REG_32, word); } else { - if (en && en != 1) - return -EINVAL; - err = mlxbf_pmc_config_l3_counters(blk_num, false, !!en); if (err) return err; - if (en == 1) { + if (en) { err = mlxbf_pmc_config_l3_counters(blk_num, true, false); if (err) return err; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index e5cbd58a99f3..6d238e120dce 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -37,6 +37,15 @@ config ACPI_WMI It is safe to enable this driver even if your DSDT doesn't define any ACPI-WMI devices. +config ACPI_WMI_LEGACY_DEVICE_NAMES + bool "Use legacy WMI device naming scheme" + depends on ACPI_WMI + help + Say Y here to force the WMI driver core to use the old WMI device naming + scheme when creating WMI devices. Doing so might be necessary for some + userspace applications but will cause the registration of WMI devices with + the same GUID to fail in some corner cases. + config WMI_BMOF tristate "WMI embedded Binary MOF driver" depends on ACPI_WMI @@ -120,32 +129,6 @@ config GIGABYTE_WMI To compile this driver as a module, choose M here: the module will be called gigabyte-wmi. -config YOGABOOK - tristate "Lenovo Yoga Book tablet key driver" - depends on ACPI_WMI - depends on INPUT - depends on I2C - select LEDS_CLASS - select NEW_LEDS - help - Say Y here if you want to support the 'Pen' key and keyboard backlight - control on the Lenovo Yoga Book tablets. - - To compile this driver as a module, choose M here: the module will - be called lenovo-yogabook. - -config YT2_1380 - tristate "Lenovo Yoga Tablet 2 1380 fast charge driver" - depends on SERIAL_DEV_BUS - depends on EXTCON - depends on ACPI - help - Say Y here to enable support for the custom fast charging protocol - found on the Lenovo Yoga Tablet 2 1380F / 1380L models. - - To compile this driver as a module, choose M here: the module will - be called lenovo-yogabook. - config ACERHDF tristate "Acer Aspire One temperature and fan driver" depends on ACPI_EC && THERMAL @@ -459,43 +442,6 @@ config IBM_RTL state = 0 (BIOS SMIs on) state = 1 (BIOS SMIs off) -config IDEAPAD_LAPTOP - tristate "Lenovo IdeaPad Laptop Extras" - depends on ACPI - depends on RFKILL && INPUT - depends on SERIO_I8042 - depends on BACKLIGHT_CLASS_DEVICE - depends on ACPI_VIDEO || ACPI_VIDEO = n - depends on ACPI_WMI || ACPI_WMI = n - select ACPI_PLATFORM_PROFILE - select INPUT_SPARSEKMAP - select NEW_LEDS - select LEDS_CLASS - help - This is a driver for Lenovo IdeaPad netbooks contains drivers for - rfkill switch, hotkey, fan control and backlight control. - -config LENOVO_WMI_HOTKEY_UTILITIES - tristate "Lenovo Hotkey Utility WMI extras driver" - depends on ACPI_WMI - select NEW_LEDS - select LEDS_CLASS - imply IDEAPAD_LAPTOP - help - This driver provides WMI support for Lenovo customized hotkeys function, - such as LED control for audio/mic mute event for Ideapad, YOGA, XiaoXin, - Gaming, ThinkBook and so on. - -config LENOVO_YMC - tristate "Lenovo Yoga Tablet Mode Control" - depends on ACPI_WMI - depends on INPUT - depends on IDEAPAD_LAPTOP - select INPUT_SPARSEKMAP - help - This driver maps the Tablet Mode Control switch to SW_TABLET_MODE input - events for Lenovo Yoga notebooks. 
- config SENSORS_HDAPS tristate "Thinkpad Hard Drive Active Protection System (hdaps)" depends on INPUT @@ -514,160 +460,8 @@ config SENSORS_HDAPS Say Y here if you have an applicable laptop and want to experience the awesome power of hdaps. -config THINKPAD_ACPI - tristate "ThinkPad ACPI Laptop Extras" - depends on ACPI_EC - depends on ACPI_BATTERY - depends on INPUT - depends on RFKILL || RFKILL = n - depends on ACPI_VIDEO || ACPI_VIDEO = n - depends on BACKLIGHT_CLASS_DEVICE - depends on I2C - depends on DRM - select ACPI_PLATFORM_PROFILE - select DRM_PRIVACY_SCREEN - select HWMON - select NVRAM - select NEW_LEDS - select LEDS_CLASS - select INPUT_SPARSEKMAP - help - This is a driver for the IBM and Lenovo ThinkPad laptops. It adds - support for Fn-Fx key combinations, Bluetooth control, video - output switching, ThinkLight control, UltraBay eject and more. - For more information about this driver see - <file:Documentation/admin-guide/laptops/thinkpad-acpi.rst> and - <http://ibm-acpi.sf.net/> . - - This driver was formerly known as ibm-acpi. - - Extra functionality will be available if the rfkill (CONFIG_RFKILL) - and/or ALSA (CONFIG_SND) subsystems are available in the kernel. - Note that if you want ThinkPad-ACPI to be built-in instead of - modular, ALSA and rfkill will also have to be built-in. - - If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. - -config THINKPAD_ACPI_ALSA_SUPPORT - bool "Console audio control ALSA interface" - depends on THINKPAD_ACPI - depends on SND - depends on SND = y || THINKPAD_ACPI = SND - default y - help - Enables monitoring of the built-in console audio output control - (headphone and speakers), which is operated by the mute and (in - some ThinkPad models) volume hotkeys. - - If this option is enabled, ThinkPad-ACPI will export an ALSA card - with a single read-only mixer control, which should be used for - on-screen-display feedback purposes by the Desktop Environment. - - Optionally, the driver will also allow software control (the - ALSA mixer will be made read-write). Please refer to the driver - documentation for details. - - All IBM models have both volume and mute control. Newer Lenovo - models only have mute control (the volume hotkeys are just normal - keys and volume control is done through the main HDA mixer). - -config THINKPAD_ACPI_DEBUGFACILITIES - bool "Maintainer debug facilities" - depends on THINKPAD_ACPI - help - Enables extra stuff in the thinkpad-acpi which is completely useless - for normal use. Read the driver source to find out what it does. - - Say N here, unless you were told by a kernel maintainer to do - otherwise. - -config THINKPAD_ACPI_DEBUG - bool "Verbose debug mode" - depends on THINKPAD_ACPI - help - Enables extra debugging information, at the expense of a slightly - increase in driver size. - - If you are not sure, say N here. - -config THINKPAD_ACPI_UNSAFE_LEDS - bool "Allow control of important LEDs (unsafe)" - depends on THINKPAD_ACPI - help - Overriding LED state on ThinkPads can mask important - firmware alerts (like critical battery condition), or misled - the user into damaging the hardware (undocking or ejecting - the bay while buses are still active), etc. - - LED control on the ThinkPad is write-only (with very few - exceptions on very ancient models), which makes it - impossible to know beforehand if important information will - be lost when one changes LED state. 
- - Users that know what they are doing can enable this option - and the driver will allow control of every LED, including - the ones on the dock stations. - - Never enable this option on a distribution kernel. - - Say N here, unless you are building a kernel for your own - use, and need to control the important firmware LEDs. - -config THINKPAD_ACPI_VIDEO - bool "Video output control support" - depends on THINKPAD_ACPI - default y - help - Allows the thinkpad_acpi driver to provide an interface to control - the various video output ports. - - This feature often won't work well, depending on ThinkPad model, - display state, video output devices in use, whether there is a X - server running, phase of the moon, and the current mood of - Schroedinger's cat. If you can use X.org's RandR to control - your ThinkPad's video output ports instead of this feature, - don't think twice: do it and say N here to save memory and avoid - bad interactions with X.org. - - NOTE: access to this feature is limited to processes with the - CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms - where it interacts badly with X.org. - - If you are not sure, say Y here but do try to check if you could - be using X.org RandR instead. - -config THINKPAD_ACPI_HOTKEY_POLL - bool "Support NVRAM polling for hot keys" - depends on THINKPAD_ACPI - default y - help - Some thinkpad models benefit from NVRAM polling to detect a few of - the hot key press events. If you know your ThinkPad model does not - need to do NVRAM polling to support any of the hot keys you use, - unselecting this option will save about 1kB of memory. - - ThinkPads T40 and newer, R52 and newer, and X31 and newer are - unlikely to need NVRAM polling in their latest BIOS versions. - - NVRAM polling can detect at most the following keys: ThinkPad/Access - IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute, - Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12). - - If you are not sure, say Y here. The driver enables polling only if - it is strictly necessary to do so. - -config THINKPAD_LMI - tristate "Lenovo WMI-based systems management driver" - depends on ACPI_WMI - select FW_ATTR_CLASS - help - This driver allows changing BIOS settings on Lenovo machines whose - BIOS support the WMI interface. - - To compile this driver as a module, choose M here: the module will - be called think-lmi. - source "drivers/platform/x86/intel/Kconfig" +source "drivers/platform/x86/lenovo/Kconfig" config ACPI_QUICKSTART tristate "ACPI Quickstart button driver" @@ -825,6 +619,7 @@ config SAMSUNG_LAPTOP tristate "Samsung Laptop driver" depends on RFKILL || RFKILL = n depends on ACPI_VIDEO || ACPI_VIDEO = n + depends on ACPI_BATTERY depends on BACKLIGHT_CLASS_DEVICE select LEDS_CLASS select NEW_LEDS @@ -1078,18 +873,6 @@ config INSPUR_PLATFORM_PROFILE To compile this driver as a module, choose M here: the module will be called inspur-platform-profile. -config LENOVO_WMI_CAMERA - tristate "Lenovo WMI Camera Button driver" - depends on ACPI_WMI - depends on INPUT - help - This driver provides support for Lenovo camera button. The Camera - button is a GPIO device. This driver receives ACPI notifications when - the camera button is switched on/off. - - To compile this driver as a module, choose M here: the module - will be called lenovo-wmi-camera. 
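The LENOVO_WMI_CAMERA help text above (the entry being moved under drivers/platform/x86/lenovo/) describes the mechanism such button drivers rely on: firmware raises a WMI/ACPI notification when the switch changes state and the driver reacts in its notify callback. The following is only an illustrative sketch of that pattern, with a made-up GUID and driver name, not the actual lenovo-wmi-camera implementation.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/wmi.h>

/* Made-up GUID for illustration only. */
#define EXAMPLE_BUTTON_GUID "01234567-89AB-CDEF-0123-456789ABCDEF"

static void example_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
{
	/* Firmware typically passes the new switch state as an integer. */
	if (!obj || obj->type != ACPI_TYPE_INTEGER)
		return;

	dev_info(&wdev->dev, "button switched %s\n",
		 obj->integer.value ? "on" : "off");
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ EXAMPLE_BUTTON_GUID, NULL },
	{ }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi-button",
	},
	.id_table = example_wmi_id_table,
	.notify = example_wmi_notify,
	/* a real driver would add .probe to set up an input device */
};
module_wmi_driver(example_wmi_driver);

MODULE_DESCRIPTION("Sketch of a WMI notification handler");
MODULE_LICENSE("GPL");
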
- config DASHARO_ACPI tristate "Dasharo ACPI Platform Driver" depends on ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index abbc2644ff6d..a0c5848513e3 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -58,17 +58,14 @@ obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP) += hp/ # Hewlett Packard Enterprise obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o -# IBM Thinkpad and Lenovo +obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o + +# IBM Thinkpad (before 2005) obj-$(CONFIG_IBM_RTL) += ibm_rtl.o -obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o -obj-$(CONFIG_LENOVO_WMI_HOTKEY_UTILITIES) += lenovo-wmi-hotkey-utilities.o -obj-$(CONFIG_LENOVO_YMC) += lenovo-ymc.o obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o -obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o -obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o -obj-$(CONFIG_YOGABOOK) += lenovo-yogabook.o -obj-$(CONFIG_YT2_1380) += lenovo-yoga-tab2-pro-1380-fastcharger.o -obj-$(CONFIG_LENOVO_WMI_CAMERA) += lenovo-wmi-camera.o + +# Lenovo +obj-y += lenovo/ # Intel obj-y += intel/ @@ -128,7 +125,6 @@ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o # Platform drivers -obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c index 2f1faa82d13e..d974c2289f5a 100644 --- a/drivers/platform/x86/amd/hsmp/acpi.c +++ b/drivers/platform/x86/amd/hsmp/acpi.c @@ -587,8 +587,10 @@ static int hsmp_acpi_probe(struct platform_device *pdev) if (!hsmp_pdev->is_probed) { hsmp_pdev->num_sockets = amd_num_nodes(); - if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) + if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) { + dev_err(&pdev->dev, "Wrong number of sockets\n"); return -ENODEV; + } hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets, sizeof(*hsmp_pdev->sock), @@ -605,9 +607,12 @@ static int hsmp_acpi_probe(struct platform_device *pdev) if (!hsmp_pdev->is_probed) { ret = hsmp_misc_register(&pdev->dev); - if (ret) + if (ret) { + dev_err(&pdev->dev, "Failed to register misc device\n"); return ret; + } hsmp_pdev->is_probed = true; + dev_dbg(&pdev->dev, "AMD HSMP ACPI is probed successfully\n"); } return 0; diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h index 36b5ceea9ac0..0509a442eaae 100644 --- a/drivers/platform/x86/amd/hsmp/hsmp.h +++ b/drivers/platform/x86/amd/hsmp/hsmp.h @@ -13,6 +13,7 @@ #include <linux/compiler_types.h> #include <linux/device.h> #include <linux/hwmon.h> +#include <linux/kconfig.h> #include <linux/miscdevice.h> #include <linux/pci.h> #include <linux/semaphore.h> @@ -64,7 +65,7 @@ int hsmp_misc_register(struct device *dev); int hsmp_get_tbl_dram_base(u16 sock_ind); ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size); struct hsmp_plat_device *get_hsmp_pdev(void); -#if IS_REACHABLE(CONFIG_HWMON) +#if IS_ENABLED(CONFIG_HWMON) int hsmp_create_sensor(struct device *dev, u16 sock_ind); #else static inline int hsmp_create_sensor(struct device *dev, u16 sock_ind) { return 0; } diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c index e3874c47ed9e..f8aa844d33e4 100644 --- a/drivers/platform/x86/amd/hsmp/plat.c +++ 
b/drivers/platform/x86/amd/hsmp/plat.c @@ -14,6 +14,8 @@ #include <linux/acpi.h> #include <linux/build_bug.h> #include <linux/device.h> +#include <linux/dev_printk.h> +#include <linux/kconfig.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> @@ -215,7 +217,14 @@ static int hsmp_pltdrv_probe(struct platform_device *pdev) return ret; } - return hsmp_misc_register(&pdev->dev); + ret = hsmp_misc_register(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "Failed to register misc device\n"); + return ret; + } + + dev_dbg(&pdev->dev, "AMD HSMP is probed successfully\n"); + return 0; } static void hsmp_pltdrv_remove(struct platform_device *pdev) @@ -287,15 +296,20 @@ static int __init hsmp_plt_init(void) { int ret = -ENODEV; + if (acpi_dev_present(ACPI_HSMP_DEVICE_HID, NULL, -1)) { + if (IS_ENABLED(CONFIG_AMD_HSMP_ACPI)) + pr_debug("HSMP is supported through ACPI on this platform, please use hsmp_acpi.ko\n"); + else + pr_info("HSMP is supported through ACPI on this platform, please enable AMD_HSMP_ACPI config\n"); + return -ENODEV; + } + if (!legacy_hsmp_support()) { - pr_info("HSMP is not supported on Family:%x model:%x\n", + pr_info("HSMP interface is either disabled or not supported on family:%x model:%x\n", boot_cpu_data.x86, boot_cpu_data.x86_model); return ret; } - if (acpi_dev_present(ACPI_HSMP_DEVICE_HID, NULL, -1)) - return -ENODEV; - hsmp_pdev = get_hsmp_pdev(); if (!hsmp_pdev) return -ENOMEM; @@ -305,8 +319,10 @@ static int __init hsmp_plt_init(void) * if we have N SMN/DF interfaces that ideally means N sockets */ hsmp_pdev->num_sockets = amd_num_nodes(); - if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) + if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) { + pr_err("Wrong number of sockets\n"); return ret; + } ret = platform_driver_register(&amd_hsmp_driver); if (ret) diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 131f10b68308..ded4c84f5ed1 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -190,6 +190,15 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), } }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */ + { + .ident = "Lenovo Yoga 6 13ALC6", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82ND"), + } + }, /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ { .ident = "HP Laptop 15s-eq2xxx", diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c index 20ec122a9fe0..31f9643a6a3b 100644 --- a/drivers/platform/x86/dell/alienware-wmi-wmax.c +++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c @@ -90,6 +90,14 @@ static struct awcc_quirks empty_quirks; static const struct dmi_system_id awcc_dmi_table[] __initconst = { { + .ident = "Alienware Area-51m", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m"), + }, + .driver_data = &generic_quirks, + }, + { .ident = "Alienware Area-51m R2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), @@ -98,6 +106,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { .driver_data = &generic_quirks, }, { + .ident = "Alienware m15 R5", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R5"), + }, + .driver_data = &generic_quirks, + }, + { 
.ident = "Alienware m15 R7", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), @@ -233,6 +249,7 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { }, .driver_data = &g_series_quirks, }, + {} }; enum AWCC_GET_FAN_SENSORS_OPERATIONS { @@ -273,9 +290,29 @@ enum AWCC_SPECIAL_THERMAL_CODES { enum AWCC_TEMP_SENSOR_TYPES { AWCC_TEMP_SENSOR_CPU = 0x01, + AWCC_TEMP_SENSOR_FRONT = 0x03, AWCC_TEMP_SENSOR_GPU = 0x06, }; +enum AWCC_FAN_TYPES { + AWCC_FAN_CPU_1 = 0x32, + AWCC_FAN_GPU_1 = 0x33, + AWCC_FAN_PCI = 0x34, + AWCC_FAN_MID = 0x35, + AWCC_FAN_TOP_1 = 0x36, + AWCC_FAN_SIDE = 0x37, + AWCC_FAN_U2_1 = 0x38, + AWCC_FAN_U2_2 = 0x39, + AWCC_FAN_FRONT_1 = 0x3A, + AWCC_FAN_CPU_2 = 0x3B, + AWCC_FAN_GPU_2 = 0x3C, + AWCC_FAN_TOP_2 = 0x3D, + AWCC_FAN_TOP_3 = 0x3E, + AWCC_FAN_FRONT_2 = 0x3F, + AWCC_FAN_BOTTOM_1 = 0x40, + AWCC_FAN_BOTTOM_2 = 0x41, +}; + enum awcc_thermal_profile { AWCC_PROFILE_USTT_BALANCED, AWCC_PROFILE_USTT_BALANCED_PERFORMANCE, @@ -314,7 +351,6 @@ struct wmax_u32_args { struct awcc_fan_data { unsigned long auto_channels_temp; - const char *label; u32 min_rpm; u32 max_rpm; u8 suspend_cache; @@ -896,6 +932,9 @@ static int awcc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types ty case AWCC_TEMP_SENSOR_CPU: *str = "CPU"; break; + case AWCC_TEMP_SENSOR_FRONT: + *str = "Front"; + break; case AWCC_TEMP_SENSOR_GPU: *str = "GPU"; break; @@ -906,7 +945,46 @@ static int awcc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types ty break; case hwmon_fan: - *str = priv->fan_data[channel]->label; + switch (priv->fan_data[channel]->id) { + case AWCC_FAN_CPU_1: + case AWCC_FAN_CPU_2: + *str = "CPU Fan"; + break; + case AWCC_FAN_GPU_1: + case AWCC_FAN_GPU_2: + *str = "GPU Fan"; + break; + case AWCC_FAN_PCI: + *str = "PCI Fan"; + break; + case AWCC_FAN_MID: + *str = "Mid Fan"; + break; + case AWCC_FAN_TOP_1: + case AWCC_FAN_TOP_2: + case AWCC_FAN_TOP_3: + *str = "Top Fan"; + break; + case AWCC_FAN_SIDE: + *str = "Side Fan"; + break; + case AWCC_FAN_U2_1: + case AWCC_FAN_U2_2: + *str = "U.2 Fan"; + break; + case AWCC_FAN_FRONT_1: + case AWCC_FAN_FRONT_2: + *str = "Front Fan"; + break; + case AWCC_FAN_BOTTOM_1: + case AWCC_FAN_BOTTOM_2: + *str = "Bottom Fan"; + break; + default: + *str = "Unknown Fan"; + break; + } + break; default: return -EOPNOTSUPP; @@ -1051,40 +1129,6 @@ static int awcc_hwmon_temps_init(struct wmi_device *wdev) return 0; } -static char *awcc_get_fan_label(unsigned long *fan_temps) -{ - unsigned int temp_count = bitmap_weight(fan_temps, AWCC_ID_BITMAP_SIZE); - char *label; - u8 temp_id; - - switch (temp_count) { - case 0: - label = "Independent Fan"; - break; - case 1: - temp_id = find_first_bit(fan_temps, AWCC_ID_BITMAP_SIZE); - - switch (temp_id) { - case AWCC_TEMP_SENSOR_CPU: - label = "Processor Fan"; - break; - case AWCC_TEMP_SENSOR_GPU: - label = "Video Fan"; - break; - default: - label = "Unknown Fan"; - break; - } - - break; - default: - label = "Shared Fan"; - break; - } - - return label; -} - static int awcc_hwmon_fans_init(struct wmi_device *wdev) { struct awcc_priv *priv = dev_get_drvdata(&wdev->dev); @@ -1138,7 +1182,6 @@ static int awcc_hwmon_fans_init(struct wmi_device *wdev) fan_data->id = id; fan_data->min_rpm = min_rpm; fan_data->max_rpm = max_rpm; - fan_data->label = awcc_get_fan_label(fan_temps); bitmap_gather(gather, fan_temps, priv->temp_sensors, AWCC_ID_BITMAP_SIZE); bitmap_copy(&fan_data->auto_channels_temp, gather, BITS_PER_LONG); priv->fan_data[i] = fan_data; diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c 
b/drivers/platform/x86/dell/dell-lis3lv02d.c index 0791118dd6b7..732de5f556f8 100644 --- a/drivers/platform/x86/dell/dell-lis3lv02d.c +++ b/drivers/platform/x86/dell/dell-lis3lv02d.c @@ -49,6 +49,7 @@ static const struct dmi_system_id lis3lv02d_devices[] __initconst = { DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29), + DELL_LIS3LV02D_DMI_ENTRY("Precision 3551", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Vostro V131", 0x1d), DELL_LIS3LV02D_DMI_ENTRY("Vostro 5568", 0x29), diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c index 8f868f845350..f323a667dc2d 100644 --- a/drivers/platform/x86/dell/dell-uart-backlight.c +++ b/drivers/platform/x86/dell/dell-uart-backlight.c @@ -305,7 +305,7 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev) dev_dbg(dev, "Firmware version: %.*s\n", resp[RESP_LEN] - 3, resp + RESP_DATA); /* Initialize bl_power to a known value */ - ret = dell_uart_set_bl_power(dell_bl, FB_BLANK_UNBLANK); + ret = dell_uart_set_bl_power(dell_bl, BACKLIGHT_POWER_ON); if (ret) return ret; diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c index 67f3d7158403..62e3d060f038 100644 --- a/drivers/platform/x86/dell/dell-wmi-ddv.c +++ b/drivers/platform/x86/dell/dell-wmi-ddv.c @@ -689,9 +689,13 @@ static int dell_wmi_ddv_battery_translate(struct dell_wmi_ddv_data *data, dev_dbg(&data->wdev->dev, "Translation cache miss\n"); - /* Perform a translation between a ACPI battery and a battery index */ - - ret = power_supply_get_property(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val); + /* + * Perform a translation between a ACPI battery and a battery index. + * We have to use power_supply_get_property_direct() here because this + * function will also get called from the callbacks of the power supply + * extension. 
+ */ + ret = power_supply_get_property_direct(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val); if (ret < 0) return ret; diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c index 9dd9f2cb074f..45c0a72e494a 100644 --- a/drivers/platform/x86/dell/dell_rbu.c +++ b/drivers/platform/x86/dell/dell_rbu.c @@ -77,14 +77,14 @@ struct packet_data { int ordernum; }; -static struct packet_data packet_data_head; +static struct list_head packet_data_list; static struct platform_device *rbu_device; static int context; static void init_packet_head(void) { - INIT_LIST_HEAD(&packet_data_head.list); + INIT_LIST_HEAD(&packet_data_list); rbu_data.packet_read_count = 0; rbu_data.num_packets = 0; rbu_data.packetsize = 0; @@ -183,7 +183,7 @@ static int create_packet(void *data, size_t length) __must_hold(&rbu_data.lock) /* initialize the newly created packet headers */ INIT_LIST_HEAD(&newpacket->list); - list_add_tail(&newpacket->list, &packet_data_head.list); + list_add_tail(&newpacket->list, &packet_data_list); memcpy(newpacket->data, data, length); @@ -292,7 +292,7 @@ static int packet_read_list(char *data, size_t * pread_length) remaining_bytes = *pread_length; bytes_read = rbu_data.packet_read_count; - list_for_each_entry(newpacket, &packet_data_head.list, list) { + list_for_each_entry(newpacket, &packet_data_list, list) { bytes_copied = do_packet_read(pdest, newpacket, remaining_bytes, bytes_read, &temp_count); remaining_bytes -= bytes_copied; @@ -315,7 +315,7 @@ static void packet_empty_list(void) { struct packet_data *newpacket, *tmp; - list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) { + list_for_each_entry_safe(newpacket, tmp, &packet_data_list, list) { list_del(&newpacket->list); /* diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 162809140f68..931fbcdd21b8 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -180,15 +180,19 @@ static ssize_t charge_control_end_threshold_store(struct device *dev, const char *buf, size_t count) { int cc_end_value, s006_cc_return; - int value, ret; + unsigned int value; + int ret; ret = kstrtouint(buf, 10, &value); if (ret) return ret; - if (value < 50 || value > 100) + if (value > 100) return -EINVAL; + if (value < 50) + value = 50; + cc_end_value = value * 0x100 + 0x20; s006_cc_return = call_fext_func(fext, FUNC_S006_METHOD, CHARGE_CONTROL_RW, cc_end_value, 0x0); diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c index 2b55347a5a93..58132da47745 100644 --- a/drivers/platform/x86/intel/plr_tpmi.c +++ b/drivers/platform/x86/intel/plr_tpmi.c @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/gfp_types.h> #include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kstrtox.h> @@ -256,7 +257,7 @@ DEFINE_SHOW_STORE_ATTRIBUTE(plr_status); static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { - struct intel_tpmi_plat_info *plat_info; + struct oobmsm_plat_info *plat_info; struct dentry *dentry; int i, num_resources; struct resource *res; diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig index e916fc966221..7363446b7773 100644 --- a/drivers/platform/x86/intel/pmt/Kconfig +++ b/drivers/platform/x86/intel/pmt/Kconfig @@ -18,6 +18,7 @@ config INTEL_PMT_CLASS config INTEL_PMT_TELEMETRY tristate "Intel Platform Monitoring Technology (PMT) 
Telemetry driver" depends on INTEL_VSEC + select INTEL_PMT_DISCOVERY select INTEL_PMT_CLASS help The Intel Platform Monitory Technology (PMT) Telemetry driver provides @@ -38,3 +39,30 @@ config INTEL_PMT_CRASHLOG To compile this driver as a module, choose M here: the module will be called intel_pmt_crashlog. + +config INTEL_PMT_DISCOVERY + tristate "Intel Platform Monitoring Technology (PMT) Discovery driver" + depends on INTEL_VSEC + select INTEL_PMT_CLASS + help + The Intel Platform Monitoring Technology (PMT) discovery driver provides + access to details about the various PMT features and feature specific + attributes. + + To compile this driver as a module, choose M here: the module + will be called pmt_discovery. + +config INTEL_PMT_KUNIT_TEST + tristate "KUnit tests for Intel PMT driver" + depends on INTEL_PMT_DISCOVERY + depends on INTEL_PMT_TELEMETRY || !INTEL_PMT_TELEMETRY + depends on KUNIT + help + Enable this option to compile and run a suite of KUnit tests for the Intel + Platform Monitoring Technology (PMT) driver. These tests are designed to + validate the driver's functionality, error handling, and overall stability, + helping developers catch regressions and ensure code quality during changes. + + This option is intended for development and testing environments. It is + recommended to disable it in production builds. To compile this driver as a + module, choose M here: the module will be called pmt-discovery-kunit. diff --git a/drivers/platform/x86/intel/pmt/Makefile b/drivers/platform/x86/intel/pmt/Makefile index 279e158c7c23..47f692c091c9 100644 --- a/drivers/platform/x86/intel/pmt/Makefile +++ b/drivers/platform/x86/intel/pmt/Makefile @@ -10,3 +10,7 @@ obj-$(CONFIG_INTEL_PMT_TELEMETRY) += pmt_telemetry.o pmt_telemetry-y := telemetry.o obj-$(CONFIG_INTEL_PMT_CRASHLOG) += pmt_crashlog.o pmt_crashlog-y := crashlog.o +obj-$(CONFIG_INTEL_PMT_DISCOVERY) += pmt_discovery.o +pmt_discovery-y := discovery.o features.o +obj-$(CONFIG_INTEL_PMT_KUNIT_TEST) += pmt-discovery-kunit.o +pmt-discovery-kunit-y := discovery-kunit.o diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 7233b654bbad..edcce340ea67 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -9,11 +9,13 @@ */ #include <linux/kernel.h> +#include <linux/log2.h> #include <linux/intel_vsec.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/pci.h> +#include <linux/sysfs.h> #include "class.h" @@ -97,7 +99,7 @@ intel_pmt_read(struct file *filp, struct kobject *kobj, if (count > entry->size - off) count = entry->size - off; - count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf, + count = pmt_telem_read_mmio(entry->pcidev, entry->cb, entry->header.guid, buf, entry->base, off, count); return count; @@ -166,12 +168,41 @@ static struct attribute *intel_pmt_attrs[] = { &dev_attr_offset.attr, NULL }; -ATTRIBUTE_GROUPS(intel_pmt); -static struct class intel_pmt_class = { +static umode_t intel_pmt_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev->parent); + struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev); + + /* + * Place the discovery features folder in /sys/class/intel_pmt, but + * exclude the common attributes as they are not applicable. 
+ */ + if (ivdev->cap_id == ilog2(VSEC_CAP_DISCOVERY)) + return 0; + + return attr->mode; +} + +static bool intel_pmt_group_visible(struct kobject *kobj) +{ + return true; +} +DEFINE_SYSFS_GROUP_VISIBLE(intel_pmt); + +static const struct attribute_group intel_pmt_group = { + .attrs = intel_pmt_attrs, + .is_visible = SYSFS_GROUP_VISIBLE(intel_pmt), +}; +__ATTRIBUTE_GROUPS(intel_pmt); + +struct class intel_pmt_class = { .name = "intel_pmt", .dev_groups = intel_pmt_groups, }; +EXPORT_SYMBOL_GPL(intel_pmt_class); static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, struct intel_vsec_device *ivdev, @@ -252,6 +283,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, return -EINVAL; } + entry->pcidev = pci_dev; entry->guid = header->guid; entry->size = header->size; entry->cb = ivdev->priv_data; @@ -284,8 +316,8 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, entry->kobj = &dev->kobj; - if (ns->attr_grp) { - ret = sysfs_create_group(entry->kobj, ns->attr_grp); + if (entry->attr_grp) { + ret = sysfs_create_group(entry->kobj, entry->attr_grp); if (ret) goto fail_sysfs_create_group; } @@ -326,8 +358,8 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, fail_add_endpoint: sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); fail_ioremap: - if (ns->attr_grp) - sysfs_remove_group(entry->kobj, ns->attr_grp); + if (entry->attr_grp) + sysfs_remove_group(entry->kobj, entry->attr_grp); fail_sysfs_create_group: device_unregister(dev); fail_dev_create: @@ -369,8 +401,8 @@ void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, if (entry->size) sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); - if (ns->attr_grp) - sysfs_remove_group(entry->kobj, ns->attr_grp); + if (entry->attr_grp) + sysfs_remove_group(entry->kobj, entry->attr_grp); device_unregister(dev); xa_erase(ns->xa, entry->devid); diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index b2006d57779d..3c5ad5f52bca 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -20,6 +20,7 @@ #define GET_ADDRESS(v) ((v) & GENMASK(31, 3)) struct pci_dev; +extern struct class intel_pmt_class; struct telem_endpoint { struct pci_dev *pcidev; @@ -39,22 +40,25 @@ struct intel_pmt_header { struct intel_pmt_entry { struct telem_endpoint *ep; + struct pci_dev *pcidev; struct intel_pmt_header header; struct bin_attribute pmt_bin_attr; + const struct attribute_group *attr_grp; struct kobject *kobj; void __iomem *disc_table; void __iomem *base; struct pmt_callbacks *cb; unsigned long base_addr; size_t size; + u64 feature_flags; u32 guid; + u32 num_rmids; /* Number of Resource Monitoring IDs */ int devid; }; struct intel_pmt_namespace { const char *name; struct xarray *xa; - const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, struct device *dev); int (*pmt_add_endpoint)(struct intel_vsec_device *ivdev, @@ -69,4 +73,10 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_vsec_device *dev, int idx); void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns); +#if IS_ENABLED(CONFIG_INTEL_PMT_DISCOVERY) +void intel_pmt_get_features(struct intel_pmt_entry *entry); +#else +static inline void intel_pmt_get_features(struct intel_pmt_entry *entry) {} +#endif + #endif diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index 6a9eb3c4b313..b0393c9c5b4b 100644 --- 
a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -9,9 +9,11 @@ */ #include <linux/auxiliary_bus.h> +#include <linux/cleanup.h> #include <linux/intel_vsec.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/uaccess.h> @@ -22,21 +24,6 @@ /* Crashlog discovery header types */ #define CRASH_TYPE_OOBMSM 1 -/* Control Flags */ -#define CRASHLOG_FLAG_DISABLE BIT(28) - -/* - * Bits 29 and 30 control the state of bit 31. - * - * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. - * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. - * Bit 31 is the read-only status with a 1 indicating log is complete. - */ -#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29) -#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30) -#define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31) -#define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28) - /* Crashlog Discovery Header */ #define CONTROL_OFFSET 0x0 #define GUID_OFFSET 0x4 @@ -48,10 +35,84 @@ /* size is in bytes */ #define GET_SIZE(v) ((v) * sizeof(u32)) +/* + * Type 1 Version 0 + * status and control registers are combined. + * + * Bits 29 and 30 control the state of bit 31. + * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. + * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. + * Bit 31 is the read-only status with a 1 indicating log is complete. + */ +#define TYPE1_VER0_STATUS_OFFSET 0x00 +#define TYPE1_VER0_CONTROL_OFFSET 0x00 + +#define TYPE1_VER0_DISABLE BIT(28) +#define TYPE1_VER0_CLEAR BIT(29) +#define TYPE1_VER0_EXECUTE BIT(30) +#define TYPE1_VER0_COMPLETE BIT(31) +#define TYPE1_VER0_TRIGGER_MASK GENMASK(31, 28) + +/* + * Type 1 Version 2 + * status and control are different registers + */ +#define TYPE1_VER2_STATUS_OFFSET 0x00 +#define TYPE1_VER2_CONTROL_OFFSET 0x14 + +/* status register */ +#define TYPE1_VER2_CLEAR_SUPPORT BIT(20) +#define TYPE1_VER2_REARMED BIT(25) +#define TYPE1_VER2_ERROR BIT(26) +#define TYPE1_VER2_CONSUMED BIT(27) +#define TYPE1_VER2_DISABLED BIT(28) +#define TYPE1_VER2_CLEARED BIT(29) +#define TYPE1_VER2_IN_PROGRESS BIT(30) +#define TYPE1_VER2_COMPLETE BIT(31) + +/* control register */ +#define TYPE1_VER2_CONSUME BIT(25) +#define TYPE1_VER2_REARM BIT(28) +#define TYPE1_VER2_EXECUTE BIT(29) +#define TYPE1_VER2_CLEAR BIT(30) +#define TYPE1_VER2_DISABLE BIT(31) +#define TYPE1_VER2_TRIGGER_MASK \ + (TYPE1_VER2_EXECUTE | TYPE1_VER2_CLEAR | TYPE1_VER2_DISABLE) + +/* After offset, order alphabetically, not bit ordered */ +struct crashlog_status { + u32 offset; + u32 clear_supported; + u32 cleared; + u32 complete; + u32 consumed; + u32 disabled; + u32 error; + u32 in_progress; + u32 rearmed; +}; + +struct crashlog_control { + u32 offset; + u32 trigger_mask; + u32 clear; + u32 consume; + u32 disable; + u32 manual; + u32 rearm; +}; + +struct crashlog_info { + const struct crashlog_status status; + const struct crashlog_control control; + const struct attribute_group *attr_grp; +}; + struct crashlog_entry { /* entry must be first member of struct */ struct intel_pmt_entry entry; struct mutex control_mutex; + const struct crashlog_info *info; }; struct pmt_crashlog_priv { @@ -62,180 +123,397 @@ struct pmt_crashlog_priv { /* * I/O */ -static bool pmt_crashlog_complete(struct intel_pmt_entry *entry) + +/* Read, modify, write the control register, setting or clearing @bit based on @set */ +static void pmt_crashlog_rmw(struct 
crashlog_entry *crashlog, u32 bit, bool set) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + const struct crashlog_control *control = &crashlog->info->control; + struct intel_pmt_entry *entry = &crashlog->entry; + u32 reg = readl(entry->disc_table + control->offset); - /* return current value of the crashlog complete flag */ - return !!(control & CRASHLOG_FLAG_TRIGGER_COMPLETE); + reg &= ~control->trigger_mask; + + if (set) + reg |= bit; + else + reg &= ~bit; + + writel(reg, entry->disc_table + control->offset); } -static bool pmt_crashlog_disabled(struct intel_pmt_entry *entry) +/* Read the status register and see if the specified @bit is set */ +static bool pmt_crashlog_rc(struct crashlog_entry *crashlog, u32 bit) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + const struct crashlog_status *status = &crashlog->info->status; + u32 reg = readl(crashlog->entry.disc_table + status->offset); + return !!(reg & bit); +} + +static bool pmt_crashlog_complete(struct crashlog_entry *crashlog) +{ + /* return current value of the crashlog complete flag */ + return pmt_crashlog_rc(crashlog, crashlog->info->status.complete); +} + +static bool pmt_crashlog_disabled(struct crashlog_entry *crashlog) +{ /* return current value of the crashlog disabled flag */ - return !!(control & CRASHLOG_FLAG_DISABLE); + return pmt_crashlog_rc(crashlog, crashlog->info->status.disabled); } -static bool pmt_crashlog_supported(struct intel_pmt_entry *entry) +static bool pmt_crashlog_supported(struct intel_pmt_entry *entry, u32 *crash_type, u32 *version) { u32 discovery_header = readl(entry->disc_table + CONTROL_OFFSET); - u32 crash_type, version; - crash_type = GET_TYPE(discovery_header); - version = GET_VERSION(discovery_header); + *crash_type = GET_TYPE(discovery_header); + *version = GET_VERSION(discovery_header); /* - * Currently we only recognize OOBMSM version 0 devices. - * We can ignore all other crashlog devices in the system. + * Currently we only recognize OOBMSM (type 1) and version 0 or 2 + * devices. + * + * Ignore all other crashlog devices in the system. 
*/ - return crash_type == CRASH_TYPE_OOBMSM && version == 0; + if (*crash_type == CRASH_TYPE_OOBMSM && (*version == 0 || *version == 2)) + return true; + + return false; } -static void pmt_crashlog_set_disable(struct intel_pmt_entry *entry, +static void pmt_crashlog_set_disable(struct crashlog_entry *crashlog, bool disable) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); - - /* clear trigger bits so we are only modifying disable flag */ - control &= ~CRASHLOG_FLAG_TRIGGER_MASK; + pmt_crashlog_rmw(crashlog, crashlog->info->control.disable, disable); +} - if (disable) - control |= CRASHLOG_FLAG_DISABLE; - else - control &= ~CRASHLOG_FLAG_DISABLE; +static void pmt_crashlog_set_clear(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.clear, true); +} - writel(control, entry->disc_table + CONTROL_OFFSET); +static void pmt_crashlog_set_execute(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.manual, true); } -static void pmt_crashlog_set_clear(struct intel_pmt_entry *entry) +static bool pmt_crashlog_cleared(struct crashlog_entry *crashlog) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + return pmt_crashlog_rc(crashlog, crashlog->info->status.cleared); +} - control &= ~CRASHLOG_FLAG_TRIGGER_MASK; - control |= CRASHLOG_FLAG_TRIGGER_CLEAR; +static bool pmt_crashlog_consumed(struct crashlog_entry *crashlog) +{ + return pmt_crashlog_rc(crashlog, crashlog->info->status.consumed); +} - writel(control, entry->disc_table + CONTROL_OFFSET); +static void pmt_crashlog_set_consumed(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.consume, true); } -static void pmt_crashlog_set_execute(struct intel_pmt_entry *entry) +static bool pmt_crashlog_error(struct crashlog_entry *crashlog) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + return pmt_crashlog_rc(crashlog, crashlog->info->status.error); +} - control &= ~CRASHLOG_FLAG_TRIGGER_MASK; - control |= CRASHLOG_FLAG_TRIGGER_EXECUTE; +static bool pmt_crashlog_rearm(struct crashlog_entry *crashlog) +{ + return pmt_crashlog_rc(crashlog, crashlog->info->status.rearmed); +} - writel(control, entry->disc_table + CONTROL_OFFSET); +static void pmt_crashlog_set_rearm(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.rearm, true); } /* * sysfs */ static ssize_t +clear_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool cleared = pmt_crashlog_cleared(crashlog); + + return sysfs_emit(buf, "%d\n", cleared); +} + +static ssize_t +clear_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct crashlog_entry *crashlog; + bool clear; + int result; + + crashlog = dev_get_drvdata(dev); + + result = kstrtobool(buf, &clear); + if (result) + return result; + + /* set bit only */ + if (!clear) + return -EINVAL; + + guard(mutex)(&crashlog->control_mutex); + + pmt_crashlog_set_clear(crashlog); + + return count; +} +static DEVICE_ATTR_RW(clear); + +static ssize_t +consumed_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool consumed = pmt_crashlog_consumed(crashlog); + + return sysfs_emit(buf, "%d\n", consumed); +} + +static ssize_t +consumed_store(struct device *dev, struct device_attribute *attr, const char *buf, + size_t count) +{ + struct crashlog_entry *crashlog; + bool consumed; + 
int result; + + crashlog = dev_get_drvdata(dev); + + result = kstrtobool(buf, &consumed); + if (result) + return result; + + /* set bit only */ + if (!consumed) + return -EINVAL; + + guard(mutex)(&crashlog->control_mutex); + + if (pmt_crashlog_disabled(crashlog)) + return -EBUSY; + + if (!pmt_crashlog_complete(crashlog)) + return -EEXIST; + + pmt_crashlog_set_consumed(crashlog); + + return count; +} +static DEVICE_ATTR_RW(consumed); + +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_pmt_entry *entry = dev_get_drvdata(dev); - int enabled = !pmt_crashlog_disabled(entry); + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool enabled = !pmt_crashlog_disabled(crashlog); return sprintf(buf, "%d\n", enabled); } static ssize_t enable_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) + const char *buf, size_t count) { - struct crashlog_entry *entry; + struct crashlog_entry *crashlog; bool enabled; int result; - entry = dev_get_drvdata(dev); + crashlog = dev_get_drvdata(dev); result = kstrtobool(buf, &enabled); if (result) return result; - mutex_lock(&entry->control_mutex); - pmt_crashlog_set_disable(&entry->entry, !enabled); - mutex_unlock(&entry->control_mutex); + guard(mutex)(&crashlog->control_mutex); + + pmt_crashlog_set_disable(crashlog, !enabled); return count; } static DEVICE_ATTR_RW(enable); static ssize_t +error_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool error = pmt_crashlog_error(crashlog); + + return sysfs_emit(buf, "%d\n", error); +} +static DEVICE_ATTR_RO(error); + +static ssize_t +rearm_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + int rearmed = pmt_crashlog_rearm(crashlog); + + return sysfs_emit(buf, "%d\n", rearmed); +} + +static ssize_t +rearm_store(struct device *dev, struct device_attribute *attr, const char *buf, + size_t count) +{ + struct crashlog_entry *crashlog; + bool rearm; + int result; + + crashlog = dev_get_drvdata(dev); + + result = kstrtobool(buf, &rearm); + if (result) + return result; + + /* set only */ + if (!rearm) + return -EINVAL; + + guard(mutex)(&crashlog->control_mutex); + + pmt_crashlog_set_rearm(crashlog); + + return count; +} +static DEVICE_ATTR_RW(rearm); + +static ssize_t trigger_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_pmt_entry *entry; - int trigger; + struct crashlog_entry *crashlog; + bool trigger; - entry = dev_get_drvdata(dev); - trigger = pmt_crashlog_complete(entry); + crashlog = dev_get_drvdata(dev); + trigger = pmt_crashlog_complete(crashlog); return sprintf(buf, "%d\n", trigger); } static ssize_t trigger_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) + const char *buf, size_t count) { - struct crashlog_entry *entry; + struct crashlog_entry *crashlog; bool trigger; int result; - entry = dev_get_drvdata(dev); + crashlog = dev_get_drvdata(dev); result = kstrtobool(buf, &trigger); if (result) return result; - mutex_lock(&entry->control_mutex); + guard(mutex)(&crashlog->control_mutex); + + /* if device is currently disabled, return busy */ + if (pmt_crashlog_disabled(crashlog)) + return -EBUSY; if (!trigger) { - pmt_crashlog_set_clear(&entry->entry); - } else if (pmt_crashlog_complete(&entry->entry)) { - /* we cannot trigger a new crash if one is still pending */ - result = -EEXIST; - 
goto err; - } else if (pmt_crashlog_disabled(&entry->entry)) { - /* if device is currently disabled, return busy */ - result = -EBUSY; - goto err; - } else { - pmt_crashlog_set_execute(&entry->entry); + pmt_crashlog_set_clear(crashlog); + return count; } - result = count; -err: - mutex_unlock(&entry->control_mutex); - return result; + /* we cannot trigger a new crash if one is still pending */ + if (pmt_crashlog_complete(crashlog)) + return -EEXIST; + + pmt_crashlog_set_execute(crashlog); + + return count; } static DEVICE_ATTR_RW(trigger); -static struct attribute *pmt_crashlog_attrs[] = { +static struct attribute *pmt_crashlog_type1_ver0_attrs[] = { &dev_attr_enable.attr, &dev_attr_trigger.attr, NULL }; -static const struct attribute_group pmt_crashlog_group = { - .attrs = pmt_crashlog_attrs, +static struct attribute *pmt_crashlog_type1_ver2_attrs[] = { + &dev_attr_clear.attr, + &dev_attr_consumed.attr, + &dev_attr_enable.attr, + &dev_attr_error.attr, + &dev_attr_rearm.attr, + &dev_attr_trigger.attr, + NULL +}; + +static const struct attribute_group pmt_crashlog_type1_ver0_group = { + .attrs = pmt_crashlog_type1_ver0_attrs, +}; + +static const struct attribute_group pmt_crashlog_type1_ver2_group = { + .attrs = pmt_crashlog_type1_ver2_attrs, +}; + +static const struct crashlog_info crashlog_type1_ver0 = { + .status.offset = TYPE1_VER0_STATUS_OFFSET, + .status.cleared = TYPE1_VER0_CLEAR, + .status.complete = TYPE1_VER0_COMPLETE, + .status.disabled = TYPE1_VER0_DISABLE, + + .control.offset = TYPE1_VER0_CONTROL_OFFSET, + .control.trigger_mask = TYPE1_VER0_TRIGGER_MASK, + .control.clear = TYPE1_VER0_CLEAR, + .control.disable = TYPE1_VER0_DISABLE, + .control.manual = TYPE1_VER0_EXECUTE, + .attr_grp = &pmt_crashlog_type1_ver0_group, +}; + +static const struct crashlog_info crashlog_type1_ver2 = { + .status.offset = TYPE1_VER2_STATUS_OFFSET, + .status.clear_supported = TYPE1_VER2_CLEAR_SUPPORT, + .status.cleared = TYPE1_VER2_CLEARED, + .status.complete = TYPE1_VER2_COMPLETE, + .status.consumed = TYPE1_VER2_CONSUMED, + .status.disabled = TYPE1_VER2_DISABLED, + .status.error = TYPE1_VER2_ERROR, + .status.in_progress = TYPE1_VER2_IN_PROGRESS, + .status.rearmed = TYPE1_VER2_REARMED, + + .control.offset = TYPE1_VER2_CONTROL_OFFSET, + .control.trigger_mask = TYPE1_VER2_TRIGGER_MASK, + .control.clear = TYPE1_VER2_CLEAR, + .control.consume = TYPE1_VER2_CONSUME, + .control.disable = TYPE1_VER2_DISABLE, + .control.manual = TYPE1_VER2_EXECUTE, + .control.rearm = TYPE1_VER2_REARM, + .attr_grp = &pmt_crashlog_type1_ver2_group, }; +static const struct crashlog_info *select_crashlog_info(u32 type, u32 version) +{ + if (version == 0) + return &crashlog_type1_ver0; + + return &crashlog_type1_ver2; +} + static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry, struct device *dev) { void __iomem *disc_table = entry->disc_table; struct intel_pmt_header *header = &entry->header; struct crashlog_entry *crashlog; + u32 version; + u32 type; - if (!pmt_crashlog_supported(entry)) + if (!pmt_crashlog_supported(entry, &type, &version)) return 1; - /* initialize control mutex */ + /* initialize the crashlog struct */ crashlog = container_of(entry, struct crashlog_entry, entry); mutex_init(&crashlog->control_mutex); + crashlog->info = select_crashlog_info(type, version); + header->access_type = GET_ACCESS(readl(disc_table)); header->guid = readl(disc_table + GUID_OFFSET); header->base_offset = readl(disc_table + BASE_OFFSET); @@ -243,6 +521,8 @@ static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry, 
/* Size is measured in DWORDS, but accessor returns bytes */ header->size = GET_SIZE(readl(disc_table + SIZE_OFFSET)); + entry->attr_grp = crashlog->info->attr_grp; + return 0; } @@ -250,7 +530,6 @@ static DEFINE_XARRAY_ALLOC(crashlog_array); static struct intel_pmt_namespace pmt_crashlog_ns = { .name = "crashlog", .xa = &crashlog_array, - .attr_grp = &pmt_crashlog_group, .pmt_header_decode = pmt_crashlog_header_decode, }; @@ -262,8 +541,12 @@ static void pmt_crashlog_remove(struct auxiliary_device *auxdev) struct pmt_crashlog_priv *priv = auxiliary_get_drvdata(auxdev); int i; - for (i = 0; i < priv->num_entries; i++) - intel_pmt_dev_destroy(&priv->entry[i].entry, &pmt_crashlog_ns); + for (i = 0; i < priv->num_entries; i++) { + struct crashlog_entry *crashlog = &priv->entry[i]; + + intel_pmt_dev_destroy(&crashlog->entry, &pmt_crashlog_ns); + mutex_destroy(&crashlog->control_mutex); + } } static int pmt_crashlog_probe(struct auxiliary_device *auxdev, diff --git a/drivers/platform/x86/intel/pmt/discovery-kunit.c b/drivers/platform/x86/intel/pmt/discovery-kunit.c new file mode 100644 index 000000000000..f44eb41d58f6 --- /dev/null +++ b/drivers/platform/x86/intel/pmt/discovery-kunit.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitory Technology Discovery KUNIT tests + * + * Copyright (c) 2025, Intel Corporation. + * All Rights Reserved. + */ + +#include <kunit/test.h> +#include <linux/err.h> +#include <linux/intel_pmt_features.h> +#include <linux/intel_vsec.h> +#include <linux/module.h> +#include <linux/slab.h> + +#define PMT_FEATURE_COUNT (FEATURE_MAX + 1) + +static void +validate_pmt_regions(struct kunit *test, struct pmt_feature_group *feature_group, int feature_id) +{ + int i; + + kunit_info(test, "Feature ID %d [%s] has %d regions.\n", feature_id, + pmt_feature_names[feature_id], feature_group->count); + + for (i = 0; i < feature_group->count; i++) { + struct telemetry_region *region = &feature_group->regions[i]; + + kunit_info(test, " - Region %d: cdie_mask=%u, package_id=%u, partition=%u, segment=%u,", + i, region->plat_info.cdie_mask, region->plat_info.package_id, + region->plat_info.partition, region->plat_info.segment); + kunit_info(test, "\t\tbus=%u, device=%u, function=%u, guid=0x%x,", + region->plat_info.bus_number, region->plat_info.device_number, + region->plat_info.function_number, region->guid); + kunit_info(test, "\t\taddr=%p, size=%zu, num_rmids=%u", region->addr, region->size, + region->num_rmids); + + + KUNIT_ASSERT_GE(test, region->plat_info.cdie_mask, 0); + KUNIT_ASSERT_GE(test, region->plat_info.package_id, 0); + KUNIT_ASSERT_GE(test, region->plat_info.partition, 0); + KUNIT_ASSERT_GE(test, region->plat_info.segment, 0); + KUNIT_ASSERT_GE(test, region->plat_info.bus_number, 0); + KUNIT_ASSERT_GE(test, region->plat_info.device_number, 0); + KUNIT_ASSERT_GE(test, region->plat_info.function_number, 0); + + KUNIT_ASSERT_NE(test, region->guid, 0); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, (__force const void *)region->addr); + } +} + +static void linebreak(struct kunit *test) +{ + kunit_info(test, "*****************************************************************************\n"); +} + +static void test_intel_pmt_get_regions_by_feature(struct kunit *test) +{ + struct pmt_feature_group *feature_group; + int num_available = 0; + int feature_id; + + /* Iterate through all possible feature IDs */ + for (feature_id = 1; feature_id < PMT_FEATURE_COUNT; feature_id++, linebreak(test)) { + const char *name; + + if 
(!pmt_feature_id_is_valid(feature_id)) + continue; + + name = pmt_feature_names[feature_id]; + + feature_group = intel_pmt_get_regions_by_feature(feature_id); + if (IS_ERR(feature_group)) { + if (PTR_ERR(feature_group) == -ENOENT) + kunit_warn(test, "intel_pmt_get_regions_by_feature() reporting feature %d [%s] is not present.\n", + feature_id, name); + else + kunit_warn(test, "intel_pmt_get_regions_by_feature() returned error %ld while attempt to lookup %d [%s].\n", + PTR_ERR(feature_group), feature_id, name); + + continue; + } + + if (!feature_group) { + kunit_warn(test, "Feature ID %d: %s is not available.\n", feature_id, name); + continue; + } + + num_available++; + + validate_pmt_regions(test, feature_group, feature_id); + + intel_pmt_put_feature_group(feature_group); + } + + if (num_available == 0) + kunit_warn(test, "No PMT region groups were available for any feature ID (0-10).\n"); +} + +static struct kunit_case intel_pmt_discovery_test_cases[] = { + KUNIT_CASE(test_intel_pmt_get_regions_by_feature), + {} +}; + +static struct kunit_suite intel_pmt_discovery_test_suite = { + .name = "pmt_discovery_test", + .test_cases = intel_pmt_discovery_test_cases, +}; + +kunit_test_suite(intel_pmt_discovery_test_suite); + +MODULE_IMPORT_NS("INTEL_PMT_DISCOVERY"); +MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); +MODULE_DESCRIPTION("Intel PMT Discovery KUNIT test driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/pmt/discovery.c b/drivers/platform/x86/intel/pmt/discovery.c new file mode 100644 index 000000000000..32713a194a55 --- /dev/null +++ b/drivers/platform/x86/intel/pmt/discovery.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitory Technology Discovery driver + * + * Copyright (c) 2025, Intel Corporation. + * All Rights Reserved. 
+ */ + +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/bug.h> +#include <linux/cleanup.h> +#include <linux/container_of.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kdev_t.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string_choices.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/uaccess.h> + +#include <linux/intel_pmt_features.h> +#include <linux/intel_vsec.h> + +#include "class.h" + +#define MAX_FEATURE_VERSION 0 +#define DT_TBIR GENMASK(2, 0) +#define FEAT_ATTR_SIZE(x) ((x) * sizeof(u32)) +#define PMT_GUID_SIZE(x) ((x) * sizeof(u32)) +#define PMT_ACCESS_TYPE_RSVD 0xF +#define SKIP_FEATURE 1 + +struct feature_discovery_table { + u32 access_type:4; + u32 version:8; + u32 size:16; + u32 reserved:4; + u32 id; + u32 offset; + u32 reserved2; +}; + +/* Common feature table header */ +struct feature_header { + u32 attr_size:8; + u32 num_guids:8; + u32 reserved:16; +}; + +/* Feature attribute fields */ +struct caps { + u32 caps; +}; + +struct command { + u32 max_stream_size:16; + u32 max_command_size:16; +}; + +struct watcher { + u32 reserved:21; + u32 period:11; + struct command command; +}; + +struct rmid { + u32 num_rmids:16; /* Number of Resource Monitoring IDs */ + u32 reserved:16; + struct watcher watcher; +}; + +struct feature_table { + struct feature_header header; + struct caps caps; + union { + struct command command; + struct watcher watcher; + struct rmid rmid; + }; + u32 *guids; +}; + +/* For backreference in struct feature */ +struct pmt_features_priv; + +struct feature { + struct feature_table table; + struct kobject kobj; + struct pmt_features_priv *priv; + struct list_head list; + const struct attribute_group *attr_group; + enum pmt_feature_id id; +}; + +struct pmt_features_priv { + struct device *parent; + struct device *dev; + int count; + u32 mask; + struct feature feature[]; +}; + +static LIST_HEAD(pmt_feature_list); +static DEFINE_MUTEX(feature_list_lock); + +#define to_pmt_feature(x) container_of(x, struct feature, kobj) +static void pmt_feature_release(struct kobject *kobj) +{ +} + +static ssize_t caps_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct pmt_cap **pmt_caps; + u32 caps = feature->table.caps.caps; + ssize_t ret = 0; + + switch (feature->id) { + case FEATURE_PER_CORE_PERF_TELEM: + pmt_caps = pmt_caps_pcpt; + break; + case FEATURE_PER_CORE_ENV_TELEM: + pmt_caps = pmt_caps_pcet; + break; + case FEATURE_PER_RMID_PERF_TELEM: + pmt_caps = pmt_caps_rmid_perf; + break; + case FEATURE_ACCEL_TELEM: + pmt_caps = pmt_caps_accel; + break; + case FEATURE_UNCORE_TELEM: + pmt_caps = pmt_caps_uncore; + break; + case FEATURE_CRASH_LOG: + pmt_caps = pmt_caps_crashlog; + break; + case FEATURE_PETE_LOG: + pmt_caps = pmt_caps_pete; + break; + case FEATURE_TPMI_CTRL: + pmt_caps = pmt_caps_tpmi; + break; + case FEATURE_TRACING: + pmt_caps = pmt_caps_tracing; + break; + case FEATURE_PER_RMID_ENERGY_TELEM: + pmt_caps = pmt_caps_rmid_energy; + break; + default: + return -EINVAL; + } + + while (*pmt_caps) { + struct pmt_cap *pmt_cap = *pmt_caps; + + while (pmt_cap->name) { + ret += sysfs_emit_at(buf, ret, "%-40s Available: %s\n", pmt_cap->name, + str_yes_no(pmt_cap->mask & caps)); + pmt_cap++; + } + 
pmt_caps++; + } + + return ret; +} +static struct kobj_attribute caps_attribute = __ATTR_RO(caps); + +static struct watcher *get_watcher(struct feature *feature) +{ + switch (feature_layout[feature->id]) { + case LAYOUT_RMID: + return &feature->table.rmid.watcher; + case LAYOUT_WATCHER: + return &feature->table.watcher; + default: + return ERR_PTR(-EINVAL); + } +} + +static struct command *get_command(struct feature *feature) +{ + switch (feature_layout[feature->id]) { + case LAYOUT_RMID: + return &feature->table.rmid.watcher.command; + case LAYOUT_WATCHER: + return &feature->table.watcher.command; + case LAYOUT_COMMAND: + return &feature->table.command; + default: + return ERR_PTR(-EINVAL); + } +} + +static ssize_t num_rmids_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + + return sysfs_emit(buf, "%u\n", feature->table.rmid.num_rmids); +} +static struct kobj_attribute num_rmids_attribute = __ATTR_RO(num_rmids); + +static ssize_t min_watcher_period_ms_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct watcher *watcher = get_watcher(feature); + + if (IS_ERR(watcher)) + return PTR_ERR(watcher); + + return sysfs_emit(buf, "%u\n", watcher->period); +} +static struct kobj_attribute min_watcher_period_ms_attribute = + __ATTR_RO(min_watcher_period_ms); + +static ssize_t max_stream_size_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct command *command = get_command(feature); + + if (IS_ERR(command)) + return PTR_ERR(command); + + return sysfs_emit(buf, "%u\n", command->max_stream_size); +} +static struct kobj_attribute max_stream_size_attribute = + __ATTR_RO(max_stream_size); + +static ssize_t max_command_size_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct command *command = get_command(feature); + + if (IS_ERR(command)) + return PTR_ERR(command); + + return sysfs_emit(buf, "%u\n", command->max_command_size); +} +static struct kobj_attribute max_command_size_attribute = + __ATTR_RO(max_command_size); + +static ssize_t guids_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + int i, count = 0; + + for (i = 0; i < feature->table.header.num_guids; i++) + count += sysfs_emit_at(buf, count, "0x%x\n", + feature->table.guids[i]); + + return count; +} +static struct kobj_attribute guids_attribute = __ATTR_RO(guids); + +static struct attribute *pmt_feature_rmid_attrs[] = { + &caps_attribute.attr, + &num_rmids_attribute.attr, + &min_watcher_period_ms_attribute.attr, + &max_stream_size_attribute.attr, + &max_command_size_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_rmid); + +static const struct kobj_type pmt_feature_rmid_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = pmt_feature_release, + .default_groups = pmt_feature_rmid_groups, +}; + +static struct attribute *pmt_feature_watcher_attrs[] = { + &caps_attribute.attr, + &min_watcher_period_ms_attribute.attr, + &max_stream_size_attribute.attr, + &max_command_size_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_watcher); + +static const struct kobj_type pmt_feature_watcher_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = pmt_feature_release, + .default_groups = pmt_feature_watcher_groups, 
+}; + +static struct attribute *pmt_feature_command_attrs[] = { + &caps_attribute.attr, + &max_stream_size_attribute.attr, + &max_command_size_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_command); + +static const struct kobj_type pmt_feature_command_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = pmt_feature_release, + .default_groups = pmt_feature_command_groups, +}; + +static struct attribute *pmt_feature_guids_attrs[] = { + &caps_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_guids); + +static const struct kobj_type pmt_feature_guids_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = pmt_feature_release, + .default_groups = pmt_feature_guids_groups, +}; + +static int +pmt_feature_get_disc_table(struct pmt_features_priv *priv, + struct resource *disc_res, + struct feature_discovery_table *disc_tbl) +{ + void __iomem *disc_base; + + disc_base = devm_ioremap_resource(priv->dev, disc_res); + if (IS_ERR(disc_base)) + return PTR_ERR(disc_base); + + memcpy_fromio(disc_tbl, disc_base, sizeof(*disc_tbl)); + + devm_iounmap(priv->dev, disc_base); + + if (priv->mask & BIT(disc_tbl->id)) + return dev_err_probe(priv->dev, -EINVAL, "Duplicate feature: %s\n", + pmt_feature_names[disc_tbl->id]); + + /* + * Some devices may expose non-functioning entries that are + * reserved for future use. They have zero size. Do not fail + * probe for these. Just ignore them. + */ + if (disc_tbl->size == 0 || disc_tbl->access_type == PMT_ACCESS_TYPE_RSVD) + return SKIP_FEATURE; + + if (disc_tbl->version > MAX_FEATURE_VERSION) + return SKIP_FEATURE; + + if (!pmt_feature_id_is_valid(disc_tbl->id)) + return SKIP_FEATURE; + + priv->mask |= BIT(disc_tbl->id); + + return 0; +} + +static int +pmt_feature_get_feature_table(struct pmt_features_priv *priv, + struct feature *feature, + struct feature_discovery_table *disc_tbl, + struct resource *disc_res) +{ + struct feature_table *feat_tbl = &feature->table; + struct feature_header *header; + struct resource res = {}; + resource_size_t res_size; + void __iomem *feat_base, *feat_offset; + void *tbl_offset; + size_t size; + u32 *guids; + u8 tbir; + + tbir = FIELD_GET(DT_TBIR, disc_tbl->offset); + + switch (disc_tbl->access_type) { + case ACCESS_LOCAL: + if (tbir) + return dev_err_probe(priv->dev, -EINVAL, + "Unsupported BAR index %u for access type %u\n", + tbir, disc_tbl->access_type); + + + /* + * For access_type LOCAL, the base address is as follows: + * base address = end of discovery region + base offset + 1 + */ + res = DEFINE_RES_MEM(disc_res->end + disc_tbl->offset + 1, + disc_tbl->size * sizeof(u32)); + break; + + default: + return dev_err_probe(priv->dev, -EINVAL, "Unrecognized access_type %u\n", + disc_tbl->access_type); + } + + feature->id = disc_tbl->id; + + /* Get the feature table */ + feat_base = devm_ioremap_resource(priv->dev, &res); + if (IS_ERR(feat_base)) + return PTR_ERR(feat_base); + + feat_offset = feat_base; + tbl_offset = feat_tbl; + + /* Get the header */ + header = &feat_tbl->header; + memcpy_fromio(header, feat_offset, sizeof(*header)); + + /* Validate fields fit within mapped resource */ + size = sizeof(*header) + FEAT_ATTR_SIZE(header->attr_size) + + PMT_GUID_SIZE(header->num_guids); + res_size = resource_size(&res); + if (WARN(size > res_size, "Bad table size %zu > %pa", size, &res_size)) + return -EINVAL; + + /* Get the feature attributes, including capability fields */ + tbl_offset += sizeof(*header); + feat_offset += sizeof(*header); + + memcpy_fromio(tbl_offset, 
feat_offset, FEAT_ATTR_SIZE(header->attr_size)); + + /* Finally, get the guids */ + guids = devm_kmalloc(priv->dev, PMT_GUID_SIZE(header->num_guids), GFP_KERNEL); + if (!guids) + return -ENOMEM; + + feat_offset += FEAT_ATTR_SIZE(header->attr_size); + + memcpy_fromio(guids, feat_offset, PMT_GUID_SIZE(header->num_guids)); + + feat_tbl->guids = guids; + + devm_iounmap(priv->dev, feat_base); + + return 0; +} + +static void pmt_features_add_feat(struct feature *feature) +{ + guard(mutex)(&feature_list_lock); + list_add(&feature->list, &pmt_feature_list); +} + +static void pmt_features_remove_feat(struct feature *feature) +{ + guard(mutex)(&feature_list_lock); + list_del(&feature->list); +} + +/* Get the discovery table and use it to get the feature table */ +static int pmt_features_discovery(struct pmt_features_priv *priv, + struct feature *feature, + struct intel_vsec_device *ivdev, + int idx) +{ + struct feature_discovery_table disc_tbl = {}; /* Avoid false warning */ + struct resource *disc_res = &ivdev->resource[idx]; + const struct kobj_type *ktype; + int ret; + + ret = pmt_feature_get_disc_table(priv, disc_res, &disc_tbl); + if (ret) + return ret; + + ret = pmt_feature_get_feature_table(priv, feature, &disc_tbl, disc_res); + if (ret) + return ret; + + switch (feature_layout[feature->id]) { + case LAYOUT_RMID: + ktype = &pmt_feature_rmid_ktype; + feature->attr_group = &pmt_feature_rmid_group; + break; + case LAYOUT_WATCHER: + ktype = &pmt_feature_watcher_ktype; + feature->attr_group = &pmt_feature_watcher_group; + break; + case LAYOUT_COMMAND: + ktype = &pmt_feature_command_ktype; + feature->attr_group = &pmt_feature_command_group; + break; + case LAYOUT_CAPS_ONLY: + ktype = &pmt_feature_guids_ktype; + feature->attr_group = &pmt_feature_guids_group; + break; + default: + return -EINVAL; + } + + ret = kobject_init_and_add(&feature->kobj, ktype, &priv->dev->kobj, + "%s", pmt_feature_names[feature->id]); + if (ret) + return ret; + + kobject_uevent(&feature->kobj, KOBJ_ADD); + pmt_features_add_feat(feature); + + return 0; +} + +static void pmt_features_remove(struct auxiliary_device *auxdev) +{ + struct pmt_features_priv *priv = auxiliary_get_drvdata(auxdev); + int i; + + for (i = 0; i < priv->count; i++) { + struct feature *feature = &priv->feature[i]; + + pmt_features_remove_feat(feature); + sysfs_remove_group(&feature->kobj, feature->attr_group); + kobject_put(&feature->kobj); + } + + device_unregister(priv->dev); +} + +static int pmt_features_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) +{ + struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev); + struct pmt_features_priv *priv; + size_t size; + int ret, i; + + size = struct_size(priv, feature, ivdev->num_resources); + priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->parent = &ivdev->pcidev->dev; + auxiliary_set_drvdata(auxdev, priv); + + priv->dev = device_create(&intel_pmt_class, &auxdev->dev, MKDEV(0, 0), priv, + "%s-%s", "features", dev_name(priv->parent)); + if (IS_ERR(priv->dev)) + return dev_err_probe(priv->dev, PTR_ERR(priv->dev), + "Could not create %s-%s device node\n", + "features", dev_name(priv->dev)); + + /* Initialize each feature */ + for (i = 0; i < ivdev->num_resources; i++) { + struct feature *feature = &priv->feature[priv->count]; + + ret = pmt_features_discovery(priv, feature, ivdev, i); + if (ret == SKIP_FEATURE) + continue; + if (ret != 0) + goto abort_probe; + + feature->priv = priv; + priv->count++; + } + + return 0; + +abort_probe: + 
/* + * Only fully initialized features are tracked in priv->count, which is + * incremented only after a feature is completely set up (i.e., after + * discovery and sysfs registration). If feature initialization fails, + * the failing feature's state is local and does not require rollback. + * + * Therefore, on error, we can safely call the driver's remove() routine + * pmt_features_remove() to clean up only those features that were + * fully initialized and counted. All other resources are device-managed + * and will be cleaned up automatically during device_unregister(). + */ + pmt_features_remove(auxdev); + + return ret; +} + +static void pmt_get_features(struct intel_pmt_entry *entry, struct feature *f) +{ + int num_guids = f->table.header.num_guids; + int i; + + for (i = 0; i < num_guids; i++) { + if (f->table.guids[i] != entry->guid) + continue; + + entry->feature_flags |= BIT(f->id); + + if (feature_layout[f->id] == LAYOUT_RMID) + entry->num_rmids = f->table.rmid.num_rmids; + else + entry->num_rmids = 0; /* entry is kzalloc but set anyway */ + } +} + +void intel_pmt_get_features(struct intel_pmt_entry *entry) +{ + struct feature *feature; + + mutex_lock(&feature_list_lock); + list_for_each_entry(feature, &pmt_feature_list, list) { + if (feature->priv->parent != &entry->ep->pcidev->dev) + continue; + + pmt_get_features(entry, feature); + } + mutex_unlock(&feature_list_lock); +} +EXPORT_SYMBOL_NS_GPL(intel_pmt_get_features, "INTEL_PMT"); + +static const struct auxiliary_device_id pmt_features_id_table[] = { + { .name = "intel_vsec.discovery" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, pmt_features_id_table); + +static struct auxiliary_driver pmt_features_aux_driver = { + .id_table = pmt_features_id_table, + .remove = pmt_features_remove, + .probe = pmt_features_probe, +}; +module_auxiliary_driver(pmt_features_aux_driver); + +MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); +MODULE_DESCRIPTION("Intel PMT Discovery driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("INTEL_PMT"); diff --git a/drivers/platform/x86/intel/pmt/features.c b/drivers/platform/x86/intel/pmt/features.c new file mode 100644 index 000000000000..8a39cddc75c8 --- /dev/null +++ b/drivers/platform/x86/intel/pmt/features.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025, Intel Corporation. + * All Rights Reserved. + * + * Author: "David E. 
Box" <david.e.box@linux.intel.com> + */ + +#include <linux/export.h> +#include <linux/types.h> + +#include <linux/intel_pmt_features.h> + +const char * const pmt_feature_names[] = { + [FEATURE_PER_CORE_PERF_TELEM] = "per_core_performance_telemetry", + [FEATURE_PER_CORE_ENV_TELEM] = "per_core_environment_telemetry", + [FEATURE_PER_RMID_PERF_TELEM] = "per_rmid_perf_telemetry", + [FEATURE_ACCEL_TELEM] = "accelerator_telemetry", + [FEATURE_UNCORE_TELEM] = "uncore_telemetry", + [FEATURE_CRASH_LOG] = "crash_log", + [FEATURE_PETE_LOG] = "pete_log", + [FEATURE_TPMI_CTRL] = "tpmi_control", + [FEATURE_TRACING] = "tracing", + [FEATURE_PER_RMID_ENERGY_TELEM] = "per_rmid_energy_telemetry", +}; +EXPORT_SYMBOL_NS_GPL(pmt_feature_names, "INTEL_PMT_DISCOVERY"); + +enum feature_layout feature_layout[] = { + [FEATURE_PER_CORE_PERF_TELEM] = LAYOUT_WATCHER, + [FEATURE_PER_CORE_ENV_TELEM] = LAYOUT_WATCHER, + [FEATURE_PER_RMID_PERF_TELEM] = LAYOUT_RMID, + [FEATURE_ACCEL_TELEM] = LAYOUT_WATCHER, + [FEATURE_UNCORE_TELEM] = LAYOUT_WATCHER, + [FEATURE_CRASH_LOG] = LAYOUT_COMMAND, + [FEATURE_PETE_LOG] = LAYOUT_COMMAND, + [FEATURE_TPMI_CTRL] = LAYOUT_CAPS_ONLY, + [FEATURE_TRACING] = LAYOUT_CAPS_ONLY, + [FEATURE_PER_RMID_ENERGY_TELEM] = LAYOUT_RMID, +}; + +struct pmt_cap pmt_cap_common[] = { + {PMT_CAP_TELEM, "telemetry"}, + {PMT_CAP_WATCHER, "watcher"}, + {PMT_CAP_CRASHLOG, "crashlog"}, + {PMT_CAP_STREAMING, "streaming"}, + {PMT_CAP_THRESHOLD, "threshold"}, + {PMT_CAP_WINDOW, "window"}, + {PMT_CAP_CONFIG, "config"}, + {PMT_CAP_TRACING, "tracing"}, + {PMT_CAP_INBAND, "inband"}, + {PMT_CAP_OOB, "oob"}, + {PMT_CAP_SECURED_CHAN, "secure_chan"}, + {PMT_CAP_PMT_SP, "pmt_sp"}, + {PMT_CAP_PMT_SP_POLICY, "pmt_sp_policy"}, + {} +}; + +struct pmt_cap pmt_cap_pcpt[] = { + {PMT_CAP_PCPT_CORE_PERF, "core_performance"}, + {PMT_CAP_PCPT_CORE_C0_RES, "core_c0_residency"}, + {PMT_CAP_PCPT_CORE_ACTIVITY, "core_activity"}, + {PMT_CAP_PCPT_CACHE_PERF, "cache_performance"}, + {PMT_CAP_PCPT_QUALITY_TELEM, "quality_telemetry"}, + {} +}; + +struct pmt_cap *pmt_caps_pcpt[] = { + pmt_cap_common, + pmt_cap_pcpt, + NULL +}; + +struct pmt_cap pmt_cap_pcet[] = { + {PMT_CAP_PCET_WORKPOINT_HIST, "workpoint_histogram"}, + {PMT_CAP_PCET_CORE_CURR_TEMP, "core_current_temp"}, + {PMT_CAP_PCET_CORE_INST_RES, "core_inst_residency"}, + {PMT_CAP_PCET_QUALITY_TELEM, "quality_telemetry"}, + {PMT_CAP_PCET_CORE_CDYN_LVL, "core_cdyn_level"}, + {PMT_CAP_PCET_CORE_STRESS_LVL, "core_stress_level"}, + {PMT_CAP_PCET_CORE_DAS, "core_digital_aging_sensor"}, + {PMT_CAP_PCET_FIVR_HEALTH, "fivr_health"}, + {PMT_CAP_PCET_ENERGY, "energy"}, + {PMT_CAP_PCET_PEM_STATUS, "pem_status"}, + {PMT_CAP_PCET_CORE_C_STATE, "core_c_state"}, + {} +}; + +struct pmt_cap *pmt_caps_pcet[] = { + pmt_cap_common, + pmt_cap_pcet, + NULL +}; + +struct pmt_cap pmt_cap_rmid_perf[] = { + {PMT_CAP_RMID_CORES_PERF, "core_performance"}, + {PMT_CAP_RMID_CACHE_PERF, "cache_performance"}, + {PMT_CAP_RMID_PERF_QUAL, "performance_quality"}, + {} +}; + +struct pmt_cap *pmt_caps_rmid_perf[] = { + pmt_cap_common, + pmt_cap_rmid_perf, + NULL +}; + +struct pmt_cap pmt_cap_accel[] = { + {PMT_CAP_ACCEL_CPM_TELEM, "content_processing_module"}, + {PMT_CAP_ACCEL_TIP_TELEM, "content_turbo_ip"}, + {} +}; + +struct pmt_cap *pmt_caps_accel[] = { + pmt_cap_common, + pmt_cap_accel, + NULL +}; + +struct pmt_cap pmt_cap_uncore[] = { + {PMT_CAP_UNCORE_IO_CA_TELEM, "io_ca"}, + {PMT_CAP_UNCORE_RMID_TELEM, "rmid"}, + {PMT_CAP_UNCORE_D2D_ULA_TELEM, "d2d_ula"}, + {PMT_CAP_UNCORE_PKGC_TELEM, "package_c"}, + {} +}; + +struct pmt_cap 
*pmt_caps_uncore[] = { + pmt_cap_common, + pmt_cap_uncore, + NULL +}; + +struct pmt_cap pmt_cap_crashlog[] = { + {PMT_CAP_CRASHLOG_MAN_TRIG, "manual_trigger"}, + {PMT_CAP_CRASHLOG_CORE, "core"}, + {PMT_CAP_CRASHLOG_UNCORE, "uncore"}, + {PMT_CAP_CRASHLOG_TOR, "tor"}, + {PMT_CAP_CRASHLOG_S3M, "s3m"}, + {PMT_CAP_CRASHLOG_PERSISTENCY, "persistency"}, + {PMT_CAP_CRASHLOG_CLIP_GPIO, "crashlog_in_progress"}, + {PMT_CAP_CRASHLOG_PRE_RESET, "pre_reset_extraction"}, + {PMT_CAP_CRASHLOG_POST_RESET, "post_reset_extraction"}, + {} +}; + +struct pmt_cap *pmt_caps_crashlog[] = { + pmt_cap_common, + pmt_cap_crashlog, + NULL +}; + +struct pmt_cap pmt_cap_pete[] = { + {PMT_CAP_PETE_MAN_TRIG, "manual_trigger"}, + {PMT_CAP_PETE_ENCRYPTION, "encryption"}, + {PMT_CAP_PETE_PERSISTENCY, "persistency"}, + {PMT_CAP_PETE_REQ_TOKENS, "required_tokens"}, + {PMT_CAP_PETE_PROD_ENABLED, "production_enabled"}, + {PMT_CAP_PETE_DEBUG_ENABLED, "debug_enabled"}, + {} +}; + +struct pmt_cap *pmt_caps_pete[] = { + pmt_cap_common, + pmt_cap_pete, + NULL +}; + +struct pmt_cap pmt_cap_tpmi[] = { + {PMT_CAP_TPMI_MAILBOX, "mailbox"}, + {PMT_CAP_TPMI_LOCK, "bios_lock"}, + {} +}; + +struct pmt_cap *pmt_caps_tpmi[] = { + pmt_cap_common, + pmt_cap_tpmi, + NULL +}; + +struct pmt_cap pmt_cap_tracing[] = { + {PMT_CAP_TRACE_SRAR, "srar_errors"}, + {PMT_CAP_TRACE_CORRECTABLE, "correctable_errors"}, + {PMT_CAP_TRACE_MCTP, "mctp"}, + {PMT_CAP_TRACE_MRT, "memory_resiliency"}, + {} +}; + +struct pmt_cap *pmt_caps_tracing[] = { + pmt_cap_common, + pmt_cap_tracing, + NULL +}; + +struct pmt_cap pmt_cap_rmid_energy[] = { + {PMT_CAP_RMID_ENERGY, "energy"}, + {PMT_CAP_RMID_ACTIVITY, "activity"}, + {PMT_CAP_RMID_ENERGY_QUAL, "energy_quality"}, + {} +}; + +struct pmt_cap *pmt_caps_rmid_energy[] = { + pmt_cap_common, + pmt_cap_rmid_energy, + NULL +}; diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index ac3a9bdf5601..a4dfca6cac19 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -9,13 +9,21 @@ */ #include <linux/auxiliary_bus.h> +#include <linux/bitops.h> +#include <linux/cleanup.h> +#include <linux/err.h> +#include <linux/intel_pmt_features.h> #include <linux/intel_vsec.h> #include <linux/kernel.h> +#include <linux/kref.h> #include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/types.h> #include <linux/uaccess.h> -#include <linux/overflow.h> +#include <linux/xarray.h> #include "class.h" @@ -206,6 +214,87 @@ unlock: } EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, "INTEL_PMT_TELEMETRY"); +static int pmt_copy_region(struct telemetry_region *region, + struct intel_pmt_entry *entry) +{ + + struct oobmsm_plat_info *plat_info; + + plat_info = intel_vsec_get_mapping(entry->ep->pcidev); + if (IS_ERR(plat_info)) + return PTR_ERR(plat_info); + + region->plat_info = *plat_info; + region->guid = entry->guid; + region->addr = entry->ep->base; + region->size = entry->size; + region->num_rmids = entry->num_rmids; + + return 0; +} + +static void pmt_feature_group_release(struct kref *kref) +{ + struct pmt_feature_group *feature_group; + + feature_group = container_of(kref, struct pmt_feature_group, kref); + kfree(feature_group); +} + +struct pmt_feature_group *intel_pmt_get_regions_by_feature(enum pmt_feature_id id) +{ + struct pmt_feature_group *feature_group __free(kfree) = NULL; + struct telemetry_region *region; + struct intel_pmt_entry *entry; + unsigned long idx; + 
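/*
 * Illustrative sketch, not part of this patch: a hypothetical consumer of
 * the refcounted feature-group API defined here, using only what is
 * visible in this series -- intel_pmt_get_regions_by_feature(),
 * intel_pmt_put_feature_group(), and the count/regions[]/num_rmids fields
 * of struct pmt_feature_group and struct telemetry_region:
 *
 *	struct pmt_feature_group *group;
 *	int i;
 *
 *	group = intel_pmt_get_regions_by_feature(FEATURE_PER_RMID_ENERGY_TELEM);
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *
 *	for (i = 0; i < group->count; i++)
 *		pr_info("guid 0x%x: %u RMIDs\n", group->regions[i].guid,
 *			group->regions[i].num_rmids);
 *
 *	intel_pmt_put_feature_group(group);
 *
 * The final put drops the reference taken by the lookup; the group is
 * freed through its kref once the last user calls put.
 */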
int count = 0; + size_t size; + + if (!pmt_feature_id_is_valid(id)) + return ERR_PTR(-EINVAL); + + guard(mutex)(&ep_lock); + xa_for_each(&telem_array, idx, entry) { + if (entry->feature_flags & BIT(id)) + count++; + } + + if (!count) + return ERR_PTR(-ENOENT); + + size = struct_size(feature_group, regions, count); + feature_group = kzalloc(size, GFP_KERNEL); + if (!feature_group) + return ERR_PTR(-ENOMEM); + + feature_group->count = count; + + region = feature_group->regions; + xa_for_each(&telem_array, idx, entry) { + int ret; + + if (!(entry->feature_flags & BIT(id))) + continue; + + ret = pmt_copy_region(region, entry); + if (ret) + return ERR_PTR(ret); + + region++; + } + + kref_init(&feature_group->kref); + + return no_free_ptr(feature_group); +} +EXPORT_SYMBOL(intel_pmt_get_regions_by_feature); + +void intel_pmt_put_feature_group(struct pmt_feature_group *feature_group) +{ + kref_put(&feature_group->kref, pmt_feature_group_release); +} +EXPORT_SYMBOL(intel_pmt_put_feature_group); + int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count) { u32 offset, size; @@ -311,6 +400,8 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia continue; priv->num_entries++; + + intel_pmt_get_features(entry); } return 0; @@ -348,3 +439,4 @@ MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); MODULE_DESCRIPTION("Intel PMT Telemetry driver"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS("INTEL_PMT"); +MODULE_IMPORT_NS("INTEL_VSEC"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 18c035710eb9..34bff2f65a83 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -22,6 +22,7 @@ #include <linux/auxiliary_bus.h> #include <linux/delay.h> #include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> #include <linux/fs.h> #include <linux/io.h> #include <linux/kernel.h> @@ -1546,7 +1547,7 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) { struct tpmi_per_power_domain_info *pd_info; bool read_blocked = 0, write_blocked = 0; - struct intel_tpmi_plat_info *plat_info; + struct oobmsm_plat_info *plat_info; struct device *dev = &auxdev->dev; struct tpmi_sst_struct *tpmi_sst; u8 i, num_resources, io_die_cnt; @@ -1698,7 +1699,7 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, "INTEL_TPMI_SST"); void tpmi_sst_dev_remove(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); - struct intel_tpmi_plat_info *plat_info; + struct oobmsm_plat_info *plat_info; plat_info = tpmi_get_platform_data(auxdev); if (!plat_info) @@ -1720,7 +1721,7 @@ void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); struct tpmi_per_power_domain_info *power_domain_info; - struct intel_tpmi_plat_info *plat_info; + struct oobmsm_plat_info *plat_info; void __iomem *cp_base; plat_info = tpmi_get_platform_data(auxdev); @@ -1748,7 +1749,7 @@ void tpmi_sst_dev_resume(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); struct tpmi_per_power_domain_info *power_domain_info; - struct intel_tpmi_plat_info *plat_info; + struct oobmsm_plat_info *plat_info; void __iomem *cp_base; plat_info = tpmi_get_platform_data(auxdev); diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c index e4be40f73eeb..f312864b8d07 100644 --- 
a/drivers/platform/x86/intel/telemetry/core.c +++ b/drivers/platform/x86/intel/telemetry/core.c @@ -21,33 +21,6 @@ struct telemetry_core_config { static struct telemetry_core_config telm_core_conf; -static int telemetry_def_update_events(struct telemetry_evtconfig pss_evtconfig, - struct telemetry_evtconfig ioss_evtconfig) -{ - return 0; -} - -static int telemetry_def_set_sampling_period(u8 pss_period, u8 ioss_period) -{ - return 0; -} - -static int telemetry_def_get_sampling_period(u8 *pss_min_period, - u8 *pss_max_period, - u8 *ioss_min_period, - u8 *ioss_max_period) -{ - return 0; -} - -static int telemetry_def_get_eventconfig( - struct telemetry_evtconfig *pss_evtconfig, - struct telemetry_evtconfig *ioss_evtconfig, - int pss_len, int ioss_len) -{ - return 0; -} - static int telemetry_def_get_trace_verbosity(enum telemetry_unit telem_unit, u32 *verbosity) { @@ -75,145 +48,14 @@ static int telemetry_def_read_eventlog(enum telemetry_unit telem_unit, return 0; } -static int telemetry_def_add_events(u8 num_pss_evts, u8 num_ioss_evts, - u32 *pss_evtmap, u32 *ioss_evtmap) -{ - return 0; -} - -static int telemetry_def_reset_events(void) -{ - return 0; -} - static const struct telemetry_core_ops telm_defpltops = { - .set_sampling_period = telemetry_def_set_sampling_period, - .get_sampling_period = telemetry_def_get_sampling_period, .get_trace_verbosity = telemetry_def_get_trace_verbosity, .set_trace_verbosity = telemetry_def_set_trace_verbosity, .raw_read_eventlog = telemetry_def_raw_read_eventlog, - .get_eventconfig = telemetry_def_get_eventconfig, .read_eventlog = telemetry_def_read_eventlog, - .update_events = telemetry_def_update_events, - .reset_events = telemetry_def_reset_events, - .add_events = telemetry_def_add_events, }; /** - * telemetry_update_events() - Update telemetry Configuration - * @pss_evtconfig: PSS related config. No change if num_evts = 0. - * @ioss_evtconfig: IOSS related config. No change if num_evts = 0. - * - * This API updates the IOSS & PSS Telemetry configuration. Old config - * is overwritten. Call telemetry_reset_events when logging is over - * All sample period values should be in the form of: - * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent) - * - * Return: 0 success, < 0 for failure - */ -int telemetry_update_events(struct telemetry_evtconfig pss_evtconfig, - struct telemetry_evtconfig ioss_evtconfig) -{ - return telm_core_conf.telem_ops->update_events(pss_evtconfig, - ioss_evtconfig); -} -EXPORT_SYMBOL_GPL(telemetry_update_events); - - -/** - * telemetry_set_sampling_period() - Sets the IOSS & PSS sampling period - * @pss_period: placeholder for PSS Period to be set. 
- * Set to 0 if not required to be updated - * @ioss_period: placeholder for IOSS Period to be set - * Set to 0 if not required to be updated - * - * All values should be in the form of: - * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent) - * - * Return: 0 success, < 0 for failure - */ -int telemetry_set_sampling_period(u8 pss_period, u8 ioss_period) -{ - return telm_core_conf.telem_ops->set_sampling_period(pss_period, - ioss_period); -} -EXPORT_SYMBOL_GPL(telemetry_set_sampling_period); - -/** - * telemetry_get_sampling_period() - Get IOSS & PSS min & max sampling period - * @pss_min_period: placeholder for PSS Min Period supported - * @pss_max_period: placeholder for PSS Max Period supported - * @ioss_min_period: placeholder for IOSS Min Period supported - * @ioss_max_period: placeholder for IOSS Max Period supported - * - * All values should be in the form of: - * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent) - * - * Return: 0 success, < 0 for failure - */ -int telemetry_get_sampling_period(u8 *pss_min_period, u8 *pss_max_period, - u8 *ioss_min_period, u8 *ioss_max_period) -{ - return telm_core_conf.telem_ops->get_sampling_period(pss_min_period, - pss_max_period, - ioss_min_period, - ioss_max_period); -} -EXPORT_SYMBOL_GPL(telemetry_get_sampling_period); - - -/** - * telemetry_reset_events() - Restore the IOSS & PSS configuration to default - * - * Return: 0 success, < 0 for failure - */ -int telemetry_reset_events(void) -{ - return telm_core_conf.telem_ops->reset_events(); -} -EXPORT_SYMBOL_GPL(telemetry_reset_events); - -/** - * telemetry_get_eventconfig() - Returns the pss and ioss events enabled - * @pss_evtconfig: Pointer to PSS related configuration. - * @ioss_evtconfig: Pointer to IOSS related configuration. - * @pss_len: Number of u32 elements allocated for pss_evtconfig array - * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array - * - * Return: 0 success, < 0 for failure - */ -int telemetry_get_eventconfig(struct telemetry_evtconfig *pss_evtconfig, - struct telemetry_evtconfig *ioss_evtconfig, - int pss_len, int ioss_len) -{ - return telm_core_conf.telem_ops->get_eventconfig(pss_evtconfig, - ioss_evtconfig, - pss_len, ioss_len); -} -EXPORT_SYMBOL_GPL(telemetry_get_eventconfig); - -/** - * telemetry_add_events() - Add IOSS & PSS configuration to existing settings. - * @num_pss_evts: Number of PSS Events (<29) in pss_evtmap. Can be 0. - * @num_ioss_evts: Number of IOSS Events (<29) in ioss_evtmap. Can be 0. - * @pss_evtmap: Array of PSS Event-IDs to Enable - * @ioss_evtmap: Array of PSS Event-IDs to Enable - * - * Events are appended to Old Configuration. In case of total events > 28, it - * returns error. 
Call telemetry_reset_events to reset after eventlog done - * - * Return: 0 success, < 0 for failure - */ -int telemetry_add_events(u8 num_pss_evts, u8 num_ioss_evts, - u32 *pss_evtmap, u32 *ioss_evtmap) -{ - return telm_core_conf.telem_ops->add_events(num_pss_evts, - num_ioss_evts, pss_evtmap, - ioss_evtmap); -} -EXPORT_SYMBOL_GPL(telemetry_add_events); - -/** * telemetry_read_events() - Fetches samples as specified by evtlog.telem_evt_id * @telem_unit: Specify whether IOSS or PSS Read * @evtlog: Array of telemetry_evtlog structs to fill data @@ -231,25 +73,6 @@ int telemetry_read_events(enum telemetry_unit telem_unit, EXPORT_SYMBOL_GPL(telemetry_read_events); /** - * telemetry_raw_read_events() - Fetch samples specified by evtlog.telem_evt_id - * @telem_unit: Specify whether IOSS or PSS Read - * @evtlog: Array of telemetry_evtlog structs to fill data - * evtlog.telem_evt_id specifies the ids to read - * @len: Length of array of evtlog - * - * The caller must take care of locking in this case. - * - * Return: number of eventlogs read for success, < 0 for failure - */ -int telemetry_raw_read_events(enum telemetry_unit telem_unit, - struct telemetry_evtlog *evtlog, int len) -{ - return telm_core_conf.telem_ops->raw_read_eventlog(telem_unit, evtlog, - len, 0); -} -EXPORT_SYMBOL_GPL(telemetry_raw_read_events); - -/** * telemetry_read_eventlog() - Fetch the Telemetry log from PSS or IOSS * @telem_unit: Specify whether IOSS or PSS Read * @evtlog: Array of telemetry_evtlog structs to fill data diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c index 9a499efa1e4d..f23c170a55dc 100644 --- a/drivers/platform/x86/intel/telemetry/pltdrv.c +++ b/drivers/platform/x86/intel/telemetry/pltdrv.c @@ -639,231 +639,6 @@ static int telemetry_setup(struct platform_device *pdev) return 0; } -static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig, - struct telemetry_evtconfig ioss_evtconfig) -{ - int ret; - - if ((pss_evtconfig.num_evts > 0) && - (TELEM_SAMPLE_PERIOD_INVALID(pss_evtconfig.period))) { - pr_err("PSS Sampling Period Out of Range\n"); - return -EINVAL; - } - - if ((ioss_evtconfig.num_evts > 0) && - (TELEM_SAMPLE_PERIOD_INVALID(ioss_evtconfig.period))) { - pr_err("IOSS Sampling Period Out of Range\n"); - return -EINVAL; - } - - ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, - TELEM_UPDATE); - if (ret) - pr_err("TELEMETRY Config Failed\n"); - - return ret; -} - - -static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period) -{ - u32 telem_ctrl = 0; - int ret = 0; - - mutex_lock(&(telm_conf->telem_lock)); - if (ioss_period) { - struct intel_scu_ipc_dev *scu = telm_conf->scu; - - if (TELEM_SAMPLE_PERIOD_INVALID(ioss_period)) { - pr_err("IOSS Sampling Period Out of Range\n"); - ret = -EINVAL; - goto out; - } - - /* Get telemetry EVENT CTL */ - ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM, - IOSS_TELEM_EVENT_CTL_READ, NULL, 0, - &telem_ctrl, sizeof(telem_ctrl)); - if (ret) { - pr_err("IOSS TELEM_CTRL Read Failed\n"); - goto out; - } - - /* Disable Telemetry */ - TELEM_DISABLE(telem_ctrl); - - ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM, - IOSS_TELEM_EVENT_CTL_WRITE, - &telem_ctrl, sizeof(telem_ctrl), - NULL, 0); - if (ret) { - pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n"); - goto out; - } - - /* Enable Periodic Telemetry Events and enable SRAM trace */ - TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl); - TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl); - TELEM_ENABLE_PERIODIC(telem_ctrl); - 
telem_ctrl |= ioss_period; - - ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM, - IOSS_TELEM_EVENT_CTL_WRITE, - &telem_ctrl, sizeof(telem_ctrl), - NULL, 0); - if (ret) { - pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n"); - goto out; - } - telm_conf->ioss_config.curr_period = ioss_period; - } - - if (pss_period) { - if (TELEM_SAMPLE_PERIOD_INVALID(pss_period)) { - pr_err("PSS Sampling Period Out of Range\n"); - ret = -EINVAL; - goto out; - } - - /* Get telemetry EVENT CTL */ - ret = intel_punit_ipc_command( - IPC_PUNIT_BIOS_READ_TELE_EVENT_CTRL, - 0, 0, NULL, &telem_ctrl); - if (ret) { - pr_err("PSS TELEM_CTRL Read Failed\n"); - goto out; - } - - /* Disable Telemetry */ - TELEM_DISABLE(telem_ctrl); - ret = intel_punit_ipc_command( - IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL, - 0, 0, &telem_ctrl, NULL); - if (ret) { - pr_err("PSS TELEM_CTRL Event Disable Write Failed\n"); - goto out; - } - - /* Enable Periodic Telemetry Events and enable SRAM trace */ - TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl); - TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl); - TELEM_ENABLE_PERIODIC(telem_ctrl); - telem_ctrl |= pss_period; - - ret = intel_punit_ipc_command( - IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL, - 0, 0, &telem_ctrl, NULL); - if (ret) { - pr_err("PSS TELEM_CTRL Event Enable Write Failed\n"); - goto out; - } - telm_conf->pss_config.curr_period = pss_period; - } - -out: - mutex_unlock(&(telm_conf->telem_lock)); - return ret; -} - - -static int telemetry_plt_get_sampling_period(u8 *pss_min_period, - u8 *pss_max_period, - u8 *ioss_min_period, - u8 *ioss_max_period) -{ - *pss_min_period = telm_conf->pss_config.min_period; - *pss_max_period = telm_conf->pss_config.max_period; - *ioss_min_period = telm_conf->ioss_config.min_period; - *ioss_max_period = telm_conf->ioss_config.max_period; - - return 0; -} - - -static int telemetry_plt_reset_events(void) -{ - struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig; - int ret; - - pss_evtconfig.evtmap = NULL; - pss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS; - pss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD; - - ioss_evtconfig.evtmap = NULL; - ioss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS; - ioss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD; - - ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, - TELEM_RESET); - if (ret) - pr_err("TELEMETRY Reset Failed\n"); - - return ret; -} - - -static int telemetry_plt_get_eventconfig(struct telemetry_evtconfig *pss_config, - struct telemetry_evtconfig *ioss_config, - int pss_len, int ioss_len) -{ - u32 *pss_evtmap, *ioss_evtmap; - u32 index; - - pss_evtmap = pss_config->evtmap; - ioss_evtmap = ioss_config->evtmap; - - mutex_lock(&(telm_conf->telem_lock)); - pss_config->num_evts = telm_conf->pss_config.ssram_evts_used; - ioss_config->num_evts = telm_conf->ioss_config.ssram_evts_used; - - pss_config->period = telm_conf->pss_config.curr_period; - ioss_config->period = telm_conf->ioss_config.curr_period; - - if ((pss_len < telm_conf->pss_config.ssram_evts_used) || - (ioss_len < telm_conf->ioss_config.ssram_evts_used)) { - mutex_unlock(&(telm_conf->telem_lock)); - return -EINVAL; - } - - for (index = 0; index < telm_conf->pss_config.ssram_evts_used; - index++) { - pss_evtmap[index] = - telm_conf->pss_config.telem_evts[index].evt_id; - } - - for (index = 0; index < telm_conf->ioss_config.ssram_evts_used; - index++) { - ioss_evtmap[index] = - telm_conf->ioss_config.telem_evts[index].evt_id; - } - - mutex_unlock(&(telm_conf->telem_lock)); - return 0; -} - - -static int telemetry_plt_add_events(u8 
num_pss_evts, u8 num_ioss_evts, - u32 *pss_evtmap, u32 *ioss_evtmap) -{ - struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig; - int ret; - - pss_evtconfig.evtmap = pss_evtmap; - pss_evtconfig.num_evts = num_pss_evts; - pss_evtconfig.period = telm_conf->pss_config.curr_period; - - ioss_evtconfig.evtmap = ioss_evtmap; - ioss_evtconfig.num_evts = num_ioss_evts; - ioss_evtconfig.period = telm_conf->ioss_config.curr_period; - - ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, - TELEM_ADD); - if (ret) - pr_err("TELEMETRY ADD Failed\n"); - - return ret; -} - static int telem_evtlog_read(enum telemetry_unit telem_unit, struct telem_ssram_region *ssram_region, u8 len) { @@ -1093,14 +868,8 @@ out: static const struct telemetry_core_ops telm_pltops = { .get_trace_verbosity = telemetry_plt_get_trace_verbosity, .set_trace_verbosity = telemetry_plt_set_trace_verbosity, - .set_sampling_period = telemetry_plt_set_sampling_period, - .get_sampling_period = telemetry_plt_get_sampling_period, .raw_read_eventlog = telemetry_plt_raw_read_eventlog, - .get_eventconfig = telemetry_plt_get_eventconfig, - .update_events = telemetry_plt_update_events, .read_eventlog = telemetry_plt_read_eventlog, - .reset_events = telemetry_plt_reset_events, - .add_events = telemetry_plt_add_events, }; static int telemetry_pltdrv_probe(struct platform_device *pdev) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 44d9948ed224..6df55c8e16b7 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -22,9 +22,10 @@ #include <linux/auxiliary_bus.h> #include <linux/bitfield.h> #include <linux/bits.h> +#include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/intel_tpmi.h> #include "../tpmi_power_domains.h" #include "uncore-frequency-common.h" @@ -448,7 +449,7 @@ static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) } static void set_cdie_id(int domain_id, struct tpmi_uncore_cluster_info *cluster_info, - struct intel_tpmi_plat_info *plat_info) + struct oobmsm_plat_info *plat_info) { cluster_info->cdie_id = domain_id; @@ -465,7 +466,7 @@ static void set_cdie_id(int domain_id, struct tpmi_uncore_cluster_info *cluster_ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { bool read_blocked = 0, write_blocked = 0; - struct intel_tpmi_plat_info *plat_info; + struct oobmsm_plat_info *plat_info; struct tpmi_uncore_struct *tpmi_uncore; bool uncore_sysfs_added = false; int ret, i, pkg = 0; diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 055ca9f48fb4..f66f0ce8559b 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -15,9 +15,12 @@ #include <linux/auxiliary_bus.h> #include <linux/bits.h> +#include <linux/bitops.h> +#include <linux/bug.h> #include <linux/cleanup.h> #include <linux/delay.h> #include <linux/idr.h> +#include <linux/log2.h> #include <linux/intel_vsec.h> #include <linux/kernel.h> #include <linux/module.h> @@ -32,6 +35,20 @@ static DEFINE_IDA(intel_vsec_ida); static DEFINE_IDA(intel_vsec_sdsi_ida); static DEFINE_XARRAY_ALLOC(auxdev_array); +enum vsec_device_state { + STATE_NOT_FOUND, + STATE_REGISTERED, + STATE_SKIP, +}; + +struct vsec_priv { + struct intel_vsec_platform_info *info; + struct device 
*suppliers[VSEC_FEATURE_COUNT]; + struct oobmsm_plat_info plat_info; + enum vsec_device_state state[VSEC_FEATURE_COUNT]; + unsigned long found_caps; +}; + static const char *intel_vsec_name(enum intel_vsec_id id) { switch (id) { @@ -50,6 +67,9 @@ static const char *intel_vsec_name(enum intel_vsec_id id) case VSEC_ID_TPMI: return "tpmi"; + case VSEC_ID_DISCOVERY: + return "discovery"; + default: return NULL; } @@ -68,6 +88,8 @@ static bool intel_vsec_supported(u16 id, unsigned long caps) return !!(caps & VSEC_CAP_SDSI); case VSEC_ID_TPMI: return !!(caps & VSEC_CAP_TPMI); + case VSEC_ID_DISCOVERY: + return !!(caps & VSEC_CAP_DISCOVERY); default: return false; } @@ -91,6 +113,97 @@ static void intel_vsec_dev_release(struct device *dev) kfree(intel_vsec_dev); } +static const struct vsec_feature_dependency * +get_consumer_dependencies(struct vsec_priv *priv, int cap_id) +{ + const struct vsec_feature_dependency *deps = priv->info->deps; + int consumer_id = priv->info->num_deps; + + if (!deps) + return NULL; + + while (consumer_id--) + if (deps[consumer_id].feature == BIT(cap_id)) + return &deps[consumer_id]; + + return NULL; +} + +static bool vsec_driver_present(int cap_id) +{ + unsigned long bit = BIT(cap_id); + + switch (bit) { + case VSEC_CAP_TELEMETRY: + return IS_ENABLED(CONFIG_INTEL_PMT_TELEMETRY); + case VSEC_CAP_WATCHER: + return IS_ENABLED(CONFIG_INTEL_PMT_WATCHER); + case VSEC_CAP_CRASHLOG: + return IS_ENABLED(CONFIG_INTEL_PMT_CRASHLOG); + case VSEC_CAP_SDSI: + return IS_ENABLED(CONFIG_INTEL_SDSI); + case VSEC_CAP_TPMI: + return IS_ENABLED(CONFIG_INTEL_TPMI); + case VSEC_CAP_DISCOVERY: + return IS_ENABLED(CONFIG_INTEL_PMT_DISCOVERY); + default: + return false; + } +} + +/* + * Although pci_device_id table is available in the pdev, this prototype is + * necessary because the code using it can be called by an exported API that + * might pass a different pdev. + */ +static const struct pci_device_id intel_vsec_pci_ids[]; + +static int intel_vsec_link_devices(struct pci_dev *pdev, struct device *dev, + int consumer_id) +{ + const struct vsec_feature_dependency *deps; + enum vsec_device_state *state; + struct device **suppliers; + struct vsec_priv *priv; + int supplier_id; + + if (!consumer_id) + return 0; + + if (!pci_match_id(intel_vsec_pci_ids, pdev)) + return 0; + + priv = pci_get_drvdata(pdev); + state = priv->state; + suppliers = priv->suppliers; + + priv->suppliers[consumer_id] = dev; + + deps = get_consumer_dependencies(priv, consumer_id); + if (!deps) + return 0; + + for_each_set_bit(supplier_id, &deps->supplier_bitmap, VSEC_FEATURE_COUNT) { + struct device_link *link; + + if (state[supplier_id] != STATE_REGISTERED || + !vsec_driver_present(supplier_id)) + continue; + + if (!suppliers[supplier_id]) { + dev_err(dev, "Bad supplier list\n"); + return -EINVAL; + } + + link = device_link_add(dev, suppliers[supplier_id], + DL_FLAG_AUTOPROBE_CONSUMER); + if (!link) + return -EINVAL; + } + + return 0; +} + int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, struct intel_vsec_device *intel_vsec_dev, const char *name) @@ -128,19 +241,37 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, return ret; } + /* + * Assign a name now to ensure that the device link doesn't contain + * a null string for the consumer name. This is a problem when a supplier + * supplies more than one consumer and can lead to a duplicate name error + * when the link is created in sysfs. 
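+ *
+ * Without this, the auxiliary core would only assign the device name
+ * later, in auxiliary_device_add(), i.e. after the links created by
+ * intel_vsec_link_devices() below already exist.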
+ */ + ret = dev_set_name(&auxdev->dev, "%s.%s.%d", KBUILD_MODNAME, auxdev->name, + auxdev->id); + if (ret) + goto cleanup_aux; + + ret = intel_vsec_link_devices(pdev, &auxdev->dev, intel_vsec_dev->cap_id); + if (ret) + goto cleanup_aux; + ret = auxiliary_device_add(auxdev); - if (ret < 0) { - auxiliary_device_uninit(auxdev); - return ret; - } + if (ret) + goto cleanup_aux; return devm_add_action_or_reset(parent, intel_vsec_remove_aux, auxdev); + +cleanup_aux: + auxiliary_device_uninit(auxdev); + return ret; } EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, "INTEL_VSEC"); static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header, - struct intel_vsec_platform_info *info) + struct intel_vsec_platform_info *info, + unsigned long cap_id) { struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL; struct resource __free(kfree) *res = NULL; @@ -207,6 +338,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he intel_vsec_dev->quirks = info->quirks; intel_vsec_dev->base_addr = info->base_addr; intel_vsec_dev->priv_data = info->priv_data; + intel_vsec_dev->cap_id = cap_id; if (header->id == VSEC_ID_SDSI) intel_vsec_dev->ida = &intel_vsec_sdsi_ida; @@ -221,6 +353,109 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he intel_vsec_name(header->id)); } +static bool suppliers_ready(struct vsec_priv *priv, + const struct vsec_feature_dependency *consumer_deps, + int cap_id) +{ + enum vsec_device_state *state = priv->state; + int supplier_id; + + if (WARN_ON_ONCE(consumer_deps->feature != BIT(cap_id))) + return false; + + /* + * Verify that all required suppliers have been found. Return false + * immediately if any are still missing. + */ + for_each_set_bit(supplier_id, &consumer_deps->supplier_bitmap, VSEC_FEATURE_COUNT) { + if (state[supplier_id] == STATE_SKIP) + continue; + + if (state[supplier_id] == STATE_NOT_FOUND) + return false; + } + + /* + * All suppliers have been found and the consumer is ready to be + * registered. + */ + return true; +} + +static int get_cap_id(u32 header_id, unsigned long *cap_id) +{ + switch (header_id) { + case VSEC_ID_TELEMETRY: + *cap_id = ilog2(VSEC_CAP_TELEMETRY); + break; + case VSEC_ID_WATCHER: + *cap_id = ilog2(VSEC_CAP_WATCHER); + break; + case VSEC_ID_CRASHLOG: + *cap_id = ilog2(VSEC_CAP_CRASHLOG); + break; + case VSEC_ID_SDSI: + *cap_id = ilog2(VSEC_CAP_SDSI); + break; + case VSEC_ID_TPMI: + *cap_id = ilog2(VSEC_CAP_TPMI); + break; + case VSEC_ID_DISCOVERY: + *cap_id = ilog2(VSEC_CAP_DISCOVERY); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int intel_vsec_register_device(struct pci_dev *pdev, + struct intel_vsec_header *header, + struct intel_vsec_platform_info *info) +{ + const struct vsec_feature_dependency *consumer_deps; + struct vsec_priv *priv; + unsigned long cap_id; + int ret; + + ret = get_cap_id(header->id, &cap_id); + if (ret) + return ret; + + /* + * Only track dependencies for devices probed by the VSEC driver. + * For others using the exported APIs, add the device directly. 
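+ *
+ * A consumer whose suppliers have not all been registered yet returns
+ * -EAGAIN below and is retried on a later pass of the capability walk
+ * in intel_vsec_pci_probe().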
+ */ + if (!pci_match_id(intel_vsec_pci_ids, pdev)) + return intel_vsec_add_dev(pdev, header, info, cap_id); + + priv = pci_get_drvdata(pdev); + if (priv->state[cap_id] == STATE_REGISTERED || + priv->state[cap_id] == STATE_SKIP) + return -EEXIST; + + priv->found_caps |= BIT(cap_id); + + if (!vsec_driver_present(cap_id)) { + priv->state[cap_id] = STATE_SKIP; + return -ENODEV; + } + + consumer_deps = get_consumer_dependencies(priv, cap_id); + if (!consumer_deps || suppliers_ready(priv, consumer_deps, cap_id)) { + ret = intel_vsec_add_dev(pdev, header, info, cap_id); + if (ret) + priv->state[cap_id] = STATE_SKIP; + else + priv->state[cap_id] = STATE_REGISTERED; + + return ret; + } + + return -EAGAIN; +} + static bool intel_vsec_walk_header(struct pci_dev *pdev, struct intel_vsec_platform_info *info) { @@ -229,7 +464,7 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev, int ret; for ( ; *header; header++) { - ret = intel_vsec_add_dev(pdev, *header, info); + ret = intel_vsec_register_device(pdev, *header, info); if (!ret) have_devices = true; } @@ -277,7 +512,7 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr); header.id = PCI_DVSEC_HEADER2_ID(hdr); - ret = intel_vsec_add_dev(pdev, &header, info); + ret = intel_vsec_register_device(pdev, &header, info); if (ret) continue; @@ -322,7 +557,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, header.tbir = INTEL_DVSEC_TABLE_BAR(table); header.offset = INTEL_DVSEC_TABLE_OFFSET(table); - ret = intel_vsec_add_dev(pdev, &header, info); + ret = intel_vsec_register_device(pdev, &header, info); if (ret) continue; @@ -345,11 +580,56 @@ int intel_vsec_register(struct pci_dev *pdev, } EXPORT_SYMBOL_NS_GPL(intel_vsec_register, "INTEL_VSEC"); +static bool intel_vsec_get_features(struct pci_dev *pdev, + struct intel_vsec_platform_info *info) +{ + bool found = false; + + /* + * Both DVSEC and VSEC capabilities can exist on the same device, + * so both intel_vsec_walk_dvsec() and intel_vsec_walk_vsec() must be + * called independently. Additionally, intel_vsec_walk_header() is + * needed for devices that do not have VSEC/DVSEC but provide the + * information via device_data. 
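+ *
+ * The walkers are called as separate statements rather than OR-ed into
+ * a single expression so that a successful DVSEC walk does not
+ * short-circuit the VSEC walk.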
+ */ + if (intel_vsec_walk_dvsec(pdev, info)) + found = true; + + if (intel_vsec_walk_vsec(pdev, info)) + found = true; + + if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) && + intel_vsec_walk_header(pdev, info)) + found = true; + + return found; +} + +static void intel_vsec_skip_missing_dependencies(struct pci_dev *pdev) +{ + struct vsec_priv *priv = pci_get_drvdata(pdev); + const struct vsec_feature_dependency *deps = priv->info->deps; + int consumer_id = priv->info->num_deps; + + while (consumer_id--) { + int supplier_id; + + deps = &priv->info->deps[consumer_id]; + + for_each_set_bit(supplier_id, &deps->supplier_bitmap, VSEC_FEATURE_COUNT) { + if (!(BIT(supplier_id) & priv->found_caps)) + priv->state[supplier_id] = STATE_SKIP; + } + } +} + static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_vsec_platform_info *info; - bool have_devices = false; - int ret; + struct vsec_priv *priv; + int num_caps, ret; + int run_once = 0; + bool found_any = false; ret = pcim_enable_device(pdev); if (ret) @@ -360,22 +640,62 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id if (!info) return -EINVAL; - if (intel_vsec_walk_dvsec(pdev, info)) - have_devices = true; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; - if (intel_vsec_walk_vsec(pdev, info)) - have_devices = true; + priv->info = info; + pci_set_drvdata(pdev, priv); - if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) && - intel_vsec_walk_header(pdev, info)) - have_devices = true; + num_caps = hweight_long(info->caps); + while (num_caps--) { + found_any |= intel_vsec_get_features(pdev, info); + + if (priv->found_caps == info->caps) + break; - if (!have_devices) + if (!run_once) { + intel_vsec_skip_missing_dependencies(pdev); + run_once = 1; + } + } + + if (!found_any) return -ENODEV; return 0; } +int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info, + struct intel_vsec_device *vsec_dev) +{ + struct vsec_priv *priv; + + priv = pci_get_drvdata(vsec_dev->pcidev); + if (!priv) + return -EINVAL; + + priv->plat_info = *plat_info; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_set_mapping, "INTEL_VSEC"); + +struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev) +{ + struct vsec_priv *priv; + + if (!pci_match_id(intel_vsec_pci_ids, pdev)) + return ERR_PTR(-EINVAL); + + priv = pci_get_drvdata(pdev); + if (!priv) + return ERR_PTR(-EINVAL); + + return &priv->plat_info; +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_get_mapping, "INTEL_VSEC"); + /* DG1 info */ static struct intel_vsec_header dg1_header = { .length = 0x10, @@ -402,14 +722,26 @@ static const struct intel_vsec_platform_info mtl_info = { .caps = VSEC_CAP_TELEMETRY, }; +static const struct vsec_feature_dependency oobmsm_deps[] = { + { + .feature = VSEC_CAP_TELEMETRY, + .supplier_bitmap = VSEC_CAP_DISCOVERY | VSEC_CAP_TPMI, + }, +}; + /* OOBMSM info */ static const struct intel_vsec_platform_info oobmsm_info = { - .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI, + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI | + VSEC_CAP_DISCOVERY, + .deps = oobmsm_deps, + .num_deps = ARRAY_SIZE(oobmsm_deps), }; /* DMR OOBMSM info */ static const struct intel_vsec_platform_info dmr_oobmsm_info = { - .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI, + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI | VSEC_CAP_DISCOVERY, + .deps = oobmsm_deps, + .num_deps = ARRAY_SIZE(oobmsm_deps), }; /* TGL info */ diff --git a/drivers/platform/x86/intel/vsec_tpmi.c 
b/drivers/platform/x86/intel/vsec_tpmi.c index 5c383a27bbe8..7748b5557a18 100644 --- a/drivers/platform/x86/intel/vsec_tpmi.c +++ b/drivers/platform/x86/intel/vsec_tpmi.c @@ -116,7 +116,7 @@ struct intel_tpmi_info { struct intel_vsec_device *vsec_dev; int feature_count; u64 pfs_start; - struct intel_tpmi_plat_info plat_info; + struct oobmsm_plat_info plat_info; void __iomem *tpmi_control_mem; struct dentry *dbgfs_dir; }; @@ -187,7 +187,7 @@ struct tpmi_feature_state { /* Used during auxbus device creation */ static DEFINE_IDA(intel_vsec_tpmi_ida); -struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev) +struct oobmsm_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev) { struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev); @@ -799,6 +799,10 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev) ret = tpmi_process_info(tpmi_info, pfs); if (ret) return ret; + + ret = intel_vsec_set_mapping(&tpmi_info->plat_info, vsec_dev); + if (ret) + return ret; } if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID) diff --git a/drivers/platform/x86/lenovo/Kconfig b/drivers/platform/x86/lenovo/Kconfig new file mode 100644 index 000000000000..d22b774e0236 --- /dev/null +++ b/drivers/platform/x86/lenovo/Kconfig @@ -0,0 +1,276 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Lenovo X86 Platform Specific Drivers +# + +config IDEAPAD_LAPTOP + tristate "Lenovo IdeaPad Laptop Extras" + depends on ACPI + depends on ACPI_BATTERY + depends on RFKILL && INPUT + depends on SERIO_I8042 + depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n + depends on ACPI_WMI || ACPI_WMI = n + select ACPI_PLATFORM_PROFILE + select INPUT_SPARSEKMAP + select NEW_LEDS + select LEDS_CLASS + help + This is a driver for Lenovo IdeaPad netbooks contains drivers for + rfkill switch, hotkey, fan control and backlight control. + +config LENOVO_WMI_HOTKEY_UTILITIES + tristate "Lenovo Hotkey Utility WMI extras driver" + depends on ACPI_WMI + select NEW_LEDS + select LEDS_CLASS + imply IDEAPAD_LAPTOP + help + This driver provides WMI support for Lenovo customized hotkeys function, + such as LED control for audio/mic mute event for Ideapad, YOGA, XiaoXin, + Gaming, ThinkBook and so on. + +config LENOVO_WMI_CAMERA + tristate "Lenovo WMI Camera Button driver" + depends on ACPI_WMI + depends on INPUT + help + This driver provides support for Lenovo camera button. The Camera + button is a GPIO device. This driver receives ACPI notifications when + the camera button is switched on/off. + + To compile this driver as a module, choose M here: the module + will be called lenovo-wmi-camera. + +config LENOVO_YMC + tristate "Lenovo Yoga Tablet Mode Control" + depends on ACPI_WMI + depends on INPUT + depends on IDEAPAD_LAPTOP + select INPUT_SPARSEKMAP + help + This driver maps the Tablet Mode Control switch to SW_TABLET_MODE input + events for Lenovo Yoga notebooks. + +config THINKPAD_ACPI + tristate "ThinkPad ACPI Laptop Extras" + depends on ACPI_EC + depends on ACPI_BATTERY + depends on INPUT + depends on RFKILL || RFKILL = n + depends on ACPI_VIDEO || ACPI_VIDEO = n + depends on BACKLIGHT_CLASS_DEVICE + depends on I2C + depends on DRM + select ACPI_PLATFORM_PROFILE + select DRM_PRIVACY_SCREEN + select HWMON + select NVRAM + select NEW_LEDS + select LEDS_CLASS + select INPUT_SPARSEKMAP + help + This is a driver for the IBM and Lenovo ThinkPad laptops. 
It adds + support for Fn-Fx key combinations, Bluetooth control, video + output switching, ThinkLight control, UltraBay eject and more. + For more information about this driver see + <file:Documentation/admin-guide/laptops/thinkpad-acpi.rst> and + <http://ibm-acpi.sf.net/> . + + This driver was formerly known as ibm-acpi. + + Extra functionality will be available if the rfkill (CONFIG_RFKILL) + and/or ALSA (CONFIG_SND) subsystems are available in the kernel. + Note that if you want ThinkPad-ACPI to be built-in instead of + modular, ALSA and rfkill will also have to be built-in. + + If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. + +config THINKPAD_ACPI_ALSA_SUPPORT + bool "Console audio control ALSA interface" + depends on THINKPAD_ACPI + depends on SND + depends on SND = y || THINKPAD_ACPI = SND + default y + help + Enables monitoring of the built-in console audio output control + (headphone and speakers), which is operated by the mute and (in + some ThinkPad models) volume hotkeys. + + If this option is enabled, ThinkPad-ACPI will export an ALSA card + with a single read-only mixer control, which should be used for + on-screen-display feedback purposes by the Desktop Environment. + + Optionally, the driver will also allow software control (the + ALSA mixer will be made read-write). Please refer to the driver + documentation for details. + + All IBM models have both volume and mute control. Newer Lenovo + models only have mute control (the volume hotkeys are just normal + keys and volume control is done through the main HDA mixer). + +config THINKPAD_ACPI_DEBUGFACILITIES + bool "Maintainer debug facilities" + depends on THINKPAD_ACPI + help + Enables extra stuff in the thinkpad-acpi which is completely useless + for normal use. Read the driver source to find out what it does. + + Say N here, unless you were told by a kernel maintainer to do + otherwise. + +config THINKPAD_ACPI_DEBUG + bool "Verbose debug mode" + depends on THINKPAD_ACPI + help + Enables extra debugging information, at the expense of a slightly + increase in driver size. + + If you are not sure, say N here. + +config THINKPAD_ACPI_UNSAFE_LEDS + bool "Allow control of important LEDs (unsafe)" + depends on THINKPAD_ACPI + help + Overriding LED state on ThinkPads can mask important + firmware alerts (like critical battery condition), or misled + the user into damaging the hardware (undocking or ejecting + the bay while buses are still active), etc. + + LED control on the ThinkPad is write-only (with very few + exceptions on very ancient models), which makes it + impossible to know beforehand if important information will + be lost when one changes LED state. + + Users that know what they are doing can enable this option + and the driver will allow control of every LED, including + the ones on the dock stations. + + Never enable this option on a distribution kernel. + + Say N here, unless you are building a kernel for your own + use, and need to control the important firmware LEDs. + +config THINKPAD_ACPI_VIDEO + bool "Video output control support" + depends on THINKPAD_ACPI + default y + help + Allows the thinkpad_acpi driver to provide an interface to control + the various video output ports. + + This feature often won't work well, depending on ThinkPad model, + display state, video output devices in use, whether there is a X + server running, phase of the moon, and the current mood of + Schroedinger's cat. 
If you can use X.org's RandR to control + your ThinkPad's video output ports instead of this feature, + don't think twice: do it and say N here to save memory and avoid + bad interactions with X.org. + + NOTE: access to this feature is limited to processes with the + CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms + where it interacts badly with X.org. + + If you are not sure, say Y here but do try to check if you could + be using X.org RandR instead. + +config THINKPAD_ACPI_HOTKEY_POLL + bool "Support NVRAM polling for hot keys" + depends on THINKPAD_ACPI + default y + help + Some thinkpad models benefit from NVRAM polling to detect a few of + the hot key press events. If you know your ThinkPad model does not + need to do NVRAM polling to support any of the hot keys you use, + unselecting this option will save about 1kB of memory. + + ThinkPads T40 and newer, R52 and newer, and X31 and newer are + unlikely to need NVRAM polling in their latest BIOS versions. + + NVRAM polling can detect at most the following keys: ThinkPad/Access + IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute, + Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12). + + If you are not sure, say Y here. The driver enables polling only if + it is strictly necessary to do so. + +config THINKPAD_LMI + tristate "Lenovo WMI-based systems management driver" + depends on ACPI_WMI + depends on DMI + select FW_ATTR_CLASS + help + This driver allows changing BIOS settings on Lenovo machines whose + BIOS support the WMI interface. + + To compile this driver as a module, choose M here: the module will + be called think-lmi. + +config YOGABOOK + tristate "Lenovo Yoga Book tablet key driver" + depends on ACPI_WMI + depends on INPUT + depends on I2C + select LEDS_CLASS + select NEW_LEDS + help + Say Y here if you want to support the 'Pen' key and keyboard backlight + control on the Lenovo Yoga Book tablets. + + To compile this driver as a module, choose M here: the module will + be called lenovo-yogabook. + +config YT2_1380 + tristate "Lenovo Yoga Tablet 2 1380 fast charge driver" + depends on SERIAL_DEV_BUS + depends on EXTCON + depends on ACPI + help + Say Y here to enable support for the custom fast charging protocol + found on the Lenovo Yoga Tablet 2 1380F / 1380L models. + + To compile this driver as a module, choose M here: the module will + be called lenovo-yogabook. + +config LENOVO_WMI_DATA01 + tristate + depends on ACPI_WMI + +config LENOVO_WMI_EVENTS + tristate + depends on ACPI_WMI + +config LENOVO_WMI_HELPERS + tristate + depends on ACPI_WMI + +config LENOVO_WMI_GAMEZONE + tristate "Lenovo GameZone WMI Driver" + depends on ACPI_WMI + depends on DMI + select ACPI_PLATFORM_PROFILE + select LENOVO_WMI_EVENTS + select LENOVO_WMI_HELPERS + select LENOVO_WMI_TUNING + help + Say Y here if you have a WMI aware Lenovo Legion device and would like to use the + platform-profile firmware interface to manage power usage. + + To compile this driver as a module, choose M here: the module will + be called lenovo-wmi-gamezone. + +config LENOVO_WMI_TUNING + tristate "Lenovo Other Mode WMI Driver" + depends on ACPI_WMI + select FW_ATTR_CLASS + select LENOVO_WMI_DATA01 + select LENOVO_WMI_EVENTS + select LENOVO_WMI_HELPERS + help + Say Y here if you have a WMI aware Lenovo Legion device and would like to use the + firmware_attributes API to control various tunable settings typically exposed by + Lenovo software in Windows. 
+ + To compile this driver as a module, choose M here: the module will + be called lenovo-wmi-other. diff --git a/drivers/platform/x86/lenovo/Makefile b/drivers/platform/x86/lenovo/Makefile new file mode 100644 index 000000000000..7b2128e3a214 --- /dev/null +++ b/drivers/platform/x86/lenovo/Makefile @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for linux/drivers/platform/x86/lenovo +# Lenovo x86 Platform Specific Drivers +# +obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o +obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o +obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o + +lenovo-target-$(CONFIG_LENOVO_WMI_HOTKEY_UTILITIES) += wmi-hotkey-utilities.o +lenovo-target-$(CONFIG_LENOVO_YMC) += ymc.o +lenovo-target-$(CONFIG_YOGABOOK) += yogabook.o +lenovo-target-$(CONFIG_YT2_1380) += yoga-tab2-pro-1380-fastcharger.o +lenovo-target-$(CONFIG_LENOVO_WMI_CAMERA) += wmi-camera.o +lenovo-target-$(CONFIG_LENOVO_WMI_DATA01) += wmi-capdata01.o +lenovo-target-$(CONFIG_LENOVO_WMI_EVENTS) += wmi-events.o +lenovo-target-$(CONFIG_LENOVO_WMI_HELPERS) += wmi-helpers.o +lenovo-target-$(CONFIG_LENOVO_WMI_GAMEZONE) += wmi-gamezone.o +lenovo-target-$(CONFIG_LENOVO_WMI_TUNING) += wmi-other.o + +# Add 'lenovo' prefix to each module listed in lenovo-target-* +define LENOVO_OBJ_TARGET +lenovo-$(1)-y := $(1).o +obj-$(2) += lenovo-$(1).o +endef + +$(foreach target, $(basename $(lenovo-target-y)), $(eval $(call LENOVO_OBJ_TARGET,$(target),y))) +$(foreach target, $(basename $(lenovo-target-m)), $(eval $(call LENOVO_OBJ_TARGET,$(target),m))) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/lenovo/ideapad-laptop.c index b5e4da6a6779..fcebfbaf0460 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/lenovo/ideapad-laptop.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/platform_profile.h> +#include <linux/power_supply.h> #include <linux/rfkill.h> #include <linux/seq_file.h> #include <linux/sysfs.h> @@ -35,6 +36,7 @@ #include <linux/wmi.h> #include "ideapad-laptop.h" +#include <acpi/battery.h> #include <acpi/video.h> #include <dt-bindings/leds/common.h> @@ -163,6 +165,7 @@ struct ideapad_private { struct backlight_device *blightdev; struct ideapad_dytc_priv *dytc; struct dentry *debug; + struct acpi_battery_hook battery_hook; unsigned long cfg; unsigned long r_touchpad_val; struct { @@ -604,6 +607,11 @@ static ssize_t camera_power_store(struct device *dev, static DEVICE_ATTR_RW(camera_power); +static void show_conservation_mode_deprecation_warning(struct device *dev) +{ + dev_warn_once(dev, "conservation_mode attribute has been deprecated, see charge_types.\n"); +} + static ssize_t conservation_mode_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -612,6 +620,8 @@ static ssize_t conservation_mode_show(struct device *dev, unsigned long result; int err; + show_conservation_mode_deprecation_warning(dev); + err = eval_gbmd(priv->adev->handle, &result); if (err) return err; @@ -627,6 +637,8 @@ static ssize_t conservation_mode_store(struct device *dev, bool state; int err; + show_conservation_mode_deprecation_warning(dev); + err = kstrtobool(buf, &state); if (err) return err; @@ -1669,7 +1681,7 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv) priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT; priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get; priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set; - priv->kbd_bl.led.flags = 
LED_BRIGHT_HW_CHANGED; + priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN; err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led); if (err) @@ -1728,7 +1740,7 @@ static int ideapad_fn_lock_led_init(struct ideapad_private *priv) priv->fn_lock.led.name = "platform::" LED_FUNCTION_FNLOCK; priv->fn_lock.led.brightness_get = ideapad_fn_lock_led_cdev_get; priv->fn_lock.led.brightness_set_blocking = ideapad_fn_lock_led_cdev_set; - priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED; + priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN; err = led_classdev_register(&priv->platform_device->dev, &priv->fn_lock.led); if (err) @@ -1988,10 +2000,90 @@ static const struct dmi_system_id ctrl_ps2_aux_port_list[] = { {} }; -static void ideapad_check_features(struct ideapad_private *priv) +static int ideapad_psy_ext_set_prop(struct power_supply *psy, + const struct power_supply_ext *ext, + void *ext_data, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct ideapad_private *priv = ext_data; + + switch (val->intval) { + case POWER_SUPPLY_CHARGE_TYPE_LONGLIFE: + return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_ON); + case POWER_SUPPLY_CHARGE_TYPE_STANDARD: + return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_OFF); + default: + return -EINVAL; + } +} + +static int ideapad_psy_ext_get_prop(struct power_supply *psy, + const struct power_supply_ext *ext, + void *ext_data, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct ideapad_private *priv = ext_data; + unsigned long result; + int err; + + err = eval_gbmd(priv->adev->handle, &result); + if (err) + return err; + + if (test_bit(GBMD_CONSERVATION_STATE_BIT, &result)) + val->intval = POWER_SUPPLY_CHARGE_TYPE_LONGLIFE; + else + val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD; + + return 0; +} + +static int ideapad_psy_prop_is_writeable(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp) +{ + return true; +} + +static const enum power_supply_property ideapad_power_supply_props[] = { + POWER_SUPPLY_PROP_CHARGE_TYPES, +}; + +static const struct power_supply_ext ideapad_battery_ext = { + .name = "ideapad_laptop", + .properties = ideapad_power_supply_props, + .num_properties = ARRAY_SIZE(ideapad_power_supply_props), + .charge_types = (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) | + BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)), + .get_property = ideapad_psy_ext_get_prop, + .set_property = ideapad_psy_ext_set_prop, + .property_is_writeable = ideapad_psy_prop_is_writeable, +}; + +static int ideapad_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook) +{ + struct ideapad_private *priv = container_of(hook, struct ideapad_private, battery_hook); + + return power_supply_register_extension(battery, &ideapad_battery_ext, + &priv->platform_device->dev, priv); +} + +static int ideapad_battery_remove(struct power_supply *battery, + struct acpi_battery_hook *hook) +{ + power_supply_unregister_extension(battery, &ideapad_battery_ext); + + return 0; +} + +static int ideapad_check_features(struct ideapad_private *priv) { acpi_handle handle = priv->adev->handle; unsigned long val; + int err; priv->features.set_fn_lock_led = set_fn_lock_led || dmi_check_system(set_fn_lock_led_list); @@ -2006,8 +2098,16 @@ static void ideapad_check_features(struct ideapad_private *priv) if (!read_ec_data(handle, VPCCMD_R_FAN, &val)) priv->features.fan_mode = true; - if (acpi_has_method(handle, "GBMD") 
&& acpi_has_method(handle, "SBMC")) + if (acpi_has_method(handle, "GBMD") && acpi_has_method(handle, "SBMC")) { priv->features.conservation_mode = true; + priv->battery_hook.add_battery = ideapad_battery_add; + priv->battery_hook.remove_battery = ideapad_battery_remove; + priv->battery_hook.name = "Ideapad Battery Extension"; + + err = devm_battery_hook_register(&priv->platform_device->dev, &priv->battery_hook); + if (err) + return err; + } if (acpi_has_method(handle, "DYTC")) priv->features.dytc = true; @@ -2042,6 +2142,8 @@ static void ideapad_check_features(struct ideapad_private *priv) } } } + + return 0; } #if IS_ENABLED(CONFIG_ACPI_WMI) @@ -2190,7 +2292,9 @@ static int ideapad_acpi_add(struct platform_device *pdev) if (err) return err; - ideapad_check_features(priv); + err = ideapad_check_features(priv); + if (err) + return err; ideapad_debugfs_init(priv); diff --git a/drivers/platform/x86/ideapad-laptop.h b/drivers/platform/x86/lenovo/ideapad-laptop.h index 1e52f2aa0aac..1e52f2aa0aac 100644 --- a/drivers/platform/x86/ideapad-laptop.h +++ b/drivers/platform/x86/lenovo/ideapad-laptop.h diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/lenovo/think-lmi.c index b73b84fdb15e..0992b41b6221 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/lenovo/think-lmi.c @@ -20,7 +20,7 @@ #include <linux/types.h> #include <linux/dmi.h> #include <linux/wmi.h> -#include "firmware_attributes_class.h" +#include "../firmware_attributes_class.h" #include "think-lmi.h" static bool debug_support; @@ -772,6 +772,7 @@ static ssize_t certificate_store(struct kobject *kobj, struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj); enum cert_install_mode install_mode = TLMI_CERT_INSTALL; char *auth_str, *new_cert; + const char *serial; char *signature; char *guid; int ret; @@ -789,9 +790,10 @@ static ssize_t certificate_store(struct kobject *kobj, return -EACCES; /* Format: 'serial#, signature' */ - auth_str = cert_command(setting, - dmi_get_system_info(DMI_PRODUCT_SERIAL), - setting->signature); + serial = dmi_get_system_info(DMI_PRODUCT_SERIAL); + if (!serial) + return -ENODEV; + auth_str = cert_command(setting, serial, setting->signature); if (!auth_str) return -ENOMEM; diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/lenovo/think-lmi.h index 9b014644d316..9b014644d316 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/lenovo/think-lmi.h diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/lenovo/thinkpad_acpi.c index b59b4d90b0c7..cc19fe520ea9 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/lenovo/thinkpad_acpi.c @@ -81,7 +81,7 @@ #include <sound/core.h> #include <sound/initval.h> -#include "dual_accel_detect.h" +#include "../dual_accel_detect.h" /* ThinkPad CMOS commands */ #define TP_CMOS_VOLUME_DOWN 0 @@ -559,12 +559,12 @@ static unsigned long __init tpacpi_check_quirks( return 0; } -static inline bool __pure __init tpacpi_is_lenovo(void) +static __always_inline bool __pure __init tpacpi_is_lenovo(void) { return thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO; } -static inline bool __pure __init tpacpi_is_ibm(void) +static __always_inline bool __pure __init tpacpi_is_ibm(void) { return thinkpad_id.vendor == PCI_VENDOR_ID_IBM; } diff --git a/drivers/platform/x86/lenovo-wmi-camera.c b/drivers/platform/x86/lenovo/wmi-camera.c index eb60fb9a5b3f..eb60fb9a5b3f 100644 --- a/drivers/platform/x86/lenovo-wmi-camera.c +++ b/drivers/platform/x86/lenovo/wmi-camera.c diff --git 
a/drivers/platform/x86/lenovo/wmi-capdata01.c b/drivers/platform/x86/lenovo/wmi-capdata01.c new file mode 100644 index 000000000000..c922680b3cba --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-capdata01.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Lenovo Capability Data 01 WMI Data Block driver. + * + * Lenovo Capability Data 01 provides information on tunable attributes used by + * the "Other Mode" WMI interface. The data includes if the attribute is + * supported by the hardware, the default_value, max_value, min_value, and step + * increment. Each attribute has multiple pages, one for each of the thermal + * modes managed by the Gamezone interface. + * + * Copyright (C) 2025 Derek J. Clark <derekjohn.clark@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/cleanup.h> +#include <linux/component.h> +#include <linux/container_of.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/gfp_types.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/mutex_types.h> +#include <linux/notifier.h> +#include <linux/overflow.h> +#include <linux/types.h> +#include <linux/wmi.h> + +#include "wmi-capdata01.h" + +#define LENOVO_CAPABILITY_DATA_01_GUID "7A8F5407-CB67-4D6E-B547-39B3BE018154" + +#define ACPI_AC_CLASS "ac_adapter" +#define ACPI_AC_NOTIFY_STATUS 0x80 + +struct lwmi_cd01_priv { + struct notifier_block acpi_nb; /* ACPI events */ + struct wmi_device *wdev; + struct cd01_list *list; +}; + +struct cd01_list { + struct mutex list_mutex; /* list R/W mutex */ + u8 count; + struct capdata01 data[]; +}; + +/** + * lwmi_cd01_component_bind() - Bind component to master device. + * @cd01_dev: Pointer to the lenovo-wmi-capdata01 driver parent device. + * @om_dev: Pointer to the lenovo-wmi-other driver parent device. + * @data: capdata01_list object pointer used to return the capability data. + * + * On lenovo-wmi-other's master bind, provide a pointer to the local capdata01 + * list. This is used to call lwmi_cd01_get_data to look up attribute data + * from the lenovo-wmi-other driver. + * + * Return: 0 + */ +static int lwmi_cd01_component_bind(struct device *cd01_dev, + struct device *om_dev, void *data) +{ + struct lwmi_cd01_priv *priv = dev_get_drvdata(cd01_dev); + struct cd01_list **cd01_list = data; + + *cd01_list = priv->list; + + return 0; +} + +static const struct component_ops lwmi_cd01_component_ops = { + .bind = lwmi_cd01_component_bind, +}; + +/** + * lwmi_cd01_get_data - Get the data of the specified attribute + * @list: The lenovo-wmi-capdata01 pointer to its cd01_list struct. + * @attribute_id: The capdata attribute ID to be found. + * @output: Pointer to a capdata01 struct to return the data. + * + * Retrieves the capability data 01 struct pointer for the given + * attribute for its specified thermal mode. + * + * Return: 0 on success, or -EINVAL. + */ +int lwmi_cd01_get_data(struct cd01_list *list, u32 attribute_id, struct capdata01 *output) +{ + u8 idx; + + guard(mutex)(&list->list_mutex); + for (idx = 0; idx < list->count; idx++) { + if (list->data[idx].id != attribute_id) + continue; + memcpy(output, &list->data[idx], sizeof(list->data[idx])); + return 0; + }; + + return -EINVAL; +} +EXPORT_SYMBOL_NS_GPL(lwmi_cd01_get_data, "LENOVO_WMI_CD01"); + +/** + * lwmi_cd01_cache() - Cache all WMI data block information + * @priv: lenovo-wmi-capdata01 driver data. + * + * Loop through each WMI data block and cache the data. + * + * Return: 0 on success, or an error. 
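lwmi_cd01_get_data(), defined above, is the lookup entry point exported for the "Other Mode" driver once the component has bound. A minimal consumer sketch (hypothetical caller and names; it assumes the cd01_list pointer was obtained through the component bind callback shown earlier):

#include <linux/device.h>
#include "wmi-capdata01.h"

/* Hypothetical consumer: dump the limits cached for one attribute ID. */
static int example_show_limits(struct device *dev, struct cd01_list *list,
			       u32 attr_id)
{
	struct capdata01 cd;
	int ret;

	ret = lwmi_cd01_get_data(list, attr_id, &cd);
	if (ret)
		return ret; /* -EINVAL: attribute not present in the cache */

	dev_dbg(dev, "min %u max %u step %u default %u supported %u\n",
		cd.min_value, cd.max_value, cd.step, cd.default_value,
		cd.supported);

	return 0;
}

A real caller also needs MODULE_IMPORT_NS("LENOVO_WMI_CD01"), as lenovo-wmi-other does further below.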
+ */ +static int lwmi_cd01_cache(struct lwmi_cd01_priv *priv) +{ + int idx; + + guard(mutex)(&priv->list->list_mutex); + for (idx = 0; idx < priv->list->count; idx++) { + union acpi_object *ret_obj __free(kfree) = NULL; + + ret_obj = wmidev_block_query(priv->wdev, idx); + if (!ret_obj) + return -ENODEV; + + if (ret_obj->type != ACPI_TYPE_BUFFER || + ret_obj->buffer.length < sizeof(priv->list->data[idx])) + continue; + + memcpy(&priv->list->data[idx], ret_obj->buffer.pointer, + ret_obj->buffer.length); + } + + return 0; +} + +/** + * lwmi_cd01_alloc() - Allocate a cd01_list struct in drvdata + * @priv: lenovo-wmi-capdata01 driver data. + * + * Allocate a cd01_list struct large enough to contain data from all WMI data + * blocks provided by the interface. + * + * Return: 0 on success, or an error. + */ +static int lwmi_cd01_alloc(struct lwmi_cd01_priv *priv) +{ + struct cd01_list *list; + size_t list_size; + int count, ret; + + count = wmidev_instance_count(priv->wdev); + list_size = struct_size(list, data, count); + + list = devm_kzalloc(&priv->wdev->dev, list_size, GFP_KERNEL); + if (!list) + return -ENOMEM; + + ret = devm_mutex_init(&priv->wdev->dev, &list->list_mutex); + if (ret) + return ret; + + list->count = count; + priv->list = list; + + return 0; +} + +/** + * lwmi_cd01_setup() - Cache all WMI data block information + * @priv: lenovo-wmi-capdata01 driver data. + * + * Allocate a cd01_list struct large enough to contain data from all WMI data + * blocks provided by the interface. Then loop through each data block and + * cache the data. + * + * Return: 0 on success, or an error code. + */ +static int lwmi_cd01_setup(struct lwmi_cd01_priv *priv) +{ + int ret; + + ret = lwmi_cd01_alloc(priv); + if (ret) + return ret; + + return lwmi_cd01_cache(priv); +} + +/** + * lwmi_cd01_notifier_call() - Call method for lenovo-wmi-capdata01 driver notifier. + * block call chain. + * @nb: The notifier_block registered to lenovo-wmi-events driver. + * @action: Unused. + * @data: The ACPI event. + * + * For LWMI_EVENT_THERMAL_MODE, set current_mode and notify platform_profile + * of a change. + * + * Return: notifier_block status. + */ +static int lwmi_cd01_notifier_call(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct acpi_bus_event *event = data; + struct lwmi_cd01_priv *priv; + int ret; + + if (strcmp(event->device_class, ACPI_AC_CLASS) != 0) + return NOTIFY_DONE; + + priv = container_of(nb, struct lwmi_cd01_priv, acpi_nb); + + switch (event->type) { + case ACPI_AC_NOTIFY_STATUS: + ret = lwmi_cd01_cache(priv); + if (ret) + return NOTIFY_BAD; + + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +/** + * lwmi_cd01_unregister() - Unregister the cd01 ACPI notifier_block. + * @data: The ACPI event notifier_block to unregister. 
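The caching code above leans on the scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex) releases the list mutex and __free(kfree) frees the returned ACPI object when the variables go out of scope, so the continue and error paths need no explicit unlock or kfree. A reduced sketch of the same idiom for a single block query (illustrative only; the helper name is made up):

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wmi.h>

static int example_query_one(struct wmi_device *wdev, u8 instance,
			     void *dst, size_t len)
{
	/* Freed automatically when ret_obj goes out of scope. */
	union acpi_object *ret_obj __free(kfree) =
			wmidev_block_query(wdev, instance);

	if (!ret_obj)
		return -ENODEV;

	if (ret_obj->type != ACPI_TYPE_BUFFER || ret_obj->buffer.length < len)
		return -EINVAL;

	memcpy(dst, ret_obj->buffer.pointer, len);

	return 0;
}

Copying only the expected length, rather than the buffer length reported by the firmware, keeps the destination from overflowing if a BIOS ever returns a larger block than struct capdata01.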
+ */ +static void lwmi_cd01_unregister(void *data) +{ + struct notifier_block *acpi_nb = data; + + unregister_acpi_notifier(acpi_nb); +} + +static int lwmi_cd01_probe(struct wmi_device *wdev, const void *context) + +{ + struct lwmi_cd01_priv *priv; + int ret; + + priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->wdev = wdev; + dev_set_drvdata(&wdev->dev, priv); + + ret = lwmi_cd01_setup(priv); + if (ret) + return ret; + + priv->acpi_nb.notifier_call = lwmi_cd01_notifier_call; + + ret = register_acpi_notifier(&priv->acpi_nb); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&wdev->dev, lwmi_cd01_unregister, &priv->acpi_nb); + if (ret) + return ret; + + return component_add(&wdev->dev, &lwmi_cd01_component_ops); +} + +static void lwmi_cd01_remove(struct wmi_device *wdev) +{ + component_del(&wdev->dev, &lwmi_cd01_component_ops); +} + +static const struct wmi_device_id lwmi_cd01_id_table[] = { + { LENOVO_CAPABILITY_DATA_01_GUID, NULL }, + {} +}; + +static struct wmi_driver lwmi_cd01_driver = { + .driver = { + .name = "lenovo_wmi_cd01", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .id_table = lwmi_cd01_id_table, + .probe = lwmi_cd01_probe, + .remove = lwmi_cd01_remove, + .no_singleton = true, +}; + +/** + * lwmi_cd01_match() - Match rule for the master driver. + * @dev: Pointer to the capability data 01 parent device. + * @data: Unused void pointer for passing match criteria. + * + * Return: int. + */ +int lwmi_cd01_match(struct device *dev, void *data) +{ + return dev->driver == &lwmi_cd01_driver.driver; +} +EXPORT_SYMBOL_NS_GPL(lwmi_cd01_match, "LENOVO_WMI_CD01"); + +module_wmi_driver(lwmi_cd01_driver); + +MODULE_DEVICE_TABLE(wmi, lwmi_cd01_id_table); +MODULE_AUTHOR("Derek J. Clark <derekjohn.clark@gmail.com>"); +MODULE_DESCRIPTION("Lenovo Capability Data 01 WMI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/lenovo/wmi-capdata01.h b/drivers/platform/x86/lenovo/wmi-capdata01.h new file mode 100644 index 000000000000..bd06c5751f68 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-capdata01.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* Copyright (C) 2025 Derek J. Clark <derekjohn.clark@gmail.com> */ + +#ifndef _LENOVO_WMI_CAPDATA01_H_ +#define _LENOVO_WMI_CAPDATA01_H_ + +#include <linux/types.h> + +struct device; +struct cd01_list; + +struct capdata01 { + u32 id; + u32 supported; + u32 default_value; + u32 step; + u32 min_value; + u32 max_value; +}; + +int lwmi_cd01_get_data(struct cd01_list *list, u32 attribute_id, struct capdata01 *output); +int lwmi_cd01_match(struct device *dev, void *data); + +#endif /* !_LENOVO_WMI_CAPDATA01_H_ */ diff --git a/drivers/platform/x86/lenovo/wmi-events.c b/drivers/platform/x86/lenovo/wmi-events.c new file mode 100644 index 000000000000..0994cd7dd504 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-events.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Lenovo WMI Events driver. Lenovo WMI interfaces provide various + * hardware triggered events that many drivers need to have propagated. + * This driver provides a uniform entrypoint for these events so that + * any driver that needs to respond to these events can subscribe to a + * notifier chain. + * + * Copyright (C) 2025 Derek J. 
Clark <derekjohn.clark@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/export.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/types.h> +#include <linux/wmi.h> + +#include "wmi-events.h" +#include "wmi-gamezone.h" + +#define THERMAL_MODE_EVENT_GUID "D320289E-8FEA-41E0-86F9-911D83151B5F" + +#define LWMI_EVENT_DEVICE(guid, type) \ + .guid_string = (guid), .context = &(enum lwmi_events_type) \ + { \ + type \ + } + +static BLOCKING_NOTIFIER_HEAD(events_chain_head); + +struct lwmi_events_priv { + struct wmi_device *wdev; + enum lwmi_events_type type; +}; + +/** + * lwmi_events_register_notifier() - Add a notifier to the notifier chain. + * @nb: The notifier_block struct to register + * + * Call blocking_notifier_chain_register to register the notifier block to the + * lenovo-wmi-events driver blocking notifier chain. + * + * Return: 0 on success, %-EEXIST on error. + */ +int lwmi_events_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&events_chain_head, nb); +} +EXPORT_SYMBOL_NS_GPL(lwmi_events_register_notifier, "LENOVO_WMI_EVENTS"); + +/** + * lwmi_events_unregister_notifier() - Remove a notifier from the notifier + * chain. + * @nb: The notifier_block struct to unregister + * + * Call blocking_notifier_chain_unregister to unregister the notifier block + * from the lenovo-wmi-events driver blocking notifier chain. + * + * Return: 0 on success, %-ENOENT on error. + */ +int lwmi_events_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&events_chain_head, nb); +} +EXPORT_SYMBOL_NS_GPL(lwmi_events_unregister_notifier, "LENOVO_WMI_EVENTS"); + +/** + * devm_lwmi_events_unregister_notifier() - Remove a notifier from the notifier + * chain. + * @data: Void pointer to the notifier_block struct to unregister. + * + * Call lwmi_events_unregister_notifier to unregister the notifier block from + * the lenovo-wmi-events driver blocking notifier chain. + * + * Return: 0 on success, %-ENOENT on error. + */ +static void devm_lwmi_events_unregister_notifier(void *data) +{ + struct notifier_block *nb = data; + + lwmi_events_unregister_notifier(nb); +} + +/** + * devm_lwmi_events_register_notifier() - Add a notifier to the notifier chain. + * @dev: The parent device of the notifier_block struct. + * @nb: The notifier_block struct to register + * + * Call lwmi_events_register_notifier to register the notifier block to the + * lenovo-wmi-events driver blocking notifier chain. Then add, as a device + * managed action, unregister_notifier to automatically unregister the + * notifier block upon its parent device removal. + * + * Return: 0 on success, or an error code. + */ +int devm_lwmi_events_register_notifier(struct device *dev, + struct notifier_block *nb) +{ + int ret; + + ret = lwmi_events_register_notifier(nb); + if (ret < 0) + return ret; + + return devm_add_action_or_reset(dev, devm_lwmi_events_unregister_notifier, nb); +} +EXPORT_SYMBOL_NS_GPL(devm_lwmi_events_register_notifier, "LENOVO_WMI_EVENTS"); + +/** + * lwmi_events_notify() - Call functions for the notifier call chain. + * @wdev: The parent WMI device of the driver. + * @obj: ACPI object passed by the registered WMI Event. + * + * Validate WMI event data and notify all registered drivers of the event and + * its output. + * + * Return: 0 on success, or an error code. 
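devm_lwmi_events_register_notifier() above is the intended entry point for other Lenovo WMI drivers; the gamezone driver later in this series is the first subscriber. A minimal hypothetical listener (assumes MODULE_IMPORT_NS("LENOVO_WMI_EVENTS") and that the payload is the int thermal mode sent by lwmi_events_notify() below):

#include <linux/device.h>
#include <linux/notifier.h>
#include "wmi-events.h"

static int example_event_call(struct notifier_block *nb, unsigned long cmd,
			      void *data)
{
	int *thermal_mode = data;

	switch (cmd) {
	case LWMI_EVENT_THERMAL_MODE:
		pr_debug("thermal mode changed to %d\n", *thermal_mode);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int example_subscribe(struct device *dev, struct notifier_block *nb)
{
	nb->notifier_call = example_event_call;

	/* Unregisters automatically when dev goes away. */
	return devm_lwmi_events_register_notifier(dev, nb);
}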
+ */ +static void lwmi_events_notify(struct wmi_device *wdev, union acpi_object *obj) +{ + struct lwmi_events_priv *priv = dev_get_drvdata(&wdev->dev); + int sel_prof; + int ret; + + switch (priv->type) { + case LWMI_EVENT_THERMAL_MODE: + if (obj->type != ACPI_TYPE_INTEGER) + return; + + sel_prof = obj->integer.value; + + switch (sel_prof) { + case LWMI_GZ_THERMAL_MODE_QUIET: + case LWMI_GZ_THERMAL_MODE_BALANCED: + case LWMI_GZ_THERMAL_MODE_PERFORMANCE: + case LWMI_GZ_THERMAL_MODE_EXTREME: + case LWMI_GZ_THERMAL_MODE_CUSTOM: + ret = blocking_notifier_call_chain(&events_chain_head, + LWMI_EVENT_THERMAL_MODE, + &sel_prof); + if (ret == NOTIFY_BAD) + dev_err(&wdev->dev, + "Failed to send notification to call chain for WMI Events\n"); + return; + default: + dev_err(&wdev->dev, "Got invalid thermal mode: %x", + sel_prof); + return; + } + break; + default: + return; + } +} + +static int lwmi_events_probe(struct wmi_device *wdev, const void *context) +{ + struct lwmi_events_priv *priv; + + if (!context) + return -EINVAL; + + priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->wdev = wdev; + priv->type = *(enum lwmi_events_type *)context; + dev_set_drvdata(&wdev->dev, priv); + + return 0; +} + +static const struct wmi_device_id lwmi_events_id_table[] = { + { LWMI_EVENT_DEVICE(THERMAL_MODE_EVENT_GUID, LWMI_EVENT_THERMAL_MODE) }, + {} +}; + +static struct wmi_driver lwmi_events_driver = { + .driver = { + .name = "lenovo_wmi_events", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .id_table = lwmi_events_id_table, + .probe = lwmi_events_probe, + .notify = lwmi_events_notify, + .no_singleton = true, +}; + +module_wmi_driver(lwmi_events_driver); + +MODULE_DEVICE_TABLE(wmi, lwmi_events_id_table); +MODULE_AUTHOR("Derek J. Clark <derekjohn.clark@gmail.com>"); +MODULE_DESCRIPTION("Lenovo WMI Events Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/lenovo/wmi-events.h b/drivers/platform/x86/lenovo/wmi-events.h new file mode 100644 index 000000000000..cd34e886912c --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-events.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* Copyright (C) 2025 Derek J. Clark <derekjohn.clark@gmail.com> */ + +#ifndef _LENOVO_WMI_EVENTS_H_ +#define _LENOVO_WMI_EVENTS_H_ + +struct device; +struct notifier_block; + +enum lwmi_events_type { + LWMI_EVENT_THERMAL_MODE = 1, +}; + +int lwmi_events_register_notifier(struct notifier_block *nb); +int lwmi_events_unregister_notifier(struct notifier_block *nb); +int devm_lwmi_events_register_notifier(struct device *dev, + struct notifier_block *nb); + +#endif /* !_LENOVO_WMI_EVENTS_H_ */ diff --git a/drivers/platform/x86/lenovo/wmi-gamezone.c b/drivers/platform/x86/lenovo/wmi-gamezone.c new file mode 100644 index 000000000000..0eb7fe8222f4 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-gamezone.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Lenovo GameZone WMI interface driver. + * + * The GameZone WMI interface provides platform profile and fan curve settings + * for devices that fall under the "Gaming Series" of Lenovo Legion devices. + * + * Copyright (C) 2025 Derek J. 
Clark <derekjohn.clark@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/export.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/platform_profile.h> +#include <linux/spinlock.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> +#include <linux/wmi.h> + +#include "wmi-events.h" +#include "wmi-gamezone.h" +#include "wmi-helpers.h" +#include "wmi-other.h" + +#define LENOVO_GAMEZONE_GUID "887B54E3-DDDC-4B2C-8B88-68A26A8835D0" + +#define LWMI_GZ_METHOD_ID_SMARTFAN_SUP 43 +#define LWMI_GZ_METHOD_ID_SMARTFAN_SET 44 +#define LWMI_GZ_METHOD_ID_SMARTFAN_GET 45 + +static BLOCKING_NOTIFIER_HEAD(gz_chain_head); + +struct lwmi_gz_priv { + enum thermal_mode current_mode; + struct notifier_block event_nb; + struct notifier_block mode_nb; + spinlock_t gz_mode_lock; /* current_mode lock */ + struct wmi_device *wdev; + int extreme_supported; + struct device *ppdev; +}; + +struct quirk_entry { + bool extreme_supported; +}; + +static struct quirk_entry quirk_no_extreme_bug = { + .extreme_supported = false, +}; + +/** + * lwmi_gz_mode_call() - Call method for lenovo-wmi-other driver notifier. + * + * @nb: The notifier_block registered to lenovo-wmi-other driver. + * @cmd: The event type. + * @data: Thermal mode enum pointer pointer for returning the thermal mode. + * + * For LWMI_GZ_GET_THERMAL_MODE, retrieve the current thermal mode. + * + * Return: Notifier_block status. + */ +static int lwmi_gz_mode_call(struct notifier_block *nb, unsigned long cmd, + void *data) +{ + enum thermal_mode **mode = data; + struct lwmi_gz_priv *priv; + + priv = container_of(nb, struct lwmi_gz_priv, mode_nb); + + switch (cmd) { + case LWMI_GZ_GET_THERMAL_MODE: + scoped_guard(spinlock, &priv->gz_mode_lock) { + **mode = priv->current_mode; + } + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +/** + * lwmi_gz_event_call() - Call method for lenovo-wmi-events driver notifier. + * block call chain. + * @nb: The notifier_block registered to lenovo-wmi-events driver. + * @cmd: The event type. + * @data: The data to be updated by the event. + * + * For LWMI_EVENT_THERMAL_MODE, set current_mode and notify platform_profile + * of a change. + * + * Return: notifier_block status. + */ +static int lwmi_gz_event_call(struct notifier_block *nb, unsigned long cmd, + void *data) +{ + enum thermal_mode *mode = data; + struct lwmi_gz_priv *priv; + + priv = container_of(nb, struct lwmi_gz_priv, event_nb); + + switch (cmd) { + case LWMI_EVENT_THERMAL_MODE: + scoped_guard(spinlock, &priv->gz_mode_lock) { + priv->current_mode = *mode; + } + platform_profile_notify(priv->ppdev); + return NOTIFY_STOP; + default: + return NOTIFY_DONE; + } +} + +/** + * lwmi_gz_thermal_mode_supported() - Get the version of the WMI + * interface to determine the support level. + * @wdev: The Gamezone WMI device. + * @supported: Pointer to return the support level with. + * + * Return: 0 on success, or an error code. + */ +static int lwmi_gz_thermal_mode_supported(struct wmi_device *wdev, + int *supported) +{ + return lwmi_dev_evaluate_int(wdev, 0x0, LWMI_GZ_METHOD_ID_SMARTFAN_SUP, + NULL, 0, supported); +} + +/** + * lwmi_gz_thermal_mode_get() - Get the current thermal mode. + * @wdev: The Gamezone interface WMI device. + * @mode: Pointer to return the thermal mode with. + * + * Return: 0 on success, or an error code. 
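All three GameZone calls funnel through lwmi_dev_evaluate_int() from wmi-helpers; only the method ID and the optional input buffer differ. A hedged sketch, written as if it lived inside this driver (the SMARTFAN method IDs are file-local constants, so this is illustrative rather than a public API), that sets a thermal mode and reads it back to confirm the firmware accepted it:

static int example_set_and_verify(struct wmi_device *wdev,
				  enum thermal_mode mode)
{
	struct wmi_method_args_32 args = { .arg0 = mode };
	u32 readback;
	int ret;

	ret = lwmi_dev_evaluate_int(wdev, 0x0, LWMI_GZ_METHOD_ID_SMARTFAN_SET,
				    (unsigned char *)&args, sizeof(args), NULL);
	if (ret)
		return ret;

	ret = lwmi_dev_evaluate_int(wdev, 0x0, LWMI_GZ_METHOD_ID_SMARTFAN_GET,
				    NULL, 0, &readback);
	if (ret)
		return ret;

	return readback == mode ? 0 : -EIO;
}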
+ */ +static int lwmi_gz_thermal_mode_get(struct wmi_device *wdev, + enum thermal_mode *mode) +{ + return lwmi_dev_evaluate_int(wdev, 0x0, LWMI_GZ_METHOD_ID_SMARTFAN_GET, + NULL, 0, mode); +} + +/** + * lwmi_gz_profile_get() - Get the current platform profile. + * @dev: the Gamezone interface parent device. + * @profile: Pointer to provide the current platform profile with. + * + * Call lwmi_gz_thermal_mode_get and convert the thermal mode into a platform + * profile based on the support level of the interface. + * + * Return: 0 on success, or an error code. + */ +static int lwmi_gz_profile_get(struct device *dev, + enum platform_profile_option *profile) +{ + struct lwmi_gz_priv *priv = dev_get_drvdata(dev); + enum thermal_mode mode; + int ret; + + ret = lwmi_gz_thermal_mode_get(priv->wdev, &mode); + if (ret) + return ret; + + switch (mode) { + case LWMI_GZ_THERMAL_MODE_QUIET: + *profile = PLATFORM_PROFILE_LOW_POWER; + break; + case LWMI_GZ_THERMAL_MODE_BALANCED: + *profile = PLATFORM_PROFILE_BALANCED; + break; + case LWMI_GZ_THERMAL_MODE_PERFORMANCE: + if (priv->extreme_supported) { + *profile = PLATFORM_PROFILE_BALANCED_PERFORMANCE; + break; + } + *profile = PLATFORM_PROFILE_PERFORMANCE; + break; + case LWMI_GZ_THERMAL_MODE_EXTREME: + *profile = PLATFORM_PROFILE_PERFORMANCE; + break; + case LWMI_GZ_THERMAL_MODE_CUSTOM: + *profile = PLATFORM_PROFILE_CUSTOM; + break; + default: + return -EINVAL; + } + + guard(spinlock)(&priv->gz_mode_lock); + priv->current_mode = mode; + + return 0; +} + +/** + * lwmi_gz_profile_set() - Set the current platform profile. + * @dev: The Gamezone interface parent device. + * @profile: Pointer to the desired platform profile. + * + * Convert the given platform profile into a thermal mode based on the support + * level of the interface, then call the WMI method to set the thermal mode. + * + * Return: 0 on success, or an error code. 
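The get/set pair above and below implement a mapping whose performance leg depends on whether the firmware exposes the extreme mode; summarized here for reference, as read back by lwmi_gz_profile_get():

/*
 * Thermal mode reported by firmware    Profile (extreme supported)    Profile (no extreme)
 * LWMI_GZ_THERMAL_MODE_QUIET           low-power                      low-power
 * LWMI_GZ_THERMAL_MODE_BALANCED        balanced                       balanced
 * LWMI_GZ_THERMAL_MODE_PERFORMANCE     balanced-performance           performance
 * LWMI_GZ_THERMAL_MODE_EXTREME         performance                    not expected
 * LWMI_GZ_THERMAL_MODE_CUSTOM          custom                         custom
 */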
+ */ +static int lwmi_gz_profile_set(struct device *dev, + enum platform_profile_option profile) +{ + struct lwmi_gz_priv *priv = dev_get_drvdata(dev); + struct wmi_method_args_32 args; + enum thermal_mode mode; + int ret; + + switch (profile) { + case PLATFORM_PROFILE_LOW_POWER: + mode = LWMI_GZ_THERMAL_MODE_QUIET; + break; + case PLATFORM_PROFILE_BALANCED: + mode = LWMI_GZ_THERMAL_MODE_BALANCED; + break; + case PLATFORM_PROFILE_BALANCED_PERFORMANCE: + mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE; + break; + case PLATFORM_PROFILE_PERFORMANCE: + if (priv->extreme_supported) { + mode = LWMI_GZ_THERMAL_MODE_EXTREME; + break; + } + mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE; + break; + case PLATFORM_PROFILE_CUSTOM: + mode = LWMI_GZ_THERMAL_MODE_CUSTOM; + break; + default: + return -EOPNOTSUPP; + } + + args.arg0 = mode; + + ret = lwmi_dev_evaluate_int(priv->wdev, 0x0, + LWMI_GZ_METHOD_ID_SMARTFAN_SET, + (u8 *)&args, sizeof(args), NULL); + if (ret) + return ret; + + guard(spinlock)(&priv->gz_mode_lock); + priv->current_mode = mode; + + return 0; +} + +static const struct dmi_system_id fwbug_list[] = { + { + .ident = "Legion Go 8APU1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"), + }, + .driver_data = &quirk_no_extreme_bug, + }, + { + .ident = "Legion Go S 8APU1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go S 8APU1"), + }, + .driver_data = &quirk_no_extreme_bug, + }, + { + .ident = "Legion Go S 8ARP1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go S 8ARP1"), + }, + .driver_data = &quirk_no_extreme_bug, + }, + {}, + +}; + +/** + * lwmi_gz_extreme_supported() - Evaluate if a device supports extreme thermal mode. + * @profile_support_ver: Version of the WMI interface. + * + * Determine if the extreme thermal mode is supported by the hardware. + * Anything version 5 or lower does not. For devices with a version 6 or + * greater do a DMI check, as some devices report a version that supports + * extreme mode but have an incomplete entry in the BIOS. To ensure this + * cannot be set, quirk them to prevent assignment. + * + * Return: bool. + */ +static bool lwmi_gz_extreme_supported(int profile_support_ver) +{ + const struct dmi_system_id *dmi_id; + struct quirk_entry *quirks; + + if (profile_support_ver < 6) + return false; + + dmi_id = dmi_first_match(fwbug_list); + if (!dmi_id) + return true; + + quirks = dmi_id->driver_data; + + return quirks->extreme_supported; +} + +/** + * lwmi_gz_platform_profile_probe - Enable and set up the platform profile + * device. + * @drvdata: Driver data for the interface. + * @choices: Container for enabled platform profiles. + * + * Determine if thermal mode is supported, and if so to what feature level. + * Then enable all supported platform profiles. + * + * Return: 0 on success, or an error code. 
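The fwbug_list above covers Legion Go models whose firmware reports SmartFan version 6 or higher but has an incomplete extreme-mode entry; a newly affected machine would get one more element in the same table. A hypothetical example entry (the model string is a placeholder, not a known quirk):

	{
		.ident = "Hypothetical Legion Model",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Hypothetical Legion Model"),
		},
		.driver_data = &quirk_no_extreme_bug,
	},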
+ */ +static int lwmi_gz_platform_profile_probe(void *drvdata, unsigned long *choices) +{ + struct lwmi_gz_priv *priv = drvdata; + int profile_support_ver; + int ret; + + ret = lwmi_gz_thermal_mode_supported(priv->wdev, &profile_support_ver); + if (ret) + return ret; + + if (profile_support_ver < 1) + return -ENODEV; + + set_bit(PLATFORM_PROFILE_LOW_POWER, choices); + set_bit(PLATFORM_PROFILE_BALANCED, choices); + set_bit(PLATFORM_PROFILE_PERFORMANCE, choices); + set_bit(PLATFORM_PROFILE_CUSTOM, choices); + + priv->extreme_supported = lwmi_gz_extreme_supported(profile_support_ver); + if (priv->extreme_supported) + set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices); + + return 0; +} + +static const struct platform_profile_ops lwmi_gz_platform_profile_ops = { + .probe = lwmi_gz_platform_profile_probe, + .profile_get = lwmi_gz_profile_get, + .profile_set = lwmi_gz_profile_set, +}; + +static int lwmi_gz_probe(struct wmi_device *wdev, const void *context) +{ + struct lwmi_gz_priv *priv; + int ret; + + priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->wdev = wdev; + dev_set_drvdata(&wdev->dev, priv); + + priv->ppdev = devm_platform_profile_register(&wdev->dev, "lenovo-wmi-gamezone", + priv, &lwmi_gz_platform_profile_ops); + if (IS_ERR(priv->ppdev)) + return -ENODEV; + + spin_lock_init(&priv->gz_mode_lock); + + ret = lwmi_gz_thermal_mode_get(wdev, &priv->current_mode); + if (ret) + return ret; + + priv->event_nb.notifier_call = lwmi_gz_event_call; + ret = devm_lwmi_events_register_notifier(&wdev->dev, &priv->event_nb); + if (ret) + return ret; + + priv->mode_nb.notifier_call = lwmi_gz_mode_call; + return devm_lwmi_om_register_notifier(&wdev->dev, &priv->mode_nb); +} + +static const struct wmi_device_id lwmi_gz_id_table[] = { + { LENOVO_GAMEZONE_GUID, NULL }, + {} +}; + +static struct wmi_driver lwmi_gz_driver = { + .driver = { + .name = "lenovo_wmi_gamezone", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .id_table = lwmi_gz_id_table, + .probe = lwmi_gz_probe, + .no_singleton = true, +}; + +module_wmi_driver(lwmi_gz_driver); + +MODULE_IMPORT_NS("LENOVO_WMI_EVENTS"); +MODULE_IMPORT_NS("LENOVO_WMI_HELPERS"); +MODULE_IMPORT_NS("LENOVO_WMI_OTHER"); +MODULE_DEVICE_TABLE(wmi, lwmi_gz_id_table); +MODULE_AUTHOR("Derek J. Clark <derekjohn.clark@gmail.com>"); +MODULE_DESCRIPTION("Lenovo GameZone WMI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/lenovo/wmi-gamezone.h b/drivers/platform/x86/lenovo/wmi-gamezone.h new file mode 100644 index 000000000000..6b163a5eeb95 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-gamezone.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* Copyright (C) 2025 Derek J. Clark <derekjohn.clark@gmail.com> */ + +#ifndef _LENOVO_WMI_GAMEZONE_H_ +#define _LENOVO_WMI_GAMEZONE_H_ + +enum gamezone_events_type { + LWMI_GZ_GET_THERMAL_MODE = 1, +}; + +enum thermal_mode { + LWMI_GZ_THERMAL_MODE_QUIET = 0x01, + LWMI_GZ_THERMAL_MODE_BALANCED = 0x02, + LWMI_GZ_THERMAL_MODE_PERFORMANCE = 0x03, + LWMI_GZ_THERMAL_MODE_EXTREME = 0xE0, /* Ver 6+ */ + LWMI_GZ_THERMAL_MODE_CUSTOM = 0xFF, +}; + +#endif /* !_LENOVO_WMI_GAMEZONE_H_ */ diff --git a/drivers/platform/x86/lenovo/wmi-helpers.c b/drivers/platform/x86/lenovo/wmi-helpers.c new file mode 100644 index 000000000000..f6fef6296251 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-helpers.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Lenovo Legion WMI helpers driver. 
+ * + * The Lenovo Legion WMI interface is broken up into multiple GUID interfaces + * that require cross-references between GUID's for some functionality. The + * "Custom Mode" interface is a legacy interface for managing and displaying + * CPU & GPU power and hwmon settings and readings. The "Other Mode" interface + * is a modern interface that replaces or extends the "Custom Mode" interface + * methods. The "Gamezone" interface adds advanced features such as fan + * profiles and overclocking. The "Lighting" interface adds control of various + * status lights related to different hardware components. Each of these + * drivers uses a common procedure to get data from the WMI interface, + * enumerated here. + * + * Copyright (C) 2025 Derek J. Clark <derekjohn.clark@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/cleanup.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/module.h> +#include <linux/wmi.h> + +#include "wmi-helpers.h" + +/** + * lwmi_dev_evaluate_int() - Helper function for calling WMI methods that + * return an integer. + * @wdev: Pointer to the WMI device to be called. + * @instance: Instance of the called method. + * @method_id: WMI Method ID for the method to be called. + * @buf: Buffer of all arguments for the given method_id. + * @size: Length of the buffer. + * @retval: Pointer for the return value to be assigned. + * + * Calls wmidev_evaluate_method for Lenovo WMI devices that return an ACPI + * integer. Validates the return value type and assigns the value to the + * retval pointer. + * + * Return: 0 on success, or an error code. + */ +int lwmi_dev_evaluate_int(struct wmi_device *wdev, u8 instance, u32 method_id, + unsigned char *buf, size_t size, u32 *retval) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *ret_obj __free(kfree) = NULL; + struct acpi_buffer input = { size, buf }; + acpi_status status; + + status = wmidev_evaluate_method(wdev, instance, method_id, &input, + &output); + if (ACPI_FAILURE(status)) + return -EIO; + + if (retval) { + ret_obj = output.pointer; + if (!ret_obj) + return -ENODATA; + + if (ret_obj->type != ACPI_TYPE_INTEGER) + return -ENXIO; + + *retval = (u32)ret_obj->integer.value; + } + + return 0; +}; +EXPORT_SYMBOL_NS_GPL(lwmi_dev_evaluate_int, "LENOVO_WMI_HELPERS"); + +MODULE_AUTHOR("Derek J. Clark <derekjohn.clark@gmail.com>"); +MODULE_DESCRIPTION("Lenovo WMI Helpers Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/lenovo/wmi-helpers.h b/drivers/platform/x86/lenovo/wmi-helpers.h new file mode 100644 index 000000000000..20fd21749803 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-helpers.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* Copyright (C) 2025 Derek J. 
Clark <derekjohn.clark@gmail.com> */ + +#ifndef _LENOVO_WMI_HELPERS_H_ +#define _LENOVO_WMI_HELPERS_H_ + +#include <linux/types.h> + +struct wmi_device; + +struct wmi_method_args_32 { + u32 arg0; + u32 arg1; +}; + +int lwmi_dev_evaluate_int(struct wmi_device *wdev, u8 instance, u32 method_id, + unsigned char *buf, size_t size, u32 *retval); + +#endif /* !_LENOVO_WMI_HELPERS_H_ */ diff --git a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c b/drivers/platform/x86/lenovo/wmi-hotkey-utilities.c index 89153afd7015..7b9bad1978ff 100644 --- a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c +++ b/drivers/platform/x86/lenovo/wmi-hotkey-utilities.c @@ -122,26 +122,35 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct return -EIO; union acpi_object *obj __free(kfree) = output.pointer; - if (obj && obj->type == ACPI_TYPE_INTEGER) - led_version = obj->integer.value; - else + if (!obj || obj->type != ACPI_TYPE_INTEGER) return -EIO; - wpriv->cdev[led_type].max_brightness = LED_ON; - wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME; + led_version = obj->integer.value; + + /* + * Output parameters define: 0 means mute LED is not supported, Non-zero means + * mute LED can be supported. + */ + if (led_version == 0) + return 0; + switch (led_type) { case MIC_MUTE: - if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER) - return -EIO; + if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER) { + pr_warn("The MIC_MUTE LED of this device isn't supported.\n"); + return 0; + } wpriv->cdev[led_type].name = "platform::micmute"; wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_micmute_led_set; wpriv->cdev[led_type].default_trigger = "audio-micmute"; break; case AUDIO_MUTE: - if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER) - return -EIO; + if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER) { + pr_warn("The AUDIO_MUTE LED of this device isn't supported.\n"); + return 0; + } wpriv->cdev[led_type].name = "platform::mute"; wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_audiomute_led_set; @@ -152,6 +161,9 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct return -EINVAL; } + wpriv->cdev[led_type].max_brightness = LED_ON; + wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME; + err = devm_led_classdev_register(dev, &wpriv->cdev[led_type]); if (err < 0) { dev_err(dev, "Could not register mute LED %d : %d\n", led_type, err); diff --git a/drivers/platform/x86/lenovo/wmi-other.c b/drivers/platform/x86/lenovo/wmi-other.c new file mode 100644 index 000000000000..2a960b278f11 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-other.c @@ -0,0 +1,665 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Lenovo Other Mode WMI interface driver. + * + * This driver uses the fw_attributes class to expose the various WMI functions + * provided by the "Other Mode" WMI interface. This enables CPU and GPU power + * limit as well as various other attributes for devices that fall under the + * "Gaming Series" of Lenovo laptop devices. Each attribute exposed by the + * "Other Mode" interface has a corresponding Capability Data struct that + * allows the driver to probe details about the attribute such as if it is + * supported by the hardware, the default_value, max_value, min_value, and step + * increment. + * + * These attributes typically don't fit anywhere else in the sysfs and are set + * in Windows using one of Lenovo's multiple user applications. + * + * Copyright (C) 2025 Derek J. 
Clark <derekjohn.clark@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/bitfield.h> +#include <linux/cleanup.h> +#include <linux/component.h> +#include <linux/container_of.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/gfp_types.h> +#include <linux/idr.h> +#include <linux/kdev_t.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/platform_profile.h> +#include <linux/types.h> +#include <linux/wmi.h> + +#include "wmi-capdata01.h" +#include "wmi-events.h" +#include "wmi-gamezone.h" +#include "wmi-helpers.h" +#include "wmi-other.h" +#include "../firmware_attributes_class.h" + +#define LENOVO_OTHER_MODE_GUID "DC2A8805-3A8C-41BA-A6F7-092E0089CD3B" + +#define LWMI_DEVICE_ID_CPU 0x01 + +#define LWMI_FEATURE_ID_CPU_SPPT 0x01 +#define LWMI_FEATURE_ID_CPU_SPL 0x02 +#define LWMI_FEATURE_ID_CPU_FPPT 0x03 + +#define LWMI_TYPE_ID_NONE 0x00 + +#define LWMI_FEATURE_VALUE_GET 17 +#define LWMI_FEATURE_VALUE_SET 18 + +#define LWMI_ATTR_DEV_ID_MASK GENMASK(31, 24) +#define LWMI_ATTR_FEAT_ID_MASK GENMASK(23, 16) +#define LWMI_ATTR_MODE_ID_MASK GENMASK(15, 8) +#define LWMI_ATTR_TYPE_ID_MASK GENMASK(7, 0) + +#define LWMI_OM_FW_ATTR_BASE_PATH "lenovo-wmi-other" + +static BLOCKING_NOTIFIER_HEAD(om_chain_head); +static DEFINE_IDA(lwmi_om_ida); + +enum attribute_property { + DEFAULT_VAL, + MAX_VAL, + MIN_VAL, + STEP_VAL, + SUPPORTED, +}; + +struct lwmi_om_priv { + struct component_master_ops *ops; + struct cd01_list *cd01_list; /* only valid after capdata01 bind */ + struct device *fw_attr_dev; + struct kset *fw_attr_kset; + struct notifier_block nb; + struct wmi_device *wdev; + int ida_id; +}; + +struct tunable_attr_01 { + struct capdata01 *capdata; + struct device *dev; + u32 feature_id; + u32 device_id; + u32 type_id; +}; + +static struct tunable_attr_01 ppt_pl1_spl = { + .device_id = LWMI_DEVICE_ID_CPU, + .feature_id = LWMI_FEATURE_ID_CPU_SPL, + .type_id = LWMI_TYPE_ID_NONE, +}; + +static struct tunable_attr_01 ppt_pl2_sppt = { + .device_id = LWMI_DEVICE_ID_CPU, + .feature_id = LWMI_FEATURE_ID_CPU_SPPT, + .type_id = LWMI_TYPE_ID_NONE, +}; + +static struct tunable_attr_01 ppt_pl3_fppt = { + .device_id = LWMI_DEVICE_ID_CPU, + .feature_id = LWMI_FEATURE_ID_CPU_FPPT, + .type_id = LWMI_TYPE_ID_NONE, +}; + +struct capdata01_attr_group { + const struct attribute_group *attr_group; + struct tunable_attr_01 *tunable_attr; +}; + +/** + * lwmi_om_register_notifier() - Add a notifier to the blocking notifier chain + * @nb: The notifier_block struct to register + * + * Call blocking_notifier_chain_register to register the notifier block to the + * lenovo-wmi-other driver notifier chain. + * + * Return: 0 on success, %-EEXIST on error. + */ +int lwmi_om_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&om_chain_head, nb); +} +EXPORT_SYMBOL_NS_GPL(lwmi_om_register_notifier, "LENOVO_WMI_OTHER"); + +/** + * lwmi_om_unregister_notifier() - Remove a notifier from the blocking notifier + * chain. + * @nb: The notifier_block struct to register + * + * Call blocking_notifier_chain_unregister to unregister the notifier block from the + * lenovo-wmi-other driver notifier chain. + * + * Return: 0 on success, %-ENOENT on error. 
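Each tunable is addressed by a 32-bit attribute ID packed from the four masks defined earlier in this file (device, feature, thermal mode, type). As a worked illustration inside this driver's context, the CPU sustained power limit in the custom thermal mode packs as:

static u32 example_cpu_spl_custom_id(void)
{
	/* device 0x01, feature 0x02 (SPL), mode 0xFF (custom), type 0x00 */
	return FIELD_PREP(LWMI_ATTR_DEV_ID_MASK, LWMI_DEVICE_ID_CPU) |
	       FIELD_PREP(LWMI_ATTR_FEAT_ID_MASK, LWMI_FEATURE_ID_CPU_SPL) |
	       FIELD_PREP(LWMI_ATTR_MODE_ID_MASK, LWMI_GZ_THERMAL_MODE_CUSTOM) |
	       FIELD_PREP(LWMI_ATTR_TYPE_ID_MASK, LWMI_TYPE_ID_NONE);
	/* == 0x0102FF00 */
}

This is the value the capdata01 and current_value accessors below hand to lwmi_cd01_get_data() and to the FEATURE_VALUE_GET/SET WMI methods.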
+ */ +int lwmi_om_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&om_chain_head, nb); +} +EXPORT_SYMBOL_NS_GPL(lwmi_om_unregister_notifier, "LENOVO_WMI_OTHER"); + +/** + * devm_lwmi_om_unregister_notifier() - Remove a notifier from the blocking + * notifier chain. + * @data: Void pointer to the notifier_block struct to register. + * + * Call lwmi_om_unregister_notifier to unregister the notifier block from the + * lenovo-wmi-other driver notifier chain. + * + * Return: 0 on success, %-ENOENT on error. + */ +static void devm_lwmi_om_unregister_notifier(void *data) +{ + struct notifier_block *nb = data; + + lwmi_om_unregister_notifier(nb); +} + +/** + * devm_lwmi_om_register_notifier() - Add a notifier to the blocking notifier + * chain. + * @dev: The parent device of the notifier_block struct. + * @nb: The notifier_block struct to register + * + * Call lwmi_om_register_notifier to register the notifier block to the + * lenovo-wmi-other driver notifier chain. Then add devm_lwmi_om_unregister_notifier + * as a device managed action to automatically unregister the notifier block + * upon parent device removal. + * + * Return: 0 on success, or an error code. + */ +int devm_lwmi_om_register_notifier(struct device *dev, + struct notifier_block *nb) +{ + int ret; + + ret = lwmi_om_register_notifier(nb); + if (ret < 0) + return ret; + + return devm_add_action_or_reset(dev, devm_lwmi_om_unregister_notifier, + nb); +} +EXPORT_SYMBOL_NS_GPL(devm_lwmi_om_register_notifier, "LENOVO_WMI_OTHER"); + +/** + * lwmi_om_notifier_call() - Call functions for the notifier call chain. + * @mode: Pointer to a thermal mode enum to retrieve the data from. + * + * Call blocking_notifier_call_chain to retrieve the thermal mode from the + * lenovo-wmi-gamezone driver. + * + * Return: 0 on success, or an error code. + */ +static int lwmi_om_notifier_call(enum thermal_mode *mode) +{ + int ret; + + ret = blocking_notifier_call_chain(&om_chain_head, + LWMI_GZ_GET_THERMAL_MODE, &mode); + if ((ret & ~NOTIFY_STOP_MASK) != NOTIFY_OK) + return -EINVAL; + + return 0; +} + +/* Attribute Methods */ + +/** + * int_type_show() - Emit the data type for an integer attribute + * @kobj: Pointer to the driver object. + * @kattr: Pointer to the attribute calling this function. + * @buf: The buffer to write to. + * + * Return: Number of characters written to buf. + */ +static ssize_t int_type_show(struct kobject *kobj, struct kobj_attribute *kattr, + char *buf) +{ + return sysfs_emit(buf, "integer\n"); +} + +/** + * attr_capdata01_show() - Get the value of the specified attribute property + * + * @kobj: Pointer to the driver object. + * @kattr: Pointer to the attribute calling this function. + * @buf: The buffer to write to. + * @tunable_attr: The attribute to be read. + * @prop: The property of this attribute to be read. + * + * Retrieves the given property from the capability data 01 struct for the + * specified attribute's "custom" thermal mode. This function is intended + * to be generic so it can be called from any integer attributes "_show" + * function. + * + * If the WMI is success the sysfs attribute is notified. + * + * Return: Either number of characters written to buf, or an error code. 
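lwmi_om_notifier_call() above passes the address of its mode pointer into the chain, so the gamezone listener receives an enum thermal_mode ** and writes through two levels of indirection; masking off NOTIFY_STOP_MASK then tells the caller whether any listener actually answered. A reduced, generic sketch of both ends of that handoff (placeholder names, not part of the interface):

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);

/* Caller side: ask whoever is listening to fill in *out. */
static int example_query(int *out)
{
	int ret = blocking_notifier_call_chain(&example_chain, 0, &out);

	/* NOTIFY_OK and NOTIFY_STOP both mean a listener wrote *out. */
	if ((ret & ~NOTIFY_STOP_MASK) != NOTIFY_OK)
		return -EINVAL;

	return 0;
}

/* Listener side: data is a pointer to the caller's pointer. */
static int example_answer(struct notifier_block *nb, unsigned long cmd,
			  void *data)
{
	int **out = data;

	**out = 42; /* the listener's current value */

	return NOTIFY_OK;
}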
+ */ +static ssize_t attr_capdata01_show(struct kobject *kobj, + struct kobj_attribute *kattr, char *buf, + struct tunable_attr_01 *tunable_attr, + enum attribute_property prop) +{ + struct lwmi_om_priv *priv = dev_get_drvdata(tunable_attr->dev); + struct capdata01 capdata; + u32 attribute_id; + int value, ret; + + attribute_id = + FIELD_PREP(LWMI_ATTR_DEV_ID_MASK, tunable_attr->device_id) | + FIELD_PREP(LWMI_ATTR_FEAT_ID_MASK, tunable_attr->feature_id) | + FIELD_PREP(LWMI_ATTR_MODE_ID_MASK, + LWMI_GZ_THERMAL_MODE_CUSTOM) | + FIELD_PREP(LWMI_ATTR_TYPE_ID_MASK, tunable_attr->type_id); + + ret = lwmi_cd01_get_data(priv->cd01_list, attribute_id, &capdata); + if (ret) + return ret; + + switch (prop) { + case DEFAULT_VAL: + value = capdata.default_value; + break; + case MAX_VAL: + value = capdata.max_value; + break; + case MIN_VAL: + value = capdata.min_value; + break; + case STEP_VAL: + value = capdata.step; + break; + default: + return -EINVAL; + } + + return sysfs_emit(buf, "%d\n", value); +} + +/** + * attr_current_value_store() - Set the current value of the given attribute + * @kobj: Pointer to the driver object. + * @kattr: Pointer to the attribute calling this function. + * @buf: The buffer to read from, this is parsed to `int` type. + * @count: Required by sysfs attribute macros, pass in from the callee attr. + * @tunable_attr: The attribute to be stored. + * + * Sets the value of the given attribute when operating under the "custom" + * smartfan profile. The current smartfan profile is retrieved from the + * lenovo-wmi-gamezone driver and error is returned if the result is not + * "custom". This function is intended to be generic so it can be called from + * any integer attribute's "_store" function. The integer to be sent to the WMI + * method is range checked and an error code is returned if out of range. + * + * If the value is valid and WMI is success, then the sysfs attribute is + * notified. + * + * Return: Either count, or an error code. + */ +static ssize_t attr_current_value_store(struct kobject *kobj, + struct kobj_attribute *kattr, + const char *buf, size_t count, + struct tunable_attr_01 *tunable_attr) +{ + struct lwmi_om_priv *priv = dev_get_drvdata(tunable_attr->dev); + struct wmi_method_args_32 args; + struct capdata01 capdata; + enum thermal_mode mode; + u32 attribute_id; + u32 value; + int ret; + + ret = lwmi_om_notifier_call(&mode); + if (ret) + return ret; + + if (mode != LWMI_GZ_THERMAL_MODE_CUSTOM) + return -EBUSY; + + attribute_id = + FIELD_PREP(LWMI_ATTR_DEV_ID_MASK, tunable_attr->device_id) | + FIELD_PREP(LWMI_ATTR_FEAT_ID_MASK, tunable_attr->feature_id) | + FIELD_PREP(LWMI_ATTR_MODE_ID_MASK, mode) | + FIELD_PREP(LWMI_ATTR_TYPE_ID_MASK, tunable_attr->type_id); + + ret = lwmi_cd01_get_data(priv->cd01_list, attribute_id, &capdata); + if (ret) + return ret; + + ret = kstrtouint(buf, 10, &value); + if (ret) + return ret; + + if (value < capdata.min_value || value > capdata.max_value) + return -EINVAL; + + args.arg0 = attribute_id; + args.arg1 = value; + + ret = lwmi_dev_evaluate_int(priv->wdev, 0x0, LWMI_FEATURE_VALUE_SET, + (unsigned char *)&args, sizeof(args), NULL); + if (ret) + return ret; + + return count; +}; + +/** + * attr_current_value_show() - Get the current value of the given attribute + * @kobj: Pointer to the driver object. + * @kattr: Pointer to the attribute calling this function. + * @buf: The buffer to write to. + * @tunable_attr: The attribute to be read. + * + * Retrieves the value of the given attribute for the current smartfan profile. 
+ * The current smartfan profile is retrieved from the lenovo-wmi-gamezone driver. + * This function is intended to be generic so it can be called from any integer + * attribute's "_show" function. + * + * If the WMI is success the sysfs attribute is notified. + * + * Return: Either number of characters written to buf, or an error code. + */ +static ssize_t attr_current_value_show(struct kobject *kobj, + struct kobj_attribute *kattr, char *buf, + struct tunable_attr_01 *tunable_attr) +{ + struct lwmi_om_priv *priv = dev_get_drvdata(tunable_attr->dev); + struct wmi_method_args_32 args; + enum thermal_mode mode; + u32 attribute_id; + int retval; + int ret; + + ret = lwmi_om_notifier_call(&mode); + if (ret) + return ret; + + attribute_id = + FIELD_PREP(LWMI_ATTR_DEV_ID_MASK, tunable_attr->device_id) | + FIELD_PREP(LWMI_ATTR_FEAT_ID_MASK, tunable_attr->feature_id) | + FIELD_PREP(LWMI_ATTR_MODE_ID_MASK, mode) | + FIELD_PREP(LWMI_ATTR_TYPE_ID_MASK, tunable_attr->type_id); + + args.arg0 = attribute_id; + + ret = lwmi_dev_evaluate_int(priv->wdev, 0x0, LWMI_FEATURE_VALUE_GET, + (unsigned char *)&args, sizeof(args), + &retval); + if (ret) + return ret; + + return sysfs_emit(buf, "%d\n", retval); +} + +/* Lenovo WMI Other Mode Attribute macros */ +#define __LWMI_ATTR_RO(_func, _name) \ + { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = _func##_##_name##_show, \ + } + +#define __LWMI_ATTR_RO_AS(_name, _show) \ + { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = _show, \ + } + +#define __LWMI_ATTR_RW(_func, _name) \ + __ATTR(_name, 0644, _func##_##_name##_show, _func##_##_name##_store) + +/* Shows a formatted static variable */ +#define __LWMI_ATTR_SHOW_FMT(_prop, _attrname, _fmt, _val) \ + static ssize_t _attrname##_##_prop##_show( \ + struct kobject *kobj, struct kobj_attribute *kattr, char *buf) \ + { \ + return sysfs_emit(buf, _fmt, _val); \ + } \ + static struct kobj_attribute attr_##_attrname##_##_prop = \ + __LWMI_ATTR_RO(_attrname, _prop) + +/* Attribute current value read/write */ +#define __LWMI_TUNABLE_CURRENT_VALUE_CAP01(_attrname) \ + static ssize_t _attrname##_current_value_store( \ + struct kobject *kobj, struct kobj_attribute *kattr, \ + const char *buf, size_t count) \ + { \ + return attr_current_value_store(kobj, kattr, buf, count, \ + &_attrname); \ + } \ + static ssize_t _attrname##_current_value_show( \ + struct kobject *kobj, struct kobj_attribute *kattr, char *buf) \ + { \ + return attr_current_value_show(kobj, kattr, buf, &_attrname); \ + } \ + static struct kobj_attribute attr_##_attrname##_current_value = \ + __LWMI_ATTR_RW(_attrname, current_value) + +/* Attribute property read only */ +#define __LWMI_TUNABLE_RO_CAP01(_prop, _attrname, _prop_type) \ + static ssize_t _attrname##_##_prop##_show( \ + struct kobject *kobj, struct kobj_attribute *kattr, char *buf) \ + { \ + return attr_capdata01_show(kobj, kattr, buf, &_attrname, \ + _prop_type); \ + } \ + static struct kobj_attribute attr_##_attrname##_##_prop = \ + __LWMI_ATTR_RO(_attrname, _prop) + +#define LWMI_ATTR_GROUP_TUNABLE_CAP01(_attrname, _fsname, _dispname) \ + __LWMI_TUNABLE_CURRENT_VALUE_CAP01(_attrname); \ + __LWMI_TUNABLE_RO_CAP01(default_value, _attrname, DEFAULT_VAL); \ + __LWMI_ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ + __LWMI_TUNABLE_RO_CAP01(max_value, _attrname, MAX_VAL); \ + __LWMI_TUNABLE_RO_CAP01(min_value, _attrname, MIN_VAL); \ + __LWMI_TUNABLE_RO_CAP01(scalar_increment, _attrname, STEP_VAL); \ + static struct kobj_attribute 
attr_##_attrname##_type = \ + __LWMI_ATTR_RO_AS(type, int_type_show); \ + static struct attribute *_attrname##_attrs[] = { \ + &attr_##_attrname##_current_value.attr, \ + &attr_##_attrname##_default_value.attr, \ + &attr_##_attrname##_display_name.attr, \ + &attr_##_attrname##_max_value.attr, \ + &attr_##_attrname##_min_value.attr, \ + &attr_##_attrname##_scalar_increment.attr, \ + &attr_##_attrname##_type.attr, \ + NULL, \ + }; \ + static const struct attribute_group _attrname##_attr_group = { \ + .name = _fsname, .attrs = _attrname##_attrs \ + } + +LWMI_ATTR_GROUP_TUNABLE_CAP01(ppt_pl1_spl, "ppt_pl1_spl", + "Set the CPU sustained power limit"); +LWMI_ATTR_GROUP_TUNABLE_CAP01(ppt_pl2_sppt, "ppt_pl2_sppt", + "Set the CPU slow package power tracking limit"); +LWMI_ATTR_GROUP_TUNABLE_CAP01(ppt_pl3_fppt, "ppt_pl3_fppt", + "Set the CPU fast package power tracking limit"); + +static struct capdata01_attr_group cd01_attr_groups[] = { + { &ppt_pl1_spl_attr_group, &ppt_pl1_spl }, + { &ppt_pl2_sppt_attr_group, &ppt_pl2_sppt }, + { &ppt_pl3_fppt_attr_group, &ppt_pl3_fppt }, + {}, +}; + +/** + * lwmi_om_fw_attr_add() - Register all firmware_attributes_class members + * @priv: The Other Mode driver data. + * + * Return: Either 0, or an error code. + */ +static int lwmi_om_fw_attr_add(struct lwmi_om_priv *priv) +{ + unsigned int i; + int err; + + priv->ida_id = ida_alloc(&lwmi_om_ida, GFP_KERNEL); + if (priv->ida_id < 0) + return priv->ida_id; + + priv->fw_attr_dev = device_create(&firmware_attributes_class, NULL, + MKDEV(0, 0), NULL, "%s-%u", + LWMI_OM_FW_ATTR_BASE_PATH, + priv->ida_id); + if (IS_ERR(priv->fw_attr_dev)) { + err = PTR_ERR(priv->fw_attr_dev); + goto err_free_ida; + } + + priv->fw_attr_kset = kset_create_and_add("attributes", NULL, + &priv->fw_attr_dev->kobj); + if (!priv->fw_attr_kset) { + err = -ENOMEM; + goto err_destroy_classdev; + } + + for (i = 0; i < ARRAY_SIZE(cd01_attr_groups) - 1; i++) { + err = sysfs_create_group(&priv->fw_attr_kset->kobj, + cd01_attr_groups[i].attr_group); + if (err) + goto err_remove_groups; + + cd01_attr_groups[i].tunable_attr->dev = &priv->wdev->dev; + } + return 0; + +err_remove_groups: + while (i--) + sysfs_remove_group(&priv->fw_attr_kset->kobj, + cd01_attr_groups[i].attr_group); + + kset_unregister(priv->fw_attr_kset); + +err_destroy_classdev: + device_unregister(priv->fw_attr_dev); + +err_free_ida: + ida_free(&lwmi_om_ida, priv->ida_id); + return err; +} + +/** + * lwmi_om_fw_attr_remove() - Unregister all capability data attribute groups + * @priv: the lenovo-wmi-other driver data. + */ +static void lwmi_om_fw_attr_remove(struct lwmi_om_priv *priv) +{ + for (unsigned int i = 0; i < ARRAY_SIZE(cd01_attr_groups) - 1; i++) + sysfs_remove_group(&priv->fw_attr_kset->kobj, + cd01_attr_groups[i].attr_group); + + kset_unregister(priv->fw_attr_kset); + device_unregister(priv->fw_attr_dev); +} + +/** + * lwmi_om_master_bind() - Bind all components of the other mode driver + * @dev: The lenovo-wmi-other driver basic device. + * + * Call component_bind_all to bind the lenovo-wmi-capdata01 driver to the + * lenovo-wmi-other master driver. On success, assign the capability data 01 + * list pointer to the driver data struct for later access. This pointer + * is only valid while the capdata01 interface exists. Finally, register all + * firmware attribute groups. + * + * Return: 0 on success, or an error code. 
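For orientation, the aggregate-device (component) flow that ties lenovo-wmi-other to lenovo-wmi-capdata01 runs roughly as follows; the function names are the ones defined in these two files:

/*
 * 1. lwmi_cd01_probe() caches the data blocks and calls component_add(),
 *    advertising the capdata01 device as a component.
 * 2. lwmi_other_probe() builds a match with lwmi_cd01_match() and calls
 *    component_master_add_with_match().
 * 3. Once the match is satisfied, the framework calls lwmi_om_master_bind(),
 *    which runs component_bind_all(); lwmi_cd01_component_bind() hands back
 *    the cd01_list pointer, and the firmware attribute groups are created.
 * 4. On removal, lwmi_om_master_unbind() tears the attributes down and
 *    component_unbind_all() drops the cd01_list reference.
 */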
+ */ +static int lwmi_om_master_bind(struct device *dev) +{ + struct lwmi_om_priv *priv = dev_get_drvdata(dev); + struct cd01_list *tmp_list; + int ret; + + ret = component_bind_all(dev, &tmp_list); + if (ret) + return ret; + + priv->cd01_list = tmp_list; + if (!priv->cd01_list) + return -ENODEV; + + return lwmi_om_fw_attr_add(priv); +} + +/** + * lwmi_om_master_unbind() - Unbind all components of the other mode driver + * @dev: The lenovo-wmi-other driver basic device + * + * Unregister all capability data attribute groups. Then call + * component_unbind_all to unbind the lenovo-wmi-capdata01 driver from the + * lenovo-wmi-other master driver. Finally, free the IDA for this device. + */ +static void lwmi_om_master_unbind(struct device *dev) +{ + struct lwmi_om_priv *priv = dev_get_drvdata(dev); + + lwmi_om_fw_attr_remove(priv); + component_unbind_all(dev, NULL); +} + +static const struct component_master_ops lwmi_om_master_ops = { + .bind = lwmi_om_master_bind, + .unbind = lwmi_om_master_unbind, +}; + +static int lwmi_other_probe(struct wmi_device *wdev, const void *context) +{ + struct component_match *master_match = NULL; + struct lwmi_om_priv *priv; + + priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->wdev = wdev; + dev_set_drvdata(&wdev->dev, priv); + + component_match_add(&wdev->dev, &master_match, lwmi_cd01_match, NULL); + if (IS_ERR(master_match)) + return PTR_ERR(master_match); + + return component_master_add_with_match(&wdev->dev, &lwmi_om_master_ops, + master_match); +} + +static void lwmi_other_remove(struct wmi_device *wdev) +{ + struct lwmi_om_priv *priv = dev_get_drvdata(&wdev->dev); + + component_master_del(&wdev->dev, &lwmi_om_master_ops); + ida_free(&lwmi_om_ida, priv->ida_id); +} + +static const struct wmi_device_id lwmi_other_id_table[] = { + { LENOVO_OTHER_MODE_GUID, NULL }, + {} +}; + +static struct wmi_driver lwmi_other_driver = { + .driver = { + .name = "lenovo_wmi_other", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .id_table = lwmi_other_id_table, + .probe = lwmi_other_probe, + .remove = lwmi_other_remove, + .no_singleton = true, +}; + +module_wmi_driver(lwmi_other_driver); + +MODULE_IMPORT_NS("LENOVO_WMI_CD01"); +MODULE_IMPORT_NS("LENOVO_WMI_HELPERS"); +MODULE_DEVICE_TABLE(wmi, lwmi_other_id_table); +MODULE_AUTHOR("Derek J. Clark <derekjohn.clark@gmail.com>"); +MODULE_DESCRIPTION("Lenovo Other Mode WMI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/lenovo/wmi-other.h b/drivers/platform/x86/lenovo/wmi-other.h new file mode 100644 index 000000000000..8ebf5602bb99 --- /dev/null +++ b/drivers/platform/x86/lenovo/wmi-other.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* Copyright (C) 2025 Derek J. 
Clark <derekjohn.clark@gmail.com> */ + +#ifndef _LENOVO_WMI_OTHER_H_ +#define _LENOVO_WMI_OTHER_H_ + +struct device; +struct notifier_block; + +int lwmi_om_register_notifier(struct notifier_block *nb); +int lwmi_om_unregister_notifier(struct notifier_block *nb); +int devm_lwmi_om_register_notifier(struct device *dev, + struct notifier_block *nb); + +#endif /* !_LENOVO_WMI_OTHER_H_ */ diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo/ymc.c index 470d53e3c9d2..470d53e3c9d2 100644 --- a/drivers/platform/x86/lenovo-ymc.c +++ b/drivers/platform/x86/lenovo/ymc.c diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c index 25933cd018d1..1b33c977f6d7 100644 --- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c +++ b/drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c @@ -21,7 +21,7 @@ #include <linux/time.h> #include <linux/types.h> #include <linux/workqueue.h> -#include "serdev_helpers.h" +#include "../serdev_helpers.h" #define YT2_1380_FC_PDEV_NAME "lenovo-yoga-tab2-pro-1380-fastcharger" #define YT2_1380_FC_SERDEV_CTRL "serial0" @@ -240,30 +240,25 @@ static int yt2_1380_fc_pdev_probe(struct platform_device *pdev) int ret; /* Register pinctrl mappings for setting the UART3 pins mode */ - ret = pinctrl_register_mappings(yt2_1380_fc_pinctrl_map, - ARRAY_SIZE(yt2_1380_fc_pinctrl_map)); + ret = devm_pinctrl_register_mappings(&pdev->dev, yt2_1380_fc_pinctrl_map, + ARRAY_SIZE(yt2_1380_fc_pinctrl_map)); if (ret) return ret; /* And create the serdev to talk to the charger over the UART3 pins */ ctrl_dev = get_serdev_controller("PNP0501", "1", 0, YT2_1380_FC_SERDEV_CTRL); - if (IS_ERR(ctrl_dev)) { - ret = PTR_ERR(ctrl_dev); - goto out_pinctrl_unregister_mappings; - } + if (IS_ERR(ctrl_dev)) + return PTR_ERR(ctrl_dev); serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev)); put_device(ctrl_dev); - if (!serdev) { - ret = -ENOMEM; - goto out_pinctrl_unregister_mappings; - } + if (!serdev) + return -ENOMEM; ret = serdev_device_add(serdev); if (ret) { - dev_err_probe(&pdev->dev, ret, "adding serdev\n"); serdev_device_put(serdev); - goto out_pinctrl_unregister_mappings; + return dev_err_probe(&pdev->dev, ret, "adding serdev\n"); } /* @@ -273,20 +268,15 @@ static int yt2_1380_fc_pdev_probe(struct platform_device *pdev) ret = device_driver_attach(&yt2_1380_fc_serdev_driver.driver, &serdev->dev); if (ret) { /* device_driver_attach() maps EPROBE_DEFER to EAGAIN, map it back */ - ret = (ret == -EAGAIN) ? -EPROBE_DEFER : ret; - dev_err_probe(&pdev->dev, ret, "attaching serdev driver\n"); - goto out_serdev_device_remove; + serdev_device_remove(serdev); + return dev_err_probe(&pdev->dev, + (ret == -EAGAIN) ? 
-EPROBE_DEFER : ret, + "attaching serdev driver\n"); } /* So that yt2_1380_fc_pdev_remove() can remove the serdev */ platform_set_drvdata(pdev, serdev); return 0; - -out_serdev_device_remove: - serdev_device_remove(serdev); -out_pinctrl_unregister_mappings: - pinctrl_unregister_mappings(yt2_1380_fc_pinctrl_map); - return ret; } static void yt2_1380_fc_pdev_remove(struct platform_device *pdev) @@ -294,7 +284,6 @@ static void yt2_1380_fc_pdev_remove(struct platform_device *pdev) struct serdev_device *serdev = platform_get_drvdata(pdev); serdev_device_remove(serdev); - pinctrl_unregister_mappings(yt2_1380_fc_pinctrl_map); } static struct platform_driver yt2_1380_fc_pdev_driver = { diff --git a/drivers/platform/x86/lenovo-yogabook.c b/drivers/platform/x86/lenovo/yogabook.c index 31b298dc5046..31b298dc5046 100644 --- a/drivers/platform/x86/lenovo-yogabook.c +++ b/drivers/platform/x86/lenovo/yogabook.c diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c index 06759036945d..eb076bb4099b 100644 --- a/drivers/platform/x86/oxpec.c +++ b/drivers/platform/x86/oxpec.c @@ -58,7 +58,8 @@ enum oxp_board { oxp_mini_amd_a07, oxp_mini_amd_pro, oxp_x1, - oxp_g1, + oxp_g1_i, + oxp_g1_a, }; static enum oxp_board board; @@ -247,14 +248,14 @@ static const struct dmi_system_id dmi_table[] = { DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER G1 A"), }, - .driver_data = (void *)oxp_g1, + .driver_data = (void *)oxp_g1_a, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER G1 i"), }, - .driver_data = (void *)oxp_g1, + .driver_data = (void *)oxp_g1_i, }, { .matches = { @@ -294,6 +295,13 @@ static const struct dmi_system_id dmi_table[] = { { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Mini Pro"), + }, + .driver_data = (void *)oxp_x1, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Pro"), }, .driver_data = (void *)oxp_x1, @@ -352,7 +360,8 @@ static umode_t tt_toggle_is_visible(struct kobject *kobj, case oxp_mini_amd_a07: case oxp_mini_amd_pro: case oxp_x1: - case oxp_g1: + case oxp_g1_i: + case oxp_g1_a: return attr->mode; default: break; @@ -381,12 +390,13 @@ static ssize_t tt_toggle_store(struct device *dev, case aok_zoe_a1: case oxp_fly: case oxp_mini_amd_pro: + case oxp_g1_a: reg = OXP_TURBO_SWITCH_REG; mask = OXP_TURBO_TAKE_VAL; break; case oxp_2: case oxp_x1: - case oxp_g1: + case oxp_g1_i: reg = OXP_2_TURBO_SWITCH_REG; mask = OXP_TURBO_TAKE_VAL; break; @@ -426,12 +436,13 @@ static ssize_t tt_toggle_show(struct device *dev, case aok_zoe_a1: case oxp_fly: case oxp_mini_amd_pro: + case oxp_g1_a: reg = OXP_TURBO_SWITCH_REG; mask = OXP_TURBO_TAKE_VAL; break; case oxp_2: case oxp_x1: - case oxp_g1: + case oxp_g1_i: reg = OXP_2_TURBO_SWITCH_REG; mask = OXP_TURBO_TAKE_VAL; break; @@ -520,7 +531,8 @@ static bool oxp_psy_ext_supported(void) { switch (board) { case oxp_x1: - case oxp_g1: + case oxp_g1_i: + case oxp_g1_a: case oxp_fly: return true; default: @@ -659,7 +671,8 @@ static int oxp_pwm_enable(void) case oxp_mini_amd_a07: case oxp_mini_amd_pro: case oxp_x1: - case oxp_g1: + case oxp_g1_i: + case oxp_g1_a: return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL); default: return -EINVAL; @@ -686,7 +699,8 @@ static int oxp_pwm_disable(void) case oxp_mini_amd_a07: case oxp_mini_amd_pro: case oxp_x1: - case oxp_g1: + case oxp_g1_i: + case oxp_g1_a: return 
write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO); default: return -EINVAL; @@ -713,7 +727,8 @@ static int oxp_pwm_read(long *val) case oxp_mini_amd_a07: case oxp_mini_amd_pro: case oxp_x1: - case oxp_g1: + case oxp_g1_i: + case oxp_g1_a: return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val); default: return -EOPNOTSUPP; @@ -742,7 +757,7 @@ static int oxp_pwm_fan_speed(long *val) return read_from_ec(ORANGEPI_SENSOR_FAN_REG, 2, val); case oxp_2: case oxp_x1: - case oxp_g1: + case oxp_g1_i: return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val); case aok_zoe_a1: case aya_neo_2: @@ -757,6 +772,7 @@ static int oxp_pwm_fan_speed(long *val) case oxp_mini_amd: case oxp_mini_amd_a07: case oxp_mini_amd_pro: + case oxp_g1_a: return read_from_ec(OXP_SENSOR_FAN_REG, 2, val); default: return -EOPNOTSUPP; @@ -776,7 +792,7 @@ static int oxp_pwm_input_write(long val) return write_to_ec(ORANGEPI_SENSOR_PWM_REG, val); case oxp_2: case oxp_x1: - case oxp_g1: + case oxp_g1_i: /* scale to range [0-184] */ val = (val * 184) / 255; return write_to_ec(OXP_SENSOR_PWM_REG, val); @@ -796,6 +812,7 @@ static int oxp_pwm_input_write(long val) case aok_zoe_a1: case oxp_fly: case oxp_mini_amd_pro: + case oxp_g1_a: return write_to_ec(OXP_SENSOR_PWM_REG, val); default: return -EOPNOTSUPP; @@ -816,7 +833,7 @@ static int oxp_pwm_input_read(long *val) break; case oxp_2: case oxp_x1: - case oxp_g1: + case oxp_g1_i: ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val); if (ret) return ret; @@ -842,6 +859,7 @@ static int oxp_pwm_input_read(long *val) case aok_zoe_a1: case oxp_fly: case oxp_mini_amd_pro: + case oxp_g1_a: default: ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val); if (ret) diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index decde4c9a3d9..9d43a12db73c 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -16,6 +16,7 @@ #include <linux/leds.h> #include <linux/dmi.h> #include <linux/platform_device.h> +#include <linux/power_supply.h> #include <linux/rfkill.h> #include <linux/acpi.h> #include <linux/seq_file.h> @@ -23,6 +24,7 @@ #include <linux/ctype.h> #include <linux/efi.h> #include <linux/suspend.h> +#include <acpi/battery.h> #include <acpi/video.h> /* @@ -348,6 +350,8 @@ struct samsung_laptop { struct notifier_block pm_nb; + struct acpi_battery_hook battery_hook; + bool handle_backlight; bool has_stepping_quirk; @@ -697,6 +701,11 @@ static ssize_t set_performance_level(struct device *dev, static DEVICE_ATTR(performance_level, 0644, get_performance_level, set_performance_level); +static void show_battery_life_extender_deprecation_warning(struct device *dev) +{ + dev_warn_once(dev, "battery_life_extender attribute has been deprecated, see charge_types.\n"); +} + static int read_battery_life_extender(struct samsung_laptop *samsung) { const struct sabi_commands *commands = &samsung->config->commands; @@ -739,6 +748,8 @@ static ssize_t get_battery_life_extender(struct device *dev, struct samsung_laptop *samsung = dev_get_drvdata(dev); int ret; + show_battery_life_extender_deprecation_warning(dev); + ret = read_battery_life_extender(samsung); if (ret < 0) return ret; @@ -753,6 +764,8 @@ static ssize_t set_battery_life_extender(struct device *dev, struct samsung_laptop *samsung = dev_get_drvdata(dev); int ret, value; + show_battery_life_extender_deprecation_warning(dev); + if (!count || kstrtoint(buf, 0, &value) != 0) return -EINVAL; @@ -766,6 +779,84 @@ static ssize_t set_battery_life_extender(struct device *dev, static 
DEVICE_ATTR(battery_life_extender, 0644, get_battery_life_extender, set_battery_life_extender); +static int samsung_psy_ext_set_prop(struct power_supply *psy, + const struct power_supply_ext *ext, + void *ext_data, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct samsung_laptop *samsung = ext_data; + + switch (val->intval) { + case POWER_SUPPLY_CHARGE_TYPE_LONGLIFE: + return write_battery_life_extender(samsung, 1); + case POWER_SUPPLY_CHARGE_TYPE_STANDARD: + return write_battery_life_extender(samsung, 0); + default: + return -EINVAL; + } +} + +static int samsung_psy_ext_get_prop(struct power_supply *psy, + const struct power_supply_ext *ext, + void *ext_data, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct samsung_laptop *samsung = ext_data; + int ret; + + ret = read_battery_life_extender(samsung); + if (ret < 0) + return ret; + + if (ret == 1) + val->intval = POWER_SUPPLY_CHARGE_TYPE_LONGLIFE; + else + val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD; + + return 0; +} + +static int samsung_psy_prop_is_writeable(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp) +{ + return true; +} + +static const enum power_supply_property samsung_power_supply_props[] = { + POWER_SUPPLY_PROP_CHARGE_TYPES, +}; + +static const struct power_supply_ext samsung_battery_ext = { + .name = "samsung_laptop", + .properties = samsung_power_supply_props, + .num_properties = ARRAY_SIZE(samsung_power_supply_props), + .charge_types = (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) | + BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)), + .get_property = samsung_psy_ext_get_prop, + .set_property = samsung_psy_ext_set_prop, + .property_is_writeable = samsung_psy_prop_is_writeable, +}; + +static int samsung_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook) +{ + struct samsung_laptop *samsung = container_of(hook, struct samsung_laptop, battery_hook); + + return power_supply_register_extension(battery, &samsung_battery_ext, + &samsung->platform_device->dev, samsung); +} + +static int samsung_battery_remove(struct power_supply *battery, + struct acpi_battery_hook *hook) +{ + power_supply_unregister_extension(battery, &samsung_battery_ext); + + return 0; +} + static int read_usb_charge(struct samsung_laptop *samsung) { const struct sabi_commands *commands = &samsung->config->commands; @@ -1043,6 +1134,21 @@ static int __init samsung_lid_handling_init(struct samsung_laptop *samsung) return retval; } +static int __init samsung_battery_hook_init(struct samsung_laptop *samsung) +{ + int retval = 0; + + if (samsung->config->commands.get_battery_life_extender != 0xFFFF) { + samsung->battery_hook.add_battery = samsung_battery_add; + samsung->battery_hook.remove_battery = samsung_battery_remove; + samsung->battery_hook.name = "Samsung Battery Extension"; + retval = devm_battery_hook_register(&samsung->platform_device->dev, + &samsung->battery_hook); + } + + return retval; +} + static int kbd_backlight_enable(struct samsung_laptop *samsung) { const struct sabi_commands *commands = &samsung->config->commands; @@ -1604,6 +1710,10 @@ static int __init samsung_init(void) if (ret) goto error_lid_handling; + ret = samsung_battery_hook_init(samsung); + if (ret) + goto error_lid_handling; + samsung_debugfs_init(samsung); samsung->pm_nb.notifier_call = samsung_pm_notification; diff --git a/drivers/platform/x86/silicom-platform.c b/drivers/platform/x86/silicom-platform.c index 021f3fed197a..63b5da410ed5 100644 
--- a/drivers/platform/x86/silicom-platform.c +++ b/drivers/platform/x86/silicom-platform.c @@ -248,13 +248,9 @@ static int silicom_gpio_direction_input(struct gpio_chip *gc, static int silicom_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) { - int direction = silicom_gpio_get_direction(gc, offset); u8 *channels = gpiochip_get_data(gc); int channel = channels[offset]; - if (direction == GPIO_LINE_DIRECTION_IN) - return -EPERM; - silicom_mec_port_set(channel, !value); return 0; diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 03aecf8bb7f8..4e86a422f05f 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -20,6 +20,7 @@ #include <linux/bits.h> #include <linux/build_bug.h> #include <linux/device.h> +#include <linux/idr.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> @@ -74,6 +75,8 @@ struct wmi_guid_count_context { int count; }; +static DEFINE_IDA(wmi_ida); + /* * If the GUID data block is marked as expensive, we must enable and * explicitily disable data collection. @@ -984,6 +987,19 @@ static int guid_count(const guid_t *guid) return context.count; } +static int wmi_dev_set_name(struct wmi_block *wblock, int count) +{ + if (IS_ENABLED(CONFIG_ACPI_WMI_LEGACY_DEVICE_NAMES)) { + if (count) + return dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, + count); + else + return dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid); + } + + return dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, wblock->dev.dev.id); +} + static int wmi_create_device(struct device *wmi_bus_dev, struct wmi_block *wblock, struct acpi_device *device) @@ -992,7 +1008,7 @@ static int wmi_create_device(struct device *wmi_bus_dev, struct acpi_device_info *info; acpi_handle method_handle; acpi_status status; - int count; + int count, ret; if (wblock->gblock.flags & ACPI_WMI_EVENT) { wblock->dev.dev.type = &wmi_type_event; @@ -1063,11 +1079,18 @@ static int wmi_create_device(struct device *wmi_bus_dev, if (count < 0) return count; - if (count) { - dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, count); + if (count) set_bit(WMI_GUID_DUPLICATED, &wblock->flags); - } else { - dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid); + + ret = ida_alloc(&wmi_ida, GFP_KERNEL); + if (ret < 0) + return ret; + + wblock->dev.dev.id = ret; + ret = wmi_dev_set_name(wblock, count); + if (ret < 0) { + ida_free(&wmi_ida, wblock->dev.dev.id); + return ret; } device_initialize(&wblock->dev.dev); @@ -1153,6 +1176,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev) dev_err(wmi_bus_dev, "failed to register %pUL\n", &wblock->gblock.guid); + ida_free(&wmi_ida, wblock->dev.dev.id); put_device(&wblock->dev.dev); } } @@ -1252,7 +1276,10 @@ static void acpi_wmi_notify_handler(acpi_handle handle, u32 event, void *context static int wmi_remove_device(struct device *dev, void *data) { + int id = dev->id; + device_unregister(dev); + ida_free(&wmi_ida, id); return 0; } diff --git a/drivers/platform/x86/x86-android-tablets/asus.c b/drivers/platform/x86/x86-android-tablets/asus.c index 7dde63b9943f..97cd14c1fd23 100644 --- a/drivers/platform/x86/x86-android-tablets/asus.c +++ b/drivers/platform/x86/x86-android-tablets/asus.c @@ -206,24 +206,9 @@ static const struct software_node asus_tf103c_touchscreen_node = { .properties = asus_tf103c_touchscreen_props, }; -static const struct property_entry asus_tf103c_battery_props[] = { - PROPERTY_ENTRY_STRING("compatible", 
"simple-battery"), - PROPERTY_ENTRY_STRING("device-chemistry", "lithium-ion-polymer"), - PROPERTY_ENTRY_U32("precharge-current-microamp", 256000), - PROPERTY_ENTRY_U32("charge-term-current-microamp", 128000), - PROPERTY_ENTRY_U32("constant-charge-current-max-microamp", 2048000), - PROPERTY_ENTRY_U32("constant-charge-voltage-max-microvolt", 4208000), - PROPERTY_ENTRY_U32("factory-internal-resistance-micro-ohms", 150000), - { } -}; - -static const struct software_node asus_tf103c_battery_node = { - .properties = asus_tf103c_battery_props, -}; - static const struct property_entry asus_tf103c_bq24190_props[] = { PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", tusb1211_chg_det_psy, 1), - PROPERTY_ENTRY_REF("monitored-battery", &asus_tf103c_battery_node), + PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_4v2_battery_node), PROPERTY_ENTRY_U32("ti,system-minimum-microvolt", 3600000), PROPERTY_ENTRY_BOOL("omit-battery-class"), PROPERTY_ENTRY_BOOL("disable-reset"), @@ -236,7 +221,7 @@ static const struct software_node asus_tf103c_bq24190_node = { static const struct property_entry asus_tf103c_ug3105_props[] = { PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", bq24190_psy, 1), - PROPERTY_ENTRY_REF("monitored-battery", &asus_tf103c_battery_node), + PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_4v2_battery_node), PROPERTY_ENTRY_U32("upisemi,rsns-microohm", 5000), { } }; @@ -321,6 +306,6 @@ const struct x86_dev_info asus_tf103c_info __initconst = { .gpio_button = &asus_me176c_tf103c_lid, .gpio_button_count = 1, .gpiod_lookup_tables = asus_tf103c_gpios, - .bat_swnode = &asus_tf103c_battery_node, + .bat_swnode = &generic_lipo_4v2_battery_node, .modules = bq24190_modules, }; diff --git a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c index a46fa15acfb1..fe34cedb6257 100644 --- a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c +++ b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c @@ -39,6 +39,78 @@ const struct software_node fg_bq25890_supply_node = { .properties = fg_bq25890_supply_props, }; +static const u32 generic_lipo_battery_ovc_cap_celcius[] = { 25 }; + +static const u32 generic_lipo_4v2_battery_ovc_cap_table0[] = { + 4200000, 100, + 4150000, 95, + 4110000, 90, + 4075000, 85, + 4020000, 80, + 3982500, 75, + 3945000, 70, + 3907500, 65, + 3870000, 60, + 3853333, 55, + 3836667, 50, + 3820000, 45, + 3803333, 40, + 3786667, 35, + 3770000, 30, + 3750000, 25, + 3730000, 20, + 3710000, 15, + 3690000, 10, + 3610000, 5, + 3350000, 0 +}; + +static const u32 generic_lipo_hv_4v35_battery_ovc_cap_table0[] = { + 4300000, 100, + 4250000, 96, + 4200000, 91, + 4150000, 86, + 4110000, 82, + 4075000, 77, + 4020000, 73, + 3982500, 68, + 3945000, 64, + 3907500, 59, + 3870000, 55, + 3853333, 50, + 3836667, 45, + 3820000, 41, + 3803333, 36, + 3786667, 32, + 3770000, 27, + 3750000, 23, + 3730000, 18, + 3710000, 14, + 3690000, 9, + 3610000, 5, + 3350000, 0 +}; + +/* Standard LiPo (max 4.2V) settings used by most devs with a LiPo battery */ +static const struct property_entry generic_lipo_4v2_battery_props[] = { + PROPERTY_ENTRY_STRING("compatible", "simple-battery"), + PROPERTY_ENTRY_STRING("device-chemistry", "lithium-ion-polymer"), + PROPERTY_ENTRY_U32("precharge-current-microamp", 256000), + PROPERTY_ENTRY_U32("charge-term-current-microamp", 128000), + PROPERTY_ENTRY_U32("constant-charge-current-max-microamp", 2048000), + PROPERTY_ENTRY_U32("constant-charge-voltage-max-microvolt", 4208000), + 
PROPERTY_ENTRY_U32("factory-internal-resistance-micro-ohms", 150000), + PROPERTY_ENTRY_U32_ARRAY("ocv-capacity-celsius", + generic_lipo_battery_ovc_cap_celcius), + PROPERTY_ENTRY_U32_ARRAY("ocv-capacity-table-0", + generic_lipo_4v2_battery_ovc_cap_table0), + { } +}; + +const struct software_node generic_lipo_4v2_battery_node = { + .properties = generic_lipo_4v2_battery_props, +}; + /* LiPo HighVoltage (max 4.35V) settings used by most devs with a HV battery */ static const struct property_entry generic_lipo_hv_4v35_battery_props[] = { PROPERTY_ENTRY_STRING("compatible", "simple-battery"), @@ -48,6 +120,10 @@ static const struct property_entry generic_lipo_hv_4v35_battery_props[] = { PROPERTY_ENTRY_U32("constant-charge-current-max-microamp", 1856000), PROPERTY_ENTRY_U32("constant-charge-voltage-max-microvolt", 4352000), PROPERTY_ENTRY_U32("factory-internal-resistance-micro-ohms", 150000), + PROPERTY_ENTRY_U32_ARRAY("ocv-capacity-celsius", + generic_lipo_battery_ovc_cap_celcius), + PROPERTY_ENTRY_U32_ARRAY("ocv-capacity-table-0", + generic_lipo_hv_4v35_battery_ovc_cap_table0), { } }; diff --git a/drivers/platform/x86/x86-android-tablets/shared-psy-info.h b/drivers/platform/x86/x86-android-tablets/shared-psy-info.h index c2d2968cddc2..bcf9845ad275 100644 --- a/drivers/platform/x86/x86-android-tablets/shared-psy-info.h +++ b/drivers/platform/x86/x86-android-tablets/shared-psy-info.h @@ -21,6 +21,7 @@ extern const char * const bq25890_psy[]; extern const struct software_node fg_bq24190_supply_node; extern const struct software_node fg_bq25890_supply_node; +extern const struct software_node generic_lipo_4v2_battery_node; extern const struct software_node generic_lipo_hv_4v35_battery_node; extern struct bq24190_platform_data bq24190_pdata; diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index e71f0af4e378..733d81262159 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -128,6 +128,15 @@ config POWER_RESET_LINKSTATION Say Y here if you have a Buffalo LinkStation LS421D/E. +config POWER_RESET_MACSMC + tristate "Apple SMC reset/power-off driver" + depends on MFD_MACSMC + help + This driver supports reset and power-off on Apple Mac machines + that implement this functionality via the SMC. + + Say Y here if you have an Apple Silicon Mac. 
+ config POWER_RESET_MSM bool "Qualcomm MSM power-off driver" depends on ARCH_QCOM diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index 1b9b63a1a873..b7c2b5940be9 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o obj-$(CONFIG_POWER_RESET_LINKSTATION) += linkstation-poweroff.o +obj-$(CONFIG_POWER_RESET_MACSMC) += macsmc-reboot.o obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o diff --git a/drivers/power/reset/macsmc-reboot.c b/drivers/power/reset/macsmc-reboot.c new file mode 100644 index 000000000000..e9702acdd366 --- /dev/null +++ b/drivers/power/reset/macsmc-reboot.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC Reboot/Poweroff Handler + * Copyright The Asahi Linux Contributors + */ + +#include <linux/delay.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/slab.h> + +struct macsmc_reboot_nvmem { + struct nvmem_cell *shutdown_flag; + struct nvmem_cell *boot_stage; + struct nvmem_cell *boot_error_count; + struct nvmem_cell *panic_count; +}; + +static const char * const nvmem_names[] = { + "shutdown_flag", + "boot_stage", + "boot_error_count", + "panic_count", +}; + +enum boot_stage { + BOOT_STAGE_SHUTDOWN = 0x00, /* Clean shutdown */ + BOOT_STAGE_IBOOT_DONE = 0x2f, /* Last stage of bootloader */ + BOOT_STAGE_KERNEL_STARTED = 0x30, /* Normal OS booting */ +}; + +struct macsmc_reboot { + struct device *dev; + struct apple_smc *smc; + struct notifier_block reboot_notify; + + union { + struct macsmc_reboot_nvmem nvm; + struct nvmem_cell *nvm_cells[ARRAY_SIZE(nvmem_names)]; + }; +}; + +/* Helpers to read/write a u8 given a struct nvmem_cell */ +static int nvmem_cell_get_u8(struct nvmem_cell *cell) +{ + size_t len; + void *bfr; + u8 val; + + bfr = nvmem_cell_read(cell, &len); + if (IS_ERR(bfr)) + return PTR_ERR(bfr); + + if (len < 1) { + kfree(bfr); + return -EINVAL; + } + + val = *(u8 *)bfr; + kfree(bfr); + return val; +} + +static int nvmem_cell_set_u8(struct nvmem_cell *cell, u8 val) +{ + return nvmem_cell_write(cell, &val, sizeof(val)); +} + +/* + * SMC 'MBSE' key actions: + * + * 'offw' - shutdown warning + * 'slpw' - sleep warning + * 'rest' - restart warning + * 'off1' - shutdown (needs PMU bit set to stay on) + * 'susp' - suspend + * 'phra' - restart ("PE Halt Restart Action"?) 
+ * 'panb' - panic beginning + * 'pane' - panic end + */ + +static int macsmc_prepare_atomic(struct sys_off_data *data) +{ + struct macsmc_reboot *reboot = data->cb_data; + + dev_info(reboot->dev, "Preparing SMC for atomic mode\n"); + + apple_smc_enter_atomic(reboot->smc); + return NOTIFY_OK; +} + +static int macsmc_power_off(struct sys_off_data *data) +{ + struct macsmc_reboot *reboot = data->cb_data; + + dev_info(reboot->dev, "Issuing power off (off1)\n"); + + if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(off1)) < 0) { + dev_err(reboot->dev, "Failed to issue MBSE = off1 (power_off)\n"); + } else { + mdelay(100); + WARN_ONCE(1, "Unable to power off system\n"); + } + + return NOTIFY_OK; +} + +static int macsmc_restart(struct sys_off_data *data) +{ + struct macsmc_reboot *reboot = data->cb_data; + + dev_info(reboot->dev, "Issuing restart (phra)\n"); + + if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(phra)) < 0) { + dev_err(reboot->dev, "Failed to issue MBSE = phra (restart)\n"); + } else { + mdelay(100); + WARN_ONCE(1, "Unable to restart system\n"); + } + + return NOTIFY_OK; +} + +static int macsmc_reboot_notify(struct notifier_block *this, unsigned long action, void *data) +{ + struct macsmc_reboot *reboot = container_of(this, struct macsmc_reboot, reboot_notify); + u8 shutdown_flag; + u32 val; + + switch (action) { + case SYS_RESTART: + val = SMC_KEY(rest); + shutdown_flag = 0; + break; + case SYS_POWER_OFF: + val = SMC_KEY(offw); + shutdown_flag = 1; + break; + default: + return NOTIFY_DONE; + } + + dev_info(reboot->dev, "Preparing for reboot (%p4ch)\n", &val); + + /* On the Mac Mini, this will turn off the LED for power off */ + if (apple_smc_write_u32(reboot->smc, SMC_KEY(MBSE), val) < 0) + dev_err(reboot->dev, "Failed to issue MBSE = %p4ch (reboot_prepare)\n", &val); + + /* Set the boot_stage to 0, which means we're doing a clean shutdown/reboot. */ + if (reboot->nvm.boot_stage && + nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_SHUTDOWN) < 0) + dev_err(reboot->dev, "Failed to write boot_stage\n"); + + /* + * Set the PMU flag to actually reboot into the off state. + * Without this, the device will just reboot. We make it optional in case it is no longer + * necessary on newer hardware. 
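 *
 * To summarize the flow implemented above: this notifier only performs
 * the 'offw'/'rest' warning write and the NVMEM bookkeeping. The machine
 * is actually powered off or restarted later by the sys-off handlers,
 * which first put the SMC into atomic mode (macsmc_prepare_atomic()) and
 * then write 'off1' or 'phra' to MBSE.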
+ */ + if (reboot->nvm.shutdown_flag && + nvmem_cell_set_u8(reboot->nvm.shutdown_flag, shutdown_flag) < 0) + dev_err(reboot->dev, "Failed to write shutdown_flag\n"); + + return NOTIFY_OK; +} + +static void macsmc_power_init_error_counts(struct macsmc_reboot *reboot) +{ + int boot_error_count, panic_count; + + if (!reboot->nvm.boot_error_count || !reboot->nvm.panic_count) + return; + + boot_error_count = nvmem_cell_get_u8(reboot->nvm.boot_error_count); + if (boot_error_count < 0) { + dev_err(reboot->dev, "Failed to read boot_error_count (%d)\n", boot_error_count); + return; + } + + panic_count = nvmem_cell_get_u8(reboot->nvm.panic_count); + if (panic_count < 0) { + dev_err(reboot->dev, "Failed to read panic_count (%d)\n", panic_count); + return; + } + + if (!boot_error_count && !panic_count) + return; + + dev_warn(reboot->dev, "PMU logged %d boot error(s) and %d panic(s)\n", + boot_error_count, panic_count); + + if (nvmem_cell_set_u8(reboot->nvm.panic_count, 0) < 0) + dev_err(reboot->dev, "Failed to reset panic_count\n"); + if (nvmem_cell_set_u8(reboot->nvm.boot_error_count, 0) < 0) + dev_err(reboot->dev, "Failed to reset boot_error_count\n"); +} + +static int macsmc_reboot_probe(struct platform_device *pdev) +{ + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + struct macsmc_reboot *reboot; + int ret, i; + + reboot = devm_kzalloc(&pdev->dev, sizeof(*reboot), GFP_KERNEL); + if (!reboot) + return -ENOMEM; + + reboot->dev = &pdev->dev; + reboot->smc = smc; + + platform_set_drvdata(pdev, reboot); + + for (i = 0; i < ARRAY_SIZE(nvmem_names); i++) { + struct nvmem_cell *cell; + + cell = devm_nvmem_cell_get(&pdev->dev, + nvmem_names[i]); + if (IS_ERR(cell)) { + if (PTR_ERR(cell) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_warn(&pdev->dev, "Missing NVMEM cell %s (%ld)\n", + nvmem_names[i], PTR_ERR(cell)); + /* Non fatal, we'll deal with it */ + cell = NULL; + } + reboot->nvm_cells[i] = cell; + } + + /* Set the boot_stage to indicate we're running the OS kernel */ + if (reboot->nvm.boot_stage && + nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_KERNEL_STARTED) < 0) + dev_err(reboot->dev, "Failed to write boot_stage\n"); + + /* Display and clear the error counts */ + macsmc_power_init_error_counts(reboot); + + reboot->reboot_notify.notifier_call = macsmc_reboot_notify; + + ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF_PREPARE, + SYS_OFF_PRIO_HIGH, macsmc_prepare_atomic, reboot); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to register power-off prepare handler\n"); + ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_HIGH, + macsmc_power_off, reboot); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to register power-off handler\n"); + + ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART_PREPARE, + SYS_OFF_PRIO_HIGH, macsmc_prepare_atomic, reboot); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to register restart prepare handler\n"); + ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH, + macsmc_restart, reboot); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register restart handler\n"); + + ret = devm_register_reboot_notifier(&pdev->dev, &reboot->reboot_notify); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register reboot notifier\n"); + + dev_info(&pdev->dev, "Handling reboot and poweroff requests via SMC\n"); + + return 0; +} + +static const struct of_device_id macsmc_reboot_of_table[] = { 
+ { .compatible = "apple,smc-reboot", }, + {} +}; +MODULE_DEVICE_TABLE(of, macsmc_reboot_of_table); + +static struct platform_driver macsmc_reboot_driver = { + .driver = { + .name = "macsmc-reboot", + .of_match_table = macsmc_reboot_of_table, + }, + .probe = macsmc_reboot_probe, +}; +module_platform_driver(macsmc_reboot_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC reboot/poweroff driver"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); diff --git a/drivers/power/sequencing/Kconfig b/drivers/power/sequencing/Kconfig index ddcc42a98492..280f92beb5d0 100644 --- a/drivers/power/sequencing/Kconfig +++ b/drivers/power/sequencing/Kconfig @@ -16,7 +16,7 @@ if POWER_SEQUENCING config POWER_SEQUENCING_QCOM_WCN tristate "Qualcomm WCN family PMU driver" default m if ARCH_QCOM - depends on OF + depends on OF || COMPILE_TEST help Say Y here to enable the power sequencing driver for Qualcomm WCN Bluetooth/WLAN chipsets. @@ -27,4 +27,12 @@ config POWER_SEQUENCING_QCOM_WCN this driver is needed for correct power control or else we'd risk not respecting the required delays between enabling Bluetooth and WLAN. +config POWER_SEQUENCING_TH1520_GPU + tristate "T-HEAD TH1520 GPU power sequencing driver" + depends on (ARCH_THEAD && AUXILIARY_BUS) || COMPILE_TEST + help + Say Y here to enable the power sequencing driver for the TH1520 SoC + GPU. This driver handles the complex clock and reset sequence + required to power on the Imagination BXM GPU on this platform. + endif diff --git a/drivers/power/sequencing/Makefile b/drivers/power/sequencing/Makefile index 2eec2df7912d..96c1cf0a98ac 100644 --- a/drivers/power/sequencing/Makefile +++ b/drivers/power/sequencing/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_POWER_SEQUENCING) += pwrseq-core.o pwrseq-core-y := core.o obj-$(CONFIG_POWER_SEQUENCING_QCOM_WCN) += pwrseq-qcom-wcn.o +obj-$(CONFIG_POWER_SEQUENCING_TH1520_GPU) += pwrseq-thead-gpu.o diff --git a/drivers/power/sequencing/core.c b/drivers/power/sequencing/core.c index 0ffc259c6bb6..190564e55988 100644 --- a/drivers/power/sequencing/core.c +++ b/drivers/power/sequencing/core.c @@ -628,7 +628,7 @@ static int pwrseq_match_device(struct device *pwrseq_dev, void *data) return 0; ret = pwrseq->match(pwrseq, match_data->dev); - if (ret <= 0) + if (ret == PWRSEQ_NO_MATCH || ret < 0) return ret; /* We got the matching device, let's find the right target. */ @@ -651,7 +651,7 @@ static int pwrseq_match_device(struct device *pwrseq_dev, void *data) match_data->desc->pwrseq = pwrseq_device_get(pwrseq); - return 1; + return PWRSEQ_MATCH_OK; } /** @@ -684,7 +684,7 @@ struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target) pwrseq_match_device); if (ret < 0) return ERR_PTR(ret); - if (ret == 0) + if (ret == PWRSEQ_NO_MATCH) /* No device matched. */ return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c index e8f5030f2639..663d9a537065 100644 --- a/drivers/power/sequencing/pwrseq-qcom-wcn.c +++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c @@ -155,7 +155,7 @@ static const struct pwrseq_unit_data pwrseq_qcom_wcn_bt_unit_data = { }; static const struct pwrseq_unit_data pwrseq_qcom_wcn6855_bt_unit_data = { - .name = "wlan-enable", + .name = "bluetooth-enable", .deps = pwrseq_qcom_wcn6855_unit_deps, .enable = pwrseq_qcom_wcn_bt_enable, .disable = pwrseq_qcom_wcn_bt_disable, @@ -341,12 +341,12 @@ static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq, * device. 
*/ if (!of_property_present(dev_node, "vddaon-supply")) - return 0; + return PWRSEQ_NO_MATCH; struct device_node *reg_node __free(device_node) = of_parse_phandle(dev_node, "vddaon-supply", 0); if (!reg_node) - return 0; + return PWRSEQ_NO_MATCH; /* * `reg_node` is the PMU AON regulator, its parent is the `regulators` @@ -355,9 +355,9 @@ static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq, */ if (!reg_node->parent || !reg_node->parent->parent || reg_node->parent->parent != ctx->of_node) - return 0; + return PWRSEQ_NO_MATCH; - return 1; + return PWRSEQ_MATCH_OK; } static int pwrseq_qcom_wcn_probe(struct platform_device *pdev) diff --git a/drivers/power/sequencing/pwrseq-thead-gpu.c b/drivers/power/sequencing/pwrseq-thead-gpu.c new file mode 100644 index 000000000000..7c82a10ca9f6 --- /dev/null +++ b/drivers/power/sequencing/pwrseq-thead-gpu.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * T-HEAD TH1520 GPU Power Sequencer Driver + * + * Copyright (c) 2025 Samsung Electronics Co., Ltd. + * Author: Michal Wilczynski <m.wilczynski@samsung.com> + * + * This driver implements the power sequence for the Imagination BXM-4-64 + * GPU on the T-HEAD TH1520 SoC. The sequence requires coordinating resources + * from both the sequencer's parent device node (clkgen_reset) and the GPU's + * device node (clocks and core reset). + * + * The `match` function is used to acquire the GPU's resources when the + * GPU driver requests the "gpu-power" sequence target. + */ + +#include <linux/auxiliary_bus.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pwrseq/provider.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include <dt-bindings/power/thead,th1520-power.h> + +struct pwrseq_thead_gpu_ctx { + struct pwrseq_device *pwrseq; + struct reset_control *clkgen_reset; + struct device_node *aon_node; + + /* Consumer resources */ + struct device_node *consumer_node; + struct clk_bulk_data *clks; + int num_clks; + struct reset_control *gpu_reset; +}; + +static int pwrseq_thead_gpu_enable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + int ret; + + if (!ctx->clks || !ctx->gpu_reset) + return -ENODEV; + + ret = clk_bulk_prepare_enable(ctx->num_clks, ctx->clks); + if (ret) + return ret; + + ret = reset_control_deassert(ctx->clkgen_reset); + if (ret) + goto err_disable_clks; + + /* + * According to the hardware manual, a delay of at least 32 clock + * cycles is required between de-asserting the clkgen reset and + * de-asserting the GPU reset. Assuming a worst-case scenario with + * a very high GPU clock frequency, a delay of 1 microsecond is + * sufficient to ensure this requirement is met across all + * feasible GPU clock speeds. 
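 *
 * Illustrative numbers for that bound: 32 cycles last 32 / f_gpu, so
 * they take 32 ns at 1 GHz, 320 ns at 100 MHz and exactly 1 us at
 * 32 MHz. udelay(1) therefore satisfies the requirement for any GPU
 * clock of 32 MHz or faster.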
+ */ + udelay(1); + + ret = reset_control_deassert(ctx->gpu_reset); + if (ret) + goto err_assert_clkgen; + + return 0; + +err_assert_clkgen: + reset_control_assert(ctx->clkgen_reset); +err_disable_clks: + clk_bulk_disable_unprepare(ctx->num_clks, ctx->clks); + return ret; +} + +static int pwrseq_thead_gpu_disable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + int ret = 0, err; + + if (!ctx->clks || !ctx->gpu_reset) + return -ENODEV; + + err = reset_control_assert(ctx->gpu_reset); + if (err) + ret = err; + + err = reset_control_assert(ctx->clkgen_reset); + if (err && !ret) + ret = err; + + clk_bulk_disable_unprepare(ctx->num_clks, ctx->clks); + + /* ret stores values of the first error code */ + return ret; +} + +static const struct pwrseq_unit_data pwrseq_thead_gpu_unit = { + .name = "gpu-power-sequence", + .enable = pwrseq_thead_gpu_enable, + .disable = pwrseq_thead_gpu_disable, +}; + +static const struct pwrseq_target_data pwrseq_thead_gpu_target = { + .name = "gpu-power", + .unit = &pwrseq_thead_gpu_unit, +}; + +static const struct pwrseq_target_data *pwrseq_thead_gpu_targets[] = { + &pwrseq_thead_gpu_target, + NULL +}; + +static int pwrseq_thead_gpu_match(struct pwrseq_device *pwrseq, + struct device *dev) +{ + struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + static const char *const clk_names[] = { "core", "sys" }; + struct of_phandle_args pwr_spec; + int i, ret; + + /* We only match the specific T-HEAD TH1520 GPU compatible */ + if (!of_device_is_compatible(dev->of_node, "thead,th1520-gpu")) + return PWRSEQ_NO_MATCH; + + ret = of_parse_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells", 0, &pwr_spec); + if (ret) + return PWRSEQ_NO_MATCH; + + /* Additionally verify consumer device has AON as power-domain */ + if (pwr_spec.np != ctx->aon_node || pwr_spec.args[0] != TH1520_GPU_PD) { + of_node_put(pwr_spec.np); + return PWRSEQ_NO_MATCH; + } + + of_node_put(pwr_spec.np); + + /* If a consumer is already bound, only allow a re-match from it */ + if (ctx->consumer_node) + return ctx->consumer_node == dev->of_node ? 
+ PWRSEQ_MATCH_OK : PWRSEQ_NO_MATCH; + + ctx->num_clks = ARRAY_SIZE(clk_names); + ctx->clks = kcalloc(ctx->num_clks, sizeof(*ctx->clks), GFP_KERNEL); + if (!ctx->clks) + return -ENOMEM; + + for (i = 0; i < ctx->num_clks; i++) + ctx->clks[i].id = clk_names[i]; + + ret = clk_bulk_get(dev, ctx->num_clks, ctx->clks); + if (ret) + goto err_free_clks; + + ctx->gpu_reset = reset_control_get_shared(dev, NULL); + if (IS_ERR(ctx->gpu_reset)) { + ret = PTR_ERR(ctx->gpu_reset); + goto err_put_clks; + } + + ctx->consumer_node = of_node_get(dev->of_node); + + return PWRSEQ_MATCH_OK; + +err_put_clks: + clk_bulk_put(ctx->num_clks, ctx->clks); +err_free_clks: + kfree(ctx->clks); + ctx->clks = NULL; + + return ret; +} + +static int pwrseq_thead_gpu_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct device *dev = &adev->dev; + struct device *parent_dev = dev->parent; + struct pwrseq_thead_gpu_ctx *ctx; + struct pwrseq_config config = {}; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->aon_node = parent_dev->of_node; + + ctx->clkgen_reset = + devm_reset_control_get_exclusive(parent_dev, "gpu-clkgen"); + if (IS_ERR(ctx->clkgen_reset)) + return dev_err_probe( + dev, PTR_ERR(ctx->clkgen_reset), + "Failed to get GPU clkgen reset from parent\n"); + + config.parent = dev; + config.owner = THIS_MODULE; + config.drvdata = ctx; + config.match = pwrseq_thead_gpu_match; + config.targets = pwrseq_thead_gpu_targets; + + ctx->pwrseq = devm_pwrseq_device_register(dev, &config); + if (IS_ERR(ctx->pwrseq)) + return dev_err_probe(dev, PTR_ERR(ctx->pwrseq), + "Failed to register power sequencer\n"); + + auxiliary_set_drvdata(adev, ctx); + + return 0; +} + +static void pwrseq_thead_gpu_remove(struct auxiliary_device *adev) +{ + struct pwrseq_thead_gpu_ctx *ctx = auxiliary_get_drvdata(adev); + + if (ctx->gpu_reset) + reset_control_put(ctx->gpu_reset); + + if (ctx->clks) { + clk_bulk_put(ctx->num_clks, ctx->clks); + kfree(ctx->clks); + } + + if (ctx->consumer_node) + of_node_put(ctx->consumer_node); +} + +static const struct auxiliary_device_id pwrseq_thead_gpu_id_table[] = { + { .name = "th1520_pm_domains.pwrseq-gpu" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, pwrseq_thead_gpu_id_table); + +static struct auxiliary_driver pwrseq_thead_gpu_driver = { + .driver = { + .name = "pwrseq-thead-gpu", + }, + .probe = pwrseq_thead_gpu_probe, + .remove = pwrseq_thead_gpu_remove, + .id_table = pwrseq_thead_gpu_id_table, +}; +module_auxiliary_driver(pwrseq_thead_gpu_driver); + +MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>"); +MODULE_DESCRIPTION("T-HEAD TH1520 GPU power sequencer driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 33a5bfce4604..cfb0e3e0d4aa 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -1235,9 +1235,8 @@ bool power_supply_has_property(struct power_supply *psy, return false; } -int power_supply_get_property(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) +static int __power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val, bool use_extensions) { struct power_supply_ext_registration *reg; @@ -1247,10 +1246,14 @@ int power_supply_get_property(struct power_supply *psy, return -ENODEV; } - scoped_guard(rwsem_read, &psy->extensions_sem) { - power_supply_for_each_extension(reg, psy) { - 
if (power_supply_ext_has_property(reg->ext, psp)) + if (use_extensions) { + scoped_guard(rwsem_read, &psy->extensions_sem) { + power_supply_for_each_extension(reg, psy) { + if (!power_supply_ext_has_property(reg->ext, psp)) + continue; + return reg->ext->get_property(psy, reg->ext, reg->data, psp, val); + } } } @@ -1261,20 +1264,49 @@ int power_supply_get_property(struct power_supply *psy, else return -EINVAL; } + +int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val) +{ + return __power_supply_get_property(psy, psp, val, true); +} EXPORT_SYMBOL_GPL(power_supply_get_property); -int power_supply_set_property(struct power_supply *psy, - enum power_supply_property psp, - const union power_supply_propval *val) +/** + * power_supply_get_property_direct - Read a power supply property without checking for extensions + * @psy: The power supply + * @psp: The power supply property to read + * @val: The resulting value of the power supply property + * + * Read a power supply property without taking into account any power supply extensions registered + * on the given power supply. This is mostly useful for power supply extensions that want to access + * their own power supply as using power_supply_get_property() directly will result in a potential + * deadlock. + * + * Return: 0 on success or negative error code on failure. + */ +int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val) +{ + return __power_supply_get_property(psy, psp, val, false); +} +EXPORT_SYMBOL_GPL(power_supply_get_property_direct); + + +static int __power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val, bool use_extensions) { struct power_supply_ext_registration *reg; if (atomic_read(&psy->use_cnt) <= 0) return -ENODEV; - scoped_guard(rwsem_read, &psy->extensions_sem) { - power_supply_for_each_extension(reg, psy) { - if (power_supply_ext_has_property(reg->ext, psp)) { + if (use_extensions) { + scoped_guard(rwsem_read, &psy->extensions_sem) { + power_supply_for_each_extension(reg, psy) { + if (!power_supply_ext_has_property(reg->ext, psp)) + continue; + if (reg->ext->set_property) return reg->ext->set_property(psy, reg->ext, reg->data, psp, val); @@ -1289,8 +1321,34 @@ int power_supply_set_property(struct power_supply *psy, return psy->desc->set_property(psy, psp, val); } + +int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val) +{ + return __power_supply_set_property(psy, psp, val, true); +} EXPORT_SYMBOL_GPL(power_supply_set_property); +/** + * power_supply_set_property_direct - Write a power supply property without checking for extensions + * @psy: The power supply + * @psp: The power supply property to write + * @val: The value to write to the power supply property + * + * Write a power supply property without taking into account any power supply extensions registered + * on the given power supply. This is mostly useful for power supply extensions that want to access + * their own power supply as using power_supply_set_property() directly will result in a potential + * deadlock. + * + * Return: 0 on success or negative error code on failure. 
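 *
 * The deadlock mentioned above follows from the locking visible in
 * __power_supply_get_property(): extension callbacks are invoked inside
 * a read-side scoped_guard() on psy->extensions_sem, so an extension
 * calling plain power_supply_get_property() on its own supply would try
 * to take that rwsem again and may block indefinitely once a writer
 * (e.g. a concurrent extension registration) has queued up in between.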
+ */ +int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val) +{ + return __power_supply_set_property(psy, psp, val, false); +} +EXPORT_SYMBOL_GPL(power_supply_set_property_direct); + int power_supply_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c index 5bfdfcf6013b..2c0e9ad820c0 100644 --- a/drivers/power/supply/test_power.c +++ b/drivers/power/supply/test_power.c @@ -259,6 +259,7 @@ static const struct power_supply_config test_power_configs[] = { static int test_power_battery_extmanufacture_year = 1234; static int test_power_battery_exttemp_max = 1000; static const enum power_supply_property test_power_battery_extprops[] = { + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, POWER_SUPPLY_PROP_MANUFACTURE_YEAR, POWER_SUPPLY_PROP_TEMP_MAX, }; @@ -270,6 +271,9 @@ static int test_power_battery_extget_property(struct power_supply *psy, union power_supply_propval *val) { switch (psp) { + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: + return power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + val); case POWER_SUPPLY_PROP_MANUFACTURE_YEAR: val->intval = test_power_battery_extmanufacture_year; break; diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index 6b6f51b21550..99390ec1481f 100644 --- a/drivers/powercap/dtpm_cpu.c +++ b/drivers/powercap/dtpm_cpu.c @@ -96,6 +96,8 @@ static u64 get_pd_power_uw(struct dtpm *dtpm) int i; pd = em_cpu_get(dtpm_cpu->cpu); + if (!pd) + return 0; pd_mask = em_span_cpus(pd); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index faa0b6bc5b53..c7e7f9bf5313 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1277,6 +1277,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { X86_MATCH_VFM(INTEL_RAPTORLAKE, &rapl_defaults_core), X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &rapl_defaults_core), X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_BARTLETTLAKE, &rapl_defaults_core), X86_MATCH_VFM(INTEL_METEORLAKE, &rapl_defaults_core), X86_MATCH_VFM(INTEL_METEORLAKE_L, &rapl_defaults_core), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server), diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 8ad2115d65f6..4ed06c71a3ac 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -150,6 +150,7 @@ static const struct x86_cpu_id pl4_support_ids[] = { X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL), X86_MATCH_VFM(INTEL_ARROWLAKE_U, NULL), X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL), + X86_MATCH_VFM(INTEL_PANTHERLAKE_L, NULL), {} }; diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c index af2368f4db10..82201bf4685d 100644 --- a/drivers/powercap/intel_rapl_tpmi.c +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -9,9 +9,10 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/auxiliary_bus.h> -#include <linux/io.h> -#include <linux/intel_tpmi.h> #include <linux/intel_rapl.h> +#include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/slab.h> @@ -48,7 +49,7 @@ enum tpmi_rapl_register { struct tpmi_rapl_package { struct rapl_if_priv priv; - struct intel_tpmi_plat_info *tpmi_info; + struct oobmsm_plat_info *tpmi_info; struct rapl_package *rp; void __iomem *base; struct list_head 
node; @@ -253,7 +254,7 @@ static int intel_rapl_tpmi_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { struct tpmi_rapl_package *trp; - struct intel_tpmi_plat_info *info; + struct oobmsm_plat_info *info; struct resource *res; u32 offset; int ret; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index d9bcd1e8413e..f00ce973dddf 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -66,6 +66,15 @@ config PWM_APPLE To compile this driver as a module, choose M here: the module will be called pwm-apple. +config PWM_ARGON_FAN_HAT + tristate "Argon40 Fan HAT support" + depends on I2C && OF + help + Generic PWM framework driver for Argon40 Fan HAT. + + To compile this driver as a module, choose M here: the module + will be called pwm-argon-fan-hat. + config PWM_ATMEL tristate "Atmel PWM support" depends on ARCH_AT91 || COMPILE_TEST @@ -427,6 +436,7 @@ config PWM_MC33XS2410 tristate "MC33XS2410 PWM support" depends on OF depends on SPI + select AUXILIARY_BUS help NXP MC33XS2410 high-side switch driver. The MC33XS2410 is a four channel high-side switch. The device is operational from 3.0 V @@ -517,7 +527,7 @@ config PWM_PCA9685 config PWM_PXA tristate "PXA PWM support" - depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST + depends on ARCH_PXA || ARCH_MMP || ARCH_SPACEMIT || COMPILE_TEST depends on HAS_IOMEM help Generic PWM framework driver for PXA. @@ -526,7 +536,7 @@ config PWM_PXA will be called pwm-pxa. config PWM_RASPBERRYPI_POE - tristate "Raspberry Pi Firwmware PoE Hat PWM support" + tristate "Raspberry Pi Firmware PoE Hat PWM support" # Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only # happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE. depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 96160f4257fc..ff4f47e5fb7a 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PWM) += core.o obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o obj-$(CONFIG_PWM_ADP5585) += pwm-adp5585.o obj-$(CONFIG_PWM_APPLE) += pwm-apple.o +obj-$(CONFIG_PWM_ARGON_FAN_HAT) += pwm-argon-fan-hat.o obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index edf776b8ad53..0d66376a83ec 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -23,9 +23,13 @@ #include <dt-bindings/pwm/pwm.h> +#include <uapi/linux/pwm.h> + #define CREATE_TRACE_POINTS #include <trace/events/pwm.h> +#define PWM_MINOR_COUNT 256 + /* protects access to pwm_chips */ static DEFINE_MUTEX(pwm_lock); @@ -206,8 +210,6 @@ static int __pwm_write_waveform(struct pwm_chip *chip, struct pwm_device *pwm, c return ret; } -#define WFHWSIZE 20 - /** * pwm_round_waveform_might_sleep - Query hardware capabilities * Cannot be used in atomic context. 
@@ -244,10 +246,10 @@ int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform * struct pwm_chip *chip = pwm->chip; const struct pwm_ops *ops = chip->ops; struct pwm_waveform wf_req = *wf; - char wfhw[WFHWSIZE]; + char wfhw[PWM_WFHWSIZE]; int ret_tohw, ret_fromhw; - BUG_ON(WFHWSIZE < ops->sizeof_wfhw); + BUG_ON(PWM_WFHWSIZE < ops->sizeof_wfhw); if (!pwmchip_supports_waveform(chip)) return -EOPNOTSUPP; @@ -302,10 +304,10 @@ int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf { struct pwm_chip *chip = pwm->chip; const struct pwm_ops *ops = chip->ops; - char wfhw[WFHWSIZE]; + char wfhw[PWM_WFHWSIZE]; int err; - BUG_ON(WFHWSIZE < ops->sizeof_wfhw); + BUG_ON(PWM_WFHWSIZE < ops->sizeof_wfhw); if (!pwmchip_supports_waveform(chip) || !ops->read_waveform) return -EOPNOTSUPP; @@ -330,11 +332,11 @@ static int __pwm_set_waveform(struct pwm_device *pwm, { struct pwm_chip *chip = pwm->chip; const struct pwm_ops *ops = chip->ops; - char wfhw[WFHWSIZE]; + char wfhw[PWM_WFHWSIZE]; struct pwm_waveform wf_rounded; int err, ret_tohw; - BUG_ON(WFHWSIZE < ops->sizeof_wfhw); + BUG_ON(PWM_WFHWSIZE < ops->sizeof_wfhw); if (!pwmchip_supports_waveform(chip)) return -EOPNOTSUPP; @@ -646,9 +648,9 @@ static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state) if (pwmchip_supports_waveform(chip)) { struct pwm_waveform wf; - char wfhw[WFHWSIZE]; + char wfhw[PWM_WFHWSIZE]; - BUG_ON(WFHWSIZE < ops->sizeof_wfhw); + BUG_ON(PWM_WFHWSIZE < ops->sizeof_wfhw); pwm_state2wf(state, &wf); @@ -805,10 +807,10 @@ int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state) return -ENODEV; if (pwmchip_supports_waveform(chip) && ops->read_waveform) { - char wfhw[WFHWSIZE]; + char wfhw[PWM_WFHWSIZE]; struct pwm_waveform wf; - BUG_ON(WFHWSIZE < ops->sizeof_wfhw); + BUG_ON(PWM_WFHWSIZE < ops->sizeof_wfhw); ret = __pwm_read_waveform(chip, pwm, &wfhw); if (ret) @@ -1692,8 +1694,8 @@ static bool pwm_ops_check(const struct pwm_chip *chip) !ops->write_waveform) return false; - if (WFHWSIZE < ops->sizeof_wfhw) { - dev_warn(pwmchip_parent(chip), "WFHWSIZE < %zu\n", ops->sizeof_wfhw); + if (PWM_WFHWSIZE < ops->sizeof_wfhw) { + dev_warn(pwmchip_parent(chip), "PWM_WFHWSIZE < %zu\n", ops->sizeof_wfhw); return false; } } else { @@ -2007,20 +2009,9 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) } EXPORT_SYMBOL_GPL(pwm_get); -/** - * pwm_put() - release a PWM device - * @pwm: PWM device - */ -void pwm_put(struct pwm_device *pwm) +static void __pwm_put(struct pwm_device *pwm) { - struct pwm_chip *chip; - - if (!pwm) - return; - - chip = pwm->chip; - - guard(mutex)(&pwm_lock); + struct pwm_chip *chip = pwm->chip; /* * Trigger a warning if a consumer called pwm_put() twice. 
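/*
 * Likely rationale for the split, inferred from this and the following
 * hunks rather than stated in the patch: __pwm_put() now holds the
 * actual release logic and expects pwm_lock to already be held, while
 * pwm_put() keeps its old behaviour of taking the lock itself. That lets
 * the new character-device code (pwm_cdev_free() below) drop a PWM while
 * it already holds pwm_lock for the duration of an ioctl.
 */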
@@ -2041,6 +2032,20 @@ void pwm_put(struct pwm_device *pwm) module_put(chip->owner); } + +/** + * pwm_put() - release a PWM device + * @pwm: PWM device + */ +void pwm_put(struct pwm_device *pwm) +{ + if (!pwm) + return; + + guard(mutex)(&pwm_lock); + + __pwm_put(pwm); +} EXPORT_SYMBOL_GPL(pwm_put); static void devm_pwm_release(void *pwm) @@ -2110,6 +2115,274 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev, } EXPORT_SYMBOL_GPL(devm_fwnode_pwm_get); +struct pwm_cdev_data { + struct pwm_chip *chip; + struct pwm_device *pwm[]; +}; + +static int pwm_cdev_open(struct inode *inode, struct file *file) +{ + struct pwm_chip *chip = container_of(inode->i_cdev, struct pwm_chip, cdev); + struct pwm_cdev_data *cdata; + + guard(mutex)(&pwm_lock); + + if (!chip->operational) + return -ENXIO; + + cdata = kzalloc(struct_size(cdata, pwm, chip->npwm), GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + cdata->chip = chip; + + file->private_data = cdata; + + return nonseekable_open(inode, file); +} + +static int pwm_cdev_release(struct inode *inode, struct file *file) +{ + struct pwm_cdev_data *cdata = file->private_data; + unsigned int i; + + for (i = 0; i < cdata->chip->npwm; ++i) { + struct pwm_device *pwm = cdata->pwm[i]; + + if (pwm) { + const char *label = pwm->label; + + pwm_put(cdata->pwm[i]); + kfree(label); + } + } + kfree(cdata); + + return 0; +} + +static int pwm_cdev_request(struct pwm_cdev_data *cdata, unsigned int hwpwm) +{ + struct pwm_chip *chip = cdata->chip; + + if (hwpwm >= chip->npwm) + return -EINVAL; + + if (!cdata->pwm[hwpwm]) { + struct pwm_device *pwm = &chip->pwms[hwpwm]; + const char *label; + int ret; + + label = kasprintf(GFP_KERNEL, "pwm-cdev (pid=%d)", current->pid); + if (!label) + return -ENOMEM; + + ret = pwm_device_request(pwm, label); + if (ret < 0) { + kfree(label); + return ret; + } + + cdata->pwm[hwpwm] = pwm; + } + + return 0; +} + +static int pwm_cdev_free(struct pwm_cdev_data *cdata, unsigned int hwpwm) +{ + struct pwm_chip *chip = cdata->chip; + + if (hwpwm >= chip->npwm) + return -EINVAL; + + if (cdata->pwm[hwpwm]) { + struct pwm_device *pwm = cdata->pwm[hwpwm]; + const char *label = pwm->label; + + __pwm_put(pwm); + + kfree(label); + + cdata->pwm[hwpwm] = NULL; + } + + return 0; +} + +static struct pwm_device *pwm_cdev_get_requested_pwm(struct pwm_cdev_data *cdata, + u32 hwpwm) +{ + struct pwm_chip *chip = cdata->chip; + + if (hwpwm >= chip->npwm) + return ERR_PTR(-EINVAL); + + if (cdata->pwm[hwpwm]) + return cdata->pwm[hwpwm]; + + return ERR_PTR(-EINVAL); +} + +static long pwm_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = 0; + struct pwm_cdev_data *cdata = file->private_data; + struct pwm_chip *chip = cdata->chip; + + guard(mutex)(&pwm_lock); + + if (!chip->operational) + return -ENODEV; + + switch (cmd) { + case PWM_IOCTL_REQUEST: + { + unsigned int hwpwm = arg; + + return pwm_cdev_request(cdata, hwpwm); + } + + case PWM_IOCTL_FREE: + { + unsigned int hwpwm = arg; + + return pwm_cdev_free(cdata, hwpwm); + } + + case PWM_IOCTL_ROUNDWF: + { + struct pwmchip_waveform cwf; + struct pwm_waveform wf; + struct pwm_device *pwm; + + ret = copy_from_user(&cwf, + (struct pwmchip_waveform __user *)arg, + sizeof(cwf)); + if (ret) + return -EFAULT; + + if (cwf.__pad != 0) + return -EINVAL; + + pwm = pwm_cdev_get_requested_pwm(cdata, cwf.hwpwm); + if (IS_ERR(pwm)) + return PTR_ERR(pwm); + + wf = (struct pwm_waveform) { + .period_length_ns = cwf.period_length_ns, + .duty_length_ns = cwf.duty_length_ns, + .duty_offset_ns = 
cwf.duty_offset_ns, + }; + + ret = pwm_round_waveform_might_sleep(pwm, &wf); + if (ret < 0) + return ret; + + cwf = (struct pwmchip_waveform) { + .hwpwm = cwf.hwpwm, + .period_length_ns = wf.period_length_ns, + .duty_length_ns = wf.duty_length_ns, + .duty_offset_ns = wf.duty_offset_ns, + }; + + return copy_to_user((struct pwmchip_waveform __user *)arg, + &cwf, sizeof(cwf)); + } + + case PWM_IOCTL_GETWF: + { + struct pwmchip_waveform cwf; + struct pwm_waveform wf; + struct pwm_device *pwm; + + ret = copy_from_user(&cwf, + (struct pwmchip_waveform __user *)arg, + sizeof(cwf)); + if (ret) + return -EFAULT; + + if (cwf.__pad != 0) + return -EINVAL; + + pwm = pwm_cdev_get_requested_pwm(cdata, cwf.hwpwm); + if (IS_ERR(pwm)) + return PTR_ERR(pwm); + + ret = pwm_get_waveform_might_sleep(pwm, &wf); + if (ret) + return ret; + + cwf = (struct pwmchip_waveform) { + .hwpwm = cwf.hwpwm, + .period_length_ns = wf.period_length_ns, + .duty_length_ns = wf.duty_length_ns, + .duty_offset_ns = wf.duty_offset_ns, + }; + + return copy_to_user((struct pwmchip_waveform __user *)arg, + &cwf, sizeof(cwf)); + } + + case PWM_IOCTL_SETROUNDEDWF: + case PWM_IOCTL_SETEXACTWF: + { + struct pwmchip_waveform cwf; + struct pwm_waveform wf; + struct pwm_device *pwm; + + ret = copy_from_user(&cwf, + (struct pwmchip_waveform __user *)arg, + sizeof(cwf)); + if (ret) + return -EFAULT; + + if (cwf.__pad != 0) + return -EINVAL; + + wf = (struct pwm_waveform){ + .period_length_ns = cwf.period_length_ns, + .duty_length_ns = cwf.duty_length_ns, + .duty_offset_ns = cwf.duty_offset_ns, + }; + + if (!pwm_wf_valid(&wf)) + return -EINVAL; + + pwm = pwm_cdev_get_requested_pwm(cdata, cwf.hwpwm); + if (IS_ERR(pwm)) + return PTR_ERR(pwm); + + ret = pwm_set_waveform_might_sleep(pwm, &wf, + cmd == PWM_IOCTL_SETEXACTWF); + + /* + * If userspace cares about rounding deviations it has + * to check the values anyhow, so simplify handling for + * them and don't signal uprounding. This matches the + * behaviour of PWM_IOCTL_ROUNDWF which also returns 0 + * in that case. 
+ */ + if (ret == 1) + ret = 0; + + return ret; + } + + default: + return -ENOTTY; + } +} + +static const struct file_operations pwm_cdev_fileops = { + .open = pwm_cdev_open, + .release = pwm_cdev_release, + .owner = THIS_MODULE, + .unlocked_ioctl = pwm_cdev_ioctl, +}; + +static dev_t pwm_devt; + /** * __pwmchip_add() - register a new PWM chip * @chip: the PWM chip to add @@ -2162,7 +2435,17 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner) scoped_guard(pwmchip, chip) chip->operational = true; - ret = device_add(&chip->dev); + if (chip->ops->write_waveform) { + if (chip->id < PWM_MINOR_COUNT) + chip->dev.devt = MKDEV(MAJOR(pwm_devt), chip->id); + else + dev_warn(&chip->dev, "chip id too high to create a chardev\n"); + } + + cdev_init(&chip->cdev, &pwm_cdev_fileops); + chip->cdev.owner = owner; + + ret = cdev_device_add(&chip->cdev, &chip->dev); if (ret) goto err_device_add; @@ -2213,7 +2496,7 @@ void pwmchip_remove(struct pwm_chip *chip) idr_remove(&pwm_chips, chip->id); } - device_del(&chip->dev); + cdev_device_del(&chip->cdev, &chip->dev); } EXPORT_SYMBOL_GPL(pwmchip_remove); @@ -2357,9 +2640,16 @@ static int __init pwm_init(void) { int ret; + ret = alloc_chrdev_region(&pwm_devt, 0, PWM_MINOR_COUNT, "pwm"); + if (ret) { + pr_err("Failed to initialize chrdev region for PWM usage\n"); + return ret; + } + ret = class_register(&pwm_class); if (ret) { pr_err("Failed to initialize PWM class (%pe)\n", ERR_PTR(ret)); + unregister_chrdev_region(pwm_devt, 256); return ret; } diff --git a/drivers/pwm/pwm-adp5585.c b/drivers/pwm/pwm-adp5585.c index d79106d12181..dc2860979e24 100644 --- a/drivers/pwm/pwm-adp5585.c +++ b/drivers/pwm/pwm-adp5585.c @@ -33,21 +33,33 @@ #define ADP5585_PWM_MIN_PERIOD_NS (2ULL * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) #define ADP5585_PWM_MAX_PERIOD_NS (2ULL * 0xffff * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) +struct adp5585_pwm_chip { + unsigned int pwm_cfg; + unsigned int pwm_offt_low; + unsigned int pwm_ont_low; +}; + +struct adp5585_pwm { + const struct adp5585_pwm_chip *info; + struct regmap *regmap; + unsigned int ext_cfg; +}; + static int pwm_adp5585_request(struct pwm_chip *chip, struct pwm_device *pwm) { - struct regmap *regmap = pwmchip_get_drvdata(chip); + struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip); /* Configure the R3 pin as PWM output. 
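The ioctl handler above, together with the cdev registration in __pwmchip_add(), exposes waveform-capable chips to userspace. A minimal consumer might look like the sketch below; the /dev/pwmchip0 node name is an assumption about how the chip device ends up named, and struct pwmchip_waveform plus the PWM_IOCTL_* numbers are taken on faith from the uapi header included above, which is not part of this hunk.

/*
 * Hedged userspace sketch for the new pwm chardev ioctls.
 * Assumed: the chip is reachable as /dev/pwmchip0 and <linux/pwm.h>
 * provides PWM_IOCTL_* plus struct pwmchip_waveform with the fields
 * used by the handler above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pwm.h>

int main(void)
{
	struct pwmchip_waveform wf = {
		.hwpwm = 0,
		.period_length_ns = 1000000,	/* 1 kHz */
		.duty_length_ns = 250000,	/* 25 % */
		.duty_offset_ns = 0,
	};
	int fd = open("/dev/pwmchip0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Claim line 0 of this chip; it is released again on close(). */
	if (ioctl(fd, PWM_IOCTL_REQUEST, 0))
		perror("PWM_IOCTL_REQUEST");

	/* See what the hardware can actually produce ... */
	if (ioctl(fd, PWM_IOCTL_ROUNDWF, &wf))
		perror("PWM_IOCTL_ROUNDWF");

	/* ... then program it, insisting on the exact rounded values. */
	if (ioctl(fd, PWM_IOCTL_SETEXACTWF, &wf))
		perror("PWM_IOCTL_SETEXACTWF");

	close(fd);
	return 0;
}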
*/ - return regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C, + return regmap_update_bits(adp5585_pwm->regmap, adp5585_pwm->ext_cfg, ADP5585_R3_EXTEND_CFG_MASK, ADP5585_R3_EXTEND_CFG_PWM_OUT); } static void pwm_adp5585_free(struct pwm_chip *chip, struct pwm_device *pwm) { - struct regmap *regmap = pwmchip_get_drvdata(chip); + struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip); - regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C, + regmap_update_bits(adp5585_pwm->regmap, adp5585_pwm->ext_cfg, ADP5585_R3_EXTEND_CFG_MASK, ADP5585_R3_EXTEND_CFG_GPIO4); } @@ -56,15 +68,16 @@ static int pwm_adp5585_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { - struct regmap *regmap = pwmchip_get_drvdata(chip); + struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip); + const struct adp5585_pwm_chip *info = adp5585_pwm->info; + struct regmap *regmap = adp5585_pwm->regmap; u64 period, duty_cycle; u32 on, off; __le16 val; int ret; if (!state->enabled) { - regmap_clear_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN); - regmap_clear_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN); + regmap_clear_bits(regmap, info->pwm_cfg, ADP5585_PWM_EN); return 0; } @@ -85,45 +98,43 @@ static int pwm_adp5585_apply(struct pwm_chip *chip, off = div_u64(period, NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) - on; val = cpu_to_le16(off); - ret = regmap_bulk_write(regmap, ADP5585_PWM_OFFT_LOW, &val, 2); + ret = regmap_bulk_write(regmap, info->pwm_offt_low, &val, 2); if (ret) return ret; val = cpu_to_le16(on); - ret = regmap_bulk_write(regmap, ADP5585_PWM_ONT_LOW, &val, 2); + ret = regmap_bulk_write(regmap, info->pwm_ont_low, &val, 2); if (ret) return ret; /* Enable PWM in continuous mode and no external AND'ing. */ - ret = regmap_update_bits(regmap, ADP5585_PWM_CFG, + ret = regmap_update_bits(regmap, info->pwm_cfg, ADP5585_PWM_IN_AND | ADP5585_PWM_MODE | ADP5585_PWM_EN, ADP5585_PWM_EN); if (ret) return ret; - ret = regmap_set_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN); - if (ret) - return ret; - - return regmap_set_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN); + return regmap_set_bits(regmap, info->pwm_cfg, ADP5585_PWM_EN); } static int pwm_adp5585_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { - struct regmap *regmap = pwmchip_get_drvdata(chip); + struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip); + const struct adp5585_pwm_chip *info = adp5585_pwm->info; + struct regmap *regmap = adp5585_pwm->regmap; unsigned int on, off; unsigned int val; __le16 on_off; int ret; - ret = regmap_bulk_read(regmap, ADP5585_PWM_OFFT_LOW, &on_off, 2); + ret = regmap_bulk_read(regmap, info->pwm_offt_low, &on_off, 2); if (ret) return ret; off = le16_to_cpu(on_off); - ret = regmap_bulk_read(regmap, ADP5585_PWM_ONT_LOW, &on_off, 2); + ret = regmap_bulk_read(regmap, info->pwm_ont_low, &on_off, 2); if (ret) return ret; on = le16_to_cpu(on_off); @@ -133,7 +144,7 @@ static int pwm_adp5585_get_state(struct pwm_chip *chip, state->polarity = PWM_POLARITY_NORMAL; - regmap_read(regmap, ADP5585_PWM_CFG, &val); + regmap_read(regmap, info->pwm_cfg, &val); state->enabled = !!(val & ADP5585_PWM_EN); return 0; @@ -148,18 +159,28 @@ static const struct pwm_ops adp5585_pwm_ops = { static int adp5585_pwm_probe(struct platform_device *pdev) { + const struct platform_device_id *id = platform_get_device_id(pdev); struct device *dev = &pdev->dev; struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent); + struct adp5585_pwm *adp5585_pwm; struct pwm_chip *chip; int ret; - chip = 
devm_pwmchip_alloc(dev, ADP5585_PWM_CHAN_NUM, 0); + chip = devm_pwmchip_alloc(dev, ADP5585_PWM_CHAN_NUM, + sizeof(*adp5585_pwm)); if (IS_ERR(chip)) return PTR_ERR(chip); + adp5585_pwm = pwmchip_get_drvdata(chip); + adp5585_pwm->regmap = adp5585->regmap; + adp5585_pwm->ext_cfg = adp5585->regs->ext_cfg; + + adp5585_pwm->info = (const struct adp5585_pwm_chip *)id->driver_data; + if (!adp5585_pwm->info) + return -ENODEV; + device_set_of_node_from_dev(dev, dev->parent); - pwmchip_set_drvdata(chip, adp5585->regmap); chip->ops = &adp5585_pwm_ops; ret = devm_pwmchip_add(dev, chip); @@ -169,8 +190,21 @@ static int adp5585_pwm_probe(struct platform_device *pdev) return 0; } +static const struct adp5585_pwm_chip adp5589_pwm_chip_info = { + .pwm_cfg = ADP5589_PWM_CFG, + .pwm_offt_low = ADP5589_PWM_OFFT_LOW, + .pwm_ont_low = ADP5589_PWM_ONT_LOW, +}; + +static const struct adp5585_pwm_chip adp5585_pwm_chip_info = { + .pwm_cfg = ADP5585_PWM_CFG, + .pwm_offt_low = ADP5585_PWM_OFFT_LOW, + .pwm_ont_low = ADP5585_PWM_ONT_LOW, +}; + static const struct platform_device_id adp5585_pwm_id_table[] = { - { "adp5585-pwm" }, + { "adp5585-pwm", (kernel_ulong_t)&adp5585_pwm_chip_info }, + { "adp5589-pwm", (kernel_ulong_t)&adp5589_pwm_chip_info }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(platform, adp5585_pwm_id_table); diff --git a/drivers/pwm/pwm-argon-fan-hat.c b/drivers/pwm/pwm-argon-fan-hat.c new file mode 100644 index 000000000000..2c59bd142d40 --- /dev/null +++ b/drivers/pwm/pwm-argon-fan-hat.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025 Marek Vasut + * + * Limitations: + * - no support for offset/polarity + * - fixed 30 kHz period + * + * Argon Fan HAT https://argon40.com/products/argon-fan-hat + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/pwm.h> + +#define ARGON40_FAN_HAT_PERIOD_NS 33333 /* ~30 kHz */ + +#define ARGON40_FAN_HAT_REG_DUTY_CYCLE 0x80 + +static int argon_fan_hat_round_waveform_tohw(struct pwm_chip *chip, + struct pwm_device *pwm, + const struct pwm_waveform *wf, + void *_wfhw) +{ + u8 *wfhw = _wfhw; + + if (wf->duty_length_ns > ARGON40_FAN_HAT_PERIOD_NS) + *wfhw = 100; + else + *wfhw = mul_u64_u64_div_u64(wf->duty_length_ns, 100, ARGON40_FAN_HAT_PERIOD_NS); + + return 0; +} + +static int argon_fan_hat_round_waveform_fromhw(struct pwm_chip *chip, + struct pwm_device *pwm, + const void *_wfhw, + struct pwm_waveform *wf) +{ + const u8 *wfhw = _wfhw; + + wf->period_length_ns = ARGON40_FAN_HAT_PERIOD_NS; + wf->duty_length_ns = DIV64_U64_ROUND_UP(wf->period_length_ns * *wfhw, 100); + wf->duty_offset_ns = 0; + + return 0; +} + +static int argon_fan_hat_write_waveform(struct pwm_chip *chip, + struct pwm_device *pwm, + const void *_wfhw) +{ + struct i2c_client *i2c = pwmchip_get_drvdata(chip); + const u8 *wfhw = _wfhw; + + return i2c_smbus_write_byte_data(i2c, ARGON40_FAN_HAT_REG_DUTY_CYCLE, *wfhw); +} + +static const struct pwm_ops argon_fan_hat_pwm_ops = { + .sizeof_wfhw = sizeof(u8), + .round_waveform_fromhw = argon_fan_hat_round_waveform_fromhw, + .round_waveform_tohw = argon_fan_hat_round_waveform_tohw, + .write_waveform = argon_fan_hat_write_waveform, + /* + * The controller does not provide any way to read info back, + * reading from the controller stops the fan, therefore there + * is no .read_waveform here.
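One worked example makes the interplay of the two rounding callbacks above concrete (a sketch with an assumed request, not a general proof):

	requested duty_length_ns = 10000
	round_waveform_tohw:   10000 * 100 / 33333 = 30 (per-cent register value, rounded down)
	round_waveform_fromhw: DIV64_U64_ROUND_UP(33333 * 30, 100) = 10000 ns

So this particular request survives a ROUNDWF/GETWF round trip unchanged, and because tohw rounds down while fromhw rounds up, the duty reported back to the core never exceeds the duty that was requested.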
+ */ +}; + +static int argon_fan_hat_i2c_probe(struct i2c_client *i2c) +{ + struct pwm_chip *chip = devm_pwmchip_alloc(&i2c->dev, 1, 0); + int ret; + + if (IS_ERR(chip)) + return PTR_ERR(chip); + + chip->ops = &argon_fan_hat_pwm_ops; + pwmchip_set_drvdata(chip, i2c); + + ret = devm_pwmchip_add(&i2c->dev, chip); + if (ret) + return dev_err_probe(&i2c->dev, ret, "Could not add PWM chip\n"); + + return 0; +} + +static const struct of_device_id argon_fan_hat_dt_ids[] = { + { .compatible = "argon40,fan-hat" }, + { }, +}; +MODULE_DEVICE_TABLE(of, argon_fan_hat_dt_ids); + +static struct i2c_driver argon_fan_hat_driver = { + .driver = { + .name = "argon-fan-hat", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = argon_fan_hat_dt_ids, + }, + .probe = argon_fan_hat_i2c_probe, +}; + +module_i2c_driver(argon_fan_hat_driver); + +MODULE_AUTHOR("Marek Vasut <marek.vasut+renesas@mailbox.org>"); +MODULE_DESCRIPTION("Argon40 Fan HAT"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index b2f0abbbad63..06d22d0f7b26 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -91,9 +91,6 @@ struct atmel_pwm_chip { * hardware. */ u32 update_pending; - - /* Protects .update_pending */ - spinlock_t lock; }; static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip) @@ -145,8 +142,6 @@ static void atmel_pwm_update_pending(struct atmel_pwm_chip *chip) static void atmel_pwm_set_pending(struct atmel_pwm_chip *chip, unsigned int ch) { - spin_lock(&chip->lock); - /* * Clear pending flags in hardware because otherwise there might still * be a stale flag in ISR. @@ -154,16 +149,12 @@ static void atmel_pwm_set_pending(struct atmel_pwm_chip *chip, unsigned int ch) atmel_pwm_update_pending(chip); chip->update_pending |= (1 << ch); - - spin_unlock(&chip->lock); } static int atmel_pwm_test_pending(struct atmel_pwm_chip *chip, unsigned int ch) { int ret = 0; - spin_lock(&chip->lock); - if (chip->update_pending & (1 << ch)) { atmel_pwm_update_pending(chip); @@ -171,8 +162,6 @@ static int atmel_pwm_test_pending(struct atmel_pwm_chip *chip, unsigned int ch) ret = 1; } - spin_unlock(&chip->lock); - return ret; } @@ -509,7 +498,6 @@ static int atmel_pwm_probe(struct platform_device *pdev) atmel_pwm->data = of_device_get_match_data(&pdev->dev); atmel_pwm->update_pending = 0; - spin_lock_init(&atmel_pwm->lock); atmel_pwm->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(atmel_pwm->base)) diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c index 04559a9de718..2c92ce754872 100644 --- a/drivers/pwm/pwm-clps711x.c +++ b/drivers/pwm/pwm-clps711x.c @@ -14,7 +14,6 @@ struct clps711x_chip { void __iomem *pmpcon; struct clk *clk; - spinlock_t lock; }; static inline struct clps711x_chip *to_clps711x_chip(struct pwm_chip *chip) @@ -42,7 +41,6 @@ static int clps711x_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct clps711x_chip *priv = to_clps711x_chip(chip); /* PWM0 - bits 4..7, PWM1 - bits 8..11 */ u32 shift = (pwm->hwpwm + 1) * 4; - unsigned long flags; u32 pmpcon, val; if (state->polarity != PWM_POLARITY_NORMAL) @@ -56,15 +54,11 @@ static int clps711x_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, else val = 0; - spin_lock_irqsave(&priv->lock, flags); - pmpcon = readl(priv->pmpcon); pmpcon &= ~(0xf << shift); pmpcon |= val << shift; writel(pmpcon, priv->pmpcon); - spin_unlock_irqrestore(&priv->lock, flags); - return 0; } @@ -93,8 +87,6 @@ static int clps711x_pwm_probe(struct platform_device *pdev) chip->ops = 
&clps711x_pwm_ops; - spin_lock_init(&priv->lock); - return devm_pwmchip_add(&pdev->dev, chip); } diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index c45a5fca4cbb..6683931872fc 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -10,7 +10,6 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm.h> @@ -40,7 +39,6 @@ struct fsl_pwm_periodcfg { }; struct fsl_pwm_chip { - struct mutex lock; struct regmap *regmap; /* This value is valid iff a pwm is running */ @@ -89,11 +87,8 @@ static int fsl_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) struct fsl_pwm_chip *fpc = to_fsl_chip(chip); ret = clk_prepare_enable(fpc->ipg_clk); - if (!ret && fpc->soc->has_enable_bits) { - mutex_lock(&fpc->lock); + if (!ret && fpc->soc->has_enable_bits) regmap_set_bits(fpc->regmap, FTM_SC, BIT(pwm->hwpwm + 16)); - mutex_unlock(&fpc->lock); - } return ret; } @@ -102,11 +97,8 @@ static void fsl_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct fsl_pwm_chip *fpc = to_fsl_chip(chip); - if (fpc->soc->has_enable_bits) { - mutex_lock(&fpc->lock); + if (fpc->soc->has_enable_bits) regmap_clear_bits(fpc->regmap, FTM_SC, BIT(pwm->hwpwm + 16)); - mutex_unlock(&fpc->lock); - } clk_disable_unprepare(fpc->ipg_clk); } @@ -304,7 +296,7 @@ static int fsl_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, { struct fsl_pwm_chip *fpc = to_fsl_chip(chip); struct pwm_state *oldstate = &pwm->state; - int ret = 0; + int ret; /* * oldstate to newstate : action @@ -315,8 +307,6 @@ static int fsl_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, * disabled to enabled : update settings + enable */ - mutex_lock(&fpc->lock); - if (!newstate->enabled) { if (oldstate->enabled) { regmap_set_bits(fpc->regmap, FTM_OUTMASK, @@ -325,30 +315,28 @@ static int fsl_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, clk_disable_unprepare(fpc->clk[fpc->period.clk_select]); } - goto end_mutex; + return 0; } ret = fsl_pwm_apply_config(chip, pwm, newstate); if (ret) - goto end_mutex; + return ret; /* check if need to enable */ if (!oldstate->enabled) { ret = clk_prepare_enable(fpc->clk[fpc->period.clk_select]); if (ret) - goto end_mutex; + return ret; ret = clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]); if (ret) { clk_disable_unprepare(fpc->clk[fpc->period.clk_select]); - goto end_mutex; + return ret; } regmap_clear_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm)); } -end_mutex: - mutex_unlock(&fpc->lock); return ret; } @@ -408,8 +396,6 @@ static int fsl_pwm_probe(struct platform_device *pdev) return PTR_ERR(chip); fpc = to_fsl_chip(chip); - mutex_init(&fpc->lock); - fpc->soc = of_device_get_match_data(&pdev->dev); base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index 71542956feca..91e0b19f0c08 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -139,7 +139,6 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, (timebase << PWM_CH_CFG_TMBASE_SHIFT); img_pwm_writel(imgchip, PWM_CH_CFG(pwm->hwpwm), val); - pm_runtime_mark_last_busy(pwmchip_parent(chip)); pm_runtime_put_autosuspend(pwmchip_parent(chip)); return 0; @@ -175,7 +174,6 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) val &= ~BIT(pwm->hwpwm); img_pwm_writel(imgchip, PWM_CTRL_CFG, val); - pm_runtime_mark_last_busy(pwmchip_parent(chip)); 
pm_runtime_put_autosuspend(pwmchip_parent(chip)); } diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index f351baa63453..1e614b2a0227 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -100,8 +100,6 @@ struct lpc18xx_pwm_chip { u64 max_period_ns; unsigned int period_event; unsigned long event_map; - struct mutex res_lock; - struct mutex period_lock; struct lpc18xx_pwm_data channeldata[LPC18XX_NUM_PWMS]; }; @@ -129,8 +127,6 @@ static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm, { u32 val; - mutex_lock(&lpc18xx_pwm->res_lock); - /* * Simultaneous set and clear may happen on an output, that is the case * when duty_ns == period_ns. LPC18xx SCT allows to set a conflict @@ -140,8 +136,6 @@ static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm, val &= ~LPC18XX_PWM_RES_MASK(pwm->hwpwm); val |= LPC18XX_PWM_RES(pwm->hwpwm, action); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_RES_BASE, val); - - mutex_unlock(&lpc18xx_pwm->res_lock); } static void lpc18xx_pwm_config_period(struct pwm_chip *chip, u64 period_ns) @@ -200,8 +194,6 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return -ERANGE; } - mutex_lock(&lpc18xx_pwm->period_lock); - requested_events = bitmap_weight(&lpc18xx_pwm->event_map, LPC18XX_PWM_EVENT_MAX); @@ -214,7 +206,6 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, lpc18xx_pwm->period_ns) { dev_err(pwmchip_parent(chip), "conflicting period requested for PWM %u\n", pwm->hwpwm); - mutex_unlock(&lpc18xx_pwm->period_lock); return -EBUSY; } @@ -224,8 +215,6 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, lpc18xx_pwm_config_period(chip, period_ns); } - mutex_unlock(&lpc18xx_pwm->period_lock); - lpc18xx_pwm_config_duty(chip, pwm, duty_ns); return 0; @@ -377,9 +366,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC) return dev_err_probe(&pdev->dev, -EINVAL, "pwm clock to fast\n"); - mutex_init(&lpc18xx_pwm->res_lock); - mutex_init(&lpc18xx_pwm->period_lock); - lpc18xx_pwm->max_period_ns = mul_u64_u64_div_u64(NSEC_PER_SEC, LPC18XX_PWM_TIMER_MAX, lpc18xx_pwm->clk_rate); diff --git a/drivers/pwm/pwm-mc33xs2410.c b/drivers/pwm/pwm-mc33xs2410.c index a1ac3445ccdb..6d99e3ff7239 100644 --- a/drivers/pwm/pwm-mc33xs2410.c +++ b/drivers/pwm/pwm-mc33xs2410.c @@ -17,11 +17,14 @@ * behavior of the output pin that is neither the old nor the new state, * rather something in between. 
*/ +#define DEFAULT_SYMBOL_NAMESPACE "PWM_MC33XS2410" +#include <linux/auxiliary_bus.h> #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/math64.h> +#include <linux/mc33xs2410.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/of.h> @@ -120,12 +123,19 @@ static int mc33xs2410_read_reg(struct spi_device *spi, u8 reg, u16 *val, u8 flag return mc33xs2410_read_regs(spi, ®, flag, val, 1); } -static int mc33xs2410_read_reg_ctrl(struct spi_device *spi, u8 reg, u16 *val) +int mc33xs2410_read_reg_ctrl(struct spi_device *spi, u8 reg, u16 *val) { return mc33xs2410_read_reg(spi, reg, val, MC33XS2410_FRAME_IN_DATA_RD); } +EXPORT_SYMBOL_GPL(mc33xs2410_read_reg_ctrl); -static int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val) +int mc33xs2410_read_reg_diag(struct spi_device *spi, u8 reg, u16 *val) +{ + return mc33xs2410_read_reg(spi, reg, val, 0); +} +EXPORT_SYMBOL_GPL(mc33xs2410_read_reg_diag); + +int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val) { u16 tmp; int ret; @@ -139,6 +149,7 @@ static int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val return mc33xs2410_write_reg(spi, reg, tmp); } +EXPORT_SYMBOL_GPL(mc33xs2410_modify_reg); static u8 mc33xs2410_pwm_get_freq(u64 period) { @@ -314,6 +325,7 @@ static int mc33xs2410_reset(struct device *dev) static int mc33xs2410_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct auxiliary_device *adev; struct pwm_chip *chip; int ret; @@ -361,6 +373,10 @@ static int mc33xs2410_probe(struct spi_device *spi) if (ret < 0) return dev_err_probe(dev, ret, "Failed to add pwm chip\n"); + adev = devm_auxiliary_device_create(dev, "hwmon", NULL); + if (!adev) + return dev_err_probe(dev, -ENODEV, "Failed to register hwmon device\n"); + return 0; } diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 33d3554b9197..6777c511622a 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -29,6 +29,7 @@ #define PWM45DWIDTH_FIXUP 0x30 #define PWMTHRES 0x30 #define PWM45THRES_FIXUP 0x34 +#define PWM_CK_26M_SEL_V3 0x74 #define PWM_CK_26M_SEL 0x210 #define PWM_CLK_DIV_MAX 7 @@ -36,7 +37,7 @@ struct pwm_mediatek_of_data { unsigned int num_pwms; bool pwm45_fixup; - bool has_ck_26m_sel; + u16 pwm_ck_26m_sel_reg; const unsigned int *reg_offset; }; @@ -64,6 +65,11 @@ static const unsigned int mtk_pwm_reg_offset_v2[] = { 0x0080, 0x00c0, 0x0100, 0x0140, 0x0180, 0x01c0, 0x0200, 0x0240 }; +/* PWM IP Version 3.0.2 */ +static const unsigned int mtk_pwm_reg_offset_v3[] = { + 0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700, 0x0800 +}; + static inline struct pwm_mediatek_chip * to_pwm_mediatek_chip(struct pwm_chip *chip) { @@ -136,8 +142,8 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, } /* Make sure we use the bus clock and not the 26MHz clock */ - if (pc->soc->has_ck_26m_sel) - writel(0, pc->regs + PWM_CK_26M_SEL); + if (pc->soc->pwm_ck_26m_sel_reg) + writel(0, pc->regs + pc->soc->pwm_ck_26m_sel_reg); /* Using resolution in picosecond gets accuracy higher */ resolution = (u64)NSEC_PER_SEC * 1000; @@ -294,90 +300,92 @@ static int pwm_mediatek_probe(struct platform_device *pdev) static const struct pwm_mediatek_of_data mt2712_pwm_data = { .num_pwms = 8, .pwm45_fixup = false, - .has_ck_26m_sel = false, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt6795_pwm_data = { .num_pwms = 7, .pwm45_fixup = false, - .has_ck_26m_sel = false, 
.reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt7622_pwm_data = { .num_pwms = 6, .pwm45_fixup = false, - .has_ck_26m_sel = true, + .pwm_ck_26m_sel_reg = PWM_CK_26M_SEL, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt7623_pwm_data = { .num_pwms = 5, .pwm45_fixup = true, - .has_ck_26m_sel = false, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt7628_pwm_data = { .num_pwms = 4, .pwm45_fixup = true, - .has_ck_26m_sel = false, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt7629_pwm_data = { .num_pwms = 1, .pwm45_fixup = false, - .has_ck_26m_sel = false, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt7981_pwm_data = { .num_pwms = 3, .pwm45_fixup = false, - .has_ck_26m_sel = true, + .pwm_ck_26m_sel_reg = PWM_CK_26M_SEL, .reg_offset = mtk_pwm_reg_offset_v2, }; static const struct pwm_mediatek_of_data mt7986_pwm_data = { .num_pwms = 2, .pwm45_fixup = false, - .has_ck_26m_sel = true, + .pwm_ck_26m_sel_reg = PWM_CK_26M_SEL, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt7988_pwm_data = { .num_pwms = 8, .pwm45_fixup = false, - .has_ck_26m_sel = false, .reg_offset = mtk_pwm_reg_offset_v2, }; static const struct pwm_mediatek_of_data mt8183_pwm_data = { .num_pwms = 4, .pwm45_fixup = false, - .has_ck_26m_sel = true, + .pwm_ck_26m_sel_reg = PWM_CK_26M_SEL, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt8365_pwm_data = { .num_pwms = 3, .pwm45_fixup = false, - .has_ck_26m_sel = true, + .pwm_ck_26m_sel_reg = PWM_CK_26M_SEL, .reg_offset = mtk_pwm_reg_offset_v1, }; static const struct pwm_mediatek_of_data mt8516_pwm_data = { .num_pwms = 5, .pwm45_fixup = false, - .has_ck_26m_sel = true, + .pwm_ck_26m_sel_reg = PWM_CK_26M_SEL, .reg_offset = mtk_pwm_reg_offset_v1, }; +static const struct pwm_mediatek_of_data mt6991_pwm_data = { + .num_pwms = 4, + .pwm45_fixup = false, + .pwm_ck_26m_sel_reg = PWM_CK_26M_SEL_V3, + .reg_offset = mtk_pwm_reg_offset_v3, +}; + static const struct of_device_id pwm_mediatek_of_match[] = { { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data }, { .compatible = "mediatek,mt6795-pwm", .data = &mt6795_pwm_data }, + { .compatible = "mediatek,mt6991-pwm", .data = &mt6991_pwm_data }, { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data }, { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data }, { .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data }, diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c index 12821b4bbf97..4ff32bb4c205 100644 --- a/drivers/pwm/pwm-microchip-core.c +++ b/drivers/pwm/pwm-microchip-core.c @@ -36,7 +36,6 @@ #include <linux/ktime.h> #include <linux/math.h> #include <linux/module.h> -#include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pwm.h> @@ -56,7 +55,6 @@ struct mchp_core_pwm_chip { struct clk *clk; void __iomem *base; - struct mutex lock; /* protects the shared period */ ktime_t update_timestamp; u32 sync_update_mask; u16 channel_enabled; @@ -360,17 +358,10 @@ static int mchp_core_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip); - int ret; - - mutex_lock(&mchp_core_pwm->lock); mchp_core_pwm_wait_for_sync_update(mchp_core_pwm, pwm->hwpwm); - ret = mchp_core_pwm_apply_locked(chip, pwm, state); - - 
mutex_unlock(&mchp_core_pwm->lock); - - return ret; + return mchp_core_pwm_apply_locked(chip, pwm, state); } static int mchp_core_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, @@ -381,8 +372,6 @@ static int mchp_core_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm u16 prescale, period_steps; u8 duty_steps, posedge, negedge; - mutex_lock(&mchp_core_pwm->lock); - mchp_core_pwm_wait_for_sync_update(mchp_core_pwm, pwm->hwpwm); if (mchp_core_pwm->channel_enabled & (1 << pwm->hwpwm)) @@ -415,8 +404,6 @@ static int mchp_core_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm posedge = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_POSEDGE(pwm->hwpwm)); negedge = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_NEGEDGE(pwm->hwpwm)); - mutex_unlock(&mchp_core_pwm->lock); - if (negedge == posedge) { state->duty_cycle = state->period; state->period *= 2; @@ -469,8 +456,6 @@ static int mchp_core_pwm_probe(struct platform_device *pdev) &mchp_core_pwm->sync_update_mask)) mchp_core_pwm->sync_update_mask = 0; - mutex_init(&mchp_core_pwm->lock); - chip->ops = &mchp_core_pwm_ops; mchp_core_pwm->channel_enabled = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_EN(0)); diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index 8a4a3d2df30d..0f5bdb0e395e 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -25,6 +25,7 @@ #include <linux/io.h> #include <linux/pwm.h> #include <linux/of.h> +#include <linux/reset.h> #include <asm/div64.h> @@ -161,6 +162,7 @@ static int pwm_probe(struct platform_device *pdev) struct pwm_chip *chip; struct pxa_pwm_chip *pc; struct device *dev = &pdev->dev; + struct reset_control *rst; int ret = 0; if (IS_ENABLED(CONFIG_OF) && id == NULL) @@ -179,6 +181,10 @@ static int pwm_probe(struct platform_device *pdev) if (IS_ERR(pc->clk)) return dev_err_probe(dev, PTR_ERR(pc->clk), "Failed to get clock\n"); + rst = devm_reset_control_get_optional_exclusive_deasserted(dev, NULL); + if (IS_ERR(rst)) + return PTR_ERR(rst); + chip->ops = &pxa_pwm_ops; if (IS_ENABLED(CONFIG_OF)) diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index c5f50e5eaf41..67b85bdb491b 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -8,6 +8,8 @@ #include <linux/clk.h> #include <linux/io.h> +#include <linux/limits.h> +#include <linux/math64.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -61,6 +63,7 @@ static int rockchip_pwm_get_state(struct pwm_chip *chip, struct pwm_state *state) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + u64 prescaled_ns = (u64)pc->data->prescaler * NSEC_PER_SEC; u32 enable_conf = pc->data->enable_conf; unsigned long clk_rate; u64 tmp; @@ -78,12 +81,12 @@ static int rockchip_pwm_get_state(struct pwm_chip *chip, clk_rate = clk_get_rate(pc->clk); tmp = readl_relaxed(pc->base + pc->data->regs.period); - tmp *= pc->data->prescaler * NSEC_PER_SEC; - state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); + tmp *= prescaled_ns; + state->period = DIV_U64_ROUND_UP(tmp, clk_rate); tmp = readl_relaxed(pc->base + pc->data->regs.duty); - tmp *= pc->data->prescaler * NSEC_PER_SEC; - state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); + tmp *= prescaled_ns; + state->duty_cycle = DIV_U64_ROUND_UP(tmp, clk_rate); val = readl_relaxed(pc->base + pc->data->regs.ctrl); state->enabled = (val & enable_conf) == enable_conf; @@ -103,8 +106,9 @@ static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct 
rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); - unsigned long period, duty; - u64 clk_rate, div; + u64 prescaled_ns = (u64)pc->data->prescaler * NSEC_PER_SEC; + u64 clk_rate, tmp; + u32 period_ticks, duty_ticks; u32 ctrl; clk_rate = clk_get_rate(pc->clk); @@ -114,12 +118,15 @@ static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * bits, every possible input period can be obtained using the * default prescaler value for all practical clock rate values. */ - div = clk_rate * state->period; - period = DIV_ROUND_CLOSEST_ULL(div, - pc->data->prescaler * NSEC_PER_SEC); + tmp = mul_u64_u64_div_u64(clk_rate, state->period, prescaled_ns); + if (tmp > U32_MAX) + tmp = U32_MAX; + period_ticks = tmp; - div = clk_rate * state->duty_cycle; - duty = DIV_ROUND_CLOSEST_ULL(div, pc->data->prescaler * NSEC_PER_SEC); + tmp = mul_u64_u64_div_u64(clk_rate, state->duty_cycle, prescaled_ns); + if (tmp > U32_MAX) + tmp = U32_MAX; + duty_ticks = tmp; /* * Lock the period and duty of previous configuration, then @@ -131,8 +138,8 @@ static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, writel_relaxed(ctrl, pc->base + pc->data->regs.ctrl); } - writel(period, pc->base + pc->data->regs.period); - writel(duty, pc->base + pc->data->regs.duty); + writel(period_ticks, pc->base + pc->data->regs.period); + writel(duty_ticks, pc->base + pc->data->regs.duty); if (pc->data->supports_polarity) { ctrl &= ~PWM_POLARITY_MASK; diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index d5b647e6be78..4a07315b0744 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -4,11 +4,28 @@ * For SiFive's PWM IP block documentation please refer Chapter 14 of * Reference Manual : https://static.dev.sifive.com/FU540-C000-v1.0.pdf * + * PWM output inversion: According to the SiFive Reference manual + * the output of each comparator is high whenever the value of pwms is + * greater than or equal to the corresponding pwmcmpX[Reference Manual]. + * + * Figure 29 in the same manual shows that the pwmcmpXcenter bit is + * hard-tied to 0 (XNOR), which effectively inverts the comparison so that + * the output goes HIGH when `pwms < pwmcmpX`. + * + * In other words, each pwmcmp register actually defines the **inactive** + * (low) period of the pulse, not the active time exactly opposite to what + * the documentation text implies. + * + * To compensate, this driver always **inverts** the duty value when reading + * or writing pwmcmp registers , so that users interact with a conventional + * **active-high** PWM interface. + * + * * Limitations: * - When changing both duty cycle and period, we cannot prevent in * software that the output might produce a period with mixed * settings (new period length and old duty cycle). - * - The hardware cannot generate a 100% duty cycle. + * - The hardware cannot generate a 0% duty cycle. * - The hardware generates only inverted output. */ #include <linux/clk.h> @@ -101,7 +118,7 @@ static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata, /* As scale <= 15 the shift operation cannot overflow. 
*/ num = (unsigned long long)NSEC_PER_SEC << (PWM_SIFIVE_CMPWIDTH + scale); - ddata->real_period = div64_ul(num, rate); + ddata->real_period = DIV_ROUND_UP_ULL(num, rate); dev_dbg(ddata->parent, "New real_period = %u ns\n", ddata->real_period); } @@ -110,9 +127,14 @@ static int pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip); - u32 duty, val; + u32 duty, val, inactive; - duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm)); + inactive = readl(ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm)); + /* + * PWM hardware uses 'inactive' counts in pwmcmp, so invert to get actual duty. + * Here, 'inactive' is the low time and we compute duty as max_count - inactive. + */ + duty = (1U << PWM_SIFIVE_CMPWIDTH) - 1 - inactive; state->enabled = duty > 0; @@ -121,9 +143,9 @@ static int pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm, state->enabled = false; state->period = ddata->real_period; - state->duty_cycle = - (u64)duty * ddata->real_period >> PWM_SIFIVE_CMPWIDTH; - state->polarity = PWM_POLARITY_INVERSED; + state->duty_cycle = DIV_ROUND_UP_ULL((u64)duty * ddata->real_period, + (1U << PWM_SIFIVE_CMPWIDTH)); + state->polarity = PWM_POLARITY_NORMAL; return 0; } @@ -137,9 +159,10 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long long num; bool enabled; int ret = 0; - u32 frac; + u64 frac; + u32 inactive; - if (state->polarity != PWM_POLARITY_INVERSED) + if (state->polarity != PWM_POLARITY_NORMAL) return -EINVAL; cur_state = pwm->state; @@ -156,9 +179,12 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, * consecutively */ num = (u64)duty_cycle * (1U << PWM_SIFIVE_CMPWIDTH); - frac = DIV64_U64_ROUND_CLOSEST(num, state->period); - /* The hardware cannot generate a 100% duty cycle */ - frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1); + frac = num; + do_div(frac, state->period); + /* The hardware cannot generate a 0% duty cycle */ + frac = min(frac, (u64)(1U << PWM_SIFIVE_CMPWIDTH) - 1); + /* pwmcmp register must be loaded with the inactive(invert the duty) */ + inactive = (1U << PWM_SIFIVE_CMPWIDTH) - 1 - frac; mutex_lock(&ddata->lock); if (state->period != ddata->approx_period) { @@ -190,7 +216,7 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, } } - writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm)); + writel(inactive, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm)); if (!state->enabled) clk_disable(ddata->clk); diff --git a/drivers/pwm/pwm-sophgo-sg2042.c b/drivers/pwm/pwm-sophgo-sg2042.c index ff4639d849ce..7d07b0ca7d29 100644 --- a/drivers/pwm/pwm-sophgo-sg2042.c +++ b/drivers/pwm/pwm-sophgo-sg2042.c @@ -13,6 +13,7 @@ * the running period. * - When PERIOD and HLPERIOD is set to 0, the PWM wave output will * be stopped and the output is pulled to high. + * - SG2044 supports both polarities, SG2042 only normal polarity. * See the datasheet [1] for more details. 
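Since the whole point of the pwm-sifive change above is the duty/inactive inversion, here is that conversion isolated as a small sketch; the 16-bit value assumed for PWM_SIFIVE_CMPWIDTH mirrors the driver's existing define, which is not visible in this hunk.

#include <linux/types.h>

/* Assumed to mirror the driver's compare-register width. */
#define PWM_SIFIVE_CMPWIDTH	16

/* Active ticks wanted by the PWM core -> value programmed into pwmcmp. */
static inline u32 pwm_sifive_duty_to_cmp(u32 duty_ticks)
{
	return ((1U << PWM_SIFIVE_CMPWIDTH) - 1) - duty_ticks;
}

/* Value read back from pwmcmp -> active ticks reported to the PWM core. */
static inline u32 pwm_sifive_cmp_to_duty(u32 cmp)
{
	return ((1U << PWM_SIFIVE_CMPWIDTH) - 1) - cmp;
}

/*
 * Example: a 25 % duty on the 16-bit counter is 0x4000 active ticks, so
 * pwmcmp is written with 0xffff - 0x4000 = 0xbfff, and reading 0xbfff
 * back yields 0x4000 again, which is what the new get_state() reports.
 */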
* [1]:https://github.com/sophgo/sophgo-doc/tree/main/SG2042/TRM */ @@ -41,6 +42,10 @@ #define SG2042_PWM_HLPERIOD(chan) ((chan) * 8 + 0) #define SG2042_PWM_PERIOD(chan) ((chan) * 8 + 4) +#define SG2044_PWM_POLARITY 0x40 +#define SG2044_PWM_PWMSTART 0x44 +#define SG2044_PWM_OE 0xd0 + #define SG2042_PWM_CHANNELNUM 4 /** @@ -53,6 +58,10 @@ struct sg2042_pwm_ddata { unsigned long clk_rate_hz; }; +struct sg2042_chip_data { + const struct pwm_ops ops; +}; + /* * period_ticks: PERIOD * hlperiod_ticks: HLPERIOD @@ -66,21 +75,13 @@ static void pwm_sg2042_config(struct sg2042_pwm_ddata *ddata, unsigned int chan, writel(hlperiod_ticks, base + SG2042_PWM_HLPERIOD(chan)); } -static int pwm_sg2042_apply(struct pwm_chip *chip, struct pwm_device *pwm, - const struct pwm_state *state) +static void pwm_sg2042_set_dutycycle(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) { struct sg2042_pwm_ddata *ddata = pwmchip_get_drvdata(chip); u32 hlperiod_ticks; u32 period_ticks; - if (state->polarity == PWM_POLARITY_INVERSED) - return -EINVAL; - - if (!state->enabled) { - pwm_sg2042_config(ddata, pwm->hwpwm, 0, 0); - return 0; - } - /* * Duration of High level (duty_cycle) = HLPERIOD x Period_of_input_clk * Duration of One Cycle (period) = PERIOD x Period_of_input_clk @@ -88,10 +89,26 @@ static int pwm_sg2042_apply(struct pwm_chip *chip, struct pwm_device *pwm, period_ticks = min(mul_u64_u64_div_u64(ddata->clk_rate_hz, state->period, NSEC_PER_SEC), U32_MAX); hlperiod_ticks = min(mul_u64_u64_div_u64(ddata->clk_rate_hz, state->duty_cycle, NSEC_PER_SEC), U32_MAX); - dev_dbg(pwmchip_parent(chip), "chan[%u]: PERIOD=%u, HLPERIOD=%u\n", - pwm->hwpwm, period_ticks, hlperiod_ticks); + dev_dbg(pwmchip_parent(chip), "chan[%u]: ENABLE=%u, PERIOD=%u, HLPERIOD=%u, POLARITY=%u\n", + pwm->hwpwm, state->enabled, period_ticks, hlperiod_ticks, state->polarity); pwm_sg2042_config(ddata, pwm->hwpwm, period_ticks, hlperiod_ticks); +} + +static int pwm_sg2042_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct sg2042_pwm_ddata *ddata = pwmchip_get_drvdata(chip); + + if (state->polarity == PWM_POLARITY_INVERSED) + return -EINVAL; + + if (!state->enabled) { + pwm_sg2042_config(ddata, pwm->hwpwm, 0, 0); + return 0; + } + + pwm_sg2042_set_dutycycle(chip, pwm, state); return 0; } @@ -123,13 +140,97 @@ static int pwm_sg2042_get_state(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } -static const struct pwm_ops pwm_sg2042_ops = { - .apply = pwm_sg2042_apply, - .get_state = pwm_sg2042_get_state, +static void pwm_sg2044_set_outputen(struct sg2042_pwm_ddata *ddata, struct pwm_device *pwm, + bool enabled) +{ + u32 pwmstart; + + pwmstart = readl(ddata->base + SG2044_PWM_PWMSTART); + + if (enabled) + pwmstart |= BIT(pwm->hwpwm); + else + pwmstart &= ~BIT(pwm->hwpwm); + + writel(pwmstart, ddata->base + SG2044_PWM_PWMSTART); +} + +static void pwm_sg2044_set_outputdir(struct sg2042_pwm_ddata *ddata, struct pwm_device *pwm, + bool enabled) +{ + u32 pwm_oe; + + pwm_oe = readl(ddata->base + SG2044_PWM_OE); + + if (enabled) + pwm_oe |= BIT(pwm->hwpwm); + else + pwm_oe &= ~BIT(pwm->hwpwm); + + writel(pwm_oe, ddata->base + SG2044_PWM_OE); +} + +static void pwm_sg2044_set_polarity(struct sg2042_pwm_ddata *ddata, struct pwm_device *pwm, + const struct pwm_state *state) +{ + u32 pwm_polarity; + + pwm_polarity = readl(ddata->base + SG2044_PWM_POLARITY); + + if (state->polarity == PWM_POLARITY_NORMAL) + pwm_polarity &= ~BIT(pwm->hwpwm); + else + pwm_polarity |= BIT(pwm->hwpwm); + 
+ writel(pwm_polarity, ddata->base + SG2044_PWM_POLARITY); +} + +static int pwm_sg2044_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct sg2042_pwm_ddata *ddata = pwmchip_get_drvdata(chip); + + pwm_sg2044_set_polarity(ddata, pwm, state); + + pwm_sg2042_set_dutycycle(chip, pwm, state); + + /* + * re-enable PWMSTART to refresh the register period + */ + pwm_sg2044_set_outputen(ddata, pwm, false); + + if (!state->enabled) + return 0; + + pwm_sg2044_set_outputdir(ddata, pwm, true); + pwm_sg2044_set_outputen(ddata, pwm, true); + + return 0; +} + +static const struct sg2042_chip_data sg2042_chip_data = { + .ops = { + .apply = pwm_sg2042_apply, + .get_state = pwm_sg2042_get_state, + }, +}; + +static const struct sg2042_chip_data sg2044_chip_data = { + .ops = { + .apply = pwm_sg2044_apply, + .get_state = pwm_sg2042_get_state, + }, }; static const struct of_device_id sg2042_pwm_ids[] = { - { .compatible = "sophgo,sg2042-pwm" }, + { + .compatible = "sophgo,sg2042-pwm", + .data = &sg2042_chip_data + }, + { + .compatible = "sophgo,sg2044-pwm", + .data = &sg2044_chip_data + }, { } }; MODULE_DEVICE_TABLE(of, sg2042_pwm_ids); @@ -137,12 +238,17 @@ MODULE_DEVICE_TABLE(of, sg2042_pwm_ids); static int pwm_sg2042_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct sg2042_chip_data *chip_data; struct sg2042_pwm_ddata *ddata; struct reset_control *rst; struct pwm_chip *chip; struct clk *clk; int ret; + chip_data = device_get_match_data(dev); + if (!chip_data) + return -ENODEV; + chip = devm_pwmchip_alloc(dev, SG2042_PWM_CHANNELNUM, sizeof(*ddata)); if (IS_ERR(chip)) return PTR_ERR(chip); @@ -170,7 +276,7 @@ static int pwm_sg2042_probe(struct platform_device *pdev) if (IS_ERR(rst)) return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset\n"); - chip->ops = &pwm_sg2042_ops; + chip->ops = &chip_data->ops; chip->atomic = true; ret = devm_pwmchip_add(dev, chip); @@ -190,5 +296,6 @@ static struct platform_driver pwm_sg2042_driver = { module_platform_driver(pwm_sg2042_driver); MODULE_AUTHOR("Chen Wang"); +MODULE_AUTHOR("Longbin Li <looong.bin@gmail.com>"); MODULE_DESCRIPTION("Sophgo SG2042 PWM driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index 396b52672ce0..3b702b8f0c7f 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -92,7 +92,6 @@ struct sti_pwm_chip { struct pwm_device *cur; unsigned long configured; unsigned int en_count; - struct mutex sti_pwm_lock; /* To sync between enable/disable calls */ void __iomem *mmio; }; @@ -244,55 +243,46 @@ static int sti_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct sti_pwm_chip *pc = to_sti_pwmchip(chip); struct device *dev = pc->dev; - int ret = 0; + int ret; /* * Since we have a common enable for all PWM devices, do not enable if * already enabled. 
*/ - mutex_lock(&pc->sti_pwm_lock); if (!pc->en_count) { ret = clk_enable(pc->pwm_clk); if (ret) - goto out; + return ret; ret = clk_enable(pc->cpt_clk); if (ret) - goto out; + return ret; ret = regmap_field_write(pc->pwm_out_en, 1); if (ret) { dev_err(dev, "failed to enable PWM device %u: %d\n", pwm->hwpwm, ret); - goto out; + return ret; } } pc->en_count++; -out: - mutex_unlock(&pc->sti_pwm_lock); - return ret; + return 0; } static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct sti_pwm_chip *pc = to_sti_pwmchip(chip); - mutex_lock(&pc->sti_pwm_lock); - - if (--pc->en_count) { - mutex_unlock(&pc->sti_pwm_lock); + if (--pc->en_count) return; - } regmap_field_write(pc->pwm_out_en, 0); clk_disable(pc->pwm_clk); clk_disable(pc->cpt_clk); - - mutex_unlock(&pc->sti_pwm_lock); } static void sti_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) @@ -594,7 +584,6 @@ static int sti_pwm_probe(struct platform_device *pdev) pc->dev = dev; pc->en_count = 0; - mutex_init(&pc->sti_pwm_lock); ret = sti_pwm_probe_regmap(pc); if (ret) diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index 4b148f0afeb9..2594fb771b04 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -19,6 +19,7 @@ #define CCMR_CHANNEL_SHIFT 8 #define CCMR_CHANNEL_MASK 0xFF #define MAX_BREAKINPUT 2 +#define STM32_MAX_PWM_OUTPUT 4 struct stm32_breakinput { u32 index; @@ -768,10 +769,19 @@ static int stm32_pwm_probe_breakinputs(struct stm32_pwm *priv, return stm32_pwm_apply_breakinputs(priv); } -static void stm32_pwm_detect_complementary(struct stm32_pwm *priv) +static void stm32_pwm_detect_complementary(struct stm32_pwm *priv, struct stm32_timers *ddata) { u32 ccer; + if (ddata->ipidr) { + u32 val; + + /* Simply read from HWCFGR the number of complementary outputs (MP25). */ + regmap_read(priv->regmap, TIM_HWCFGR1, &val); + priv->have_complementary_output = !!FIELD_GET(TIM_HWCFGR1_NB_OF_DT, val); + return; + } + /* * If complementary bit doesn't exist writing 1 will have no * effect so we can detect it. @@ -783,22 +793,39 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv) priv->have_complementary_output = (ccer != 0); } -static unsigned int stm32_pwm_detect_channels(struct regmap *regmap, +static unsigned int stm32_pwm_detect_channels(struct stm32_timers *ddata, unsigned int *num_enabled) { + struct regmap *regmap = ddata->regmap; u32 ccer, ccer_backup; + regmap_read(regmap, TIM_CCER, &ccer_backup); + *num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE); + + if (ddata->ipidr) { + u32 hwcfgr; + unsigned int npwm; + + /* Deduce from HWCFGR the number of outputs (MP25). */ + regmap_read(regmap, TIM_HWCFGR1, &hwcfgr); + + /* + * Timers may have more capture/compare channels than the + * actual number of PWM channel outputs (e.g. TIM_CH[1..4]). + */ + npwm = FIELD_GET(TIM_HWCFGR1_NB_OF_CC, hwcfgr); + + return npwm < STM32_MAX_PWM_OUTPUT ? npwm : STM32_MAX_PWM_OUTPUT; + } + /* * If channels enable bits don't exist writing 1 will have no * effect so we can detect and count them. 
*/ - regmap_read(regmap, TIM_CCER, &ccer_backup); regmap_set_bits(regmap, TIM_CCER, TIM_CCER_CCXE); regmap_read(regmap, TIM_CCER, &ccer); regmap_write(regmap, TIM_CCER, ccer_backup); - *num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE); - return hweight32(ccer & TIM_CCER_CCXE); } @@ -813,7 +840,7 @@ static int stm32_pwm_probe(struct platform_device *pdev) unsigned int i; int ret; - npwm = stm32_pwm_detect_channels(ddata->regmap, &num_enabled); + npwm = stm32_pwm_detect_channels(ddata, &num_enabled); chip = devm_pwmchip_alloc(dev, npwm, sizeof(*priv)); if (IS_ERR(chip)) @@ -834,7 +861,7 @@ static int stm32_pwm_probe(struct platform_device *pdev) return dev_err_probe(dev, ret, "Failed to configure breakinputs\n"); - stm32_pwm_detect_complementary(priv); + stm32_pwm_detect_complementary(priv, ddata); ret = devm_clk_rate_exclusive_get(dev, priv->clk); if (ret) @@ -907,6 +934,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stm32_pwm_pm_ops, stm32_pwm_suspend, stm32_pwm_r static const struct of_device_id stm32_pwm_of_match[] = { { .compatible = "st,stm32-pwm", }, + { .compatible = "st,stm32mp25-pwm", }, { /* end node */ }, }; MODULE_DEVICE_TABLE(of, stm32_pwm_of_match); diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index e60dc7d6b591..6c5591ca868b 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -21,7 +21,6 @@ #include <linux/pwm.h> #include <linux/reset.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/time.h> #define PWM_CTRL_REG 0x0 @@ -85,7 +84,6 @@ struct sun4i_pwm_chip { struct clk *clk; struct reset_control *rst; void __iomem *base; - spinlock_t ctrl_lock; const struct sun4i_pwm_data *data; }; @@ -258,7 +256,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } - spin_lock(&sun4ichip->ctrl_lock); ctrl = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG); if (sun4ichip->data->has_direct_mod_clk_output) { @@ -266,7 +263,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, ctrl |= BIT_CH(PWM_BYPASS, pwm->hwpwm); /* We can skip other parameter */ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG); - spin_unlock(&sun4ichip->ctrl_lock); return 0; } @@ -297,8 +293,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG); - spin_unlock(&sun4ichip->ctrl_lock); - if (state->enabled) return 0; @@ -309,12 +303,10 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, else usleep_range(delay_us, delay_us * 2); - spin_lock(&sun4ichip->ctrl_lock); ctrl = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG); ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm); sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG); - spin_unlock(&sun4ichip->ctrl_lock); clk_disable_unprepare(sun4ichip->clk); @@ -456,8 +448,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev) chip->ops = &sun4i_pwm_ops; - spin_lock_init(&sun4ichip->ctrl_lock); - ret = pwmchip_add(chip); if (ret < 0) { dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index 4b10a8dab312..a555cc3be4b3 100644 --- a/drivers/pwm/pwm-twl-led.c +++ b/drivers/pwm/pwm-twl-led.c @@ -61,10 +61,6 @@ #define TWL6040_LED_MODE_OFF 0x02 #define TWL6040_LED_MODE_MASK 0x03 -struct twl_pwmled_chip { - struct mutex mutex; -}; - static inline struct twl_pwmled_chip *to_twl(struct pwm_chip *chip) { return pwmchip_get_drvdata(chip); @@ -106,15 +102,13 @@ static int twl4030_pwmled_config(struct 
pwm_chip *chip, struct pwm_device *pwm, static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm) { - struct twl_pwmled_chip *twl = to_twl(chip); int ret; u8 val; - mutex_lock(&twl->mutex); ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG); if (ret < 0) { dev_err(pwmchip_parent(chip), "%s: Failed to read LEDEN\n", pwm->label); - goto out; + return ret; } val |= TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS); @@ -123,23 +117,19 @@ static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm) if (ret < 0) dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label); -out: - mutex_unlock(&twl->mutex); return ret; } static void twl4030_pwmled_disable(struct pwm_chip *chip, struct pwm_device *pwm) { - struct twl_pwmled_chip *twl = to_twl(chip); int ret; u8 val; - mutex_lock(&twl->mutex); ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG); if (ret < 0) { dev_err(pwmchip_parent(chip), "%s: Failed to read LEDEN\n", pwm->label); - goto out; + return; } val &= ~TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS); @@ -147,9 +137,6 @@ static void twl4030_pwmled_disable(struct pwm_chip *chip, ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG); if (ret < 0) dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label); - -out: - mutex_unlock(&twl->mutex); } static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm, @@ -209,16 +196,14 @@ static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm, static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm) { - struct twl_pwmled_chip *twl = to_twl(chip); int ret; u8 val; - mutex_lock(&twl->mutex); ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2); if (ret < 0) { dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n", pwm->label); - goto out; + return ret; } val &= ~TWL6040_LED_MODE_MASK; @@ -228,24 +213,20 @@ static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm) if (ret < 0) dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label); -out: - mutex_unlock(&twl->mutex); return ret; } static void twl6030_pwmled_disable(struct pwm_chip *chip, struct pwm_device *pwm) { - struct twl_pwmled_chip *twl = to_twl(chip); int ret; u8 val; - mutex_lock(&twl->mutex); ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2); if (ret < 0) { dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n", pwm->label); - goto out; + return; } val &= ~TWL6040_LED_MODE_MASK; @@ -254,9 +235,6 @@ static void twl6030_pwmled_disable(struct pwm_chip *chip, ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2); if (ret < 0) dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label); - -out: - mutex_unlock(&twl->mutex); } static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm, @@ -287,16 +265,14 @@ static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm, static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm) { - struct twl_pwmled_chip *twl = to_twl(chip); int ret; u8 val; - mutex_lock(&twl->mutex); ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2); if (ret < 0) { dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n", pwm->label); - goto out; + return ret; } val &= ~TWL6040_LED_MODE_MASK; @@ -306,23 +282,19 @@ static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm) if (ret < 0) 
dev_err(pwmchip_parent(chip), "%s: Failed to request PWM\n", pwm->label); -out: - mutex_unlock(&twl->mutex); return ret; } static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm) { - struct twl_pwmled_chip *twl = to_twl(chip); int ret; u8 val; - mutex_lock(&twl->mutex); ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2); if (ret < 0) { dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n", pwm->label); - goto out; + return; } val &= ~TWL6040_LED_MODE_MASK; @@ -331,9 +303,6 @@ static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm) ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2); if (ret < 0) dev_err(pwmchip_parent(chip), "%s: Failed to free PWM\n", pwm->label); - -out: - mutex_unlock(&twl->mutex); } static const struct pwm_ops twl6030_pwmled_ops = { @@ -345,7 +314,6 @@ static const struct pwm_ops twl6030_pwmled_ops = { static int twl_pwmled_probe(struct platform_device *pdev) { struct pwm_chip *chip; - struct twl_pwmled_chip *twl; unsigned int npwm; const struct pwm_ops *ops; @@ -357,15 +325,12 @@ static int twl_pwmled_probe(struct platform_device *pdev) npwm = 1; } - chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*twl)); + chip = devm_pwmchip_alloc(&pdev->dev, npwm, 0); if (IS_ERR(chip)) return PTR_ERR(chip); - twl = to_twl(chip); chip->ops = ops; - mutex_init(&twl->mutex); - return devm_pwmchip_add(&pdev->dev, chip); } diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 6d8988387da4..eaa6df1c9f80 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -1153,6 +1153,17 @@ config REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY touchscreen unit. The regulator is used to enable power to the TC358762, display and to control backlight. +config REGULATOR_RASPBERRYPI_TOUCHSCREEN_V2 + tristate "Raspberry Pi 7-inch touchscreen panel V2 regulator" + depends on GPIOLIB + depends on I2C && OF + select GPIO_REGMAP + select REGMAP_I2C + help + This driver supports regulator on the V2 Raspberry Pi touchscreen + unit. The regulator is used to enable power to the display and to + control backlight PWM. + config REGULATOR_RC5T583 tristate "RICOH RC5T583 Power regulators" depends on MFD_RC5T583 @@ -1192,7 +1203,7 @@ config REGULATOR_RT4801 The device supports two regulators (DSVP/DSVN). config REGULATOR_RT4803 - tristate "Richtek RT4803 boost regualtor" + tristate "Richtek RT4803 boost regulator" depends on I2C select REGMAP_I2C help diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index c0bc7a0f4e67..be98b29d6675 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -136,6 +136,7 @@ obj-$(CONFIG_REGULATOR_PBIAS) += pbias-regulator.o obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o obj-$(CONFIG_REGULATOR_RAA215300) += raa215300.o obj-$(CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY) += rpi-panel-attiny-regulator.o +obj-$(CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_V2) += rpi-panel-v2-regulator.o obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 1bb048de3ecd..e803cc59d68a 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -134,9 +134,19 @@ static void voltage_change_done(struct regulator_dev *rdev, unsigned int sel, if (*mask) { /* - * Let's allow scheduling as we use I2C anyways. 
We just need to - * guarantee minimum of 1ms sleep - it shouldn't matter if we - * exceed it due to the scheduling. + * We had fault detection disabled for the duration of the + * voltage change. + * + * According to HW colleagues the maximum time it takes is + * 1000us. I assume that on systems with light load this + * might be less - and we could probably use DT to give + * system specific delay value if performance matters. + * + * Well, knowing we use I2C here and can add scheduling delays + * I don't think it is worth the hassle and I just add fixed + * 1ms sleep here (and allow scheduling). If this turns out to + * be a problem we can change it to delay and make the delay + * time configurable. */ msleep(1); @@ -173,16 +183,7 @@ static int voltage_change_prepare(struct regulator_dev *rdev, unsigned int sel, /* * If we increase LDO voltage when LDO is enabled we need to * disable the power-good detection until voltage has reached - * the new level. According to HW colleagues the maximum time - * it takes is 1000us. I assume that on systems with light load - * this might be less - and we could probably use DT to give - * system specific delay value if performance matters. - * - * Well, knowing we use I2C here and can add scheduling delays - * I don't think it is worth the hassle and I just add fixed - * 1ms sleep here (and allow scheduling). If this turns out to - * be a problem we can change it to delay and make the delay - * time configurable. + * the new level. */ if (new > now) { int tmp; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index cbd6d53ebfb5..8ed9b96518cf 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3797,6 +3797,16 @@ static int _regulator_do_set_suspend_voltage(struct regulator_dev *rdev, return 0; } +static int regulator_get_voltage_delta(struct regulator_dev *rdev, int uV) +{ + int current_uV = regulator_get_voltage_rdev(rdev); + + if (current_uV < 0) + return current_uV; + + return abs(current_uV - uV); +} + static int regulator_set_voltage_unlocked(struct regulator *regulator, int min_uV, int max_uV, suspend_state_t state) @@ -3804,8 +3814,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator, struct regulator_dev *rdev = regulator->rdev; struct regulator_voltage *voltage = ®ulator->voltage[state]; int ret = 0; + int current_uV, delta, new_delta; int old_min_uV, old_max_uV; - int current_uV; /* If we're setting the same range as last time the change * should be a noop (some cpufreq implementations use the same @@ -3852,6 +3862,37 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator, voltage->max_uV = old_max_uV; } + if (rdev->constraints->max_uV_step > 0) { + /* For regulators with a maximum voltage step, reaching the desired + * voltage might take a few retries. 
+ */ + ret = regulator_get_voltage_delta(rdev, min_uV); + if (ret < 0) + goto out; + + delta = ret; + + while (delta > 0) { + ret = regulator_balance_voltage(rdev, state); + if (ret < 0) + goto out; + + ret = regulator_get_voltage_delta(rdev, min_uV); + if (ret < 0) + goto out; + + new_delta = ret; + + /* check that voltage is converging quickly enough */ + if (new_delta - delta > rdev->constraints->max_uV_step) { + ret = -EWOULDBLOCK; + goto out; + } + + delta = new_delta; + } + } + out: return ret; } diff --git a/drivers/regulator/mt6370-regulator.c b/drivers/regulator/mt6370-regulator.c index 27cb32b726e0..c2cea904b0ca 100644 --- a/drivers/regulator/mt6370-regulator.c +++ b/drivers/regulator/mt6370-regulator.c @@ -320,7 +320,7 @@ static int mt6370_regulator_irq_register(struct mt6370_priv *priv) return 0; } -static int mt6370_regualtor_register(struct mt6370_priv *priv) +static int mt6370_regulator_register(struct mt6370_priv *priv) { struct regulator_dev *rdev; struct regulator_config cfg = {}; @@ -363,7 +363,7 @@ static int mt6370_regulator_probe(struct platform_device *pdev) return -ENODEV; } - ret = mt6370_regualtor_register(priv); + ret = mt6370_regulator_register(priv); if (ret) return ret; diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c index f5662c569464..c0c9a6751c26 100644 --- a/drivers/regulator/mtk-dvfsrc-regulator.c +++ b/drivers/regulator/mtk-dvfsrc-regulator.c @@ -117,6 +117,24 @@ static const struct dvfsrc_regulator_pdata mt6873_data = { .size = ARRAY_SIZE(mt6873_regulators), }; +static const unsigned int mt6893_voltages[] = { + 575000, + 600000, + 650000, + 725000, + 750000, +}; + +static const struct regulator_desc mt6893_regulators[] = { + MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt6893_voltages), + MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt6893_voltages), +}; + +static const struct dvfsrc_regulator_pdata mt6893_data = { + .descs = mt6893_regulators, + .size = ARRAY_SIZE(mt6893_regulators), +}; + static const unsigned int mt8183_voltages[] = { 725000, 800000, @@ -148,6 +166,24 @@ static const struct dvfsrc_regulator_pdata mt8195_data = { .size = ARRAY_SIZE(mt8195_regulators), }; +static const unsigned int mt8196_voltages[] = { + 575000, + 600000, + 650000, + 725000, + 825000, + 875000, +}; + +static const struct regulator_desc mt8196_regulators[] = { + MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8196_voltages), +}; + +static const struct dvfsrc_regulator_pdata mt8196_data = { + .descs = mt8196_regulators, + .size = ARRAY_SIZE(mt8196_regulators), +}; + static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev) { struct regulator_config config = { .dev = &pdev->dev }; @@ -173,9 +209,11 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev) static const struct of_device_id mtk_dvfsrc_regulator_match[] = { { .compatible = "mediatek,mt6873-dvfsrc-regulator", .data = &mt6873_data }, + { .compatible = "mediatek,mt6893-dvfsrc-regulator", .data = &mt6893_data }, { .compatible = "mediatek,mt8183-dvfsrc-regulator", .data = &mt8183_data }, { .compatible = "mediatek,mt8192-dvfsrc-regulator", .data = &mt6873_data }, { .compatible = "mediatek,mt8195-dvfsrc-regulator", .data = &mt8195_data }, + { .compatible = "mediatek,mt8196-dvfsrc-regulator", .data = &mt8196_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mtk_dvfsrc_regulator_match); diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index 14d19a6d6655..feadb21a8f30 100644 --- a/drivers/regulator/pca9450-regulator.c 
+++ b/drivers/regulator/pca9450-regulator.c @@ -17,12 +17,18 @@ #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/pca9450.h> +#include <dt-bindings/regulator/nxp,pca9450-regulator.h> + +static unsigned int pca9450_buck_get_mode(struct regulator_dev *rdev); +static int pca9450_buck_set_mode(struct regulator_dev *rdev, unsigned int mode); struct pc9450_dvs_config { unsigned int run_reg; /* dvs0 */ unsigned int run_mask; unsigned int standby_reg; /* dvs1 */ unsigned int standby_mask; + unsigned int mode_reg; /* ctrl */ + unsigned int mode_mask; }; struct pca9450_regulator_desc { @@ -80,6 +86,8 @@ static const struct regulator_ops pca9450_dvs_buck_regulator_ops = { .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = regulator_set_voltage_time_sel, .set_ramp_delay = regulator_set_ramp_delay_regmap, + .set_mode = pca9450_buck_set_mode, + .get_mode = pca9450_buck_get_mode, }; static const struct regulator_ops pca9450_buck_regulator_ops = { @@ -90,6 +98,8 @@ static const struct regulator_ops pca9450_buck_regulator_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_mode = pca9450_buck_set_mode, + .get_mode = pca9450_buck_get_mode, }; static const struct regulator_ops pca9450_ldo_regulator_ops = { @@ -285,7 +295,64 @@ static int pca9450_set_dvs_levels(struct device_node *np, return ret; } -static const struct pca9450_regulator_desc pca9450a_regulators[] = { +static inline unsigned int pca9450_map_mode(unsigned int mode) +{ + switch (mode) { + case PCA9450_BUCK_MODE_AUTO: + return REGULATOR_MODE_NORMAL; + case PCA9450_BUCK_MODE_FORCE_PWM: + return REGULATOR_MODE_FAST; + default: + return REGULATOR_MODE_INVALID; + } +} + +static int pca9450_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) +{ + struct pca9450_regulator_desc *desc = container_of(rdev->desc, + struct pca9450_regulator_desc, desc); + const struct pc9450_dvs_config *dvs = &desc->dvs; + int val; + + switch (mode) { + case REGULATOR_MODE_FAST: + val = dvs->mode_mask; + break; + case REGULATOR_MODE_NORMAL: + val = 0; + break; + default: + return -EINVAL; + } + + dev_dbg(&rdev->dev, "pca9450 buck set_mode %#x, %#x, %#x\n", + dvs->mode_reg, dvs->mode_mask, val); + + return regmap_update_bits(rdev->regmap, dvs->mode_reg, + dvs->mode_mask, val); +} + +static unsigned int pca9450_buck_get_mode(struct regulator_dev *rdev) +{ + struct pca9450_regulator_desc *desc = container_of(rdev->desc, + struct pca9450_regulator_desc, desc); + const struct pc9450_dvs_config *dvs = &desc->dvs; + int ret = 0, regval; + + ret = regmap_read(rdev->regmap, dvs->mode_reg, ®val); + if (ret != 0) { + dev_err(&rdev->dev, + "Failed to get pca9450 buck mode: %d\n", ret); + return ret; + } + + if ((regval & dvs->mode_mask) == dvs->mode_mask) + return REGULATOR_MODE_FAST; + + return REGULATOR_MODE_NORMAL; +} + +static struct pca9450_regulator_desc pca9450a_regulators[] = { { .desc = { .name = "buck1", @@ -308,12 +375,15 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, .of_parse_cb = pca9450_set_dvs_levels, + .of_map_mode = pca9450_map_mode, }, .dvs = { .run_reg = PCA9450_REG_BUCK1OUT_DVS0, .run_mask = BUCK1OUT_DVS0_MASK, .standby_reg = PCA9450_REG_BUCK1OUT_DVS1, .standby_mask = BUCK1OUT_DVS1_MASK, + .mode_reg = PCA9450_REG_BUCK1CTRL, + .mode_mask = BUCK1_FPWM, }, }, { @@ -338,12 
+408,15 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), .owner = THIS_MODULE, .of_parse_cb = pca9450_set_dvs_levels, + .of_map_mode = pca9450_map_mode, }, .dvs = { .run_reg = PCA9450_REG_BUCK2OUT_DVS0, .run_mask = BUCK2OUT_DVS0_MASK, .standby_reg = PCA9450_REG_BUCK2OUT_DVS1, .standby_mask = BUCK2OUT_DVS1_MASK, + .mode_reg = PCA9450_REG_BUCK2CTRL, + .mode_mask = BUCK2_FPWM, }, }, { @@ -368,12 +441,15 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), .owner = THIS_MODULE, .of_parse_cb = pca9450_set_dvs_levels, + .of_map_mode = pca9450_map_mode, }, .dvs = { .run_reg = PCA9450_REG_BUCK3OUT_DVS0, .run_mask = BUCK3OUT_DVS0_MASK, .standby_reg = PCA9450_REG_BUCK3OUT_DVS1, .standby_mask = BUCK3OUT_DVS1_MASK, + .mode_reg = PCA9450_REG_BUCK3CTRL, + .mode_mask = BUCK3_FPWM, }, }, { @@ -393,6 +469,11 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .enable_mask = BUCK4_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK4CTRL, + .mode_mask = BUCK4_FPWM, }, }, { @@ -412,6 +493,11 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .enable_mask = BUCK5_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK5CTRL, + .mode_mask = BUCK5_FPWM, }, }, { @@ -431,6 +517,11 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .enable_mask = BUCK6_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK6CTRL, + .mode_mask = BUCK6_FPWM, }, }, { @@ -529,7 +620,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { * Buck3 removed on PCA9450B and connected with Buck1 internal for dual phase * on PCA9450C as no Buck3. 
*/ -static const struct pca9450_regulator_desc pca9450bc_regulators[] = { +static struct pca9450_regulator_desc pca9450bc_regulators[] = { { .desc = { .name = "buck1", @@ -552,12 +643,15 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), .owner = THIS_MODULE, .of_parse_cb = pca9450_set_dvs_levels, + .of_map_mode = pca9450_map_mode, }, .dvs = { .run_reg = PCA9450_REG_BUCK1OUT_DVS0, .run_mask = BUCK1OUT_DVS0_MASK, .standby_reg = PCA9450_REG_BUCK1OUT_DVS1, .standby_mask = BUCK1OUT_DVS1_MASK, + .mode_reg = PCA9450_REG_BUCK1CTRL, + .mode_mask = BUCK1_FPWM, }, }, { @@ -582,12 +676,15 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), .owner = THIS_MODULE, .of_parse_cb = pca9450_set_dvs_levels, + .of_map_mode = pca9450_map_mode, }, .dvs = { .run_reg = PCA9450_REG_BUCK2OUT_DVS0, .run_mask = BUCK2OUT_DVS0_MASK, .standby_reg = PCA9450_REG_BUCK2OUT_DVS1, .standby_mask = BUCK2OUT_DVS1_MASK, + .mode_reg = PCA9450_REG_BUCK2CTRL, + .mode_mask = BUCK2_FPWM, }, }, { @@ -607,6 +704,11 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .enable_mask = BUCK4_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK4CTRL, + .mode_mask = BUCK4_FPWM, }, }, { @@ -626,6 +728,11 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .enable_mask = BUCK5_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK5CTRL, + .mode_mask = BUCK5_FPWM, }, }, { @@ -645,6 +752,11 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .enable_mask = BUCK6_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK6CTRL, + .mode_mask = BUCK6_FPWM, }, }, { @@ -739,7 +851,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { }, }; -static const struct pca9450_regulator_desc pca9451a_regulators[] = { +static struct pca9450_regulator_desc pca9451a_regulators[] = { { .desc = { .name = "buck1", @@ -761,12 +873,15 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = { .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), .owner = THIS_MODULE, .of_parse_cb = pca9450_set_dvs_levels, + .of_map_mode = pca9450_map_mode, }, .dvs = { .run_reg = PCA9450_REG_BUCK1OUT_DVS0, .run_mask = BUCK1OUT_DVS0_MASK, .standby_reg = PCA9450_REG_BUCK1OUT_DVS1, .standby_mask = BUCK1OUT_DVS1_MASK, + .mode_reg = PCA9450_REG_BUCK1CTRL, + .mode_mask = BUCK1_FPWM, }, }, { @@ -790,12 +905,15 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = { .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), .owner = THIS_MODULE, .of_parse_cb = pca9450_set_dvs_levels, + .of_map_mode = pca9450_map_mode, }, .dvs = { .run_reg = PCA9450_REG_BUCK2OUT_DVS0, .run_mask = BUCK2OUT_DVS0_MASK, .standby_reg = PCA9450_REG_BUCK2OUT_DVS1, .standby_mask = BUCK2OUT_DVS1_MASK, + .mode_reg = PCA9450_REG_BUCK2CTRL, + .mode_mask = BUCK2_FPWM, }, }, { @@ -815,6 +933,11 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = { .enable_mask = BUCK4_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK4CTRL, + .mode_mask = BUCK4_FPWM, }, }, { @@ 
-834,6 +957,11 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = { .enable_mask = BUCK5_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK5CTRL, + .mode_mask = BUCK5_FPWM, }, }, { @@ -853,6 +981,11 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = { .enable_mask = BUCK6_ENMODE_MASK, .enable_val = BUCK_ENMODE_ONREQ, .owner = THIS_MODULE, + .of_map_mode = pca9450_map_mode, + }, + .dvs = { + .mode_reg = PCA9450_REG_BUCK6CTRL, + .mode_mask = BUCK6_FPWM, }, }, { @@ -990,7 +1123,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c) { enum pca9450_chip_type type = (unsigned int)(uintptr_t) of_device_get_match_data(&i2c->dev); - const struct pca9450_regulator_desc *regulator_desc; + const struct pca9450_regulator_desc *regulator_desc; struct regulator_config config = { }; struct regulator_dev *ldo5; struct pca9450 *pca9450; diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 7870722b6ee2..109f0aae09b1 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1462,6 +1462,40 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = { {} }; +static const struct rpmh_vreg_init_data pm7550_vreg_data[] = { + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), + RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2-l3"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l2-l3"), + RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo515, "vdd-l4-l5"), + RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo515, "vdd-l4-l5"), + RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo515, "vdd-l6"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_nldo515, "vdd-l7"), + RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo515, "vdd-l8"), + RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo515, "vdd-l9-l10"), + RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l9-l10"), + RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"), + RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo515_mv, "vdd-l12-l14"), + RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo515_mv, "vdd-l13-l16"), + RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l12-l14"), + RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16"), + RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("ldo20", "ldo%s20", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("ldo21", "ldo%s21", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("ldo22", "ldo%s22", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("ldo23", "ldo%s23", &pmic5_pldo, "vdd-l15-l17-l18-l19-l20-l21-l22-l23"), + RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"), + {} +}; + static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = { RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps520, "vdd-s1"), RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"), @@ 
-1476,6 +1510,22 @@ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = { {} }; +static const struct rpmh_vreg_init_data pmr735b_vreg_data[] = { + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1-l2"), + RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l1-l2"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), + RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo_lv, "vdd-l4"), + RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo, "vdd-l5"), + RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l6"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_nldo, "vdd-l7-l8"), + RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l7-l8"), + RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l9"), + RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo_lv, "vdd-l10"), + RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l11"), + RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo, "vdd-l12"), + {} +}; + static const struct rpmh_vreg_init_data pm660_vreg_data[] = { RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"), RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"), @@ -1664,10 +1714,18 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = { .data = pm7325_vreg_data, }, { + .compatible = "qcom,pm7550-rpmh-regulators", + .data = pm7550_vreg_data, + }, + { .compatible = "qcom,pmr735a-rpmh-regulators", .data = pmr735a_vreg_data, }, { + .compatible = "qcom,pmr735b-rpmh-regulators", + .data = pmr735b_vreg_data, + }, + { .compatible = "qcom,pm660-rpmh-regulators", .data = pm660_vreg_data, }, diff --git a/drivers/regulator/rpi-panel-v2-regulator.c b/drivers/regulator/rpi-panel-v2-regulator.c new file mode 100644 index 000000000000..30b78aa75ee3 --- /dev/null +++ b/drivers/regulator/rpi-panel-v2-regulator.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Raspberry Pi Ltd. + * Copyright (C) 2025 Marek Vasut + */ + +#include <linux/err.h> +#include <linux/gpio/driver.h> +#include <linux/gpio/regmap.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/pwm.h> +#include <linux/regmap.h> + +/* I2C registers of the microcontroller. 
*/ +#define REG_ID 0x01 +#define REG_POWERON 0x02 +#define REG_PWM 0x03 + +/* Bits for poweron register */ +#define LCD_RESET_BIT BIT(0) +#define CTP_RESET_BIT BIT(1) + +/* Bits for the PWM register */ +#define PWM_BL_ENABLE BIT(7) +#define PWM_BL_MASK GENMASK(4, 0) + +/* Treat LCD_RESET and CTP_RESET as GPIOs */ +#define NUM_GPIO 2 + +static const struct regmap_config rpi_panel_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = REG_PWM, + .can_sleep = true, +}; + +static int rpi_panel_v2_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct regmap *regmap = pwmchip_get_drvdata(chip); + unsigned int duty; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) + return regmap_write(regmap, REG_PWM, 0); + + duty = pwm_get_relative_duty_cycle(state, PWM_BL_MASK); + return regmap_write(regmap, REG_PWM, duty | PWM_BL_ENABLE); +} + +static const struct pwm_ops rpi_panel_v2_pwm_ops = { + .apply = rpi_panel_v2_pwm_apply, +}; + +/* + * I2C driver interface functions + */ +static int rpi_panel_v2_i2c_probe(struct i2c_client *i2c) +{ + struct gpio_regmap_config gconfig = { + .ngpio = NUM_GPIO, + .ngpio_per_reg = NUM_GPIO, + .parent = &i2c->dev, + .reg_set_base = REG_POWERON, + }; + struct regmap *regmap; + struct pwm_chip *pc; + int ret; + + pc = devm_pwmchip_alloc(&i2c->dev, 1, 0); + if (IS_ERR(pc)) + return PTR_ERR(pc); + + pc->ops = &rpi_panel_v2_pwm_ops; + + regmap = devm_regmap_init_i2c(i2c, &rpi_panel_regmap_config); + if (IS_ERR(regmap)) + return dev_err_probe(&i2c->dev, PTR_ERR(regmap), "Failed to allocate regmap\n"); + + pwmchip_set_drvdata(pc, regmap); + + regmap_write(regmap, REG_POWERON, 0); + + gconfig.regmap = regmap; + ret = PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&i2c->dev, &gconfig)); + if (ret) + return dev_err_probe(&i2c->dev, ret, "Failed to create gpiochip\n"); + + i2c_set_clientdata(i2c, regmap); + + return devm_pwmchip_add(&i2c->dev, pc); +} + +static void rpi_panel_v2_i2c_shutdown(struct i2c_client *client) +{ + struct regmap *regmap = i2c_get_clientdata(client); + + regmap_write(regmap, REG_PWM, 0); + regmap_write(regmap, REG_POWERON, 0); +} + +static const struct of_device_id rpi_panel_v2_dt_ids[] = { + { .compatible = "raspberrypi,touchscreen-panel-regulator-v2" }, + { }, +}; +MODULE_DEVICE_TABLE(of, rpi_panel_v2_dt_ids); + +static struct i2c_driver rpi_panel_v2_regulator_driver = { + .driver = { + .name = "rpi_touchscreen_v2", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = rpi_panel_v2_dt_ids, + }, + .probe = rpi_panel_v2_i2c_probe, + .shutdown = rpi_panel_v2_i2c_shutdown, +}; + +module_i2c_driver(rpi_panel_v2_regulator_driver); + +MODULE_AUTHOR("Dave Stevenson <dave.stevenson@raspberrypi.com>"); +MODULE_DESCRIPTION("Regulator device driver for Raspberry Pi 7-inch V2 touchscreen"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/rt5739.c b/drivers/regulator/rt5739.c index 91412c905ce6..5fcddd7c2da7 100644 --- a/drivers/regulator/rt5739.c +++ b/drivers/regulator/rt5739.c @@ -24,6 +24,8 @@ #define RT5739_REG_NSEL1 0x01 #define RT5739_REG_CNTL1 0x02 #define RT5739_REG_ID1 0x03 +#define RT5739_REG_ID2 0x04 +#define RT5739_REG_MON 0x05 #define RT5739_REG_CNTL2 0x06 #define RT5739_REG_CNTL4 0x08 @@ -236,11 +238,18 @@ static void rt5739_init_regulator_desc(struct regulator_desc *desc, } } +static bool rt5739_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == RT5739_REG_MON; +} + static const struct regmap_config rt5739_regmap_config = 
{ .name = "rt5739", .reg_bits = 8, .val_bits = 8, .max_register = RT5739_REG_CNTL4, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = rt5739_volatile_reg, }; static int rt5739_probe(struct i2c_client *i2c) diff --git a/drivers/regulator/rt6160-regulator.c b/drivers/regulator/rt6160-regulator.c index e2a0eee95c61..548ffdf537d3 100644 --- a/drivers/regulator/rt6160-regulator.c +++ b/drivers/regulator/rt6160-regulator.c @@ -31,8 +31,11 @@ #define RT6160_PGSTAT_MASK BIT(0) #define RT6160_VENDOR_ID 0xA0 +#define RT6166_VENDOR_ID 0xB0 #define RT6160_VOUT_MINUV 2025000 #define RT6160_VOUT_MAXUV 5200000 +#define RT6166_VOUT_MINUV 1800000 +#define RT6166_VOUD_MAXUV 4950000 #define RT6160_VOUT_STPUV 25000 #define RT6160_N_VOUTS ((RT6160_VOUT_MAXUV - RT6160_VOUT_MINUV) / RT6160_VOUT_STPUV + 1) @@ -43,6 +46,7 @@ struct rt6160_priv { struct gpio_desc *enable_gpio; struct regmap *regmap; bool enable_state; + uint8_t devid; }; static const unsigned int rt6160_ramp_tables[] = { @@ -260,15 +264,26 @@ static int rt6160_probe(struct i2c_client *i2c) if (ret) return ret; - if ((devid & RT6160_VID_MASK) != RT6160_VENDOR_ID) { + devid = devid & RT6160_VID_MASK; + + switch (devid) { + case RT6166_VENDOR_ID: + case RT6160_VENDOR_ID: + break; + default: dev_err(&i2c->dev, "VID not correct [0x%02x]\n", devid); return -ENODEV; } + priv->devid = devid; + priv->desc.name = "rt6160-buckboost"; priv->desc.type = REGULATOR_VOLTAGE; priv->desc.owner = THIS_MODULE; - priv->desc.min_uV = RT6160_VOUT_MINUV; + if (priv->devid == RT6166_VENDOR_ID) + priv->desc.min_uV = RT6166_VOUT_MINUV; + else + priv->desc.min_uV = RT6160_VOUT_MINUV; priv->desc.uV_step = RT6160_VOUT_STPUV; if (vsel_active_low) priv->desc.vsel_reg = RT6160_REG_VSELL; diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index a85ea94f0673..9e391206f09d 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -67,7 +67,6 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); } - pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); return ret; @@ -87,7 +86,6 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev) val &= ~STM32_ENVR; writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); - pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); return 0; @@ -104,7 +102,6 @@ static int stm32_vrefbuf_is_enabled(struct regulator_dev *rdev) ret = readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR; - pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); return ret; @@ -125,7 +122,6 @@ static int stm32_vrefbuf_set_voltage_sel(struct regulator_dev *rdev, val = (val & ~STM32_VRS) | FIELD_PREP(STM32_VRS, sel); writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); - pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); return 0; @@ -144,7 +140,6 @@ static int stm32_vrefbuf_get_voltage_sel(struct regulator_dev *rdev) val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); ret = FIELD_GET(STM32_VRS, val); - pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); return ret; @@ -218,7 +213,6 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, rdev); - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/regulator/sy8827n.c b/drivers/regulator/sy8827n.c index f11ff38b36c9..0b811514782f 100644 --- a/drivers/regulator/sy8827n.c +++ 
b/drivers/regulator/sy8827n.c @@ -140,7 +140,8 @@ static int sy8827n_i2c_probe(struct i2c_client *client) return -EINVAL; } - di->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH); + di->en_gpio = devm_gpiod_get_optional(dev, "enable", + GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE); if (IS_ERR(di->en_gpio)) return PTR_ERR(di->en_gpio); diff --git a/drivers/regulator/tps6286x-regulator.c b/drivers/regulator/tps6286x-regulator.c index 75f441f36de7..e29aab06bf79 100644 --- a/drivers/regulator/tps6286x-regulator.c +++ b/drivers/regulator/tps6286x-regulator.c @@ -19,13 +19,22 @@ #define TPS6286X_CONTROL_FPWM BIT(4) #define TPS6286X_CONTROL_SWEN BIT(5) +#define TPS6286X_STATUS 0x05 + #define TPS6286X_MIN_MV 400 #define TPS6286X_MAX_MV 1675 #define TPS6286X_STEP_MV 5 +static bool tps6286x_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == TPS6286X_STATUS; +} + static const struct regmap_config tps6286x_regmap_config = { .reg_bits = 8, .val_bits = 8, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = tps6286x_volatile_reg, }; static int tps6286x_set_mode(struct regulator_dev *rdev, unsigned int mode) diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c index c0f5f0a186a3..7b7d3ae39206 100644 --- a/drivers/regulator/tps6287x-regulator.c +++ b/drivers/regulator/tps6287x-regulator.c @@ -27,10 +27,17 @@ #define TPS6287X_CTRL3 0x03 #define TPS6287X_STATUS 0x04 +static bool tps6287x_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == TPS6287X_STATUS; +} + static const struct regmap_config tps6287x_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = TPS6287X_STATUS, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = tps6287x_volatile_reg, }; static const struct linear_range tps6287x_voltage_ranges[] = { diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c index ac53792e3fed..ab882daec7c5 100644 --- a/drivers/regulator/tps6594-regulator.c +++ b/drivers/regulator/tps6594-regulator.c @@ -21,10 +21,6 @@ #define BUCK_NB 5 #define LDO_NB 4 #define MULTI_PHASE_NB 4 -/* TPS6593 and LP8764 supports OV, UV, SC, ILIM */ -#define REGS_INT_NB 4 -/* TPS65224 supports OV or UV */ -#define TPS65224_REGS_INT_NB 1 enum tps6594_regulator_id { /* DCDC's */ @@ -56,7 +52,7 @@ struct tps6594_regulator_irq_type { unsigned long event; }; -static struct tps6594_regulator_irq_type tps6594_ext_regulator_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_ext_regulator_irq_types[] = { { TPS6594_IRQ_NAME_VCCA_OV, "VCCA", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_VCCA_UV, "VCCA", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_VMON1_OV, "VMON1", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, @@ -69,7 +65,7 @@ static struct tps6594_regulator_irq_type tps6594_ext_regulator_irq_types[] = { REGULATOR_EVENT_OVER_VOLTAGE_WARN }, }; -static struct tps6594_regulator_irq_type tps65224_ext_regulator_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_ext_regulator_irq_types[] = { { TPS65224_IRQ_NAME_VCCA_UVOV, "VCCA", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, { TPS65224_IRQ_NAME_VMON1_UVOV, "VMON1", "voltage out of range", @@ -80,13 +76,13 @@ static struct tps6594_regulator_irq_type tps65224_ext_regulator_irq_types[] = { struct tps6594_regulator_irq_data { struct device *dev; - struct tps6594_regulator_irq_type *type; + const struct tps6594_regulator_irq_type *type; struct regulator_dev *rdev; }; 
struct tps6594_ext_regulator_irq_data { struct device *dev; - struct tps6594_regulator_irq_type *type; + const struct tps6594_regulator_irq_type *type; }; #define TPS6594_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \ @@ -192,7 +188,7 @@ static const struct regulator_ops tps6594_ldos_4_ops = { .map_voltage = regulator_map_voltage_linear_range, }; -static const struct regulator_desc buck_regs[] = { +static const struct regulator_desc tps6594_buck_regs[] = { TPS6594_REGULATOR("BUCK1", "buck1", TPS6594_BUCK_1, REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET, TPS6594_REG_BUCKX_VOUT_1(0), @@ -262,7 +258,7 @@ static const struct regulator_desc tps65224_buck_regs[] = { 4, 0, 0, NULL, 0, 0), }; -static struct tps6594_regulator_irq_type tps6594_buck1_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_buck1_irq_types[] = { { TPS6594_IRQ_NAME_BUCK1_OV, "BUCK1", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_BUCK1_UV, "BUCK1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_BUCK1_SC, "BUCK1", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -270,7 +266,7 @@ static struct tps6594_regulator_irq_type tps6594_buck1_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_buck2_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_buck2_irq_types[] = { { TPS6594_IRQ_NAME_BUCK2_OV, "BUCK2", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_BUCK2_UV, "BUCK2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_BUCK2_SC, "BUCK2", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -278,7 +274,7 @@ static struct tps6594_regulator_irq_type tps6594_buck2_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_buck3_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_buck3_irq_types[] = { { TPS6594_IRQ_NAME_BUCK3_OV, "BUCK3", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_BUCK3_UV, "BUCK3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_BUCK3_SC, "BUCK3", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -286,7 +282,7 @@ static struct tps6594_regulator_irq_type tps6594_buck3_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_buck4_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_buck4_irq_types[] = { { TPS6594_IRQ_NAME_BUCK4_OV, "BUCK4", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_BUCK4_UV, "BUCK4", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_BUCK4_SC, "BUCK4", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -294,7 +290,7 @@ static struct tps6594_regulator_irq_type tps6594_buck4_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_buck5_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_buck5_irq_types[] = { { TPS6594_IRQ_NAME_BUCK5_OV, "BUCK5", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_BUCK5_UV, "BUCK5", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_BUCK5_SC, "BUCK5", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -302,7 +298,7 @@ static struct tps6594_regulator_irq_type tps6594_buck5_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_ldo1_irq_types[] = { +static const struct tps6594_regulator_irq_type 
tps6594_ldo1_irq_types[] = { { TPS6594_IRQ_NAME_LDO1_OV, "LDO1", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_LDO1_UV, "LDO1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_LDO1_SC, "LDO1", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -310,7 +306,7 @@ static struct tps6594_regulator_irq_type tps6594_ldo1_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_ldo2_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_ldo2_irq_types[] = { { TPS6594_IRQ_NAME_LDO2_OV, "LDO2", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_LDO2_UV, "LDO2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_LDO2_SC, "LDO2", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -318,7 +314,7 @@ static struct tps6594_regulator_irq_type tps6594_ldo2_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_ldo3_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_ldo3_irq_types[] = { { TPS6594_IRQ_NAME_LDO3_OV, "LDO3", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_LDO3_UV, "LDO3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_LDO3_SC, "LDO3", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -326,7 +322,7 @@ static struct tps6594_regulator_irq_type tps6594_ldo3_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps6594_ldo4_irq_types[] = { +static const struct tps6594_regulator_irq_type tps6594_ldo4_irq_types[] = { { TPS6594_IRQ_NAME_LDO4_OV, "LDO4", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN }, { TPS6594_IRQ_NAME_LDO4_UV, "LDO4", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE }, { TPS6594_IRQ_NAME_LDO4_SC, "LDO4", "short circuit", REGULATOR_EVENT_REGULATION_OUT }, @@ -334,42 +330,42 @@ static struct tps6594_regulator_irq_type tps6594_ldo4_irq_types[] = { REGULATOR_EVENT_OVER_CURRENT }, }; -static struct tps6594_regulator_irq_type tps65224_buck1_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_buck1_irq_types[] = { { TPS65224_IRQ_NAME_BUCK1_UVOV, "BUCK1", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, }; -static struct tps6594_regulator_irq_type tps65224_buck2_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_buck2_irq_types[] = { { TPS65224_IRQ_NAME_BUCK2_UVOV, "BUCK2", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, }; -static struct tps6594_regulator_irq_type tps65224_buck3_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_buck3_irq_types[] = { { TPS65224_IRQ_NAME_BUCK3_UVOV, "BUCK3", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, }; -static struct tps6594_regulator_irq_type tps65224_buck4_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_buck4_irq_types[] = { { TPS65224_IRQ_NAME_BUCK4_UVOV, "BUCK4", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, }; -static struct tps6594_regulator_irq_type tps65224_ldo1_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_ldo1_irq_types[] = { { TPS65224_IRQ_NAME_LDO1_UVOV, "LDO1", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, }; -static struct tps6594_regulator_irq_type tps65224_ldo2_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_ldo2_irq_types[] = { { TPS65224_IRQ_NAME_LDO2_UVOV, "LDO2", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, }; -static struct 
tps6594_regulator_irq_type tps65224_ldo3_irq_types[] = { +static const struct tps6594_regulator_irq_type tps65224_ldo3_irq_types[] = { { TPS65224_IRQ_NAME_LDO3_UVOV, "LDO3", "voltage out of range", REGULATOR_EVENT_REGULATION_OUT }, }; -static struct tps6594_regulator_irq_type *tps6594_bucks_irq_types[] = { +static const struct tps6594_regulator_irq_type *tps6594_bucks_irq_types[] = { tps6594_buck1_irq_types, tps6594_buck2_irq_types, tps6594_buck3_irq_types, @@ -377,21 +373,21 @@ static struct tps6594_regulator_irq_type *tps6594_bucks_irq_types[] = { tps6594_buck5_irq_types, }; -static struct tps6594_regulator_irq_type *tps6594_ldos_irq_types[] = { +static const struct tps6594_regulator_irq_type *tps6594_ldos_irq_types[] = { tps6594_ldo1_irq_types, tps6594_ldo2_irq_types, tps6594_ldo3_irq_types, tps6594_ldo4_irq_types, }; -static struct tps6594_regulator_irq_type *tps65224_bucks_irq_types[] = { +static const struct tps6594_regulator_irq_type *tps65224_bucks_irq_types[] = { tps65224_buck1_irq_types, tps65224_buck2_irq_types, tps65224_buck3_irq_types, tps65224_buck4_irq_types, }; -static struct tps6594_regulator_irq_type *tps65224_ldos_irq_types[] = { +static const struct tps6594_regulator_irq_type *tps65224_ldos_irq_types[] = { tps65224_ldo1_irq_types, tps65224_ldo2_irq_types, tps65224_ldo3_irq_types, @@ -516,11 +512,11 @@ static irqreturn_t tps6594_regulator_irq_handler(int irq, void *data) static int tps6594_request_reg_irqs(struct platform_device *pdev, struct regulator_dev *rdev, struct tps6594_regulator_irq_data *irq_data, - struct tps6594_regulator_irq_type *regs_irq_types, + const struct tps6594_regulator_irq_type *regs_irq_types, size_t interrupt_cnt, int *irq_idx) { - struct tps6594_regulator_irq_type *irq_type; + const struct tps6594_regulator_irq_type *irq_type; struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent); size_t j; int irq; @@ -549,6 +545,70 @@ static int tps6594_request_reg_irqs(struct platform_device *pdev, return 0; } +struct tps6594_regulator_desc { + const struct regulator_desc *multi_phase_regs; + unsigned int num_multi_phase_regs; + + const struct regulator_desc *buck_regs; + int num_buck_regs; + + const struct regulator_desc *ldo_regs; + int num_ldo_regs; + + const struct tps6594_regulator_irq_type **bucks_irq_types; + const struct tps6594_regulator_irq_type **ldos_irq_types; + int num_irq_types; + + const struct tps6594_regulator_irq_type *ext_irq_types; + int num_ext_irqs; +}; + +static const struct tps6594_regulator_desc tps65224_reg_desc = { + .multi_phase_regs = tps65224_multi_regs, + .num_multi_phase_regs = ARRAY_SIZE(tps65224_multi_regs), + .buck_regs = tps65224_buck_regs, + .num_buck_regs = ARRAY_SIZE(tps65224_buck_regs), + .ldo_regs = tps65224_ldo_regs, + .num_ldo_regs = ARRAY_SIZE(tps65224_ldo_regs), + .bucks_irq_types = tps65224_bucks_irq_types, + .ldos_irq_types = tps65224_ldos_irq_types, + .num_irq_types = 1, /* OV or UV */ + .ext_irq_types = tps65224_ext_regulator_irq_types, + .num_ext_irqs = ARRAY_SIZE(tps65224_ext_regulator_irq_types), +}; + +static const struct tps6594_regulator_desc tps652g1_reg_desc = { + .ldo_regs = tps65224_ldo_regs, + .num_ldo_regs = ARRAY_SIZE(tps65224_ldo_regs), + .buck_regs = tps65224_buck_regs, + .num_buck_regs = ARRAY_SIZE(tps65224_buck_regs), +}; + +static const struct tps6594_regulator_desc tps6594_reg_desc = { + .multi_phase_regs = tps6594_multi_regs, + .num_multi_phase_regs = ARRAY_SIZE(tps6594_multi_regs), + .buck_regs = tps6594_buck_regs, + .num_buck_regs = ARRAY_SIZE(tps6594_buck_regs), + .ldo_regs = 
tps6594_ldo_regs, + .num_ldo_regs = ARRAY_SIZE(tps6594_ldo_regs), + .bucks_irq_types = tps6594_bucks_irq_types, + .ldos_irq_types = tps6594_ldos_irq_types, + .num_irq_types = 4, /* OV, UV, SC and ILIM */ + .ext_irq_types = tps6594_ext_regulator_irq_types, + .num_ext_irqs = 2, /* only VCCA OV and UV */ +}; + +static const struct tps6594_regulator_desc lp8764_reg_desc = { + .multi_phase_regs = tps6594_multi_regs, + .num_multi_phase_regs = ARRAY_SIZE(tps6594_multi_regs), + .buck_regs = tps6594_buck_regs, + .num_buck_regs = ARRAY_SIZE(tps6594_buck_regs), + .bucks_irq_types = tps6594_bucks_irq_types, + .num_irq_types = 4, /* OV, UV, SC and ILIM */ + .ext_irq_types = tps6594_ext_regulator_irq_types, + .num_ext_irqs = ARRAY_SIZE(tps6594_ext_regulator_irq_types), +}; + static int tps6594_regulator_probe(struct platform_device *pdev) { struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent); @@ -558,42 +618,36 @@ static int tps6594_regulator_probe(struct platform_device *pdev) struct regulator_config config = {}; struct tps6594_regulator_irq_data *irq_data; struct tps6594_ext_regulator_irq_data *irq_ext_reg_data; - struct tps6594_regulator_irq_type *irq_type; - struct tps6594_regulator_irq_type *irq_types; + const struct tps6594_regulator_irq_type *irq_type; bool buck_configured[BUCK_NB] = { false }; bool buck_multi[MULTI_PHASE_NB] = { false }; + const struct tps6594_regulator_desc *desc; + const struct regulator_desc *multi_regs; - static const char *npname; + const char *npname; int error, i, irq, multi; int irq_idx = 0; int buck_idx = 0; - int nr_ldo; - int nr_buck; - int nr_types; - unsigned int irq_count; - unsigned int multi_phase_cnt; size_t reg_irq_nb; - struct tps6594_regulator_irq_type **bucks_irq_types; - const struct regulator_desc *multi_regs; - struct tps6594_regulator_irq_type **ldos_irq_types; - const struct regulator_desc *ldo_regs; - size_t interrupt_count; - - if (tps->chip_id == TPS65224) { - bucks_irq_types = tps65224_bucks_irq_types; - interrupt_count = ARRAY_SIZE(tps65224_buck1_irq_types); - multi_regs = tps65224_multi_regs; - ldos_irq_types = tps65224_ldos_irq_types; - ldo_regs = tps65224_ldo_regs; - multi_phase_cnt = ARRAY_SIZE(tps65224_multi_regs); - } else { - bucks_irq_types = tps6594_bucks_irq_types; - interrupt_count = ARRAY_SIZE(tps6594_buck1_irq_types); - multi_regs = tps6594_multi_regs; - ldos_irq_types = tps6594_ldos_irq_types; - ldo_regs = tps6594_ldo_regs; - multi_phase_cnt = ARRAY_SIZE(tps6594_multi_regs); - } + + switch (tps->chip_id) { + case TPS65224: + desc = &tps65224_reg_desc; + break; + case TPS652G1: + desc = &tps652g1_reg_desc; + break; + case TPS6594: + case TPS6593: + desc = &tps6594_reg_desc; + break; + case LP8764: + desc = &lp8764_reg_desc; + break; + default: + dev_err(tps->dev, "unknown chip_id %lu\n", tps->chip_id); + return -EINVAL; + }; enum { MULTI_BUCK12, @@ -614,13 +668,14 @@ static int tps6594_regulator_probe(struct platform_device *pdev) * In case of Multiphase configuration, value should be defined for * buck_configured to avoid creating bucks for every buck in multiphase */ - for (multi = 0; multi < multi_phase_cnt; multi++) { - np = of_find_node_by_name(tps->dev->of_node, multi_regs[multi].supply_name); + for (multi = 0; multi < desc->num_multi_phase_regs; multi++) { + multi_regs = &desc->multi_phase_regs[multi]; + np = of_find_node_by_name(tps->dev->of_node, multi_regs->supply_name); npname = of_node_full_name(np); np_pmic_parent = of_get_parent(of_get_parent(np)); if (of_node_cmp(of_node_full_name(np_pmic_parent), 
tps->dev->of_node->full_name)) continue; - if (strcmp(npname, multi_regs[multi].supply_name) == 0) { + if (strcmp(npname, multi_regs->supply_name) == 0) { switch (multi) { case MULTI_BUCK12: buck_multi[0] = true; @@ -653,123 +708,106 @@ static int tps6594_regulator_probe(struct platform_device *pdev) } } - if (tps->chip_id == TPS65224) { - nr_buck = ARRAY_SIZE(tps65224_buck_regs); - nr_ldo = ARRAY_SIZE(tps65224_ldo_regs); - nr_types = TPS65224_REGS_INT_NB; - } else { - nr_buck = ARRAY_SIZE(buck_regs); - nr_ldo = (tps->chip_id == LP8764) ? 0 : ARRAY_SIZE(tps6594_ldo_regs); - nr_types = REGS_INT_NB; - } - - reg_irq_nb = nr_types * (nr_buck + nr_ldo); + reg_irq_nb = desc->num_irq_types * (desc->num_buck_regs + desc->num_ldo_regs); irq_data = devm_kmalloc_array(tps->dev, reg_irq_nb, sizeof(struct tps6594_regulator_irq_data), GFP_KERNEL); if (!irq_data) return -ENOMEM; - for (i = 0; i < multi_phase_cnt; i++) { + for (i = 0; i < desc->num_multi_phase_regs; i++) { if (!buck_multi[i]) continue; - rdev = devm_regulator_register(&pdev->dev, &multi_regs[i], &config); + rdev = devm_regulator_register(&pdev->dev, &desc->multi_phase_regs[i], + &config); if (IS_ERR(rdev)) return dev_err_probe(tps->dev, PTR_ERR(rdev), "failed to register %s regulator\n", pdev->name); + if (!desc->num_irq_types) + continue; + /* config multiphase buck12+buck34 */ if (i == MULTI_BUCK12_34) buck_idx = 2; error = tps6594_request_reg_irqs(pdev, rdev, irq_data, - bucks_irq_types[buck_idx], - interrupt_count, &irq_idx); + desc->bucks_irq_types[buck_idx], + desc->num_irq_types, &irq_idx); if (error) return error; error = tps6594_request_reg_irqs(pdev, rdev, irq_data, - bucks_irq_types[buck_idx + 1], - interrupt_count, &irq_idx); + desc->bucks_irq_types[buck_idx + 1], + desc->num_irq_types, &irq_idx); if (error) return error; if (i == MULTI_BUCK123 || i == MULTI_BUCK1234) { error = tps6594_request_reg_irqs(pdev, rdev, irq_data, - tps6594_bucks_irq_types[buck_idx + 2], - interrupt_count, + desc->bucks_irq_types[buck_idx + 2], + desc->num_irq_types, &irq_idx); if (error) return error; } if (i == MULTI_BUCK1234) { error = tps6594_request_reg_irqs(pdev, rdev, irq_data, - tps6594_bucks_irq_types[buck_idx + 3], - interrupt_count, + desc->bucks_irq_types[buck_idx + 3], + desc->num_irq_types, &irq_idx); if (error) return error; } } - for (i = 0; i < nr_buck; i++) { + for (i = 0; i < desc->num_buck_regs; i++) { if (buck_configured[i]) continue; - const struct regulator_desc *buck_cfg = (tps->chip_id == TPS65224) ? 
- tps65224_buck_regs : buck_regs; - - rdev = devm_regulator_register(&pdev->dev, &buck_cfg[i], &config); + rdev = devm_regulator_register(&pdev->dev, &desc->buck_regs[i], &config); if (IS_ERR(rdev)) return dev_err_probe(tps->dev, PTR_ERR(rdev), "failed to register %s regulator\n", pdev->name); + if (!desc->num_irq_types) + continue; + error = tps6594_request_reg_irqs(pdev, rdev, irq_data, - bucks_irq_types[i], interrupt_count, &irq_idx); + desc->bucks_irq_types[i], + desc->num_irq_types, &irq_idx); if (error) return error; } - /* LP8764 doesn't have LDO */ - if (tps->chip_id != LP8764) { - for (i = 0; i < nr_ldo; i++) { - rdev = devm_regulator_register(&pdev->dev, &ldo_regs[i], &config); - if (IS_ERR(rdev)) - return dev_err_probe(tps->dev, PTR_ERR(rdev), - "failed to register %s regulator\n", - pdev->name); + for (i = 0; i < desc->num_ldo_regs; i++) { + rdev = devm_regulator_register(&pdev->dev, &desc->ldo_regs[i], &config); + if (IS_ERR(rdev)) + return dev_err_probe(tps->dev, PTR_ERR(rdev), + "failed to register %s regulator\n", + pdev->name); - error = tps6594_request_reg_irqs(pdev, rdev, irq_data, - ldos_irq_types[i], interrupt_count, - &irq_idx); - if (error) - return error; - } - } + if (!desc->num_irq_types) + continue; - if (tps->chip_id == TPS65224) { - irq_types = tps65224_ext_regulator_irq_types; - irq_count = ARRAY_SIZE(tps65224_ext_regulator_irq_types); - } else { - irq_types = tps6594_ext_regulator_irq_types; - if (tps->chip_id == LP8764) - irq_count = ARRAY_SIZE(tps6594_ext_regulator_irq_types); - else - /* TPS6593 supports only VCCA OV and UV */ - irq_count = 2; + error = tps6594_request_reg_irqs(pdev, rdev, irq_data, + desc->ldos_irq_types[i], + desc->num_irq_types, &irq_idx); + if (error) + return error; } irq_ext_reg_data = devm_kmalloc_array(tps->dev, - irq_count, + desc->num_ext_irqs, sizeof(struct tps6594_ext_regulator_irq_data), GFP_KERNEL); if (!irq_ext_reg_data) return -ENOMEM; - for (i = 0; i < irq_count; ++i) { - irq_type = &irq_types[i]; + for (i = 0; i < desc->num_ext_irqs; ++i) { + irq_type = &desc->ext_irq_types[i]; irq = platform_get_irq_byname(pdev, irq_type->irq_name); if (irq < 0) return -EINVAL; @@ -787,6 +825,7 @@ static int tps6594_regulator_probe(struct platform_device *pdev) "failed to request %s IRQ %d\n", irq_type->irq_name, irq); } + return 0; } diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index 6ee36adcbdba..bece5e635ee9 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -479,7 +479,7 @@ static int rpmsg_dev_probe(struct device *dev) struct rpmsg_endpoint *ept = NULL; int err; - err = dev_pm_domain_attach(dev, true); + err = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (err) goto out; diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index b7f15f303ea2..967dc4f9eea8 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -130,6 +130,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) struct ism_req_hdr *req = cmd; struct ism_resp_hdr *resp = cmd; + spin_lock(&ism->cmd_lock); __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req)); __ism_write_cmd(ism, req, 0, sizeof(*req)); @@ -143,6 +144,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) } __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp)); out: + spin_unlock(&ism->cmd_lock); return resp->ret; } @@ -606,6 +608,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; spin_lock_init(&ism->lock); + 
spin_lock_init(&ism->cmd_lock); dev_set_drvdata(&pdev->dev, ism); ism->pdev = pdev; ism->dev.parent = &pdev->dev; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 7b4e7a61965a..adb9e7a94785 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -559,8 +559,8 @@ static int sas_ata_prereset(struct ata_link *link, unsigned long deadline) } static struct ata_port_operations sas_sata_ops = { - .prereset = sas_ata_prereset, - .hardreset = sas_ata_hard_reset, + .reset.prereset = sas_ata_prereset, + .reset.hardreset = sas_ata_hard_reset, .error_handler = ata_std_error_handler, .post_internal_cmd = sas_ata_post_internal, .qc_defer = ata_std_qc_defer, diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 9179f8aee964..615e06fd4ee8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5971,7 +5971,8 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance) else instance->iopoll_q_count = 0; - num_msix_req = num_online_cpus() + instance->low_latency_index_start; + num_msix_req = blk_mq_num_online_queues(0) + + instance->low_latency_index_start; instance->msix_vectors = min(num_msix_req, instance->msix_vectors); @@ -5987,7 +5988,8 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance) /* Disable Balanced IOPS mode and try realloc vectors */ instance->perf_mode = MR_LATENCY_PERF_MODE; instance->low_latency_index_start = 1; - num_msix_req = num_online_cpus() + instance->low_latency_index_start; + num_msix_req = blk_mq_num_online_queues(0) + + instance->low_latency_index_start; instance->msix_vectors = min(num_msix_req, instance->msix_vectors); @@ -6243,7 +6245,7 @@ static int megasas_init_fw(struct megasas_instance *instance) intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? true : false; if (intr_coalescing && - (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) && + (blk_mq_num_online_queues(0) >= MR_HIGH_IOPS_QUEUE_COUNT) && (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES)) instance->perf_mode = MR_BALANCED_PERF_MODE; else @@ -6287,7 +6289,8 @@ static int megasas_init_fw(struct megasas_instance *instance) else instance->low_latency_index_start = 1; - num_msix_req = num_online_cpus() + instance->low_latency_index_start; + num_msix_req = blk_mq_num_online_queues(0) + + instance->low_latency_index_start; instance->msix_vectors = min(num_msix_req, instance->msix_vectors); @@ -6319,8 +6322,8 @@ static int megasas_init_fw(struct megasas_instance *instance) megasas_setup_reply_map(instance); dev_info(&instance->pdev->dev, - "current msix/online cpus\t: (%d/%d)\n", - instance->msix_vectors, (unsigned int)num_online_cpus()); + "current msix/max num queues\t: (%d/%u)\n", + instance->msix_vectors, blk_mq_num_online_queues(0)); dev_info(&instance->pdev->dev, "RDPQ mode\t: (%s)\n", instance->is_rdpq ? 
"enabled" : "disabled"); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index fe98c76e9be3..c4c6b5c6658c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -4533,13 +4533,13 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { /* user wants to control IRQ setting for target mode */ ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, - min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), - PCI_IRQ_MSIX); + blk_mq_num_online_queues(ha->msix_count) + min_vecs, + PCI_IRQ_MSIX); } else ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, - min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, - &desc); + blk_mq_num_online_queues(ha->msix_count) + min_vecs, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &desc); if (ret < 0) { ql_log(ql_log_fatal, vha, 0x00c7, diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index eeaa6af294b8..7ce49d8f21c9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1141,6 +1141,11 @@ static void sd_config_write_same(struct scsi_disk *sdkp, out: lim->max_write_zeroes_sectors = sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT); + + if (sdkp->zeroing_mode == SD_ZERO_WS16_UNMAP || + sdkp->zeroing_mode == SD_ZERO_WS10_UNMAP) + lim->max_hw_wzeroes_unmap_sectors = + lim->max_write_zeroes_sectors; } static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index ae6ce6f5d622..ff4217fef93b 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -52,7 +52,8 @@ void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim) if (type != T10_PI_TYPE3_PROTECTION) bi->flags |= BLK_INTEGRITY_REF_TAG; - bi->tuple_size = sizeof(struct t10_pi_tuple); + bi->metadata_size = sizeof(struct t10_pi_tuple); + bi->pi_tuple_size = bi->metadata_size; if (dif && type) { bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 3d40a63e378d..125944941601 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5294,15 +5294,14 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) if (is_kdump_kernel()) { num_queue_groups = 1; } else { - int num_cpus; int max_queue_groups; max_queue_groups = min(ctrl_info->max_inbound_queues / 2, ctrl_info->max_outbound_queues - 1); max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); - num_cpus = num_online_cpus(); - num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); + num_queue_groups = + blk_mq_num_online_queues(ctrl_info->max_msix_vectors); num_queue_groups = min(num_queue_groups, max_queue_groups); } diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 21ce3e940192..96a69edddbe5 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -919,6 +919,7 @@ static int virtscsi_probe(struct virtio_device *vdev) /* We need to know how many queues before we allocate. */ num_queues = virtscsi_config_get(vdev, num_queues) ? 
: 1; num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); + num_queues = blk_mq_num_possible_queues(num_queues); num_targets = virtscsi_config_get(vdev, max_target) + 1; diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c index 5fffd0f003dc..b8d4da147d23 100644 --- a/drivers/soc/apple/rtkit.c +++ b/drivers/soc/apple/rtkit.c @@ -279,8 +279,7 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n", buffer->size, &buffer->iova); - if (buffer->iova && - (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) { + if (buffer->iova && !rtk->ops->shmem_setup) { err = -EINVAL; goto error; } diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c index dfdff186c805..dc52a2197d24 100644 --- a/drivers/soc/ti/pm33xx.c +++ b/drivers/soc/ti/pm33xx.c @@ -145,7 +145,7 @@ static int am33xx_do_sram_idle(u32 wfi_flags) return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags); } -static int __init am43xx_map_gic(void) +static int am43xx_map_gic(void) { gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K); diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c index 75d6f16efced..bc1e653080d9 100644 --- a/drivers/soundwire/bus_type.c +++ b/drivers/soundwire/bus_type.c @@ -101,7 +101,7 @@ static int sdw_drv_probe(struct device *dev) /* * attach to power domain but don't turn on (last arg) */ - ret = dev_pm_domain_attach(dev, false); + ret = dev_pm_domain_attach(dev, 0); if (ret) return ret; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index c51da3fc3604..891729c9c564 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -99,6 +99,15 @@ config SPI_AMLOGIC_SPIFC_A1 This enables master mode support for the SPIFC (SPI flash controller) available in Amlogic A1 (A113L SoC). +config SPI_AMLOGIC_SPISG + tristate "Amlogic SPISG controller" + depends on COMMON_CLK + depends on ARCH_MESON || COMPILE_TEST + help + This enables master mode support for the SPISG (SPI scatter-gather + communication controller), which is available on platforms such as + Amlogic A4 SoCs. + config SPI_APPLE tristate "Apple SoC SPI Controller platform driver" depends on ARCH_APPLE || COMPILE_TEST @@ -647,10 +656,10 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO - depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST + depends on ARCH_MXC || ARCH_NXP || M5441x || COMPILE_TEST help This enables support for the Freescale DSPI controller in master - mode. VF610, LS1021A and ColdFire platforms uses the controller. + mode. S32, VF610, LS1021A and ColdFire platforms uses the controller. config SPI_FSL_ESPI tristate "Freescale eSPI controller" @@ -923,6 +932,14 @@ config SPI_RSPI help SPI driver for Renesas RSPI and QSPI blocks. +config SPI_RZV2H_RSPI + tristate "Renesas RZ/V2H RSPI controller" + depends on ARCH_RENESAS || COMPILE_TEST + help + RSPI driver for the Renesas RZ/V2H Serial Peripheral Interface (RSPI). + RSPI supports both SPI host and SPI target roles. This option only + enables the SPI host role. + config SPI_RZV2M_CSI tristate "Renesas RZ/V2M CSI controller" depends on ARCH_RENESAS || COMPILE_TEST @@ -1355,6 +1372,11 @@ if SPI_OFFLOAD comment "SPI Offload triggers" +config SPI_OFFLOAD_TRIGGER_ADI_UTIL_SD + tristate "SPI offload trigger using ADI sigma-delta utility" + help + SPI offload trigger from ADI sigma-delta utility FPGA IP block. 
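A minimal sketch of the MSI-X sizing pattern the SCSI conversions above (megaraid_sas, qla2xxx, smartpqi, virtio_scsi) converge on, assuming blk_mq_num_online_queues(max) returns the number of online CPUs usable for blk-mq queues, clamped to max when max is non-zero (the call sites above pass 0 for "no cap"). The struct my_hw, its msix_count field and MY_MIN_VECS are illustrative only and not part of any driver touched here:

	#include <linux/blk-mq.h>
	#include <linux/pci.h>

	#define MY_MIN_VECS	1	/* hypothetical reserved (non-queue) vectors */

	struct my_hw {
		unsigned int msix_count;	/* hypothetical hardware MSI-X limit */
	};

	/* Hypothetical fragment; not taken from the patched drivers. */
	static int my_alloc_irq_vectors(struct pci_dev *pdev, struct my_hw *hw)
	{
		unsigned int min_vecs = MY_MIN_VECS;
		unsigned int max_vecs;

		/*
		 * Derive the vector budget from the CPUs blk-mq can actually
		 * use for queues instead of num_online_cpus(), then add the
		 * driver's reserved vectors on top, mirroring the hunks above.
		 */
		max_vecs = blk_mq_num_online_queues(hw->msix_count) + min_vecs;

		return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs,
					     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	}
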
+ config SPI_OFFLOAD_TRIGGER_PWM tristate "SPI offload trigger using PWM" depends on PWM diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 4ea89f6fc531..062c85989c8c 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera-platform.o obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o obj-$(CONFIG_SPI_AMLOGIC_SPIFC_A1) += spi-amlogic-spifc-a1.o +obj-$(CONFIG_SPI_AMLOGIC_SPISG) += spi-amlogic-spisg.o obj-$(CONFIG_SPI_APPLE) += spi-apple.o obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o @@ -126,6 +127,7 @@ obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o obj-$(CONFIG_SPI_REALTEK_SNAND) += spi-realtek-rtl-snand.o obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o obj-$(CONFIG_SPI_RSPI) += spi-rspi.o +obj-$(CONFIG_SPI_RZV2H_RSPI) += spi-rzv2h-rspi.o obj-$(CONFIG_SPI_RZV2M_CSI) += spi-rzv2m-csi.o obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o obj-$(CONFIG_SPI_SC18IS602) += spi-sc18is602.o @@ -170,3 +172,4 @@ obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o # SPI offload triggers obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o +obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_ADI_UTIL_SD) += spi-offload-trigger-adi-util-sigma-delta.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 2f6797324227..4e9bfd26aa80 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -965,7 +965,6 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) err = aq->ops->transfer(mem, op, offset); pm_runtime_put: - pm_runtime_mark_last_busy(&aq->pdev->dev); pm_runtime_put_autosuspend(&aq->pdev->dev); return err; } @@ -1168,7 +1167,6 @@ static int atmel_qspi_setup(struct spi_device *spi) aq->scr |= QSPI_SCR_SCBR(scbr); atmel_qspi_write(aq->scr, aq, QSPI_SCR); - pm_runtime_mark_last_busy(ctrl->dev.parent); pm_runtime_put_autosuspend(ctrl->dev.parent); return 0; @@ -1230,7 +1228,6 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi) aq->mr |= QSPI_MR_DLYBCT(cs_hold) | QSPI_MR_DLYCS(cs_inactive); atmel_qspi_write(aq->mr, aq, QSPI_MR); - pm_runtime_mark_last_busy(ctrl->dev.parent); pm_runtime_put_autosuspend(ctrl->dev.parent); return 0; @@ -1285,18 +1282,21 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); int ret; - aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx"); + aq->rx_chan = devm_dma_request_chan(&aq->pdev->dev, "rx"); if (IS_ERR(aq->rx_chan)) { ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan), "RX DMA channel is not available\n"); - goto null_rx_chan; + aq->rx_chan = NULL; + return ret; } - aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx"); + aq->tx_chan = devm_dma_request_chan(&aq->pdev->dev, "tx"); if (IS_ERR(aq->tx_chan)) { ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan), "TX DMA channel is not available\n"); - goto release_rx_chan; + aq->rx_chan = NULL; + aq->tx_chan = NULL; + return ret; } ctrl->dma_rx = aq->rx_chan; @@ -1307,21 +1307,6 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl) dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan)); return 0; - -release_rx_chan: - dma_release_channel(aq->rx_chan); - aq->tx_chan = NULL; -null_rx_chan: - aq->rx_chan = NULL; - return ret; -} - -static void atmel_qspi_dma_release(struct atmel_qspi *aq) -{ - if (aq->rx_chan) - dma_release_channel(aq->rx_chan); - if (aq->tx_chan) - 
dma_release_channel(aq->tx_chan); } static const struct atmel_qspi_ops atmel_qspi_ops = { @@ -1426,14 +1411,13 @@ static int atmel_qspi_probe(struct platform_device *pdev) /* Request the IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - err = irq; - goto dma_release; - } + if (irq < 0) + return irq; + err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, 0, dev_name(&pdev->dev), aq); if (err) - goto dma_release; + return err; pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); @@ -1442,22 +1426,15 @@ static int atmel_qspi_probe(struct platform_device *pdev) err = atmel_qspi_init(aq); if (err) - goto dma_release; + return err; err = spi_register_controller(ctrl); if (err) - goto dma_release; + return err; - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; - -dma_release: - if (aq->caps->has_dma) - atmel_qspi_dma_release(aq); - - return err; } static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq) @@ -1507,9 +1484,6 @@ static void atmel_qspi_remove(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret >= 0) { - if (aq->caps->has_dma) - atmel_qspi_dma_release(aq); - if (aq->caps->has_gclk) { ret = atmel_qspi_sama7g5_suspend(aq); if (ret) @@ -1582,7 +1556,6 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) atmel_qspi_write(aq->scr, aq, QSPI_SCR); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/spi/spi-amlogic-spisg.c b/drivers/spi/spi-amlogic-spisg.c new file mode 100644 index 000000000000..2ab8bdf2a676 --- /dev/null +++ b/drivers/spi/spi-amlogic-spisg.c @@ -0,0 +1,888 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Amlogic SPI communication Scatter-Gather Controller + * + * Copyright (C) 2025 Amlogic, Inc. 
All rights reserved + * + * Author: Sunny Luo <sunny.luo@amlogic.com> + * Author: Xianwei Zhao <xianwei.zhao@amlogic.com> + */ + +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/spi/spi.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/reset.h> +#include <linux/regmap.h> + +/* Register Map */ +#define SPISG_REG_CFG_READY 0x00 + +#define SPISG_REG_CFG_SPI 0x04 +#define CFG_BUS64_EN BIT(0) +#define CFG_SLAVE_EN BIT(1) +#define CFG_SLAVE_SELECT GENMASK(3, 2) +#define CFG_SFLASH_WP BIT(4) +#define CFG_SFLASH_HD BIT(5) +/* start on vsync rising */ +#define CFG_HW_POS BIT(6) +/* start on vsync falling */ +#define CFG_HW_NEG BIT(7) + +#define SPISG_REG_CFG_START 0x08 +#define CFG_BLOCK_NUM GENMASK(19, 0) +#define CFG_BLOCK_SIZE GENMASK(22, 20) +#define CFG_DATA_COMMAND BIT(23) +#define CFG_OP_MODE GENMASK(25, 24) +#define CFG_RXD_MODE GENMASK(27, 26) +#define CFG_TXD_MODE GENMASK(29, 28) +#define CFG_EOC BIT(30) +#define CFG_PEND BIT(31) + +#define SPISG_REG_CFG_BUS 0x0C +#define CFG_CLK_DIV GENMASK(7, 0) +#define CLK_DIV_WIDTH 8 +#define CFG_RX_TUNING GENMASK(11, 8) +#define CFG_TX_TUNING GENMASK(15, 12) +#define CFG_CS_SETUP GENMASK(19, 16) +#define CFG_LANE GENMASK(21, 20) +#define CFG_HALF_DUPLEX BIT(22) +#define CFG_B_L_ENDIAN BIT(23) +#define CFG_DC_MODE BIT(24) +#define CFG_NULL_CTL BIT(25) +#define CFG_DUMMY_CTL BIT(26) +#define CFG_READ_TURN GENMASK(28, 27) +#define CFG_KEEP_SS BIT(29) +#define CFG_CPHA BIT(30) +#define CFG_CPOL BIT(31) + +#define SPISG_REG_PIO_TX_DATA_L 0x10 +#define SPISG_REG_PIO_TX_DATA_H 0x14 +#define SPISG_REG_PIO_RX_DATA_L 0x18 +#define SPISG_REG_PIO_RX_DATA_H 0x1C +#define SPISG_REG_MEM_TX_ADDR_L 0x10 +#define SPISG_REG_MEM_TX_ADDR_H 0x14 +#define SPISG_REG_MEM_RX_ADDR_L 0x18 +#define SPISG_REG_MEM_RX_ADDR_H 0x1C +#define SPISG_REG_DESC_LIST_L 0x20 +#define SPISG_REG_DESC_LIST_H 0x24 +#define LIST_DESC_PENDING BIT(31) +#define SPISG_REG_DESC_CURRENT_L 0x28 +#define SPISG_REG_DESC_CURRENT_H 0x2c +#define SPISG_REG_IRQ_STS 0x30 +#define SPISG_REG_IRQ_ENABLE 0x34 +#define IRQ_RCH_DESC_EOC BIT(0) +#define IRQ_RCH_DESC_INVALID BIT(1) +#define IRQ_RCH_DESC_RESP BIT(2) +#define IRQ_RCH_DATA_RESP BIT(3) +#define IRQ_WCH_DESC_EOC BIT(4) +#define IRQ_WCH_DESC_INVALID BIT(5) +#define IRQ_WCH_DESC_RESP BIT(6) +#define IRQ_WCH_DATA_RESP BIT(7) +#define IRQ_DESC_ERR BIT(8) +#define IRQ_SPI_READY BIT(9) +#define IRQ_DESC_DONE BIT(10) +#define IRQ_DESC_CHAIN_DONE BIT(11) + +#define SPISG_MAX_REG 0x40 + +#define SPISG_BLOCK_MAX 0x100000 + +#define SPISG_OP_MODE_WRITE_CMD 0 +#define SPISG_OP_MODE_READ_STS 1 +#define SPISG_OP_MODE_WRITE 2 +#define SPISG_OP_MODE_READ 3 + +#define SPISG_DATA_MODE_NONE 0 +#define SPISG_DATA_MODE_PIO 1 +#define SPISG_DATA_MODE_MEM 2 +#define SPISG_DATA_MODE_SG 3 + +#define SPISG_CLK_DIV_MAX 256 +/* recommended by specification */ +#define SPISG_CLK_DIV_MIN 4 +#define DIV_NUM (SPISG_CLK_DIV_MAX - SPISG_CLK_DIV_MIN + 1) + +#define SPISG_PCLK_RATE_MIN 24000000 + +#define SPISG_SINGLE_SPI 0 +#define SPISG_DUAL_SPI 1 +#define SPISG_QUAD_SPI 2 + +struct spisg_sg_link { +#define LINK_ADDR_VALID BIT(0) +#define LINK_ADDR_EOC BIT(1) +#define LINK_ADDR_IRQ BIT(2) +#define LINK_ADDR_ACT GENMASK(5, 3) +#define LINK_ADDR_RING BIT(6) +#define LINK_ADDR_LEN GENMASK(31, 8) + u32 
addr; + u32 addr1; +}; + +struct spisg_descriptor { + u32 cfg_start; + u32 cfg_bus; + u64 tx_paddr; + u64 rx_paddr; +}; + +struct spisg_descriptor_extra { + struct spisg_sg_link *tx_ccsg; + struct spisg_sg_link *rx_ccsg; + int tx_ccsg_len; + int rx_ccsg_len; +}; + +struct spisg_device { + struct spi_controller *controller; + struct platform_device *pdev; + struct regmap *map; + struct clk *core; + struct clk *pclk; + struct clk *sclk; + struct clk_div_table *tbl; + struct completion completion; + u32 status; + u32 speed_hz; + u32 effective_speed_hz; + u32 bytes_per_word; + u32 cfg_spi; + u32 cfg_start; + u32 cfg_bus; +}; + +static int spi_delay_to_sclk(u32 slck_speed_hz, struct spi_delay *delay) +{ + s32 ns; + + if (!delay) + return 0; + + if (delay->unit == SPI_DELAY_UNIT_SCK) + return delay->value; + + ns = spi_delay_to_ns(delay, NULL); + if (ns < 0) + return 0; + + return DIV_ROUND_UP_ULL(slck_speed_hz * ns, NSEC_PER_SEC); +} + +static inline u32 aml_spisg_sem_down_read(struct spisg_device *spisg) +{ + u32 ret; + + regmap_read(spisg->map, SPISG_REG_CFG_READY, &ret); + if (ret) + regmap_write(spisg->map, SPISG_REG_CFG_READY, 0); + + return ret; +} + +static inline void aml_spisg_sem_up_write(struct spisg_device *spisg) +{ + regmap_write(spisg->map, SPISG_REG_CFG_READY, 1); +} + +static int aml_spisg_set_speed(struct spisg_device *spisg, uint speed_hz) +{ + u32 cfg_bus; + + if (!speed_hz || speed_hz == spisg->speed_hz) + return 0; + + spisg->speed_hz = speed_hz; + clk_set_rate(spisg->sclk, speed_hz); + /* Store the div for the descriptor mode */ + regmap_read(spisg->map, SPISG_REG_CFG_BUS, &cfg_bus); + spisg->cfg_bus &= ~CFG_CLK_DIV; + spisg->cfg_bus |= cfg_bus & CFG_CLK_DIV; + spisg->effective_speed_hz = clk_get_rate(spisg->sclk); + dev_dbg(&spisg->pdev->dev, + "desired speed %dHz, effective speed %dHz\n", + speed_hz, spisg->effective_speed_hz); + + return 0; +} + +static bool aml_spisg_can_dma(struct spi_controller *ctlr, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + return true; +} + +static void aml_spisg_sg_xlate(struct sg_table *sgt, struct spisg_sg_link *ccsg) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + ccsg->addr = FIELD_PREP(LINK_ADDR_VALID, 1) | + FIELD_PREP(LINK_ADDR_RING, 0) | + FIELD_PREP(LINK_ADDR_EOC, sg_is_last(sg)) | + FIELD_PREP(LINK_ADDR_LEN, sg_dma_len(sg)); + ccsg->addr1 = (u32)sg_dma_address(sg); + ccsg++; + } +} + +static int nbits_to_lane[] = { + SPISG_SINGLE_SPI, + SPISG_SINGLE_SPI, + SPISG_DUAL_SPI, + -EINVAL, + SPISG_QUAD_SPI +}; + +static int aml_spisg_setup_transfer(struct spisg_device *spisg, + struct spi_transfer *xfer, + struct spisg_descriptor *desc, + struct spisg_descriptor_extra *exdesc) +{ + int block_size, blocks; + struct device *dev = &spisg->pdev->dev; + struct spisg_sg_link *ccsg; + int ccsg_len; + dma_addr_t paddr; + int ret; + + memset(desc, 0, sizeof(*desc)); + if (exdesc) + memset(exdesc, 0, sizeof(*exdesc)); + aml_spisg_set_speed(spisg, xfer->speed_hz); + xfer->effective_speed_hz = spisg->effective_speed_hz; + + desc->cfg_start = spisg->cfg_start; + desc->cfg_bus = spisg->cfg_bus; + + block_size = xfer->bits_per_word >> 3; + blocks = xfer->len / block_size; + + desc->cfg_start |= FIELD_PREP(CFG_EOC, 0); + desc->cfg_bus |= FIELD_PREP(CFG_KEEP_SS, !xfer->cs_change); + desc->cfg_bus |= FIELD_PREP(CFG_NULL_CTL, 0); + + if (xfer->tx_buf || xfer->tx_dma) { + desc->cfg_bus |= FIELD_PREP(CFG_LANE, nbits_to_lane[xfer->tx_nbits]); + desc->cfg_start |= FIELD_PREP(CFG_OP_MODE, SPISG_OP_MODE_WRITE); 
+ } + if (xfer->rx_buf || xfer->rx_dma) { + desc->cfg_bus |= FIELD_PREP(CFG_LANE, nbits_to_lane[xfer->rx_nbits]); + desc->cfg_start |= FIELD_PREP(CFG_OP_MODE, SPISG_OP_MODE_READ); + } + + if (FIELD_GET(CFG_OP_MODE, desc->cfg_start) == SPISG_OP_MODE_READ_STS) { + desc->cfg_start |= FIELD_PREP(CFG_BLOCK_SIZE, blocks) | + FIELD_PREP(CFG_BLOCK_NUM, 1); + } else { + blocks = min_t(int, blocks, SPISG_BLOCK_MAX); + desc->cfg_start |= FIELD_PREP(CFG_BLOCK_SIZE, block_size & 0x7) | + FIELD_PREP(CFG_BLOCK_NUM, blocks); + } + + if (xfer->tx_sg.nents && xfer->tx_sg.sgl) { + ccsg_len = xfer->tx_sg.nents * sizeof(struct spisg_sg_link); + ccsg = kzalloc(ccsg_len, GFP_KERNEL | GFP_DMA); + if (!ccsg) { + dev_err(dev, "alloc tx_ccsg failed\n"); + return -ENOMEM; + } + + aml_spisg_sg_xlate(&xfer->tx_sg, ccsg); + paddr = dma_map_single(dev, (void *)ccsg, + ccsg_len, DMA_TO_DEVICE); + ret = dma_mapping_error(dev, paddr); + if (ret) { + kfree(ccsg); + dev_err(dev, "tx ccsg map failed\n"); + return ret; + } + + desc->tx_paddr = paddr; + desc->cfg_start |= FIELD_PREP(CFG_TXD_MODE, SPISG_DATA_MODE_SG); + exdesc->tx_ccsg = ccsg; + exdesc->tx_ccsg_len = ccsg_len; + dma_sync_sgtable_for_device(spisg->controller->cur_tx_dma_dev, + &xfer->tx_sg, DMA_TO_DEVICE); + } else if (xfer->tx_buf || xfer->tx_dma) { + paddr = xfer->tx_dma; + if (!paddr) { + paddr = dma_map_single(dev, (void *)xfer->tx_buf, + xfer->len, DMA_TO_DEVICE); + ret = dma_mapping_error(dev, paddr); + if (ret) { + dev_err(dev, "tx buf map failed\n"); + return ret; + } + } + desc->tx_paddr = paddr; + desc->cfg_start |= FIELD_PREP(CFG_TXD_MODE, SPISG_DATA_MODE_MEM); + } + + if (xfer->rx_sg.nents && xfer->rx_sg.sgl) { + ccsg_len = xfer->rx_sg.nents * sizeof(struct spisg_sg_link); + ccsg = kzalloc(ccsg_len, GFP_KERNEL | GFP_DMA); + if (!ccsg) { + dev_err(dev, "alloc rx_ccsg failed\n"); + return -ENOMEM; + } + + aml_spisg_sg_xlate(&xfer->rx_sg, ccsg); + paddr = dma_map_single(dev, (void *)ccsg, + ccsg_len, DMA_TO_DEVICE); + ret = dma_mapping_error(dev, paddr); + if (ret) { + kfree(ccsg); + dev_err(dev, "rx ccsg map failed\n"); + return ret; + } + + desc->rx_paddr = paddr; + desc->cfg_start |= FIELD_PREP(CFG_RXD_MODE, SPISG_DATA_MODE_SG); + exdesc->rx_ccsg = ccsg; + exdesc->rx_ccsg_len = ccsg_len; + dma_sync_sgtable_for_device(spisg->controller->cur_rx_dma_dev, + &xfer->rx_sg, DMA_FROM_DEVICE); + } else if (xfer->rx_buf || xfer->rx_dma) { + paddr = xfer->rx_dma; + if (!paddr) { + paddr = dma_map_single(dev, xfer->rx_buf, + xfer->len, DMA_FROM_DEVICE); + ret = dma_mapping_error(dev, paddr); + if (ret) { + dev_err(dev, "rx buf map failed\n"); + return ret; + } + } + + desc->rx_paddr = paddr; + desc->cfg_start |= FIELD_PREP(CFG_RXD_MODE, SPISG_DATA_MODE_MEM); + } + + return 0; +} + +static void aml_spisg_cleanup_transfer(struct spisg_device *spisg, + struct spi_transfer *xfer, + struct spisg_descriptor *desc, + struct spisg_descriptor_extra *exdesc) +{ + struct device *dev = &spisg->pdev->dev; + + if (desc->tx_paddr) { + if (FIELD_GET(CFG_TXD_MODE, desc->cfg_start) == SPISG_DATA_MODE_SG) { + dma_unmap_single(dev, (dma_addr_t)desc->tx_paddr, + exdesc->tx_ccsg_len, DMA_TO_DEVICE); + kfree(exdesc->tx_ccsg); + dma_sync_sgtable_for_cpu(spisg->controller->cur_tx_dma_dev, + &xfer->tx_sg, DMA_TO_DEVICE); + } else if (!xfer->tx_dma) { + dma_unmap_single(dev, (dma_addr_t)desc->tx_paddr, + xfer->len, DMA_TO_DEVICE); + } + } + + if (desc->rx_paddr) { + if (FIELD_GET(CFG_RXD_MODE, desc->cfg_start) == SPISG_DATA_MODE_SG) { + dma_unmap_single(dev, (dma_addr_t)desc->rx_paddr, + 
exdesc->rx_ccsg_len, DMA_TO_DEVICE); + kfree(exdesc->rx_ccsg); + dma_sync_sgtable_for_cpu(spisg->controller->cur_rx_dma_dev, + &xfer->rx_sg, DMA_FROM_DEVICE); + } else if (!xfer->rx_dma) { + dma_unmap_single(dev, (dma_addr_t)desc->rx_paddr, + xfer->len, DMA_FROM_DEVICE); + } + } +} + +static void aml_spisg_setup_null_desc(struct spisg_device *spisg, + struct spisg_descriptor *desc, + u32 n_sclk) +{ + /* unit is the last xfer sclk */ + desc->cfg_start = spisg->cfg_start; + desc->cfg_bus = spisg->cfg_bus; + + desc->cfg_start |= FIELD_PREP(CFG_OP_MODE, SPISG_OP_MODE_WRITE) | + FIELD_PREP(CFG_BLOCK_SIZE, 1) | + FIELD_PREP(CFG_BLOCK_NUM, DIV_ROUND_UP(n_sclk, 8)); + + desc->cfg_bus |= FIELD_PREP(CFG_NULL_CTL, 1); +} + +static void aml_spisg_pending(struct spisg_device *spisg, + dma_addr_t desc_paddr, + bool trig, + bool irq_en) +{ + u32 desc_l, desc_h, cfg_spi, irq_enable; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + desc_l = (u64)desc_paddr & 0xffffffff; + desc_h = (u64)desc_paddr >> 32; +#else + desc_l = desc_paddr & 0xffffffff; + desc_h = 0; +#endif + + cfg_spi = spisg->cfg_spi; + if (trig) + cfg_spi |= CFG_HW_POS; + else + desc_h |= LIST_DESC_PENDING; + + irq_enable = IRQ_RCH_DESC_INVALID | IRQ_RCH_DESC_RESP | + IRQ_RCH_DATA_RESP | IRQ_WCH_DESC_INVALID | + IRQ_WCH_DESC_RESP | IRQ_WCH_DATA_RESP | + IRQ_DESC_ERR | IRQ_DESC_CHAIN_DONE; + regmap_write(spisg->map, SPISG_REG_IRQ_ENABLE, irq_en ? irq_enable : 0); + regmap_write(spisg->map, SPISG_REG_CFG_SPI, cfg_spi); + regmap_write(spisg->map, SPISG_REG_DESC_LIST_L, desc_l); + regmap_write(spisg->map, SPISG_REG_DESC_LIST_H, desc_h); +} + +static irqreturn_t aml_spisg_irq(int irq, void *data) +{ + struct spisg_device *spisg = (void *)data; + u32 sts; + + spisg->status = 0; + regmap_read(spisg->map, SPISG_REG_IRQ_STS, &sts); + regmap_write(spisg->map, SPISG_REG_IRQ_STS, sts); + if (sts & (IRQ_RCH_DESC_INVALID | + IRQ_RCH_DESC_RESP | + IRQ_RCH_DATA_RESP | + IRQ_WCH_DESC_INVALID | + IRQ_WCH_DESC_RESP | + IRQ_WCH_DATA_RESP | + IRQ_DESC_ERR)) + spisg->status = sts; + else if (sts & IRQ_DESC_CHAIN_DONE) + spisg->status = 0; + else + return IRQ_NONE; + + complete(&spisg->completion); + + return IRQ_HANDLED; +} + +static int aml_spisg_transfer_one_message(struct spi_controller *ctlr, + struct spi_message *msg) +{ + struct spisg_device *spisg = spi_controller_get_devdata(ctlr); + struct device *dev = &spisg->pdev->dev; + unsigned long long ms = 0; + struct spi_transfer *xfer; + struct spisg_descriptor *descs, *desc; + struct spisg_descriptor_extra *exdescs, *exdesc; + dma_addr_t descs_paddr; + int desc_num = 1, descs_len; + u32 cs_hold_in_sclk = 0; + int ret = -EIO; + + if (!aml_spisg_sem_down_read(spisg)) { + spi_finalize_current_message(ctlr); + dev_err(dev, "controller busy\n"); + return -EBUSY; + } + + /* calculate the desc num for all xfer */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) + desc_num++; + + /* alloc descriptor/extra-descriptor table */ + descs = kcalloc(desc_num, sizeof(*desc) + sizeof(*exdesc), + GFP_KERNEL | GFP_DMA); + if (!descs) { + spi_finalize_current_message(ctlr); + aml_spisg_sem_up_write(spisg); + return -ENOMEM; + } + descs_len = sizeof(*desc) * desc_num; + exdescs = (struct spisg_descriptor_extra *)(descs + desc_num); + + /* config descriptor for each xfer */ + desc = descs; + exdesc = exdescs; + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + ret = aml_spisg_setup_transfer(spisg, xfer, desc, exdesc); + if (ret) { + dev_err(dev, "config descriptor failed\n"); + goto end; + } + + /* calculate cs-setup 
delay with the first xfer speed */ + if (list_is_first(&xfer->transfer_list, &msg->transfers)) + desc->cfg_bus |= FIELD_PREP(CFG_CS_SETUP, + spi_delay_to_sclk(xfer->effective_speed_hz, &msg->spi->cs_setup)); + + /* calculate cs-hold delay with the last xfer speed */ + if (list_is_last(&xfer->transfer_list, &msg->transfers)) + cs_hold_in_sclk = + spi_delay_to_sclk(xfer->effective_speed_hz, &msg->spi->cs_hold); + + desc++; + exdesc++; + ms += DIV_ROUND_UP_ULL(8LL * MSEC_PER_SEC * xfer->len, + xfer->effective_speed_hz); + } + + if (cs_hold_in_sclk) + /* additional null-descriptor to achieve the cs-hold delay */ + aml_spisg_setup_null_desc(spisg, desc, cs_hold_in_sclk); + else + desc--; + + desc->cfg_bus |= FIELD_PREP(CFG_KEEP_SS, 0); + desc->cfg_start |= FIELD_PREP(CFG_EOC, 1); + + /* some tolerances */ + ms += ms + 20; + if (ms > UINT_MAX) + ms = UINT_MAX; + + descs_paddr = dma_map_single(dev, (void *)descs, + descs_len, DMA_TO_DEVICE); + ret = dma_mapping_error(dev, descs_paddr); + if (ret) { + dev_err(dev, "desc table map failed\n"); + goto end; + } + + reinit_completion(&spisg->completion); + aml_spisg_pending(spisg, descs_paddr, false, true); + if (wait_for_completion_timeout(&spisg->completion, + spi_controller_is_target(spisg->controller) ? + MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(ms))) + ret = spisg->status ? -EIO : 0; + else + ret = -ETIMEDOUT; + + dma_unmap_single(dev, descs_paddr, descs_len, DMA_TO_DEVICE); +end: + desc = descs; + exdesc = exdescs; + list_for_each_entry(xfer, &msg->transfers, transfer_list) + aml_spisg_cleanup_transfer(spisg, xfer, desc++, exdesc++); + kfree(descs); + + if (!ret) + msg->actual_length = msg->frame_length; + msg->status = ret; + spi_finalize_current_message(ctlr); + aml_spisg_sem_up_write(spisg); + + return ret; +} + +static int aml_spisg_prepare_message(struct spi_controller *ctlr, + struct spi_message *message) +{ + struct spisg_device *spisg = spi_controller_get_devdata(ctlr); + struct spi_device *spi = message->spi; + + if (!spi->bits_per_word || spi->bits_per_word % 8) { + dev_err(&spisg->pdev->dev, "invalid wordlen %d\n", spi->bits_per_word); + return -EINVAL; + } + + spisg->bytes_per_word = spi->bits_per_word >> 3; + + spisg->cfg_spi &= ~CFG_SLAVE_SELECT; + spisg->cfg_spi |= FIELD_PREP(CFG_SLAVE_SELECT, spi_get_chipselect(spi, 0)); + + spisg->cfg_bus &= ~(CFG_CPOL | CFG_CPHA | CFG_B_L_ENDIAN | CFG_HALF_DUPLEX); + spisg->cfg_bus |= FIELD_PREP(CFG_CPOL, !!(spi->mode & SPI_CPOL)) | + FIELD_PREP(CFG_CPHA, !!(spi->mode & SPI_CPHA)) | + FIELD_PREP(CFG_B_L_ENDIAN, !!(spi->mode & SPI_LSB_FIRST)) | + FIELD_PREP(CFG_HALF_DUPLEX, !!(spi->mode & SPI_3WIRE)); + + return 0; +} + +static int aml_spisg_setup(struct spi_device *spi) +{ + if (!spi->controller_state) + spi->controller_state = spi_controller_get_devdata(spi->controller); + + return 0; +} + +static void aml_spisg_cleanup(struct spi_device *spi) +{ + spi->controller_state = NULL; +} + +static int aml_spisg_target_abort(struct spi_controller *ctlr) +{ + struct spisg_device *spisg = spi_controller_get_devdata(ctlr); + + spisg->status = 0; + regmap_write(spisg->map, SPISG_REG_DESC_LIST_H, 0); + complete(&spisg->completion); + + return 0; +} + +static int aml_spisg_clk_init(struct spisg_device *spisg, void __iomem *base) +{ + struct device *dev = &spisg->pdev->dev; + struct clk_init_data init; + struct clk_divider *div; + struct clk_div_table *tbl; + char name[32]; + int ret, i; + + spisg->core = devm_clk_get_enabled(dev, "core"); + if (IS_ERR_OR_NULL(spisg->core)) { + dev_err(dev, "core clock request 
failed\n"); + return PTR_ERR(spisg->core); + } + + spisg->pclk = devm_clk_get_enabled(dev, "pclk"); + if (IS_ERR_OR_NULL(spisg->pclk)) { + dev_err(dev, "pclk clock request failed\n"); + return PTR_ERR(spisg->pclk); + } + + clk_set_min_rate(spisg->pclk, SPISG_PCLK_RATE_MIN); + + clk_disable_unprepare(spisg->pclk); + + tbl = devm_kzalloc(dev, sizeof(struct clk_div_table) * (DIV_NUM + 1), GFP_KERNEL); + if (!tbl) + return -ENOMEM; + + for (i = 0; i < DIV_NUM; i++) { + tbl[i].val = i + SPISG_CLK_DIV_MIN - 1; + tbl[i].div = i + SPISG_CLK_DIV_MIN; + } + spisg->tbl = tbl; + + div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL); + if (!div) + return -ENOMEM; + + div->flags = CLK_DIVIDER_ROUND_CLOSEST; + div->reg = base + SPISG_REG_CFG_BUS; + div->shift = __bf_shf(CFG_CLK_DIV); + div->width = CLK_DIV_WIDTH; + div->table = tbl; + + /* Register value should not be outside of the table */ + regmap_update_bits(spisg->map, SPISG_REG_CFG_BUS, CFG_CLK_DIV, + FIELD_PREP(CFG_CLK_DIV, SPISG_CLK_DIV_MIN - 1)); + + /* Register clk-divider */ + snprintf(name, sizeof(name), "%s_div", dev_name(dev)); + init.name = name; + init.ops = &clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_data = &(const struct clk_parent_data) { + .fw_name = "pclk", + }; + init.num_parents = 1; + div->hw.init = &init; + ret = devm_clk_hw_register(dev, &div->hw); + if (ret) { + dev_err(dev, "clock registration failed\n"); + return ret; + } + + spisg->sclk = devm_clk_hw_get_clk(dev, &div->hw, NULL); + if (IS_ERR_OR_NULL(spisg->sclk)) { + dev_err(dev, "get clock failed\n"); + return PTR_ERR(spisg->sclk); + } + + clk_prepare_enable(spisg->sclk); + + return 0; +} + +static int aml_spisg_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct spisg_device *spisg; + struct device *dev = &pdev->dev; + void __iomem *base; + int ret, irq; + + const struct regmap_config aml_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = SPISG_MAX_REG, + }; + + if (of_property_read_bool(dev->of_node, "spi-slave")) + ctlr = spi_alloc_target(dev, sizeof(*spisg)); + else + ctlr = spi_alloc_host(dev, sizeof(*spisg)); + if (!ctlr) + return dev_err_probe(dev, -ENOMEM, "controller allocation failed\n"); + + spisg = spi_controller_get_devdata(ctlr); + spisg->controller = ctlr; + + spisg->pdev = pdev; + platform_set_drvdata(pdev, spisg); + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), "resource ioremap failed\n"); + + spisg->map = devm_regmap_init_mmio(dev, base, &aml_regmap_config); + if (IS_ERR(spisg->map)) + return dev_err_probe(dev, PTR_ERR(spisg->map), "regmap init failed\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out_controller; + } + + ret = device_reset_optional(dev); + if (ret) + return dev_err_probe(dev, ret, "reset dev failed\n"); + + ret = aml_spisg_clk_init(spisg, base); + if (ret) + return dev_err_probe(dev, ret, "clock init failed\n"); + + spisg->cfg_spi = 0; + spisg->cfg_start = 0; + spisg->cfg_bus = 0; + + spisg->cfg_spi = FIELD_PREP(CFG_SFLASH_WP, 1) | + FIELD_PREP(CFG_SFLASH_HD, 1); + if (spi_controller_is_target(ctlr)) { + spisg->cfg_spi |= FIELD_PREP(CFG_SLAVE_EN, 1); + spisg->cfg_bus = FIELD_PREP(CFG_TX_TUNING, 0xf); + } + /* default pending */ + spisg->cfg_start = FIELD_PREP(CFG_PEND, 1); + + pm_runtime_set_active(&spisg->pdev->dev); + pm_runtime_enable(&spisg->pdev->dev); + pm_runtime_resume_and_get(&spisg->pdev->dev); + + ctlr->num_chipselect = 4; + ctlr->dev.of_node 
= pdev->dev.of_node; + ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | + SPI_3WIRE | SPI_TX_QUAD | SPI_RX_QUAD; + ctlr->max_speed_hz = 1000 * 1000 * 100; + ctlr->min_speed_hz = 1000 * 10; + ctlr->setup = aml_spisg_setup; + ctlr->cleanup = aml_spisg_cleanup; + ctlr->prepare_message = aml_spisg_prepare_message; + ctlr->transfer_one_message = aml_spisg_transfer_one_message; + ctlr->target_abort = aml_spisg_target_abort; + ctlr->can_dma = aml_spisg_can_dma; + ctlr->max_dma_len = SPISG_BLOCK_MAX; + ctlr->auto_runtime_pm = true; + + dma_set_max_seg_size(&pdev->dev, SPISG_BLOCK_MAX); + + ret = devm_request_irq(&pdev->dev, irq, aml_spisg_irq, 0, NULL, spisg); + if (ret) { + dev_err(&pdev->dev, "irq request failed\n"); + goto out_clk; + } + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) { + dev_err(&pdev->dev, "spi controller registration failed\n"); + goto out_clk; + } + + init_completion(&spisg->completion); + + pm_runtime_put(&spisg->pdev->dev); + + return 0; +out_clk: + if (spisg->core) + clk_disable_unprepare(spisg->core); + clk_disable_unprepare(spisg->pclk); +out_controller: + spi_controller_put(ctlr); + + return ret; +} + +static void aml_spisg_remove(struct platform_device *pdev) +{ + struct spisg_device *spisg = platform_get_drvdata(pdev); + + if (!pm_runtime_suspended(&pdev->dev)) { + pinctrl_pm_select_sleep_state(&spisg->pdev->dev); + clk_disable_unprepare(spisg->core); + clk_disable_unprepare(spisg->pclk); + } +} + +static int spisg_suspend_runtime(struct device *dev) +{ + struct spisg_device *spisg = dev_get_drvdata(dev); + + pinctrl_pm_select_sleep_state(&spisg->pdev->dev); + clk_disable_unprepare(spisg->sclk); + clk_disable_unprepare(spisg->core); + + return 0; +} + +static int spisg_resume_runtime(struct device *dev) +{ + struct spisg_device *spisg = dev_get_drvdata(dev); + + clk_prepare_enable(spisg->core); + clk_prepare_enable(spisg->sclk); + pinctrl_pm_select_default_state(&spisg->pdev->dev); + + return 0; +} + +static const struct dev_pm_ops amlogic_spisg_pm_ops = { + .runtime_suspend = spisg_suspend_runtime, + .runtime_resume = spisg_resume_runtime, +}; + +static const struct of_device_id amlogic_spisg_of_match[] = { + { + .compatible = "amlogic,a4-spisg", + }, + + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, amlogic_spisg_of_match); + +static struct platform_driver amlogic_spisg_driver = { + .probe = aml_spisg_probe, + .remove = aml_spisg_remove, + .driver = { + .name = "amlogic-spisg", + .of_match_table = amlogic_spisg_of_match, + .pm = &amlogic_spisg_pm_ops, + }, +}; + +module_platform_driver(amlogic_spisg_driver); + +MODULE_DESCRIPTION("Amlogic SPI Scatter-Gather Controller driver"); +MODULE_AUTHOR("Sunny Luo <sunny.luo@amlogic.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index d3c78f59b22c..177f9a33f3a2 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1469,7 +1469,6 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) ret = cqspi_mem_process(mem, op); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); if (ret) @@ -1970,7 +1969,6 @@ static int cqspi_probe(struct platform_device *pdev) goto probe_setup_failed; } - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 9e56bde87768..5ae09b21d23a 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -662,7 +662,6 @@ static int 
cdns_spi_probe(struct platform_device *pdev) /* Set to default valid value */ ctlr->max_speed_hz = xspi->clk_rate / 4; xspi->speed_hz = ctlr->max_speed_hz; - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); } else { ctlr->mode_bits |= SPI_NO_CS; diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c index 84279058f0f1..faa893f83dc5 100644 --- a/drivers/spi/spi-falcon.c +++ b/drivers/spi/spi-falcon.c @@ -94,8 +94,9 @@ struct falcon_sflash { struct spi_controller *host; }; -int falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t, - unsigned long flags) +static int +falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t, + unsigned long flags) { struct device *dev = &spi->dev; struct falcon_sflash *priv = spi_controller_get_devdata(spi->controller); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 0dcd49114095..4bd4377551b5 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -24,6 +24,7 @@ #define SPI_MCR 0x00 #define SPI_MCR_HOST BIT(31) +#define SPI_MCR_MTFE BIT(26) #define SPI_MCR_PCSIS(x) ((x) << 16) #define SPI_MCR_CLR_TXF BIT(11) #define SPI_MCR_CLR_RXF BIT(10) @@ -35,8 +36,9 @@ #define SPI_TCR 0x08 #define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16) -#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4)) +#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(2, 0)) * 4)) #define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27)) +#define SPI_CTAR_DBR BIT(31) #define SPI_CTAR_CPOL BIT(26) #define SPI_CTAR_CPHA BIT(25) #define SPI_CTAR_LSBFE BIT(24) @@ -93,12 +95,14 @@ #define SPI_TXFR1 0x40 #define SPI_TXFR2 0x44 #define SPI_TXFR3 0x48 +#define SPI_TXFR4 0x4C #define SPI_RXFR0 0x7c #define SPI_RXFR1 0x80 #define SPI_RXFR2 0x84 #define SPI_RXFR3 0x88 +#define SPI_RXFR4 0x8C -#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4)) +#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(2, 0)) * 4)) #define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16) #define SPI_CTARE_DTCP(x) ((x) & 0x7ff) @@ -109,6 +113,8 @@ #define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000) +#define SPI_25MHZ 25000000 + struct chip_data { u32 ctar_val; }; @@ -122,6 +128,7 @@ struct fsl_dspi_devtype_data { enum dspi_trans_mode trans_mode; u8 max_clock_factor; int fifo_size; + const struct regmap_config *regmap; }; enum { @@ -135,6 +142,102 @@ enum { LX2160A, MCF5441X, VF610, + S32G, + S32G_TARGET, +}; + +static const struct regmap_range dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(3)), + regmap_reg_range(SPI_SR, SPI_TXFR3), + regmap_reg_range(SPI_RXFR0, SPI_RXFR3), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_range s32g_dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(5)), + regmap_reg_range(SPI_SR, SPI_TXFR4), + regmap_reg_range(SPI_RXFR0, SPI_RXFR4), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(5)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_access_table = { + .yes_ranges = dspi_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), +}; + +static const struct regmap_access_table s32g_dspi_access_table = { + .yes_ranges = s32g_dspi_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(s32g_dspi_yes_ranges), +}; + +static const struct regmap_range dspi_volatile_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_TCR), + regmap_reg_range(SPI_SR, SPI_SR), + regmap_reg_range(SPI_PUSHR, SPI_RXFR4), + 
regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_volatile_table = { + .yes_ranges = dspi_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges), +}; + +enum { + DSPI_REGMAP, + S32G_DSPI_REGMAP, + DSPI_XSPI_REGMAP, + S32G_DSPI_XSPI_REGMAP, + DSPI_PUSHR, +}; + +static const struct regmap_config dspi_regmap_config[] = { + [DSPI_REGMAP] = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = SPI_RXFR3, + .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, + }, + [S32G_DSPI_REGMAP] = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = SPI_RXFR4, + .volatile_table = &dspi_volatile_table, + .wr_table = &s32g_dspi_access_table, + .rd_table = &s32g_dspi_access_table, + }, + [DSPI_XSPI_REGMAP] = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = SPI_SREX, + .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, + }, + [S32G_DSPI_XSPI_REGMAP] = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = SPI_SREX, + .volatile_table = &dspi_volatile_table, + .wr_table = &s32g_dspi_access_table, + .rd_table = &s32g_dspi_access_table, + }, + [DSPI_PUSHR] = { + .name = "pushr", + .reg_bits = 16, + .val_bits = 16, + .reg_stride = 2, + .max_register = 0x2, + }, }; static const struct fsl_dspi_devtype_data devtype_data[] = { @@ -142,55 +245,77 @@ static const struct fsl_dspi_devtype_data devtype_data[] = { .trans_mode = DSPI_DMA_MODE, .max_clock_factor = 2, .fifo_size = 4, + .regmap = &dspi_regmap_config[DSPI_REGMAP], }, [LS1021A] = { /* Has A-011218 DMA erratum */ .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [LS1012A] = { /* Has A-011218 DMA erratum */ .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 16, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [LS1028A] = { .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [LS1043A] = { /* Has A-011218 DMA erratum */ .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 16, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [LS1046A] = { /* Has A-011218 DMA erratum */ .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 16, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [LS2080A] = { .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [LS2085A] = { .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [LX2160A] = { .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, + .regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP], }, [MCF5441X] = { .trans_mode = DSPI_DMA_MODE, .max_clock_factor = 8, .fifo_size = 16, + .regmap = &dspi_regmap_config[DSPI_REGMAP], + }, + [S32G] = { + .trans_mode = DSPI_XSPI_MODE, + .max_clock_factor = 1, + .fifo_size = 5, + .regmap = &dspi_regmap_config[S32G_DSPI_XSPI_REGMAP], + }, + [S32G_TARGET] = { + .trans_mode = DSPI_DMA_MODE, + .max_clock_factor = 1, + .fifo_size = 5, + .regmap = &dspi_regmap_config[S32G_DSPI_REGMAP], }, }; @@ -225,6 +350,7 @@ struct fsl_dspi { const void *tx; void *rx; u16 tx_cmd; + bool mtf_enabled; const struct fsl_dspi_devtype_data *devtype_data; struct completion xfer_done; @@ -247,6 +373,12 @@ struct fsl_dspi { void 
(*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata); }; +static bool is_s32g_dspi(struct fsl_dspi *data) +{ + return data->devtype_data == &devtype_data[S32G] || + data->devtype_data == &devtype_data[S32G_TARGET]; +} + static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) { switch (dspi->oper_word_size) { @@ -595,7 +727,7 @@ static void dspi_release_dma(struct fsl_dspi *dspi) } static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, - unsigned long clkrate) + unsigned long clkrate, bool mtf_enabled) { /* Valid baud rate pre-scaler values */ int pbr_tbl[4] = {2, 3, 5, 7}; @@ -612,7 +744,13 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, for (i = 0; i < ARRAY_SIZE(brs); i++) for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) { - scale = brs[i] * pbr_tbl[j]; + if (mtf_enabled) { + /* In MTF mode DBR=1 so frequency is doubled */ + scale = (brs[i] * pbr_tbl[j]) / 2; + } else { + scale = brs[i] * pbr_tbl[j]; + } + if (scale >= scale_needed) { if (scale < minscale) { minscale = scale; @@ -746,8 +884,12 @@ static void dspi_setup_accel(struct fsl_dspi *dspi) struct spi_transfer *xfer = dspi->cur_transfer; bool odd = !!(dspi->len & 1); - /* No accel for frames not multiple of 8 bits at the moment */ - if (xfer->bits_per_word % 8) + /* + * No accel for DMA transfers or frames not multiples of 8 bits at the + * moment. + */ + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE || + xfer->bits_per_word % 8) goto no_accel; if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) { @@ -756,10 +898,7 @@ static void dspi_setup_accel(struct fsl_dspi *dspi) dspi->oper_bits_per_word = 8; } else { /* Start off with maximum supported by hardware */ - if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) - dspi->oper_bits_per_word = 32; - else - dspi->oper_bits_per_word = 16; + dspi->oper_bits_per_word = 32; /* * And go down only if the buffer can't be sent with @@ -1027,6 +1166,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, return status; } +static int dspi_set_mtf(struct fsl_dspi *dspi) +{ + if (spi_controller_is_target(dspi->ctlr)) + return 0; + + if (dspi->mtf_enabled) + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE, + SPI_MCR_MTFE); + else + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE, 0); + + return 0; +} + static int dspi_setup(struct spi_device *spi) { struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller); @@ -1085,7 +1238,16 @@ static int dspi_setup(struct spi_device *spi) cs_sck_delay, sck_cs_delay); clkrate = clk_get_rate(dspi->clk); - hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); + + if (is_s32g_dspi(dspi) && spi->max_speed_hz > SPI_25MHZ) + dspi->mtf_enabled = true; + else + dspi->mtf_enabled = false; + + dspi_set_mtf(dspi); + + hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate, + dspi->mtf_enabled); /* Set PCS to SCK delay scale values */ ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate); @@ -1107,6 +1269,9 @@ static int dspi_setup(struct spi_device *spi) SPI_CTAR_PBR(pbr) | SPI_CTAR_BR(br); + if (dspi->mtf_enabled) + chip->ctar_val |= SPI_CTAR_DBR; + if (spi->mode & SPI_LSB_FIRST) chip->ctar_val |= SPI_CTAR_LSBFE; } @@ -1160,112 +1325,14 @@ static const struct of_device_id fsl_dspi_dt_ids[] = { }, { .compatible = "fsl,lx2160a-dspi", .data = &devtype_data[LX2160A], + }, { + .compatible = "nxp,s32g2-dspi", + .data = &devtype_data[S32G], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); -#ifdef CONFIG_PM_SLEEP -static int dspi_suspend(struct device *dev) -{ - struct fsl_dspi 
*dspi = dev_get_drvdata(dev); - - if (dspi->irq) - disable_irq(dspi->irq); - spi_controller_suspend(dspi->ctlr); - clk_disable_unprepare(dspi->clk); - - pinctrl_pm_select_sleep_state(dev); - - return 0; -} - -static int dspi_resume(struct device *dev) -{ - struct fsl_dspi *dspi = dev_get_drvdata(dev); - int ret; - - pinctrl_pm_select_default_state(dev); - - ret = clk_prepare_enable(dspi->clk); - if (ret) - return ret; - spi_controller_resume(dspi->ctlr); - if (dspi->irq) - enable_irq(dspi->irq); - - return 0; -} -#endif /* CONFIG_PM_SLEEP */ - -static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); - -static const struct regmap_range dspi_yes_ranges[] = { - regmap_reg_range(SPI_MCR, SPI_MCR), - regmap_reg_range(SPI_TCR, SPI_CTAR(3)), - regmap_reg_range(SPI_SR, SPI_TXFR3), - regmap_reg_range(SPI_RXFR0, SPI_RXFR3), - regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), - regmap_reg_range(SPI_SREX, SPI_SREX), -}; - -static const struct regmap_access_table dspi_access_table = { - .yes_ranges = dspi_yes_ranges, - .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), -}; - -static const struct regmap_range dspi_volatile_ranges[] = { - regmap_reg_range(SPI_MCR, SPI_TCR), - regmap_reg_range(SPI_SR, SPI_SR), - regmap_reg_range(SPI_PUSHR, SPI_RXFR3), -}; - -static const struct regmap_access_table dspi_volatile_table = { - .yes_ranges = dspi_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges), -}; - -static const struct regmap_config dspi_regmap_config = { - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, - .max_register = 0x88, - .volatile_table = &dspi_volatile_table, - .rd_table = &dspi_access_table, - .wr_table = &dspi_access_table, -}; - -static const struct regmap_range dspi_xspi_volatile_ranges[] = { - regmap_reg_range(SPI_MCR, SPI_TCR), - regmap_reg_range(SPI_SR, SPI_SR), - regmap_reg_range(SPI_PUSHR, SPI_RXFR3), - regmap_reg_range(SPI_SREX, SPI_SREX), -}; - -static const struct regmap_access_table dspi_xspi_volatile_table = { - .yes_ranges = dspi_xspi_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges), -}; - -static const struct regmap_config dspi_xspi_regmap_config[] = { - { - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, - .max_register = 0x13c, - .volatile_table = &dspi_xspi_volatile_table, - .rd_table = &dspi_access_table, - .wr_table = &dspi_access_table, - }, - { - .name = "pushr", - .reg_bits = 16, - .val_bits = 16, - .reg_stride = 2, - .max_register = 0x2, - }, -}; - static int dspi_init(struct fsl_dspi *dspi) { unsigned int mcr; @@ -1301,6 +1368,50 @@ static int dspi_init(struct fsl_dspi *dspi) return 0; } +#ifdef CONFIG_PM_SLEEP +static int dspi_suspend(struct device *dev) +{ + struct fsl_dspi *dspi = dev_get_drvdata(dev); + + if (dspi->irq) + disable_irq(dspi->irq); + spi_controller_suspend(dspi->ctlr); + clk_disable_unprepare(dspi->clk); + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int dspi_resume(struct device *dev) +{ + struct fsl_dspi *dspi = dev_get_drvdata(dev); + int ret; + + pinctrl_pm_select_default_state(dev); + + ret = clk_prepare_enable(dspi->clk); + if (ret) + return ret; + spi_controller_resume(dspi->ctlr); + + ret = dspi_init(dspi); + if (ret) { + dev_err(dev, "failed to initialize dspi during resume\n"); + return ret; + } + + dspi_set_mtf(dspi); + + if (dspi->irq) + enable_irq(dspi->irq); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); + static int dspi_target_abort(struct spi_controller *host) { struct fsl_dspi *dspi = 
spi_controller_get_devdata(host); @@ -1325,7 +1436,6 @@ static int dspi_target_abort(struct spi_controller *host) static int dspi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct regmap_config *regmap_config; struct fsl_dspi_platform_data *pdata; struct spi_controller *ctlr; int ret, cs_num, bus_num = -1; @@ -1338,7 +1448,10 @@ static int dspi_probe(struct platform_device *pdev) if (!dspi) return -ENOMEM; - ctlr = spi_alloc_host(&pdev->dev, 0); + if (of_property_read_bool(np, "spi-slave")) + ctlr = spi_alloc_target(&pdev->dev, 0); + else + ctlr = spi_alloc_host(&pdev->dev, 0); if (!ctlr) return -ENOMEM; @@ -1377,9 +1490,6 @@ static int dspi_probe(struct platform_device *pdev) of_property_read_u32(np, "bus-num", &bus_num); ctlr->bus_num = bus_num; - if (of_property_read_bool(np, "spi-slave")) - ctlr->target = true; - dspi->devtype_data = of_device_get_match_data(&pdev->dev); if (!dspi->devtype_data) { dev_err(&pdev->dev, "can't get devtype_data\n"); @@ -1397,6 +1507,9 @@ static int dspi_probe(struct platform_device *pdev) dspi->pushr_tx = 0; } + if (spi_controller_is_target(ctlr) && is_s32g_dspi(dspi)) + dspi->devtype_data = &devtype_data[S32G_TARGET]; + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); else @@ -1408,11 +1521,8 @@ static int dspi_probe(struct platform_device *pdev) goto out_ctlr_put; } - if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) - regmap_config = &dspi_xspi_regmap_config[0]; - else - regmap_config = &dspi_regmap_config; - dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config); + dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, + dspi->devtype_data->regmap); if (IS_ERR(dspi->regmap)) { dev_err(&pdev->dev, "failed to init regmap: %ld\n", PTR_ERR(dspi->regmap)); @@ -1423,7 +1533,7 @@ static int dspi_probe(struct platform_device *pdev) if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) { dspi->regmap_pushr = devm_regmap_init_mmio( &pdev->dev, base + SPI_PUSHR, - &dspi_xspi_regmap_config[1]); + &dspi_regmap_config[DSPI_PUSHR]); if (IS_ERR(dspi->regmap_pushr)) { dev_err(&pdev->dev, "failed to init pushr regmap: %ld\n", diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 6a73eaa34cf7..f2f1d3298e6c 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -513,7 +513,6 @@ static int fsl_espi_setup(struct spi_device *spi) fsl_espi_setup_transfer(spi, NULL); - pm_runtime_mark_last_busy(espi->dev); pm_runtime_put_autosuspend(espi->dev); return 0; @@ -726,7 +725,6 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem, dev_info(dev, "irq = %u\n", irq); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 5e3818445234..67d4000c3cef 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -233,7 +233,6 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller) struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); - pm_runtime_mark_last_busy(fsl_lpspi->dev); pm_runtime_put_autosuspend(fsl_lpspi->dev); return 0; @@ -966,7 +965,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) goto free_dma; } - pm_runtime_mark_last_busy(fsl_lpspi->dev); pm_runtime_put_autosuspend(fsl_lpspi->dev); return 0; diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index ea5f1b10b79e..c8dadb532c40 100644 --- a/drivers/spi/spi-gpio.c +++ 
b/drivers/spi/spi-gpio.c @@ -104,7 +104,7 @@ static inline int getmiso(const struct spi_device *spi) */ static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { if (unlikely(spi->mode & SPI_LSB_FIRST)) return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits); @@ -113,7 +113,7 @@ static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, } static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { if (unlikely(spi->mode & SPI_LSB_FIRST)) return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits); @@ -122,7 +122,7 @@ static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, } static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { if (unlikely(spi->mode & SPI_LSB_FIRST)) return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits); @@ -131,7 +131,7 @@ static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, } static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { if (unlikely(spi->mode & SPI_LSB_FIRST)) return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits); @@ -150,7 +150,7 @@ static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, */ static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { flags = spi->controller->flags; if (unlikely(spi->mode & SPI_LSB_FIRST)) @@ -160,7 +160,7 @@ static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, } static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { flags = spi->controller->flags; if (unlikely(spi->mode & SPI_LSB_FIRST)) @@ -170,7 +170,7 @@ static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, } static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { flags = spi->controller->flags; if (unlikely(spi->mode & SPI_LSB_FIRST)) @@ -180,7 +180,7 @@ static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, } static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits, unsigned flags) + unsigned int nsecs, u32 word, u8 bits, unsigned int flags) { flags = spi->controller->flags; if (unlikely(spi->mode & SPI_LSB_FIRST)) diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index c93d80a4d734..155ddeb8fcd4 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1748,7 +1748,6 @@ spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *m ret = spi_imx->devtype_data->prepare_message(spi_imx, msg); if (ret) { - pm_runtime_mark_last_busy(spi_imx->dev); pm_runtime_put_autosuspend(spi_imx->dev); } @@ -1760,7 +1759,6 @@ spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message { struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); - pm_runtime_mark_last_busy(spi_imx->dev); pm_runtime_put_autosuspend(spi_imx->dev); return 0; } @@ -1933,7 +1931,6 @@ static int 
spi_imx_probe(struct platform_device *pdev) goto out_register_controller; } - pm_runtime_mark_last_busy(spi_imx->dev); pm_runtime_put_autosuspend(spi_imx->dev); return ret; diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 5d5a546c62ea..13bbb2133507 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -189,6 +189,11 @@ struct intel_spi_mem_op { static bool writeable; module_param(writeable, bool, 0); MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)"); +static bool ignore_protection_status; +module_param(ignore_protection_status, bool, 0); +MODULE_PARM_DESC( + ignore_protection_status, + "Do not block SPI flash chip write access even if it is write-protected (default=0)"); static void intel_spi_dump_regs(struct intel_spi *ispi) { @@ -1248,13 +1253,15 @@ static void intel_spi_fill_partition(struct intel_spi *ispi, continue; /* - * If any of the regions have protection bits set, make the - * whole partition read-only to be on the safe side. + * If any of the regions have protection bits set and + * the ignore protection status parameter is not set, + * make the whole partition read-only to be on the safe side. * * Also if the user did not ask the chip to be writeable * mask the bit too. */ - if (!writeable || intel_spi_is_protected(ispi, base, limit)) { + if (!writeable || (!ignore_protection_status && + intel_spi_is_protected(ispi, base, limit))) { part->mask_flags |= MTD_WRITEABLE; ispi->protected = true; } diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c index fa828fcaaef2..d13a9b755c7f 100644 --- a/drivers/spi/spi-microchip-core-qspi.c +++ b/drivers/spi/spi-microchip-core-qspi.c @@ -194,7 +194,7 @@ static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi) } } -static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word) +static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi) { u32 control, data; @@ -222,6 +222,87 @@ static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word) } } +static inline void mchp_coreqspi_write_read_op(struct mchp_coreqspi *qspi) +{ + u32 control, data; + + qspi->rx_len = qspi->tx_len; + + control = readl_relaxed(qspi->regs + REG_CONTROL); + control |= CONTROL_FLAGSX4; + writel_relaxed(control, qspi->regs + REG_CONTROL); + + while (qspi->tx_len >= 4) { + while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL) + ; + + data = qspi->txbuf ? *((u32 *)qspi->txbuf) : 0xaa; + if (qspi->txbuf) + qspi->txbuf += 4; + qspi->tx_len -= 4; + writel_relaxed(data, qspi->regs + REG_X4_TX_DATA); + + /* + * The rx FIFO is twice the size of the tx FIFO, so there is + * no requirement to block transmission if receive data is not + * ready, and it is fine to let the tx FIFO completely fill + * without reading anything from the rx FIFO. Once the tx FIFO + * has been filled and becomes non-full due to a transmission + * occurring there will always be something to receive. + * IOW, this is safe as TX_FIFO_SIZE + 4 < 2 * TX_FIFO_SIZE + */ + if (qspi->rx_len >= 4) { + if (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXAVAILABLE) { + data = readl_relaxed(qspi->regs + REG_X4_RX_DATA); + *(u32 *)qspi->rxbuf = data; + qspi->rxbuf += 4; + qspi->rx_len -= 4; + } + } + } + + /* + * Since transmission is not being blocked by clearing the rx FIFO, + * loop here until all received data "leaked" by the loop above has + * been dealt with. 
+ */ + while (qspi->rx_len >= 4) { + while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY) + ; + data = readl_relaxed(qspi->regs + REG_X4_RX_DATA); + *(u32 *)qspi->rxbuf = data; + qspi->rxbuf += 4; + qspi->rx_len -= 4; + } + + /* + * Since rx_len and tx_len must be < 4 bytes at this point, there's no + * concern about overflowing the rx or tx FIFOs any longer. It's + * therefore safe to loop over the remainder of the transmit data before + * handling the remaining receive data. + */ + if (!qspi->tx_len) + return; + + control &= ~CONTROL_FLAGSX4; + writel_relaxed(control, qspi->regs + REG_CONTROL); + + while (qspi->tx_len--) { + while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL) + ; + data = qspi->txbuf ? *qspi->txbuf : 0xaa; + qspi->txbuf++; + writel_relaxed(data, qspi->regs + REG_TX_DATA); + } + + while (qspi->rx_len--) { + while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY) + ; + data = readl_relaxed(qspi->regs + REG_RX_DATA); + *qspi->rxbuf++ = (data & 0xFF); + } +} + static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi) { u32 mask = IEN_TXDONE | @@ -266,7 +347,7 @@ static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id) } static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi, - const struct spi_mem_op *op) + u32 max_freq) { unsigned long clk_hz; u32 control, baud_rate_val = 0; @@ -275,11 +356,11 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi if (!clk_hz) return -EINVAL; - baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq); + baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * max_freq); if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) { dev_err(&spi->dev, "could not configure the clock for spi clock %d Hz & system clock %ld Hz\n", - op->max_freq, clk_hz); + max_freq, clk_hz); return -EINVAL; } @@ -367,23 +448,13 @@ static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const str writel_relaxed(frames, qspi->regs + REG_FRAMES); } -static int mchp_qspi_wait_for_ready(struct spi_mem *mem) +static int mchp_coreqspi_wait_for_ready(struct mchp_coreqspi *qspi) { - struct mchp_coreqspi *qspi = spi_controller_get_devdata - (mem->spi->controller); u32 status; - int ret; - ret = readl_poll_timeout(qspi->regs + REG_STATUS, status, + return readl_poll_timeout(qspi->regs + REG_STATUS, status, (status & STATUS_READY), 0, TIMEOUT_MS); - if (ret) { - dev_err(&mem->spi->dev, - "Timeout waiting on QSPI ready.\n"); - return -ETIMEDOUT; - } - - return ret; } static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) @@ -396,11 +467,13 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o int err, i; mutex_lock(&qspi->op_lock); - err = mchp_qspi_wait_for_ready(mem); - if (err) + err = mchp_coreqspi_wait_for_ready(qspi); + if (err) { + dev_err(&mem->spi->dev, "Timeout waiting on QSPI ready.\n"); goto error; + } - err = mchp_coreqspi_setup_clock(qspi, mem->spi, op); + err = mchp_coreqspi_setup_clock(qspi, mem->spi, op->max_freq); if (err) goto error; @@ -415,7 +488,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o qspi->rxbuf = NULL; qspi->tx_len = op->cmd.nbytes; qspi->rx_len = 0; - mchp_coreqspi_write_op(qspi, false); + mchp_coreqspi_write_op(qspi); } qspi->txbuf = &opaddr[0]; @@ -426,7 +499,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o qspi->rxbuf = NULL; qspi->tx_len = op->addr.nbytes; qspi->rx_len = 0; - 
mchp_coreqspi_write_op(qspi, false); + mchp_coreqspi_write_op(qspi); } if (op->data.nbytes) { @@ -435,7 +508,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o qspi->rxbuf = NULL; qspi->rx_len = 0; qspi->tx_len = op->data.nbytes; - mchp_coreqspi_write_op(qspi, true); + mchp_coreqspi_write_op(qspi); } else { qspi->txbuf = NULL; qspi->rxbuf = (u8 *)op->data.buf.in; @@ -515,6 +588,109 @@ static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = { .per_op_freq = true, }; +static int mchp_coreqspi_unprepare_message(struct spi_controller *ctlr, struct spi_message *m) +{ + struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr); + + /* + * This delay is required for the driver to function correctly, + * but no explanation has been determined for why it is required. + */ + udelay(750); + + mutex_unlock(&qspi->op_lock); + + return 0; +} + +static int mchp_coreqspi_prepare_message(struct spi_controller *ctlr, struct spi_message *m) +{ + struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr); + struct spi_transfer *t = NULL; + u32 control, frames; + u32 total_bytes = 0, cmd_bytes = 0, idle_cycles = 0; + int ret; + bool quad = false, dual = false; + + mutex_lock(&qspi->op_lock); + ret = mchp_coreqspi_wait_for_ready(qspi); + if (ret) { + mutex_unlock(&qspi->op_lock); + dev_err(&ctlr->dev, "Timeout waiting on QSPI ready.\n"); + return ret; + } + + ret = mchp_coreqspi_setup_clock(qspi, m->spi, m->spi->max_speed_hz); + if (ret) { + mutex_unlock(&qspi->op_lock); + return ret; + } + + control = readl_relaxed(qspi->regs + REG_CONTROL); + control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0); + writel_relaxed(control, qspi->regs + REG_CONTROL); + + reinit_completion(&qspi->data_completion); + + list_for_each_entry(t, &m->transfers, transfer_list) { + total_bytes += t->len; + if (!cmd_bytes && !(t->tx_buf && t->rx_buf)) + cmd_bytes = t->len; + if (!t->rx_buf) + cmd_bytes = total_bytes; + if (t->tx_nbits == SPI_NBITS_QUAD || t->rx_nbits == SPI_NBITS_QUAD) + quad = true; + else if (t->tx_nbits == SPI_NBITS_DUAL || t->rx_nbits == SPI_NBITS_DUAL) + dual = true; + } + + control = readl_relaxed(qspi->regs + REG_CONTROL); + if (quad) { + control |= (CONTROL_MODE0 | CONTROL_MODE12_EX_RW); + } else if (dual) { + control &= ~CONTROL_MODE0; + control |= CONTROL_MODE12_FULL; + } else { + control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0); + } + writel_relaxed(control, qspi->regs + REG_CONTROL); + + frames = total_bytes & BYTESUPPER_MASK; + writel_relaxed(frames, qspi->regs + REG_FRAMESUP); + frames = total_bytes & BYTESLOWER_MASK; + frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT; + frames |= idle_cycles << FRAMES_IDLE_SHIFT; + control = readl_relaxed(qspi->regs + REG_CONTROL); + if (control & CONTROL_MODE12_MASK) + frames |= (1 << FRAMES_SHIFT); + + frames |= FRAMES_FLAGWORD; + writel_relaxed(frames, qspi->regs + REG_FRAMES); + + return 0; +}; + +static int mchp_coreqspi_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, + struct spi_transfer *t) +{ + struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr); + + qspi->tx_len = t->len; + + if (t->tx_buf) + qspi->txbuf = (u8 *)t->tx_buf; + + if (!t->rx_buf) { + mchp_coreqspi_write_op(qspi); + } else { + qspi->rxbuf = (u8 *)t->rx_buf; + qspi->rx_len = t->len; + mchp_coreqspi_write_read_op(qspi); + } + + return 0; +} + static int mchp_coreqspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -562,6 +738,12 @@ static int mchp_coreqspi_probe(struct platform_device *pdev) ctlr->mode_bits 
= SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; ctlr->dev.of_node = np; + ctlr->min_speed_hz = clk_get_rate(qspi->clk) / 30; + ctlr->prepare_message = mchp_coreqspi_prepare_message; + ctlr->unprepare_message = mchp_coreqspi_unprepare_message; + ctlr->transfer_one = mchp_coreqspi_transfer_one; + ctlr->num_chipselect = 2; + ctlr->use_gpio_descriptors = true; ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret) diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 4b0a1c0db041..a6032d44771b 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -220,6 +220,14 @@ static const struct mtk_spi_compatible mt6893_compat = { .no_need_unprepare = true, }; +static const struct mtk_spi_compatible mt6991_compat = { + .need_pad_sel = true, + .must_tx = true, + .enhance_timing = true, + .dma_ext = true, + .ipm_design = true, +}; + /* * A piece of default chip info unless the platform * supplies it. @@ -245,6 +253,9 @@ static const struct of_device_id mtk_spi_of_match[] = { { .compatible = "mediatek,mt6765-spi", .data = (void *)&mt6765_compat, }, + { .compatible = "mediatek,mt6991-spi", + .data = (void *)&mt6991_compat, + }, { .compatible = "mediatek,mt7622-spi", .data = (void *)&mt7622_compat, }, diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c index 85ab5ce96c4d..5cc4632e13d7 100644 --- a/drivers/spi/spi-mtk-nor.c +++ b/drivers/spi/spi-mtk-nor.c @@ -918,7 +918,6 @@ static int mtk_nor_probe(struct platform_device *pdev) if (ret < 0) goto err_probe; - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq); diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index e63c77e41823..c7d4827f1bf1 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -968,7 +968,6 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) /* Invalidate the data in the AHB buffer. */ nxp_fspi_invalid(f); - pm_runtime_mark_last_busy(f->dev); pm_runtime_put_autosuspend(f->dev); return err; diff --git a/drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c b/drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c new file mode 100644 index 000000000000..035d088d4d33 --- /dev/null +++ b/drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Analog Devices Inc. 
+ * Copyright (C) 2025 BayLibre, SAS + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/spi/offload/provider.h> + +static bool adi_util_sigma_delta_match(struct spi_offload_trigger *trigger, + enum spi_offload_trigger_type type, + u64 *args, u32 nargs) +{ + return type == SPI_OFFLOAD_TRIGGER_DATA_READY && nargs == 0; +} + +static const struct spi_offload_trigger_ops adi_util_sigma_delta_ops = { + .match = adi_util_sigma_delta_match, +}; + +static int adi_util_sigma_delta_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_offload_trigger_info info = { + .fwnode = dev_fwnode(dev), + .ops = &adi_util_sigma_delta_ops, + }; + struct clk *clk; + + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "Failed to get clock\n"); + + return devm_spi_offload_trigger_register(dev, &info); +} + +static const struct of_device_id adi_util_sigma_delta_of_match_table[] = { + { .compatible = "adi,util-sigma-delta-spi", }, + { } +}; +MODULE_DEVICE_TABLE(of, adi_util_sigma_delta_of_match_table); + +static struct platform_driver adi_util_sigma_delta_driver = { + .probe = adi_util_sigma_delta_probe, + .driver = { + .name = "adi-util-sigma-delta-spi", + .of_match_table = adi_util_sigma_delta_of_match_table, + }, +}; +module_platform_driver(adi_util_sigma_delta_driver); + +MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>"); +MODULE_DESCRIPTION("ADI Sigma-Delta SPI offload trigger utility driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 70bb74b3bd9c..6dc58a30804a 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -272,7 +272,6 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable) mcspi_write_chconf0(spi, l); - pm_runtime_mark_last_busy(mcspi->dev); pm_runtime_put_autosuspend(mcspi->dev); } } @@ -1102,7 +1101,6 @@ static int omap2_mcspi_setup(struct spi_device *spi) if (ret && initial_setup) omap2_mcspi_cleanup(spi); - pm_runtime_mark_last_busy(mcspi->dev); pm_runtime_put_autosuspend(mcspi->dev); return ret; @@ -1379,7 +1377,6 @@ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi) ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN; omap2_mcspi_set_mode(ctlr); - pm_runtime_mark_last_busy(mcspi->dev); pm_runtime_put_autosuspend(mcspi->dev); return 0; } diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c index e27642c4dea4..8577a19705de 100644 --- a/drivers/spi/spi-pci1xxxx.c +++ b/drivers/spi/spi-pci1xxxx.c @@ -23,6 +23,7 @@ #define SYS_FREQ_DEFAULT (62500000) #define PCI1XXXX_SPI_MAX_CLOCK_HZ (30000000) +#define PCI1XXXX_SPI_CLK_25MHZ (25000000) #define PCI1XXXX_SPI_CLK_20MHZ (20000000) #define PCI1XXXX_SPI_CLK_15MHZ (15000000) #define PCI1XXXX_SPI_CLK_12MHZ (12000000) @@ -96,8 +97,8 @@ #define SPI_DMA_CH1_DONE_INT BIT(1) #define SPI_DMA_CH0_ABORT_INT BIT(16) #define SPI_DMA_CH1_ABORT_INT BIT(17) -#define SPI_DMA_DONE_INT_MASK (SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT) -#define SPI_DMA_ABORT_INT_MASK (SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT) +#define SPI_DMA_DONE_INT_MASK(x) (1 << (x)) +#define SPI_DMA_ABORT_INT_MASK(x) (1 << (16 + (x))) #define DMA_CH_CONTROL_LIE BIT(3) #define DMA_CH_CONTROL_RIE BIT(4) #define DMA_INTR_EN (DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE) @@ -131,12 +132,15 @@ #define SPI_SUSPEND_CONFIG 0x101 #define 
SPI_RESUME_CONFIG 0x203 +#define NUM_VEC_PER_INST 3 + struct pci1xxxx_spi_internal { u8 hw_inst; u8 clkdiv; - int irq; + int irq[NUM_VEC_PER_INST]; int mode; bool spi_xfer_in_progress; + atomic_t dma_completion_count; void *rx_buf; bool dma_aborted_rd; u32 bytes_recvd; @@ -160,8 +164,10 @@ struct pci1xxxx_spi { u8 dev_rev; void __iomem *reg_base; void __iomem *dma_offset_bar; - /* lock to safely access the DMA registers in isr */ - spinlock_t dma_reg_lock; + /* lock to safely access the DMA RD registers in isr */ + spinlock_t dma_rd_reg_lock; + /* lock to safely access the DMA RD registers in isr */ + spinlock_t dma_wr_reg_lock; bool can_dma; struct pci1xxxx_spi_internal *spi_int[] __counted_by(total_hw_instances); }; @@ -192,6 +198,9 @@ static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table); +static irqreturn_t pci1xxxx_spi_isr_dma_rd(int irq, void *dev); +static irqreturn_t pci1xxxx_spi_isr_dma_wr(int irq, void *dev); + static int pci1xxxx_set_sys_lock(struct pci1xxxx_spi *par) { writel(SPI_SYSLOCK, par->reg_base + SPI_SYSLOCK_REG); @@ -212,13 +221,16 @@ static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par) writel(0x0, par->reg_base + SPI_SYSLOCK_REG); } -static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq) +static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int hw_inst, int num_vector) { struct pci_dev *pdev = spi_bus->dev; u32 pf_num; u32 regval; int ret; + if (num_vector != hw_inst * NUM_VEC_PER_INST) + return -EOPNOTSUPP; + /* * DEV REV Registers is a system register, HW Syslock bit * should be acquired before accessing the register @@ -246,16 +258,6 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq) if (spi_bus->dev_rev < 0xC0 || pf_num) return -EOPNOTSUPP; - /* - * DMA Supported only with MSI Interrupts - * One of the SPI instance's MSI vector address and data - * is used for DMA Interrupt - */ - if (!irq_get_msi_desc(irq)) { - dev_warn(&pdev->dev, "Error MSI Interrupt not supported, will operate in PIO mode\n"); - return -EOPNOTSUPP; - } - spi_bus->dma_offset_bar = pcim_iomap(pdev, 2, pci_resource_len(pdev, 2)); if (!spi_bus->dma_offset_bar) { dev_warn(&pdev->dev, "Error failed to map dma bar, will operate in PIO mode\n"); @@ -272,29 +274,91 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq) return 0; } -static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq) +static void pci1xxxx_spi_dma_config(struct pci1xxxx_spi *spi_bus) { + struct pci1xxxx_spi_internal *spi_sub_ptr; + u8 iter, irq_index; struct msi_msg msi; + u32 regval; + u16 data; + + irq_index = spi_bus->total_hw_instances; + for (iter = 0; iter < spi_bus->total_hw_instances; iter++) { + spi_sub_ptr = spi_bus->spi_int[iter]; + get_cached_msi_msg(spi_sub_ptr->irq[1], &msi); + if (iter == 0) { + writel(msi.address_hi, spi_bus->dma_offset_bar + + SPI_DMA_INTR_IMWR_WDONE_HIGH); + writel(msi.address_hi, spi_bus->dma_offset_bar + + SPI_DMA_INTR_IMWR_WABORT_HIGH); + writel(msi.address_hi, spi_bus->dma_offset_bar + + SPI_DMA_INTR_IMWR_RDONE_HIGH); + writel(msi.address_hi, spi_bus->dma_offset_bar + + SPI_DMA_INTR_IMWR_RABORT_HIGH); + writel(msi.address_lo, spi_bus->dma_offset_bar + + SPI_DMA_INTR_IMWR_WDONE_LOW); + writel(msi.address_lo, spi_bus->dma_offset_bar + + SPI_DMA_INTR_IMWR_WABORT_LOW); + writel(msi.address_lo, spi_bus->dma_offset_bar + + SPI_DMA_INTR_IMWR_RDONE_LOW); + writel(msi.address_lo, spi_bus->dma_offset_bar + + 
SPI_DMA_INTR_IMWR_RABORT_LOW); + writel(0, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA); + writel(0, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA); + } + regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA); + data = msi.data + irq_index; + writel((regval | (data << (iter * 16))), spi_bus->dma_offset_bar + + SPI_DMA_INTR_WR_IMWR_DATA); + regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA); + irq_index++; + + data = msi.data + irq_index; + regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA); + writel(regval | (data << (iter * 16)), spi_bus->dma_offset_bar + + SPI_DMA_INTR_RD_IMWR_DATA); + regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA); + irq_index++; + } +} + +static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int hw_inst, int num_vector) +{ + struct pci1xxxx_spi_internal *spi_sub_ptr; + u8 iter, irq_index; int ret; - ret = pci1xxxx_check_spi_can_dma(spi_bus, irq); + irq_index = hw_inst; + ret = pci1xxxx_check_spi_can_dma(spi_bus, hw_inst, num_vector); if (ret) return ret; - spin_lock_init(&spi_bus->dma_reg_lock); - get_cached_msi_msg(irq, &msi); + spin_lock_init(&spi_bus->dma_rd_reg_lock); + spin_lock_init(&spi_bus->dma_wr_reg_lock); writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN); writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN); - writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH); - writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH); - writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH); - writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH); - writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW); - writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW); - writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW); - writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW); - writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA); - writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA); + + for (iter = 0; iter < hw_inst; iter++) { + spi_sub_ptr = spi_bus->spi_int[iter]; + spi_sub_ptr->irq[1] = pci_irq_vector(spi_bus->dev, irq_index); + ret = devm_request_irq(&spi_bus->dev->dev, spi_sub_ptr->irq[1], + pci1xxxx_spi_isr_dma_wr, PCI1XXXX_IRQ_FLAGS, + pci_name(spi_bus->dev), spi_sub_ptr); + if (ret < 0) + return ret; + + irq_index++; + + spi_sub_ptr->irq[2] = pci_irq_vector(spi_bus->dev, irq_index); + ret = devm_request_irq(&spi_bus->dev->dev, spi_sub_ptr->irq[2], + pci1xxxx_spi_isr_dma_rd, PCI1XXXX_IRQ_FLAGS, + pci_name(spi_bus->dev), spi_sub_ptr); + if (ret < 0) + return ret; + + irq_index++; + } + pci1xxxx_spi_dma_config(spi_bus); dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE); spi_bus->can_dma = true; return 0; @@ -318,12 +382,14 @@ static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable) writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst)); } -static u8 pci1xxxx_get_clock_div(u32 hz) +static u8 pci1xxxx_get_clock_div(struct pci1xxxx_spi *par, u32 hz) { u8 val = 0; if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ) val = 2; + else if (par->dev_rev >= 0xC0 && hz >= PCI1XXXX_SPI_CLK_25MHZ) + val = 1; else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ)) val = 3; else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ)) @@ -398,13 
+464,14 @@ static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode, writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst)); } -static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst) +static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p) { u32 regval; - regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst)); + atomic_set(&p->dma_completion_count, 0); + regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst)); regval |= SPI_MST_CTL_GO; - writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst)); + writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst)); } static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr, @@ -423,7 +490,7 @@ static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr, p->spi_xfer_in_progress = true; p->bytes_recvd = 0; - clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz); + clkdiv = pci1xxxx_get_clock_div(par, xfer->speed_hz); tx_buf = xfer->tx_buf; rx_buf = xfer->rx_buf; transfer_len = xfer->len; @@ -448,7 +515,7 @@ static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr, &tx_buf[bytes_transfered], len); bytes_transfered += len; pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len); - pci1xxxx_start_spi_xfer(p, p->hw_inst); + pci1xxxx_start_spi_xfer(p); /* Wait for DMA_TERM interrupt */ result = wait_for_completion_timeout(&p->spi_xfer_done, @@ -474,7 +541,6 @@ static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr, { struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr); struct pci1xxxx_spi *par = p->parent; - dma_addr_t rx_dma_addr = 0; dma_addr_t tx_dma_addr = 0; int ret = 0; u32 regval; @@ -483,6 +549,7 @@ static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr, p->tx_sgl = xfer->tx_sg.sgl; p->rx_sgl = xfer->rx_sg.sgl; p->rx_buf = xfer->rx_buf; + atomic_set(&p->dma_completion_count, 1); regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst)); writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst)); @@ -492,20 +559,16 @@ static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr, } p->xfer = xfer; p->mode = spi->mode; - p->clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz); + p->clkdiv = pci1xxxx_get_clock_div(par, xfer->speed_hz); p->bytes_recvd = 0; p->rx_buf = xfer->rx_buf; regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst)); writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst)); tx_dma_addr = sg_dma_address(p->tx_sgl); - rx_dma_addr = sg_dma_address(p->rx_sgl); p->tx_sgl_len = sg_dma_len(p->tx_sgl); - p->rx_sgl_len = sg_dma_len(p->rx_sgl); pci1xxxx_spi_setup(par, p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len); pci1xxxx_spi_setup_dma_to_io(p, (tx_dma_addr), p->tx_sgl_len); - if (rx_dma_addr) - pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len); writel(p->hw_inst, par->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG); reinit_completion(&p->spi_xfer_done); @@ -595,83 +658,111 @@ static irqreturn_t pci1xxxx_spi_isr_io(int irq, void *dev) return spi_int_fired; } -static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p) +static void pci1xxxx_spi_setup_next_dma_to_io_transfer(struct pci1xxxx_spi_internal *p) { dma_addr_t tx_dma_addr = 0; - dma_addr_t rx_dma_addr = 0; u32 prev_len; p->tx_sgl = sg_next(p->tx_sgl); - if (p->rx_sgl) - p->rx_sgl = sg_next(p->rx_sgl); - if (!p->tx_sgl) { - /* Clear xfer_done */ - complete(&p->spi_xfer_done); - } 
else { + if (p->tx_sgl) { tx_dma_addr = sg_dma_address(p->tx_sgl); prev_len = p->tx_sgl_len; p->tx_sgl_len = sg_dma_len(p->tx_sgl); + pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len); + writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG); if (prev_len != p->tx_sgl_len) pci1xxxx_spi_setup(p->parent, p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len); - pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len); - if (p->rx_sgl) { - rx_dma_addr = sg_dma_address(p->rx_sgl); - p->rx_sgl_len = sg_dma_len(p->rx_sgl); - pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len); - } - writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG); } } -static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev) +static void pci1xxxx_spi_setup_next_dma_from_io_transfer(struct pci1xxxx_spi_internal *p) +{ + dma_addr_t rx_dma_addr = 0; + + if (p->rx_sgl) { + rx_dma_addr = sg_dma_address(p->rx_sgl); + p->rx_sgl_len = sg_dma_len(p->rx_sgl); + pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len); + writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_WR_DOORBELL_REG); + } +} + +static irqreturn_t pci1xxxx_spi_isr_dma_rd(int irq, void *dev) { struct pci1xxxx_spi_internal *p = dev; irqreturn_t spi_int_fired = IRQ_NONE; unsigned long flags; u32 regval; - spin_lock_irqsave(&p->parent->dma_reg_lock, flags); /* Clear the DMA RD INT and start spi xfer*/ regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS); - if (regval & SPI_DMA_DONE_INT_MASK) { - if (regval & SPI_DMA_CH0_DONE_INT) - pci1xxxx_start_spi_xfer(p, SPI0); - if (regval & SPI_DMA_CH1_DONE_INT) - pci1xxxx_start_spi_xfer(p, SPI1); - spi_int_fired = IRQ_HANDLED; - } - if (regval & SPI_DMA_ABORT_INT_MASK) { - p->dma_aborted_rd = true; - spi_int_fired = IRQ_HANDLED; + if (regval) { + if (regval & SPI_DMA_DONE_INT_MASK(p->hw_inst)) { + /* Start the SPI transfer only if both DMA read and write are completed */ + if (atomic_inc_return(&p->dma_completion_count) == 2) + pci1xxxx_start_spi_xfer(p); + spi_int_fired = IRQ_HANDLED; + } + if (regval & SPI_DMA_ABORT_INT_MASK(p->hw_inst)) { + p->dma_aborted_rd = true; + spi_int_fired = IRQ_HANDLED; + } + spin_lock_irqsave(&p->parent->dma_rd_reg_lock, flags); + writel((SPI_DMA_DONE_INT_MASK(p->hw_inst) | SPI_DMA_ABORT_INT_MASK(p->hw_inst)), + p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR); + spin_unlock_irqrestore(&p->parent->dma_rd_reg_lock, flags); } - writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR); + return spi_int_fired; +} + +static irqreturn_t pci1xxxx_spi_isr_dma_wr(int irq, void *dev) +{ + struct pci1xxxx_spi_internal *p = dev; + irqreturn_t spi_int_fired = IRQ_NONE; + unsigned long flags; + u32 regval; /* Clear the DMA WR INT */ regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_WR_STS); - if (regval & SPI_DMA_DONE_INT_MASK) { - if (regval & SPI_DMA_CH0_DONE_INT) - pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI0]); - - if (regval & SPI_DMA_CH1_DONE_INT) - pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI1]); + if (regval) { + if (regval & SPI_DMA_DONE_INT_MASK(p->hw_inst)) { + spi_int_fired = IRQ_HANDLED; + if (sg_is_last(p->rx_sgl)) { + complete(&p->spi_xfer_done); + } else { + p->rx_sgl = sg_next(p->rx_sgl); + if (atomic_inc_return(&p->dma_completion_count) == 2) + pci1xxxx_start_spi_xfer(p); + } - spi_int_fired = IRQ_HANDLED; - } - if (regval & SPI_DMA_ABORT_INT_MASK) { - p->dma_aborted_wr = true; - spi_int_fired = IRQ_HANDLED; + } + if (regval & SPI_DMA_ABORT_INT_MASK(p->hw_inst)) { + 
p->dma_aborted_wr = true; + spi_int_fired = IRQ_HANDLED; + } + spin_lock_irqsave(&p->parent->dma_wr_reg_lock, flags); + writel((SPI_DMA_DONE_INT_MASK(p->hw_inst) | SPI_DMA_ABORT_INT_MASK(p->hw_inst)), + p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR); + spin_unlock_irqrestore(&p->parent->dma_wr_reg_lock, flags); } - writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR); - spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags); + return spi_int_fired; +} + +static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev) +{ + struct pci1xxxx_spi_internal *p = dev; + irqreturn_t spi_int_fired = IRQ_NONE; + u32 regval; /* Clear the SPI GO_BIT Interrupt */ regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst)); if (regval & SPI_INTR) { - writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_WR_DOORBELL_REG); + pci1xxxx_spi_setup_next_dma_from_io_transfer(p); + pci1xxxx_spi_setup_next_dma_to_io_transfer(p); spi_int_fired = IRQ_HANDLED; + writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst)); } - writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst)); return spi_int_fired; } @@ -761,7 +852,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * if (!spi_bus->reg_base) return -EINVAL; - num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt, + num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt * NUM_VEC_PER_INST, PCI_IRQ_INTX | PCI_IRQ_MSI); if (num_vector < 0) { dev_err(&pdev->dev, "Error allocating MSI vectors\n"); @@ -775,27 +866,23 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * regval &= ~SPI_INTR; writel(regval, spi_bus->reg_base + SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst)); - spi_sub_ptr->irq = pci_irq_vector(pdev, 0); + spi_sub_ptr->irq[0] = pci_irq_vector(pdev, 0); if (num_vector >= hw_inst_cnt) - ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, + ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0], pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS, pci_name(pdev), spi_sub_ptr); else - ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, + ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0], pci1xxxx_spi_shared_isr, PCI1XXXX_IRQ_FLAGS | IRQF_SHARED, pci_name(pdev), spi_bus); if (ret < 0) { dev_err(&pdev->dev, "Unable to request irq : %d", - spi_sub_ptr->irq); + spi_sub_ptr->irq[0]); return -ENODEV; } - ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq); - if (ret && ret != -EOPNOTSUPP) - return ret; - /* This register is only applicable for 1st instance */ regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0)); if (!only_sec_inst) @@ -817,13 +904,13 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * writel(regval, spi_bus->reg_base + SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst)); if (num_vector >= hw_inst_cnt) { - spi_sub_ptr->irq = pci_irq_vector(pdev, iter); - ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, + spi_sub_ptr->irq[0] = pci_irq_vector(pdev, iter); + ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0], pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS, pci_name(pdev), spi_sub_ptr); if (ret < 0) { dev_err(&pdev->dev, "Unable to request irq : %d", - spi_sub_ptr->irq); + spi_sub_ptr->irq[0]); return -ENODEV; } } @@ -846,6 +933,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * if (ret) return ret; } + ret = pci1xxxx_spi_dma_init(spi_bus, hw_inst_cnt, num_vector); + if (ret && ret != -EOPNOTSUPP) + return ret; + pci_set_drvdata(pdev, spi_bus); return 0; 
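The spi-pci1xxxx changes above split the shared DMA interrupt handler into per-instance read and write handlers, each with its own MSI vector, and only raise the SPI GO bit once both DMA directions have reported completion via an atomic counter. The following is a minimal sketch of that completion-counting pattern only, not the driver's actual code; the names demo_chan, demo_start_xfer, demo_dma_rd_isr and demo_dma_wr_isr are illustrative, and the hardware register writes are elided.

#include <linux/atomic.h>
#include <linux/interrupt.h>

struct demo_chan {
	/* reaches 2 once both the RD and WR DMA interrupts have fired */
	atomic_t dma_completion_count;
};

static void demo_start_xfer(struct demo_chan *c)
{
	/* reset the counter before kicking off the next transfer */
	atomic_set(&c->dma_completion_count, 0);
	/* ... set the per-instance SPI GO bit here ... */
}

static irqreturn_t demo_dma_rd_isr(int irq, void *dev_id)
{
	struct demo_chan *c = dev_id;

	/* start the SPI transfer only when both DMA directions are done */
	if (atomic_inc_return(&c->dma_completion_count) == 2)
		demo_start_xfer(c);

	return IRQ_HANDLED;
}

static irqreturn_t demo_dma_wr_isr(int irq, void *dev_id)
{
	struct demo_chan *c = dev_id;

	if (atomic_inc_return(&c->dma_completion_count) == 2)
		demo_start_xfer(c);

	return IRQ_HANDLED;
}

In this sketch either interrupt may arrive first; whichever handler observes the count reaching 2 triggers the transfer, which is the same ordering guarantee the patch relies on when it seeds the counter before ringing the DMA doorbells.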
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index f2e1a27b410d..a8c4eb1cbde1 100644 --- a/drivers/spi/spi-qpic-snand.c +++ b/drivers/spi/spi-qpic-snand.c @@ -59,12 +59,6 @@ #define OOB_BUF_SIZE 128 #define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng) -struct qpic_snand_op { - u32 cmd_reg; - u32 addr1_reg; - u32 addr2_reg; -}; - struct snandc_read_status { __le32 snandc_flash; __le32 snandc_buffer; @@ -283,9 +277,22 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) goto err_free_ecc_cfg; } - if (ecc_cfg->strength != 4) { + switch (ecc_cfg->strength) { + case 4: + ecc_cfg->ecc_mode = ECC_MODE_4BIT; + ecc_cfg->ecc_bytes_hw = 7; + ecc_cfg->spare_bytes = 4; + break; + + case 8: + ecc_cfg->ecc_mode = ECC_MODE_8BIT; + ecc_cfg->ecc_bytes_hw = 13; + ecc_cfg->spare_bytes = 2; + break; + + default: dev_err(snandc->dev, - "only 4 bits ECC strength is supported\n"); + "only 4 or 8 bits ECC strength is supported\n"); ret = -EOPNOTSUPP; goto err_free_ecc_cfg; } @@ -302,13 +309,11 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) nand->ecc.ctx.priv = ecc_cfg; snandc->qspi->mtd = mtd; - ecc_cfg->ecc_bytes_hw = 7; - ecc_cfg->spare_bytes = 4; ecc_cfg->bbm_size = 1; ecc_cfg->bch_enabled = true; ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size; - ecc_cfg->steps = 4; + ecc_cfg->steps = cwperpage; ecc_cfg->cw_data = 516; ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes; bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1; @@ -365,7 +370,7 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) FIELD_PREP(ECC_SW_RESET, 0) | FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) | FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) | - FIELD_PREP(ECC_MODE_MASK, 0) | + FIELD_PREP(ECC_MODE_MASK, ecc_cfg->ecc_mode) | FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw); ecc_cfg->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203); @@ -608,10 +613,16 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); - if (snandc->data_buffer[bbpos] == 0xff) - snandc->data_buffer[bbpos + 1] = 0xff; - if (snandc->data_buffer[bbpos] != 0xff) - snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos]; + /* + * TODO: The SPINAND code expects two bad block marker bytes + * at the beginning of the OOB area, but the OOB layout used by + * the driver has only one. Duplicate that for now in order to + * avoid certain blocks to be marked as bad. + * + * This can be removed once single-byte bad block marker support + * gets implemented in the SPINAND code. 
+ */ + snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos]; memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes); @@ -851,7 +862,7 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, int data_size, oob_size; if (i == (num_cw - 1)) { - data_size = 512 - ((num_cw - 1) << 2); + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; } else { @@ -1310,7 +1321,6 @@ static int qcom_spi_write_page(struct qcom_nand_controller *snandc, static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) { - struct qpic_snand_op s_op = {}; u32 cmd; int ret, opcode; @@ -1318,34 +1328,24 @@ static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, if (ret < 0) return ret; - s_op.cmd_reg = cmd; - s_op.addr1_reg = op->addr.val; - s_op.addr2_reg = 0; - opcode = op->cmd.opcode; switch (opcode) { case SPINAND_WRITE_EN: return 0; case SPINAND_PROGRAM_EXECUTE: - s_op.addr1_reg = op->addr.val << 16; - s_op.addr2_reg = op->addr.val >> 16 & 0xff; - snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); - snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->addr1 = cpu_to_le32(op->addr.val << 16); + snandc->qspi->addr2 = cpu_to_le32(op->addr.val >> 16 & 0xff); snandc->qspi->cmd = cpu_to_le32(cmd); return qcom_spi_program_execute(snandc, op); case SPINAND_READ: - s_op.addr1_reg = (op->addr.val << 16); - s_op.addr2_reg = op->addr.val >> 16 & 0xff; - snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); - snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->addr1 = cpu_to_le32(op->addr.val << 16); + snandc->qspi->addr2 = cpu_to_le32(op->addr.val >> 16 & 0xff); snandc->qspi->cmd = cpu_to_le32(cmd); return 0; case SPINAND_ERASE: - s_op.addr2_reg = (op->addr.val >> 16) & 0xffff; - s_op.addr1_reg = op->addr.val; - snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16); - snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->addr1 = cpu_to_le32(op->addr.val << 16); + snandc->qspi->addr2 = cpu_to_le32(op->addr.val >> 16 & 0xffff); snandc->qspi->cmd = cpu_to_le32(cmd); return qcom_spi_block_erase(snandc); default: @@ -1357,10 +1357,10 @@ static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, qcom_clear_read_regs(snandc); qcom_clear_bam_transaction(snandc); - snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg); + snandc->regs->cmd = cpu_to_le32(cmd); snandc->regs->exec = cpu_to_le32(1); - snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg); - snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg); + snandc->regs->addr0 = cpu_to_le32(op->addr.val); + snandc->regs->addr1 = cpu_to_le32(0); qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c index f3fe10eddb6a..9eba5c0a60f2 100644 --- a/drivers/spi/spi-rockchip-sfc.c +++ b/drivers/spi/spi-rockchip-sfc.c @@ -565,7 +565,6 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op ret = rockchip_sfc_xfer_done(sfc, 100000); out: - pm_runtime_mark_last_busy(sfc->dev); pm_runtime_put_autosuspend(sfc->dev); return ret; @@ -712,7 +711,6 @@ static int rockchip_sfc_probe(struct platform_device *pdev) if (ret) goto err_register; - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; @@ -799,7 +797,6 @@ static int rockchip_sfc_resume(struct device *dev) 
rockchip_sfc_init(sfc); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 92faaf614f8e..8e1d911b88b5 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -1404,7 +1404,6 @@ static const struct platform_device_id spi_driver_ids[] = { MODULE_DEVICE_TABLE(platform, spi_driver_ids); -#ifdef CONFIG_PM_SLEEP static int rspi_suspend(struct device *dev) { struct rspi_data *rspi = dev_get_drvdata(dev); @@ -1419,11 +1418,7 @@ static int rspi_resume(struct device *dev) return spi_controller_resume(rspi->ctlr); } -static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); -#define DEV_PM_OPS &rspi_pm_ops -#else -#define DEV_PM_OPS NULL -#endif /* CONFIG_PM_SLEEP */ +static DEFINE_SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); static struct platform_driver rspi_driver = { .probe = rspi_probe, @@ -1431,7 +1426,7 @@ static struct platform_driver rspi_driver = { .id_table = spi_driver_ids, .driver = { .name = "renesas_spi", - .pm = DEV_PM_OPS, + .pm = pm_sleep_ptr(&rspi_pm_ops), .of_match_table = of_match_ptr(rspi_of_match), }, }; diff --git a/drivers/spi/spi-rzv2h-rspi.c b/drivers/spi/spi-rzv2h-rspi.c new file mode 100644 index 000000000000..dcc431ba60a9 --- /dev/null +++ b/drivers/spi/spi-rzv2h-rspi.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Renesas RZ/V2H Renesas Serial Peripheral Interface (RSPI) + * + * Copyright (C) 2025 Renesas Electronics Corporation + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/limits.h> +#include <linux/log2.h> +#include <linux/math.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/reset.h> +#include <linux/spi/spi.h> +#include <linux/wait.h> + +/* Registers */ +#define RSPI_SPDR 0x00 +#define RSPI_SPCR 0x08 +#define RSPI_SSLP 0x10 +#define RSPI_SPBR 0x11 +#define RSPI_SPSCR 0x13 +#define RSPI_SPCMD 0x14 +#define RSPI_SPDCR2 0x44 +#define RSPI_SPSR 0x52 +#define RSPI_SPSRC 0x6a +#define RSPI_SPFCR 0x6c + +/* Register SPCR */ +#define RSPI_SPCR_MSTR BIT(30) +#define RSPI_SPCR_SPRIE BIT(17) +#define RSPI_SPCR_SCKASE BIT(12) +#define RSPI_SPCR_SPE BIT(0) + +/* Register SPBR */ +#define RSPI_SPBR_SPR_MIN 0 +#define RSPI_SPBR_SPR_MAX 255 + +/* Register SPCMD */ +#define RSPI_SPCMD_SSLA GENMASK(25, 24) +#define RSPI_SPCMD_SPB GENMASK(20, 16) +#define RSPI_SPCMD_LSBF BIT(12) +#define RSPI_SPCMD_SSLKP BIT(7) +#define RSPI_SPCMD_BRDV GENMASK(3, 2) +#define RSPI_SPCMD_CPOL BIT(1) +#define RSPI_SPCMD_CPHA BIT(0) + +#define RSPI_SPCMD_BRDV_MIN 0 +#define RSPI_SPCMD_BRDV_MAX 3 + +/* Register SPDCR2 */ +#define RSPI_SPDCR2_TTRG GENMASK(11, 8) +#define RSPI_SPDCR2_RTRG GENMASK(3, 0) +#define RSPI_FIFO_SIZE 16 + +/* Register SPSR */ +#define RSPI_SPSR_SPRF BIT(15) + +/* Register RSPI_SPSRC */ +#define RSPI_SPSRC_CLEAR 0xfd80 + +#define RSPI_RESET_NUM 2 +#define RSPI_CLK_NUM 3 + +struct rzv2h_rspi_priv { + struct reset_control_bulk_data resets[RSPI_RESET_NUM]; + struct spi_controller *controller; + void __iomem *base; + struct clk *tclk; + wait_queue_head_t wait; + unsigned int bytes_per_word; + u32 freq; + u16 status; +}; + +#define RZV2H_RSPI_TX(func, type) \ +static inline void rzv2h_rspi_tx_##type(struct rzv2h_rspi_priv *rspi, \ + const void *txbuf, \ + unsigned int index) { \ + type buf = 0; \ + \ + if (txbuf) \ + buf = ((type *)txbuf)[index]; 
\ + \ + func(buf, rspi->base + RSPI_SPDR); \ +} + +#define RZV2H_RSPI_RX(func, type) \ +static inline void rzv2h_rspi_rx_##type(struct rzv2h_rspi_priv *rspi, \ + void *rxbuf, \ + unsigned int index) { \ + type buf = func(rspi->base + RSPI_SPDR); \ + \ + if (rxbuf) \ + ((type *)rxbuf)[index] = buf; \ +} + +RZV2H_RSPI_TX(writel, u32) +RZV2H_RSPI_TX(writew, u16) +RZV2H_RSPI_TX(writeb, u8) +RZV2H_RSPI_RX(readl, u32) +RZV2H_RSPI_RX(readw, u16) +RZV2H_RSPI_RX(readl, u8) + +static void rzv2h_rspi_reg_rmw(const struct rzv2h_rspi_priv *rspi, + int reg_offs, u32 bit_mask, u32 value) +{ + u32 tmp; + + value <<= __ffs(bit_mask); + tmp = (readl(rspi->base + reg_offs) & ~bit_mask) | value; + writel(tmp, rspi->base + reg_offs); +} + +static inline void rzv2h_rspi_spe_disable(const struct rzv2h_rspi_priv *rspi) +{ + rzv2h_rspi_reg_rmw(rspi, RSPI_SPCR, RSPI_SPCR_SPE, 0); +} + +static inline void rzv2h_rspi_spe_enable(const struct rzv2h_rspi_priv *rspi) +{ + rzv2h_rspi_reg_rmw(rspi, RSPI_SPCR, RSPI_SPCR_SPE, 1); +} + +static inline void rzv2h_rspi_clear_fifos(const struct rzv2h_rspi_priv *rspi) +{ + writeb(1, rspi->base + RSPI_SPFCR); +} + +static inline void rzv2h_rspi_clear_all_irqs(struct rzv2h_rspi_priv *rspi) +{ + writew(RSPI_SPSRC_CLEAR, rspi->base + RSPI_SPSRC); + rspi->status = 0; +} + +static irqreturn_t rzv2h_rx_irq_handler(int irq, void *data) +{ + struct rzv2h_rspi_priv *rspi = data; + + rspi->status = readw(rspi->base + RSPI_SPSR); + wake_up(&rspi->wait); + + return IRQ_HANDLED; +} + +static inline int rzv2h_rspi_wait_for_interrupt(struct rzv2h_rspi_priv *rspi, + u32 wait_mask) +{ + return wait_event_timeout(rspi->wait, (rspi->status & wait_mask), + HZ) == 0 ? -ETIMEDOUT : 0; +} + +static void rzv2h_rspi_send(struct rzv2h_rspi_priv *rspi, const void *txbuf, + unsigned int index) +{ + switch (rspi->bytes_per_word) { + case 4: + rzv2h_rspi_tx_u32(rspi, txbuf, index); + break; + case 2: + rzv2h_rspi_tx_u16(rspi, txbuf, index); + break; + default: + rzv2h_rspi_tx_u8(rspi, txbuf, index); + } +} + +static int rzv2h_rspi_receive(struct rzv2h_rspi_priv *rspi, void *rxbuf, + unsigned int index) +{ + int ret; + + ret = rzv2h_rspi_wait_for_interrupt(rspi, RSPI_SPSR_SPRF); + if (ret) + return ret; + + switch (rspi->bytes_per_word) { + case 4: + rzv2h_rspi_rx_u32(rspi, rxbuf, index); + break; + case 2: + rzv2h_rspi_rx_u16(rspi, rxbuf, index); + break; + default: + rzv2h_rspi_rx_u8(rspi, rxbuf, index); + } + + return 0; +} + +static int rzv2h_rspi_transfer_one(struct spi_controller *controller, + struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct rzv2h_rspi_priv *rspi = spi_controller_get_devdata(controller); + unsigned int words_to_transfer, i; + int ret = 0; + + transfer->effective_speed_hz = rspi->freq; + words_to_transfer = transfer->len / rspi->bytes_per_word; + + for (i = 0; i < words_to_transfer; i++) { + rzv2h_rspi_clear_all_irqs(rspi); + + rzv2h_rspi_send(rspi, transfer->tx_buf, i); + + ret = rzv2h_rspi_receive(rspi, transfer->rx_buf, i); + if (ret) + break; + } + + rzv2h_rspi_clear_all_irqs(rspi); + + if (ret) + transfer->error = SPI_TRANS_FAIL_IO; + + spi_finalize_current_transfer(controller); + + return ret; +} + +static inline u32 rzv2h_rspi_calc_bitrate(unsigned long tclk_rate, u8 spr, + u8 brdv) +{ + return DIV_ROUND_UP(tclk_rate, (2 * (spr + 1) * (1 << brdv))); +} + +static u32 rzv2h_rspi_setup_clock(struct rzv2h_rspi_priv *rspi, u32 hz) +{ + unsigned long tclk_rate; + int spr; + u8 brdv; + + /* + * From the manual: + * Bit rate = f(RSPI_n_TCLK)/(2*(n+1)*2^(N)) + * + * 
Where: + * * RSPI_n_TCLK is fixed to 200MHz on V2H + * * n = SPR - is RSPI_SPBR.SPR (from 0 to 255) + * * N = BRDV - is RSPI_SPCMD.BRDV (from 0 to 3) + */ + tclk_rate = clk_get_rate(rspi->tclk); + for (brdv = RSPI_SPCMD_BRDV_MIN; brdv <= RSPI_SPCMD_BRDV_MAX; brdv++) { + spr = DIV_ROUND_UP(tclk_rate, hz * (1 << (brdv + 1))); + spr--; + if (spr >= RSPI_SPBR_SPR_MIN && spr <= RSPI_SPBR_SPR_MAX) + goto clock_found; + } + + return 0; + +clock_found: + rzv2h_rspi_reg_rmw(rspi, RSPI_SPCMD, RSPI_SPCMD_BRDV, brdv); + writeb(spr, rspi->base + RSPI_SPBR); + + return rzv2h_rspi_calc_bitrate(tclk_rate, spr, brdv); +} + +static int rzv2h_rspi_prepare_message(struct spi_controller *ctlr, + struct spi_message *message) +{ + struct rzv2h_rspi_priv *rspi = spi_controller_get_devdata(ctlr); + const struct spi_device *spi = message->spi; + struct spi_transfer *xfer; + u32 speed_hz = U32_MAX; + u8 bits_per_word; + u32 conf32; + u16 conf16; + + /* Make sure SPCR.SPE is 0 before amending the configuration */ + rzv2h_rspi_spe_disable(rspi); + + /* Configure the device to work in "host" mode */ + conf32 = RSPI_SPCR_MSTR; + + /* Auto-stop function */ + conf32 |= RSPI_SPCR_SCKASE; + + /* SPI receive buffer full interrupt enable */ + conf32 |= RSPI_SPCR_SPRIE; + + writel(conf32, rspi->base + RSPI_SPCR); + + /* Use SPCMD0 only */ + writeb(0x0, rspi->base + RSPI_SPSCR); + + /* Setup mode */ + conf32 = FIELD_PREP(RSPI_SPCMD_CPOL, !!(spi->mode & SPI_CPOL)); + conf32 |= FIELD_PREP(RSPI_SPCMD_CPHA, !!(spi->mode & SPI_CPHA)); + conf32 |= FIELD_PREP(RSPI_SPCMD_LSBF, !!(spi->mode & SPI_LSB_FIRST)); + conf32 |= FIELD_PREP(RSPI_SPCMD_SSLKP, 1); + conf32 |= FIELD_PREP(RSPI_SPCMD_SSLA, spi_get_chipselect(spi, 0)); + writel(conf32, rspi->base + RSPI_SPCMD); + if (spi->mode & SPI_CS_HIGH) + writeb(BIT(spi_get_chipselect(spi, 0)), rspi->base + RSPI_SSLP); + else + writeb(0, rspi->base + RSPI_SSLP); + + /* Setup FIFO thresholds */ + conf16 = FIELD_PREP(RSPI_SPDCR2_TTRG, RSPI_FIFO_SIZE - 1); + conf16 |= FIELD_PREP(RSPI_SPDCR2_RTRG, 0); + writew(conf16, rspi->base + RSPI_SPDCR2); + + rzv2h_rspi_clear_fifos(rspi); + + list_for_each_entry(xfer, &message->transfers, transfer_list) { + if (!xfer->speed_hz) + continue; + + speed_hz = min(xfer->speed_hz, speed_hz); + bits_per_word = xfer->bits_per_word; + } + + if (speed_hz == U32_MAX) + return -EINVAL; + + rspi->bytes_per_word = roundup_pow_of_two(BITS_TO_BYTES(bits_per_word)); + rzv2h_rspi_reg_rmw(rspi, RSPI_SPCMD, RSPI_SPCMD_SPB, bits_per_word - 1); + + rspi->freq = rzv2h_rspi_setup_clock(rspi, speed_hz); + if (!rspi->freq) + return -EINVAL; + + rzv2h_rspi_spe_enable(rspi); + + return 0; +} + +static int rzv2h_rspi_unprepare_message(struct spi_controller *ctlr, + struct spi_message *message) +{ + struct rzv2h_rspi_priv *rspi = spi_controller_get_devdata(ctlr); + + rzv2h_rspi_spe_disable(rspi); + + return 0; +} + +static int rzv2h_rspi_probe(struct platform_device *pdev) +{ + struct spi_controller *controller; + struct device *dev = &pdev->dev; + struct rzv2h_rspi_priv *rspi; + struct clk_bulk_data *clks; + unsigned long tclk_rate; + int irq_rx, ret, i; + + controller = devm_spi_alloc_host(dev, sizeof(*rspi)); + if (!controller) + return -ENOMEM; + + rspi = spi_controller_get_devdata(controller); + platform_set_drvdata(pdev, rspi); + + rspi->controller = controller; + + rspi->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rspi->base)) + return PTR_ERR(rspi->base); + + ret = devm_clk_bulk_get_all_enabled(dev, &clks); + if (ret != RSPI_CLK_NUM) + return dev_err_probe(dev, ret 
>= 0 ? -EINVAL : ret, + "cannot get clocks\n"); + for (i = 0; i < RSPI_CLK_NUM; i++) { + if (!strcmp(clks[i].id, "tclk")) { + rspi->tclk = clks[i].clk; + break; + } + } + + if (!rspi->tclk) + return dev_err_probe(dev, -EINVAL, "Failed to get tclk\n"); + + tclk_rate = clk_get_rate(rspi->tclk); + + rspi->resets[0].id = "presetn"; + rspi->resets[1].id = "tresetn"; + ret = devm_reset_control_bulk_get_exclusive(dev, RSPI_RESET_NUM, + rspi->resets); + if (ret) + return dev_err_probe(dev, ret, "cannot get resets\n"); + + irq_rx = platform_get_irq_byname(pdev, "rx"); + if (irq_rx < 0) + return dev_err_probe(dev, irq_rx, "cannot get IRQ 'rx'\n"); + + ret = reset_control_bulk_deassert(RSPI_RESET_NUM, rspi->resets); + if (ret) + return dev_err_probe(dev, ret, "failed to deassert resets\n"); + + init_waitqueue_head(&rspi->wait); + + ret = devm_request_irq(dev, irq_rx, rzv2h_rx_irq_handler, 0, + dev_name(dev), rspi); + if (ret) { + dev_err(dev, "cannot request `rx` IRQ\n"); + goto quit_resets; + } + + controller->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | + SPI_LSB_FIRST; + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + controller->prepare_message = rzv2h_rspi_prepare_message; + controller->unprepare_message = rzv2h_rspi_unprepare_message; + controller->num_chipselect = 4; + controller->transfer_one = rzv2h_rspi_transfer_one; + controller->min_speed_hz = rzv2h_rspi_calc_bitrate(tclk_rate, + RSPI_SPBR_SPR_MAX, + RSPI_SPCMD_BRDV_MAX); + controller->max_speed_hz = rzv2h_rspi_calc_bitrate(tclk_rate, + RSPI_SPBR_SPR_MIN, + RSPI_SPCMD_BRDV_MIN); + + device_set_node(&controller->dev, dev_fwnode(dev)); + + ret = spi_register_controller(controller); + if (ret) { + dev_err(dev, "register controller failed\n"); + goto quit_resets; + } + + return 0; + +quit_resets: + reset_control_bulk_assert(RSPI_RESET_NUM, rspi->resets); + + return ret; +} + +static void rzv2h_rspi_remove(struct platform_device *pdev) +{ + struct rzv2h_rspi_priv *rspi = platform_get_drvdata(pdev); + + spi_unregister_controller(rspi->controller); + + reset_control_bulk_assert(RSPI_RESET_NUM, rspi->resets); +} + +static const struct of_device_id rzv2h_rspi_match[] = { + { .compatible = "renesas,r9a09g057-rspi" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rzv2h_rspi_match); + +static struct platform_driver rzv2h_rspi_drv = { + .probe = rzv2h_rspi_probe, + .remove = rzv2h_rspi_remove, + .driver = { + .name = "rzv2h_rspi", + .of_match_table = rzv2h_rspi_match, + }, +}; +module_platform_driver(rzv2h_rspi_drv); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/V2H(P) Serial Peripheral Interface Driver"); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 9c47f5741c5f..b1567243ae19 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -1045,14 +1045,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi) } } - pm_runtime_mark_last_busy(&sdd->pdev->dev); pm_runtime_put_autosuspend(&sdd->pdev->dev); s3c64xx_spi_set_cs(spi, false); return 0; setup_exit: - pm_runtime_mark_last_busy(&sdd->pdev->dev); pm_runtime_put_autosuspend(&sdd->pdev->dev); /* setup() returns with device de-selected */ s3c64xx_spi_set_cs(spi, false); @@ -1384,7 +1382,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n", mem_res, sdd->fifo_depth); - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/spi/spi-sg2044-nor.c 
b/drivers/spi/spi-sg2044-nor.c index a59aa3fc55d2..af48b1fcda93 100644 --- a/drivers/spi/spi-sg2044-nor.c +++ b/drivers/spi/spi-sg2044-nor.c @@ -84,12 +84,18 @@ #define SPIFMC_MAX_READ_SIZE 0x10000 +struct sg204x_spifmc_chip_info { + bool has_opt_reg; + u32 rd_fifo_int_trigger_level; +}; + struct sg2044_spifmc { struct spi_controller *ctrl; void __iomem *io_base; struct device *dev; struct mutex lock; struct clk *clk; + const struct sg204x_spifmc_chip_info *chip_info; }; static int sg2044_spifmc_wait_int(struct sg2044_spifmc *spifmc, u8 int_type) @@ -139,7 +145,7 @@ static ssize_t sg2044_spifmc_read_64k(struct sg2044_spifmc *spifmc, reg = sg2044_spifmc_init_reg(spifmc); reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT; - reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE; + reg |= spifmc->chip_info->rd_fifo_int_trigger_level; reg |= SPIFMC_TRAN_CSR_WITH_CMD; reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; @@ -335,7 +341,8 @@ static ssize_t sg2044_spifmc_trans_reg(struct sg2044_spifmc *spifmc, reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX; - writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT); + if (spifmc->chip_info->has_opt_reg) + writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT); } else { /* * If write values to the Status Register, @@ -457,6 +464,11 @@ static int sg2044_spifmc_probe(struct platform_device *pdev) ret = devm_mutex_init(dev, &spifmc->lock); if (ret) return ret; + spifmc->chip_info = device_get_match_data(&pdev->dev); + if (!spifmc->chip_info) { + dev_err(&pdev->dev, "Failed to get specific chip info\n"); + return -EINVAL; + } sg2044_spifmc_init(spifmc); sg2044_spifmc_init_reg(spifmc); @@ -468,8 +480,19 @@ static int sg2044_spifmc_probe(struct platform_device *pdev) return 0; } +static const struct sg204x_spifmc_chip_info sg2044_chip_info = { + .has_opt_reg = true, + .rd_fifo_int_trigger_level = SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE, +}; + +static const struct sg204x_spifmc_chip_info sg2042_chip_info = { + .has_opt_reg = false, + .rd_fifo_int_trigger_level = SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE, +}; + static const struct of_device_id sg2044_spifmc_match[] = { - { .compatible = "sophgo,sg2044-spifmc-nor" }, + { .compatible = "sophgo,sg2044-spifmc-nor", .data = &sg2044_chip_info }, + { .compatible = "sophgo,sg2042-spifmc-nor", .data = &sg2042_chip_info }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sg2044_spifmc_match); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 94a867967e02..b695870fae8c 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1320,7 +1320,6 @@ static const struct platform_device_id spi_driver_ids[] = { }; MODULE_DEVICE_TABLE(platform, spi_driver_ids); -#ifdef CONFIG_PM_SLEEP static int sh_msiof_spi_suspend(struct device *dev) { struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); @@ -1335,12 +1334,8 @@ static int sh_msiof_spi_resume(struct device *dev) return spi_controller_resume(p->ctlr); } -static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, - sh_msiof_spi_resume); -#define DEV_PM_OPS (&sh_msiof_spi_pm_ops) -#else -#define DEV_PM_OPS NULL -#endif /* CONFIG_PM_SLEEP */ +static DEFINE_SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, + sh_msiof_spi_resume); static struct platform_driver sh_msiof_spi_drv = { .probe = sh_msiof_spi_probe, @@ -1348,7 +1343,7 @@ static struct platform_driver sh_msiof_spi_drv = { .id_table = spi_driver_ids, .driver = { .name = "spi_sh_msiof", - .pm = DEV_PM_OPS, + .pm = 
pm_sleep_ptr(&sh_msiof_spi_pm_ops), .of_match_table = of_match_ptr(sh_msiof_match), }, }; diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index ae794058b381..ad75f5f0f2bf 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -982,7 +982,6 @@ static int sprd_spi_probe(struct platform_device *pdev) if (ret) goto err_rpm_put; - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index 4cff976ab16f..49ab4c515156 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -378,8 +378,7 @@ static void spi_st_remove(struct platform_device *pdev) pinctrl_pm_select_sleep_state(&pdev->dev); } -#ifdef CONFIG_PM -static int spi_st_runtime_suspend(struct device *dev) +static int __maybe_unused spi_st_runtime_suspend(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); struct spi_st *spi_st = spi_controller_get_devdata(host); @@ -392,7 +391,7 @@ static int spi_st_runtime_suspend(struct device *dev) return 0; } -static int spi_st_runtime_resume(struct device *dev) +static int __maybe_unused spi_st_runtime_resume(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); struct spi_st *spi_st = spi_controller_get_devdata(host); @@ -403,10 +402,8 @@ static int spi_st_runtime_resume(struct device *dev) return ret; } -#endif -#ifdef CONFIG_PM_SLEEP -static int spi_st_suspend(struct device *dev) +static int __maybe_unused spi_st_suspend(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); int ret; @@ -418,7 +415,7 @@ static int spi_st_suspend(struct device *dev) return pm_runtime_force_suspend(dev); } -static int spi_st_resume(struct device *dev) +static int __maybe_unused spi_st_resume(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); int ret; @@ -429,7 +426,6 @@ static int spi_st_resume(struct device *dev) return pm_runtime_force_resume(dev); } -#endif static const struct dev_pm_ops spi_st_pm = { SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume) @@ -445,7 +441,7 @@ MODULE_DEVICE_TABLE(of, stm_spi_match); static struct platform_driver spi_st_driver = { .driver = { .name = "spi-st", - .pm = &spi_st_pm, + .pm = pm_sleep_ptr(&spi_st_pm), .of_match_table = of_match_ptr(stm_spi_match), }, .probe = spi_st_probe, diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c index 4ab7e86f4bd5..f36fd36da269 100644 --- a/drivers/spi/spi-stm32-ospi.c +++ b/drivers/spi/spi-stm32-ospi.c @@ -547,7 +547,6 @@ static int stm32_ospi_poll_status(struct spi_mem *mem, ret = stm32_ospi_send(mem->spi, op); mutex_unlock(&ospi->lock); - pm_runtime_mark_last_busy(ospi->dev); pm_runtime_put_autosuspend(ospi->dev); return ret; @@ -571,7 +570,6 @@ static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) ret = stm32_ospi_send(mem->spi, op); mutex_unlock(&ospi->lock); - pm_runtime_mark_last_busy(ospi->dev); pm_runtime_put_autosuspend(ospi->dev); return ret; @@ -628,7 +626,6 @@ static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc, ret = stm32_ospi_send(desc->mem->spi, &op); mutex_unlock(&ospi->lock); - pm_runtime_mark_last_busy(ospi->dev); pm_runtime_put_autosuspend(ospi->dev); return ret ?: len; @@ -713,7 +710,6 @@ end_of_transfer: msg->status = ret; spi_finalize_current_message(ctrl); - pm_runtime_mark_last_busy(ospi->dev); pm_runtime_put_autosuspend(ospi->dev); return ret; @@ -750,7 +746,6 @@ static int stm32_ospi_setup(struct spi_device *spi) 
mutex_unlock(&ospi->lock); - pm_runtime_mark_last_busy(ospi->dev); pm_runtime_put_autosuspend(ospi->dev); return 0; @@ -771,9 +766,7 @@ static int stm32_ospi_get_resources(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct stm32_ospi *ospi = platform_get_drvdata(pdev); - struct resource *res; - struct reserved_mem *rmem = NULL; - struct device_node *node; + struct resource *res, _res; int ret; ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); @@ -825,18 +818,14 @@ static int stm32_ospi_get_resources(struct platform_device *pdev) goto err_dma; } - node = of_parse_phandle(dev->of_node, "memory-region", 0); - if (node) - rmem = of_reserved_mem_lookup(node); - of_node_put(node); - - if (rmem) { - ospi->mm_size = rmem->size; - ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size); - if (!ospi->mm_base) { - dev_err(dev, "unable to map memory region: %pa+%pa\n", - &rmem->base, &rmem->size); - ret = -ENOMEM; + res = &_res; + ret = of_reserved_mem_region_to_resource(dev->of_node, 0, res); + if (!ret) { + ospi->mm_size = resource_size(res); + ospi->mm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ospi->mm_base)) { + dev_err(dev, "unable to map memory region: %pR\n", res); + ret = PTR_ERR(ospi->mm_base); goto err_dma; } @@ -953,7 +942,6 @@ static int stm32_ospi_probe(struct platform_device *pdev) goto err_pm_resume; } - pm_runtime_mark_last_busy(ospi->dev); pm_runtime_put_autosuspend(ospi->dev); return 0; @@ -1032,7 +1020,6 @@ static int __maybe_unused stm32_ospi_resume(struct device *dev) writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR); writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1); - pm_runtime_mark_last_busy(ospi->dev); pm_runtime_put_autosuspend(ospi->dev); return 0; diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 9691197bbf5a..f2d19f1c5ab1 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -463,7 +463,6 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op * ret = stm32_qspi_send(mem->spi, op); mutex_unlock(&qspi->lock); - pm_runtime_mark_last_busy(qspi->dev); pm_runtime_put_autosuspend(qspi->dev); return ret; @@ -487,7 +486,6 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) ret = stm32_qspi_send(mem->spi, op); mutex_unlock(&qspi->lock); - pm_runtime_mark_last_busy(qspi->dev); pm_runtime_put_autosuspend(qspi->dev); return ret; @@ -543,7 +541,6 @@ static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc, ret = stm32_qspi_send(desc->mem->spi, &op); mutex_unlock(&qspi->lock); - pm_runtime_mark_last_busy(qspi->dev); pm_runtime_put_autosuspend(qspi->dev); return ret ?: len; @@ -627,7 +624,6 @@ end_of_transfer: msg->status = ret; spi_finalize_current_message(ctrl); - pm_runtime_mark_last_busy(qspi->dev); pm_runtime_put_autosuspend(qspi->dev); return ret; @@ -684,7 +680,6 @@ static int stm32_qspi_setup(struct spi_device *spi) writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR); mutex_unlock(&qspi->lock); - pm_runtime_mark_last_busy(qspi->dev); pm_runtime_put_autosuspend(qspi->dev); return 0; @@ -858,7 +853,6 @@ static int stm32_qspi_probe(struct platform_device *pdev) if (ret) goto err_pm_runtime_free; - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; @@ -938,7 +932,6 @@ static int __maybe_unused stm32_qspi_resume(struct device *dev) writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR); writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR); - 
pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index da3517d7102d..2c804c1aef98 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -9,7 +9,9 @@ #include <linux/debugfs.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/dmaengine.h> +#include <linux/genalloc.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -154,6 +156,9 @@ /* STM32H7_SPI_I2SCFGR bit fields */ #define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0) +/* STM32MP25_SPICFG2 bit fields */ +#define STM32MP25_SPI_CFG2_RDIOM BIT(13) + /* STM32MP25 SPI registers bit fields */ #define STM32MP25_SPI_HWCFGR1 0x3F0 @@ -222,6 +227,7 @@ struct stm32_spi_reg { * @rx: SPI RX data register * @tx: SPI TX data register * @fullcfg: SPI full or limited feature set register + * @rdy_en: SPI ready feature register */ struct stm32_spi_regspec { const struct stm32_spi_reg en; @@ -235,6 +241,7 @@ struct stm32_spi_regspec { const struct stm32_spi_reg rx; const struct stm32_spi_reg tx; const struct stm32_spi_reg fullcfg; + const struct stm32_spi_reg rdy_en; }; struct stm32_spi; @@ -276,7 +283,7 @@ struct stm32_spi_cfg { int (*config)(struct stm32_spi *spi); void (*set_bpw)(struct stm32_spi *spi); int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type); - void (*set_data_idleness)(struct stm32_spi *spi, u32 length); + void (*set_data_idleness)(struct stm32_spi *spi, struct spi_transfer *xfer); int (*set_number_of_data)(struct stm32_spi *spi, u32 length); void (*write_tx)(struct stm32_spi *spi); void (*read_rx)(struct stm32_spi *spi); @@ -323,6 +330,11 @@ struct stm32_spi_cfg { * @dma_rx: dma channel for RX transfer * @phys_addr: SPI registers physical base address * @device_mode: the controller is configured as SPI device + * @sram_pool: SRAM pool for DMA transfers + * @sram_rx_buf_size: size of SRAM buffer for RX transfer + * @sram_rx_buf: SRAM buffer for RX transfer + * @sram_dma_rx_buf: SRAM buffer physical address for RX transfer + * @mdma_rx: MDMA channel for RX transfer */ struct stm32_spi { struct device *dev; @@ -357,6 +369,12 @@ struct stm32_spi { dma_addr_t phys_addr; bool device_mode; + + struct gen_pool *sram_pool; + size_t sram_rx_buf_size; + void *sram_rx_buf; + dma_addr_t sram_dma_rx_buf; + struct dma_chan *mdma_rx; }; static const struct stm32_spi_regspec stm32fx_spi_regspec = { @@ -415,6 +433,8 @@ static const struct stm32_spi_regspec stm32mp25_spi_regspec = { .tx = { STM32H7_SPI_TXDR }, .fullcfg = { STM32MP25_SPI_HWCFGR1, STM32MP25_SPI_HWCFGR1_FULLCFG }, + + .rdy_en = { STM32H7_SPI_CFG2, STM32MP25_SPI_CFG2_RDIOM }, }; static inline void stm32_spi_set_bits(struct stm32_spi *spi, @@ -878,8 +898,11 @@ static void stm32h7_spi_disable(struct stm32_spi *spi) if (spi->cur_usedma && spi->dma_tx) dmaengine_terminate_async(spi->dma_tx); - if (spi->cur_usedma && spi->dma_rx) + if (spi->cur_usedma && spi->dma_rx) { dmaengine_terminate_async(spi->dma_rx); + if (spi->mdma_rx) + dmaengine_terminate_async(spi->mdma_rx); + } stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); @@ -1091,10 +1114,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } if (sr & STM32H7_SPI_SR_EOT) { + dev_dbg(spi->dev, "End of transfer\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) stm32h7_spi_read_rxfifo(spi); if (!spi->cur_usedma || - (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)) + (spi->cur_comm == SPI_SIMPLEX_TX || 
spi->cur_comm == SPI_3WIRE_TX) || + (spi->mdma_rx && (spi->cur_comm == SPI_SIMPLEX_RX || + spi->cur_comm == SPI_FULL_DUPLEX))) end = true; } @@ -1111,6 +1137,11 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) spin_unlock_irqrestore(&spi->lock, flags); if (end) { + if (spi->cur_usedma && spi->mdma_rx) { + dmaengine_pause(spi->dma_rx); + /* Wait for callback */ + return IRQ_HANDLED; + } stm32h7_spi_disable(spi); spi_finalize_current_transfer(ctrl); } @@ -1172,15 +1203,21 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl, else clrb |= spi->cfg->regs->cs_high.mask; - dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n", + if (spi_dev->mode & SPI_READY) + setb |= spi->cfg->regs->rdy_en.mask; + else + clrb |= spi->cfg->regs->rdy_en.mask; + + dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d rdy=%d\n", !!(spi_dev->mode & SPI_CPOL), !!(spi_dev->mode & SPI_CPHA), !!(spi_dev->mode & SPI_LSB_FIRST), - !!(spi_dev->mode & SPI_CS_HIGH)); + !!(spi_dev->mode & SPI_CS_HIGH), + !!(spi_dev->mode & SPI_READY)); spin_lock_irqsave(&spi->lock, flags); - /* CPOL, CPHA and LSB FIRST bits have common register */ + /* CPOL, CPHA, LSB FIRST, CS_HIGH and RDY_EN bits have common register */ if (clrb || setb) writel_relaxed( (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) & @@ -1410,6 +1447,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) /* Enable the interrupts */ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE; + if (spi->mdma_rx && (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_FULL_DUPLEX)) + ier |= STM32H7_SPI_IER_EOTIE; stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier); @@ -1420,6 +1459,121 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) } /** + * stm32_spi_prepare_rx_dma_mdma_chaining - Prepare RX DMA and MDMA chaining + * @spi: pointer to the spi controller data structure + * @xfer: pointer to the spi transfer + * @rx_dma_conf: pointer to the DMA configuration for RX channel + * @rx_dma_desc: pointer to the RX DMA descriptor + * @rx_mdma_desc: pointer to the RX MDMA descriptor + * + * It must return 0 if the chaining is possible or an error code if not. 
+ */ +static int stm32_spi_prepare_rx_dma_mdma_chaining(struct stm32_spi *spi, + struct spi_transfer *xfer, + struct dma_slave_config *rx_dma_conf, + struct dma_async_tx_descriptor **rx_dma_desc, + struct dma_async_tx_descriptor **rx_mdma_desc) +{ + struct dma_async_tx_descriptor *_mdma_desc = *rx_mdma_desc; + struct dma_async_tx_descriptor *_dma_desc = *rx_dma_desc; + struct dma_slave_config rx_mdma_conf = {0}; + u32 sram_period, nents = 0, spi_s_len; + struct sg_table dma_sgt, mdma_sgt; + struct scatterlist *spi_s, *s; + dma_addr_t dma_buf; + int i, ret; + + sram_period = spi->sram_rx_buf_size / 2; + + /* Configure MDMA RX channel */ + rx_mdma_conf.direction = rx_dma_conf->direction; + rx_mdma_conf.src_addr = spi->sram_dma_rx_buf; + rx_mdma_conf.peripheral_config = rx_dma_conf->peripheral_config; + rx_mdma_conf.peripheral_size = rx_dma_conf->peripheral_size; + dmaengine_slave_config(spi->mdma_rx, &rx_mdma_conf); + + /* Count the number of entries needed */ + for_each_sg(xfer->rx_sg.sgl, spi_s, xfer->rx_sg.nents, i) + if (sg_dma_len(spi_s) > sram_period) + nents += DIV_ROUND_UP(sg_dma_len(spi_s), sram_period); + else + nents++; + + /* Prepare DMA slave_sg DBM transfer DEV_TO_MEM (RX>MEM=SRAM) */ + ret = sg_alloc_table(&dma_sgt, nents, GFP_ATOMIC); + if (ret) + return ret; + + spi_s = xfer->rx_sg.sgl; + spi_s_len = sg_dma_len(spi_s); + dma_buf = spi->sram_dma_rx_buf; + for_each_sg(dma_sgt.sgl, s, dma_sgt.nents, i) { + size_t bytes = min_t(size_t, spi_s_len, sram_period); + + sg_dma_len(s) = bytes; + sg_dma_address(s) = dma_buf; + spi_s_len -= bytes; + + if (!spi_s_len && sg_next(spi_s)) { + spi_s = sg_next(spi_s); + spi_s_len = sg_dma_len(spi_s); + dma_buf = spi->sram_dma_rx_buf; + } else { /* DMA configured in DBM: it will swap between the SRAM periods */ + if (i & 1) + dma_buf += sram_period; + else + dma_buf = spi->sram_dma_rx_buf; + } + } + + _dma_desc = dmaengine_prep_slave_sg(spi->dma_rx, dma_sgt.sgl, + dma_sgt.nents, rx_dma_conf->direction, + DMA_PREP_INTERRUPT); + sg_free_table(&dma_sgt); + + if (!_dma_desc) + return -EINVAL; + + /* Prepare MDMA slave_sg transfer MEM_TO_MEM (SRAM>DDR) */ + ret = sg_alloc_table(&mdma_sgt, nents, GFP_ATOMIC); + if (ret) { + _dma_desc = NULL; + return ret; + } + + spi_s = xfer->rx_sg.sgl; + spi_s_len = sg_dma_len(spi_s); + dma_buf = sg_dma_address(spi_s); + for_each_sg(mdma_sgt.sgl, s, mdma_sgt.nents, i) { + size_t bytes = min_t(size_t, spi_s_len, sram_period); + + sg_dma_len(s) = bytes; + sg_dma_address(s) = dma_buf; + spi_s_len -= bytes; + + if (!spi_s_len && sg_next(spi_s)) { + spi_s = sg_next(spi_s); + spi_s_len = sg_dma_len(spi_s); + dma_buf = sg_dma_address(spi_s); + } else { + dma_buf += bytes; + } + } + + _mdma_desc = dmaengine_prep_slave_sg(spi->mdma_rx, mdma_sgt.sgl, + mdma_sgt.nents, rx_mdma_conf.direction, + DMA_PREP_INTERRUPT); + sg_free_table(&mdma_sgt); + + if (!_mdma_desc) { + _dma_desc = NULL; + return -EINVAL; + } + + return 0; +} + +/** * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA * @spi: pointer to the spi controller data structure * @xfer: pointer to the spi_transfer structure @@ -1430,38 +1584,43 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, struct spi_transfer *xfer) { + struct dma_async_tx_descriptor *rx_mdma_desc = NULL, *rx_dma_desc = NULL; + struct dma_async_tx_descriptor *tx_dma_desc = NULL; struct dma_slave_config tx_dma_conf, rx_dma_conf; - struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc; unsigned 
long flags; + int ret = 0; spin_lock_irqsave(&spi->lock, flags); - rx_dma_desc = NULL; if (spi->rx_buf && spi->dma_rx) { stm32_spi_dma_config(spi, spi->dma_rx, &rx_dma_conf, DMA_DEV_TO_MEM); - dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); - - /* Enable Rx DMA request */ - stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg, - spi->cfg->regs->dma_rx_en.mask); - - rx_dma_desc = dmaengine_prep_slave_sg( - spi->dma_rx, xfer->rx_sg.sgl, - xfer->rx_sg.nents, - rx_dma_conf.direction, - DMA_PREP_INTERRUPT); + if (spi->mdma_rx) { + rx_dma_conf.peripheral_size = 1; + dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); + + ret = stm32_spi_prepare_rx_dma_mdma_chaining(spi, xfer, &rx_dma_conf, + &rx_dma_desc, &rx_mdma_desc); + if (ret) { /* RX DMA MDMA chaining not possible, fallback to DMA only */ + rx_dma_conf.peripheral_config = 0; + rx_dma_desc = NULL; + } + } + if (!rx_dma_desc) { + dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); + rx_dma_desc = dmaengine_prep_slave_sg(spi->dma_rx, xfer->rx_sg.sgl, + xfer->rx_sg.nents, + rx_dma_conf.direction, + DMA_PREP_INTERRUPT); + } } - tx_dma_desc = NULL; if (spi->tx_buf && spi->dma_tx) { stm32_spi_dma_config(spi, spi->dma_tx, &tx_dma_conf, DMA_MEM_TO_DEV); dmaengine_slave_config(spi->dma_tx, &tx_dma_conf); - - tx_dma_desc = dmaengine_prep_slave_sg( - spi->dma_tx, xfer->tx_sg.sgl, - xfer->tx_sg.nents, - tx_dma_conf.direction, - DMA_PREP_INTERRUPT); + tx_dma_desc = dmaengine_prep_slave_sg(spi->dma_tx, xfer->tx_sg.sgl, + xfer->tx_sg.nents, + tx_dma_conf.direction, + DMA_PREP_INTERRUPT); } if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) || @@ -1472,9 +1631,25 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, goto dma_desc_error; if (rx_dma_desc) { - rx_dma_desc->callback = spi->cfg->dma_rx_cb; - rx_dma_desc->callback_param = spi; + if (rx_mdma_desc) { + rx_mdma_desc->callback = spi->cfg->dma_rx_cb; + rx_mdma_desc->callback_param = spi; + } else { + rx_dma_desc->callback = spi->cfg->dma_rx_cb; + rx_dma_desc->callback_param = spi; + } + /* Enable Rx DMA request */ + stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg, + spi->cfg->regs->dma_rx_en.mask); + if (rx_mdma_desc) { + if (dma_submit_error(dmaengine_submit(rx_mdma_desc))) { + dev_err(spi->dev, "Rx MDMA submit failed\n"); + goto dma_desc_error; + } + /* Enable Rx MDMA channel */ + dma_async_issue_pending(spi->mdma_rx); + } if (dma_submit_error(dmaengine_submit(rx_dma_desc))) { dev_err(spi->dev, "Rx DMA submit failed\n"); goto dma_desc_error; @@ -1509,6 +1684,8 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, return 1; dma_submit_error: + if (spi->mdma_rx) + dmaengine_terminate_sync(spi->mdma_rx); if (spi->dma_rx) dmaengine_terminate_sync(spi->dma_rx); @@ -1520,6 +1697,9 @@ dma_desc_error: dev_info(spi->dev, "DMA issue: fall back to irq transfer\n"); + if (spi->sram_rx_buf) + memset(spi->sram_rx_buf, 0, spi->sram_rx_buf_size); + spi->cur_usedma = false; return spi->cfg->transfer_one_irq(spi); } @@ -1702,11 +1882,26 @@ static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) * stm32h7_spi_data_idleness - configure minimum time delay inserted between two * consecutive data frames in host mode * @spi: pointer to the spi controller data structure - * @len: transfer len + * @xfer: pointer to spi transfer */ -static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len) +static void stm32h7_spi_data_idleness(struct stm32_spi *spi, struct spi_transfer *xfer) { u32 cfg2_clrb = 0, cfg2_setb = 0; + u32 len = xfer->len; + u32 spi_delay_ns; + + 
spi_delay_ns = spi_delay_to_ns(&xfer->word_delay, xfer); + + if (spi->cur_midi != 0) { + dev_warn(spi->dev, "st,spi-midi-ns DT property is deprecated\n"); + if (spi_delay_ns) { + dev_warn(spi->dev, "Overriding st,spi-midi-ns with word_delay_ns %d\n", + spi_delay_ns); + spi->cur_midi = spi_delay_ns; + } + } else { + spi->cur_midi = spi_delay_ns; + } cfg2_clrb |= STM32H7_SPI_CFG2_MIDI; if ((len > 1) && (spi->cur_midi > 0)) { @@ -1768,6 +1963,13 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, spi->cur_bpw = transfer->bits_per_word; spi->cfg->set_bpw(spi); + if (spi_dev->mode & SPI_READY && spi->cur_bpw < 8) { + writel_relaxed(readl_relaxed(spi->base + spi->cfg->regs->rdy_en.reg) & + ~spi->cfg->regs->rdy_en.mask, + spi->base + spi->cfg->regs->rdy_en.reg); + dev_dbg(spi->dev, "RDY logic disabled as bits per word < 8\n"); + } + /* Update spi->cur_speed with real clock speed */ if (STM32_SPI_HOST_MODE(spi)) { mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, @@ -1790,7 +1992,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, spi->cur_comm = comm_type; if (STM32_SPI_HOST_MODE(spi) && spi->cfg->set_data_idleness) - spi->cfg->set_data_idleness(spi, transfer->len); + spi->cfg->set_data_idleness(spi, transfer); if (spi->cur_bpw <= 8) nb_words = transfer->len; @@ -1871,6 +2073,9 @@ static int stm32_spi_unprepare_msg(struct spi_controller *ctrl, spi->cfg->disable(spi); + if (spi->sram_rx_buf) + memset(spi->sram_rx_buf, 0, spi->sram_rx_buf_size); + return 0; } @@ -2069,9 +2274,15 @@ static int stm32_spi_probe(struct platform_device *pdev) struct resource *res; struct reset_control *rst; struct device_node *np = pdev->dev.of_node; + const struct stm32_spi_cfg *cfg; bool device_mode; int ret; - const struct stm32_spi_cfg *cfg = of_device_get_match_data(&pdev->dev); + + cfg = of_device_get_match_data(&pdev->dev); + if (!cfg) { + dev_err(&pdev->dev, "Failed to get match data for platform\n"); + return -ENODEV; + } device_mode = of_property_read_bool(np, "spi-slave"); if (!cfg->has_device_mode && device_mode) { @@ -2179,7 +2390,7 @@ static int stm32_spi_probe(struct platform_device *pdev) ctrl->auto_runtime_pm = true; ctrl->bus_num = pdev->id; ctrl->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | - SPI_3WIRE; + SPI_3WIRE | SPI_READY; ctrl->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); ctrl->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; ctrl->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; @@ -2219,6 +2430,33 @@ static int stm32_spi_probe(struct platform_device *pdev) if (spi->dma_tx || spi->dma_rx) ctrl->can_dma = stm32_spi_can_dma; + spi->sram_pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0); + if (spi->sram_pool) { + spi->sram_rx_buf_size = gen_pool_size(spi->sram_pool); + dev_info(&pdev->dev, "SRAM pool: %zu KiB for RX DMA/MDMA chaining\n", + spi->sram_rx_buf_size / 1024); + spi->sram_rx_buf = gen_pool_dma_zalloc(spi->sram_pool, spi->sram_rx_buf_size, + &spi->sram_dma_rx_buf); + if (!spi->sram_rx_buf) { + dev_err(&pdev->dev, "failed to allocate SRAM buffer\n"); + } else { + spi->mdma_rx = dma_request_chan(spi->dev, "rxm2m"); + if (IS_ERR(spi->mdma_rx)) { + ret = PTR_ERR(spi->mdma_rx); + spi->mdma_rx = NULL; + if (ret == -EPROBE_DEFER) { + goto err_pool_free; + } else { + gen_pool_free(spi->sram_pool, + (unsigned long)spi->sram_rx_buf, + spi->sram_rx_buf_size); + dev_warn(&pdev->dev, + "failed to request rx mdma channel, DMA only\n"); + } + } + } + } + pm_runtime_set_autosuspend_delay(&pdev->dev, 
STM32_SPI_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(&pdev->dev); @@ -2233,7 +2471,6 @@ static int stm32_spi_probe(struct platform_device *pdev) goto err_pm_disable; } - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); dev_info(&pdev->dev, "driver initialized (%s mode)\n", @@ -2246,6 +2483,13 @@ err_pm_disable: pm_runtime_put_noidle(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); + + if (spi->mdma_rx) + dma_release_channel(spi->mdma_rx); +err_pool_free: + if (spi->sram_pool) + gen_pool_free(spi->sram_pool, (unsigned long)spi->sram_rx_buf, + spi->sram_rx_buf_size); err_dma_release: if (spi->dma_tx) dma_release_channel(spi->dma_tx); @@ -2276,6 +2520,11 @@ static void stm32_spi_remove(struct platform_device *pdev) dma_release_channel(ctrl->dma_tx); if (ctrl->dma_rx) dma_release_channel(ctrl->dma_rx); + if (spi->mdma_rx) + dma_release_channel(spi->mdma_rx); + if (spi->sram_rx_buf) + gen_pool_free(spi->sram_pool, (unsigned long)spi->sram_rx_buf, + spi->sram_rx_buf_size); clk_disable_unprepare(spi->clk); @@ -2342,7 +2591,6 @@ static int __maybe_unused stm32_spi_resume(struct device *dev) spi->cfg->config(spi); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index a284d2794586..0b7eaccbc797 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -158,7 +158,6 @@ static int ti_qspi_setup(struct spi_device *spi) return ret; } - pm_runtime_mark_last_busy(qspi->dev); ret = pm_runtime_put_autosuspend(qspi->dev); if (ret < 0) { dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n"); @@ -195,7 +194,6 @@ static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz) ctx_reg->clkctrl = clk_ctrl_new; } - pm_runtime_mark_last_busy(qspi->dev); pm_runtime_put_autosuspend(qspi->dev); } diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index ded709b2b459..d59cc8a18484 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -89,8 +89,8 @@ struct xilinx_spi { u8 bytes_per_word; int buffer_size; /* buffer size in words */ u32 cs_inactive; /* Level of the CS pins when inactive*/ - unsigned int (*read_fn)(void __iomem *); - void (*write_fn)(u32, void __iomem *); + unsigned int (*read_fn)(void __iomem *addr); + void (*write_fn)(u32 val, void __iomem *addr); }; static void xspi_write32(u32 val, void __iomem *addr) @@ -251,6 +251,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) if (xspi->irq >= 0 && (xspi->force_irq || remaining_words > xspi->buffer_size)) { u32 isr; + use_irq = true; /* Inhibit irq to avoid spurious irqs on tx_empty*/ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 595b6dc10845..502fd5eccc83 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -1330,7 +1330,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) goto clk_dis_all; } - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 0ffa3f9f2870..a388f372b27a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -427,7 +427,7 @@ static int spi_probe(struct device *dev) if (spi->irq < 0) spi->irq = 0; - ret = dev_pm_domain_attach(dev, true); + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (ret) return ret; @@ -1723,7 +1723,6 @@ 
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); static void spi_idle_runtime_pm(struct spi_controller *ctlr) { if (ctlr->auto_runtime_pm) { - pm_runtime_mark_last_busy(ctlr->dev.parent); pm_runtime_put_autosuspend(ctlr->dev.parent); } } @@ -3856,7 +3855,6 @@ static int spi_set_cs_timing(struct spi_device *spi) } status = spi->controller->set_cs_timing(spi); - pm_runtime_mark_last_busy(parent); pm_runtime_put_autosuspend(parent); } else { status = spi->controller->set_cs_timing(spi); @@ -3991,7 +3989,6 @@ int spi_setup(struct spi_device *spi) status = 0; spi_set_cs(spi, false, true); - pm_runtime_mark_last_busy(spi->controller->dev.parent); pm_runtime_put_autosuspend(spi->controller->dev.parent); } else { spi_set_cs(spi, false, true); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 6108959c28d9..5300c942a2a4 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -703,6 +703,7 @@ static const struct class spidev_class = { * spidev_dt_ids array below. Both arrays are kept in the same ordering. */ static const struct spi_device_id spidev_spi_ids[] = { + { .name = /* abb */ "spi-sensor" }, { .name = /* cisco */ "spi-petra" }, { .name = /* dh */ "dhcom-board" }, { .name = /* elgin */ "jg10309-01" }, @@ -735,6 +736,7 @@ static int spidev_of_check(struct device *dev) } static const struct of_device_id spidev_dt_ids[] = { + { .compatible = "abb,spi-sensor", .data = &spidev_of_check }, { .compatible = "cisco,spi-petra", .data = &spidev_of_check }, { .compatible = "dh,dhcom-board", .data = &spidev_of_check }, { .compatible = "elgin,jg10309-01", .data = &spidev_of_check }, diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c index a193d64db033..38c6abc2ffd2 100644 --- a/drivers/staging/gpib/common/gpib_os.c +++ b/drivers/staging/gpib/common/gpib_os.c @@ -610,7 +610,7 @@ int ibclose(struct inode *inode, struct file *filep) long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg) { - unsigned int minor = iminor(filep->f_path.dentry->d_inode); + unsigned int minor = iminor(file_inode(filep)); struct gpib_board *board; struct gpib_file_private *file_priv = filep->private_data; long retval = -ENOTTY; diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index 9bff21068721..c2fbdb534f61 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c @@ -408,7 +408,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal, return ret; } -static struct thermal_zone_device_ops legacy_ops = { +static const struct thermal_zone_device_ops legacy_ops = { .get_temp = armada_get_temp_legacy, }; diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c index 2077e85ef5ca..a8d4b766ba21 100644 --- a/drivers/thermal/da9062-thermal.c +++ b/drivers/thermal/da9062-thermal.c @@ -137,7 +137,7 @@ static int da9062_thermal_get_temp(struct thermal_zone_device *z, return 0; } -static struct thermal_zone_device_ops da9062_thermal_ops = { +static const struct thermal_zone_device_ops da9062_thermal_ops = { .get_temp = da9062_thermal_get_temp, }; diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index f9157a47156b..723bc72f0626 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c @@ -106,7 +106,7 @@ static int dove_get_temp(struct thermal_zone_device *thermal, return 0; } -static struct thermal_zone_device_ops ops = { +static const struct thermal_zone_device_ops ops = { .get_temp = dove_get_temp, }; diff --git 
a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index bab52e6b3b15..38c993d1bcb3 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -361,7 +361,7 @@ static bool imx_should_bind(struct thermal_zone_device *tz, return trip->type == THERMAL_TRIP_PASSIVE; } -static struct thermal_zone_device_ops imx_tz_ops = { +static const struct thermal_zone_device_ops imx_tz_ops = { .should_bind = imx_should_bind, .get_temp = imx_get_temp, .change_mode = imx_change_mode, diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 0e07693ecf59..908cc1bf57f1 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -515,7 +515,7 @@ eval_odvp: return result; } -static struct thermal_zone_device_ops int3400_thermal_ops = { +static const struct thermal_zone_device_ops int3400_thermal_ops = { .get_temp = int3400_thermal_get_temp, .change_mode = int3400_thermal_change_mode, }; @@ -690,6 +690,7 @@ static const struct acpi_device_id int3400_thermal_match[] = { {"INTC1068", 0}, {"INTC10A0", 0}, {"INTC10D4", 0}, + {"INTC10FC", 0}, {} }; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index 5a925a8df7b3..ba63796761eb 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -276,6 +276,7 @@ static const struct acpi_device_id int3403_device_ids[] = { {"INTC1069", 0}, {"INTC10A1", 0}, {"INTC10D5", 0}, + {"INTC10FD", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3403_device_ids); diff --git a/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c b/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c index 2d6504514893..0ccc72c93499 100644 --- a/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c +++ b/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c @@ -38,6 +38,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/debugfs.h> #include <linux/pci.h> #include "processor_thermal_device.h" @@ -49,14 +50,16 @@ struct mmio_reg { }; #define MAX_ATTR_GROUP_NAME_LEN 32 -#define PTC_MAX_ATTRS 3 +#define PTC_MAX_ATTRS 4 struct ptc_data { u32 offset; + struct pci_dev *pdev; struct attribute_group ptc_attr_group; struct attribute *ptc_attrs[PTC_MAX_ATTRS]; struct device_attribute temperature_target_attr; struct device_attribute enable_attr; + struct device_attribute thermal_tolerance_attr; char group_name[MAX_ATTR_GROUP_NAME_LEN]; }; @@ -78,6 +81,7 @@ static u32 ptc_offsets[PTC_MAX_INSTANCES] = {0x5B20, 0x5B28, 0x5B30}; static const char * const ptc_strings[] = { "temperature_target", "enable", + "thermal_tolerance", NULL }; @@ -177,6 +181,8 @@ PTC_SHOW(temperature_target); PTC_STORE(temperature_target); PTC_SHOW(enable); PTC_STORE(enable); +PTC_SHOW(thermal_tolerance); +PTC_STORE(thermal_tolerance); #define ptc_init_attribute(_name)\ do {\ @@ -193,9 +199,11 @@ static int ptc_create_groups(struct pci_dev *pdev, int instance, struct ptc_data ptc_init_attribute(temperature_target); ptc_init_attribute(enable); + ptc_init_attribute(thermal_tolerance); data->ptc_attrs[index++] = &data->temperature_target_attr.attr; data->ptc_attrs[index++] = &data->enable_attr.attr; + data->ptc_attrs[index++] = &data->thermal_tolerance_attr.attr; data->ptc_attrs[index] = NULL; snprintf(data->group_name, 
MAX_ATTR_GROUP_NAME_LEN, @@ -209,6 +217,63 @@ static int ptc_create_groups(struct pci_dev *pdev, int instance, struct ptc_data } static struct ptc_data ptc_instance[PTC_MAX_INSTANCES]; +static struct dentry *ptc_debugfs; + +#define PTC_TEMP_OVERRIDE_ENABLE_INDEX 4 +#define PTC_TEMP_OVERRIDE_INDEX 5 + +static ssize_t ptc_temperature_write(struct file *file, const char __user *data, + size_t count, loff_t *ppos) +{ + struct ptc_data *ptc_instance = file->private_data; + struct pci_dev *pdev = ptc_instance->pdev; + char buf[32]; + ssize_t len; + u32 value; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, data, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtouint(buf, 0, &value)) + return -EINVAL; + + if (ptc_mmio_regs[PTC_TEMP_OVERRIDE_INDEX].units) + value /= ptc_mmio_regs[PTC_TEMP_OVERRIDE_INDEX].units; + + if (value > ptc_mmio_regs[PTC_TEMP_OVERRIDE_INDEX].mask) + return -EINVAL; + + if (!value) { + ptc_mmio_write(pdev, ptc_instance->offset, PTC_TEMP_OVERRIDE_ENABLE_INDEX, 0); + } else { + ptc_mmio_write(pdev, ptc_instance->offset, PTC_TEMP_OVERRIDE_INDEX, value); + ptc_mmio_write(pdev, ptc_instance->offset, PTC_TEMP_OVERRIDE_ENABLE_INDEX, 1); + } + + return count; +} + +static const struct file_operations ptc_fops = { + .open = simple_open, + .write = ptc_temperature_write, + .llseek = generic_file_llseek, +}; + +static void ptc_create_debugfs(void) +{ + ptc_debugfs = debugfs_create_dir("platform_temperature_control", NULL); + + debugfs_create_file("temperature_0", 0200, ptc_debugfs, &ptc_instance[0], &ptc_fops); + debugfs_create_file("temperature_1", 0200, ptc_debugfs, &ptc_instance[1], &ptc_fops); + debugfs_create_file("temperature_2", 0200, ptc_debugfs, &ptc_instance[2], &ptc_fops); +} + +static void ptc_delete_debugfs(void) +{ + debugfs_remove_recursive(ptc_debugfs); +} int proc_thermal_ptc_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv) { @@ -217,8 +282,11 @@ int proc_thermal_ptc_add(struct pci_dev *pdev, struct proc_thermal_device *proc_ for (i = 0; i < PTC_MAX_INSTANCES; i++) { ptc_instance[i].offset = ptc_offsets[i]; + ptc_instance[i].pdev = pdev; ptc_create_groups(pdev, i, &ptc_instance[i]); } + + ptc_create_debugfs(); } return 0; @@ -234,6 +302,8 @@ void proc_thermal_ptc_remove(struct pci_dev *pdev) for (i = 0; i < PTC_MAX_INSTANCES; i++) sysfs_remove_group(&pdev->dev.kobj, &ptc_instance[i].ptc_attr_group); + + ptc_delete_debugfs(); } } EXPORT_SYMBOL_GPL(proc_thermal_ptc_remove); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h index 9a6ca43b6fa2..49398794124a 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h @@ -31,6 +31,7 @@ #define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903 #define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03 #define PCI_DEVICE_ID_INTEL_PTL_THERMAL 0xB01D +#define PCI_DEVICE_ID_INTEL_WCL_THERMAL 0xFD1D struct power_config { u32 index; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index 00160936070a..d4d7e8e147d2 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -499,6 +499,10 @@ static const struct pci_device_id proc_thermal_pci_ids[] = { PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS | 
PROC_THERMAL_FEATURE_MSI_SUPPORT | PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC) }, + { PCI_DEVICE_DATA(INTEL, WCL_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT | + PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR | + PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_HINT | + PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC) }, { }, }; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c index 3a028b78d9af..1f3d22b659db 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c @@ -442,6 +442,7 @@ int proc_thermal_rfim_add(struct pci_dev *pdev, struct proc_thermal_device *proc switch (pdev->device) { case PCI_DEVICE_ID_INTEL_LNLM_THERMAL: case PCI_DEVICE_ID_INTEL_PTL_THERMAL: + case PCI_DEVICE_ID_INTEL_WCL_THERMAL: dlvr_mmio_regs_table = lnl_dlvr_mmio_regs; dlvr_mapping = lnl_dlvr_mapping; break; diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c index 7c2265231668..4619e090f756 100644 --- a/drivers/thermal/kirkwood_thermal.c +++ b/drivers/thermal/kirkwood_thermal.c @@ -48,7 +48,7 @@ static int kirkwood_get_temp(struct thermal_zone_device *thermal, return 0; } -static struct thermal_zone_device_ops ops = { +static const struct thermal_zone_device_ops ops = { .get_temp = kirkwood_get_temp, }; diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c index 2d6b75b0539f..ea4dd2fb1f47 100644 --- a/drivers/thermal/loongson2_thermal.c +++ b/drivers/thermal/loongson2_thermal.c @@ -112,13 +112,19 @@ static int loongson2_thermal_set_trips(struct thermal_zone_device *tz, int low, return loongson2_thermal_set(data, low/MILLI, high/MILLI, true); } -static struct thermal_zone_device_ops loongson2_of_thermal_ops = { +static const struct thermal_zone_device_ops loongson2_2k1000_of_thermal_ops = { .get_temp = loongson2_2k1000_get_temp, .set_trips = loongson2_thermal_set_trips, }; +static const struct thermal_zone_device_ops loongson2_2k2000_of_thermal_ops = { + .get_temp = loongson2_2k2000_get_temp, + .set_trips = loongson2_thermal_set_trips, +}; + static int loongson2_thermal_probe(struct platform_device *pdev) { + const struct thermal_zone_device_ops *thermal_ops; struct device *dev = &pdev->dev; struct loongson2_thermal_data *data; struct thermal_zone_device *tzd; @@ -140,7 +146,9 @@ static int loongson2_thermal_probe(struct platform_device *pdev) if (IS_ERR(data->temp_reg)) return PTR_ERR(data->temp_reg); - loongson2_of_thermal_ops.get_temp = loongson2_2k2000_get_temp; + thermal_ops = &loongson2_2k2000_of_thermal_ops; + } else { + thermal_ops = &loongson2_2k1000_of_thermal_ops; } irq = platform_get_irq(pdev, 0); @@ -152,8 +160,7 @@ static int loongson2_thermal_probe(struct platform_device *pdev) loongson2_thermal_set(data, 0, 0, false); for (i = 0; i <= LOONGSON2_MAX_SENSOR_SEL_NUM; i++) { - tzd = devm_thermal_of_zone_register(dev, i, data, - &loongson2_of_thermal_ops); + tzd = devm_thermal_of_zone_register(dev, i, data, thermal_ops); if (!IS_ERR(tzd)) break; diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index 985925147ac0..f4d1e66d7db9 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -125,7 +125,11 @@ struct lvts_ctrl_data { struct lvts_data { const struct lvts_ctrl_data *lvts_ctrl; + const u32 *conn_cmd; + 
const u32 *init_cmd; int num_lvts_ctrl; + int num_conn_cmd; + int num_init_cmd; int temp_factor; int temp_offset; int gt_calib_bit_offset; @@ -571,7 +575,7 @@ static irqreturn_t lvts_irq_handler(int irq, void *data) return iret; } -static struct thermal_zone_device_ops lvts_ops = { +static const struct thermal_zone_device_ops lvts_ops = { .get_temp = lvts_get_temp, .set_trips = lvts_set_trips, }; @@ -902,7 +906,7 @@ static void lvts_ctrl_monitor_enable(struct device *dev, struct lvts_ctrl *lvts_ * each write in the configuration register must be separated by a * delay of 2 us. */ -static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, u32 *cmds, int nr_cmds) +static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, const u32 *cmds, int nr_cmds) { int i; @@ -985,9 +989,10 @@ static int lvts_ctrl_set_enable(struct lvts_ctrl *lvts_ctrl, int enable) static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl) { - u32 id, cmds[] = { 0xC103FFFF, 0xC502FF55 }; + const struct lvts_data *lvts_data = lvts_ctrl->lvts_data; + u32 id; - lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds)); + lvts_write_config(lvts_ctrl, lvts_data->conn_cmd, lvts_data->num_conn_cmd); /* * LVTS_ID : Get ID and status of the thermal controller @@ -1006,17 +1011,9 @@ static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl) static int lvts_ctrl_initialize(struct device *dev, struct lvts_ctrl *lvts_ctrl) { - /* - * Write device mask: 0xC1030000 - */ - u32 cmds[] = { - 0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1, - 0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300, - 0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC, - 0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1 - }; + const struct lvts_data *lvts_data = lvts_ctrl->lvts_data; - lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds)); + lvts_write_config(lvts_ctrl, lvts_data->init_cmd, lvts_data->num_init_cmd); return 0; } @@ -1445,6 +1442,25 @@ static int lvts_resume(struct device *dev) return 0; } +static const u32 default_conn_cmds[] = { 0xC103FFFF, 0xC502FF55 }; +static const u32 mt7988_conn_cmds[] = { 0xC103FFFF, 0xC502FC55 }; + +/* + * Write device mask: 0xC1030000 + */ +static const u32 default_init_cmds[] = { + 0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1, + 0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300, + 0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC, + 0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1 +}; + +static const u32 mt7988_init_cmds[] = { + 0xC1030300, 0xC1030420, 0xC1030500, 0xC10307A6, 0xC1030CFC, + 0xC1030A8C, 0xC103098D, 0xC10308F1, 0xC1030B04, 0xC1030E01, + 0xC10306B8 +}; + /* * The MT8186 calibration data is stored as packed 3-byte little-endian * values using a weird layout that makes sense only when viewed as a 32-bit @@ -1739,7 +1755,11 @@ static const struct lvts_ctrl_data mt8195_lvts_ap_data_ctrl[] = { static const struct lvts_data mt7988_lvts_ap_data = { .lvts_ctrl = mt7988_lvts_ap_data_ctrl, + .conn_cmd = mt7988_conn_cmds, + .init_cmd = mt7988_init_cmds, .num_lvts_ctrl = ARRAY_SIZE(mt7988_lvts_ap_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(mt7988_conn_cmds), + .num_init_cmd = ARRAY_SIZE(mt7988_init_cmds), .temp_factor = LVTS_COEFF_A_MT7988, .temp_offset = LVTS_COEFF_B_MT7988, .gt_calib_bit_offset = 24, @@ -1747,7 +1767,11 @@ static const struct lvts_data mt7988_lvts_ap_data = { static const struct lvts_data mt8186_lvts_data = { .lvts_ctrl = mt8186_lvts_data_ctrl, + .conn_cmd = default_conn_cmds, + .init_cmd = default_init_cmds, 
.num_lvts_ctrl = ARRAY_SIZE(mt8186_lvts_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(default_conn_cmds), + .num_init_cmd = ARRAY_SIZE(default_init_cmds), .temp_factor = LVTS_COEFF_A_MT7988, .temp_offset = LVTS_COEFF_B_MT7988, .gt_calib_bit_offset = 24, @@ -1756,7 +1780,11 @@ static const struct lvts_data mt8186_lvts_data = { static const struct lvts_data mt8188_lvts_mcu_data = { .lvts_ctrl = mt8188_lvts_mcu_data_ctrl, + .conn_cmd = default_conn_cmds, + .init_cmd = default_init_cmds, .num_lvts_ctrl = ARRAY_SIZE(mt8188_lvts_mcu_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(default_conn_cmds), + .num_init_cmd = ARRAY_SIZE(default_init_cmds), .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 20, @@ -1765,7 +1793,11 @@ static const struct lvts_data mt8188_lvts_mcu_data = { static const struct lvts_data mt8188_lvts_ap_data = { .lvts_ctrl = mt8188_lvts_ap_data_ctrl, + .conn_cmd = default_conn_cmds, + .init_cmd = default_init_cmds, .num_lvts_ctrl = ARRAY_SIZE(mt8188_lvts_ap_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(default_conn_cmds), + .num_init_cmd = ARRAY_SIZE(default_init_cmds), .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 20, @@ -1774,7 +1806,11 @@ static const struct lvts_data mt8188_lvts_ap_data = { static const struct lvts_data mt8192_lvts_mcu_data = { .lvts_ctrl = mt8192_lvts_mcu_data_ctrl, + .conn_cmd = default_conn_cmds, + .init_cmd = default_init_cmds, .num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_mcu_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(default_conn_cmds), + .num_init_cmd = ARRAY_SIZE(default_init_cmds), .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, @@ -1783,7 +1819,11 @@ static const struct lvts_data mt8192_lvts_mcu_data = { static const struct lvts_data mt8192_lvts_ap_data = { .lvts_ctrl = mt8192_lvts_ap_data_ctrl, + .conn_cmd = default_conn_cmds, + .init_cmd = default_init_cmds, .num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_ap_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(default_conn_cmds), + .num_init_cmd = ARRAY_SIZE(default_init_cmds), .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, @@ -1792,7 +1832,11 @@ static const struct lvts_data mt8192_lvts_ap_data = { static const struct lvts_data mt8195_lvts_mcu_data = { .lvts_ctrl = mt8195_lvts_mcu_data_ctrl, + .conn_cmd = default_conn_cmds, + .init_cmd = default_init_cmds, .num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_mcu_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(default_conn_cmds), + .num_init_cmd = ARRAY_SIZE(default_init_cmds), .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, @@ -1801,7 +1845,11 @@ static const struct lvts_data mt8195_lvts_mcu_data = { static const struct lvts_data mt8195_lvts_ap_data = { .lvts_ctrl = mt8195_lvts_ap_data_ctrl, + .conn_cmd = default_conn_cmds, + .init_cmd = default_init_cmds, .num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_ap_data_ctrl), + .num_conn_cmd = ARRAY_SIZE(default_conn_cmds), + .num_init_cmd = ARRAY_SIZE(default_init_cmds), .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index 991d1573983d..75eaa9a68ab8 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -209,8 +209,7 @@ static int lmh_probe(struct platform_device *pdev) } lmh_data->irq = platform_get_irq(pdev, 0); - lmh_data->domain = irq_domain_create_linear(of_fwnode_handle(np), 1, 
&lmh_irq_ops, - lmh_data); + lmh_data->domain = irq_domain_create_linear(dev_fwnode(dev), 1, &lmh_irq_ops, lmh_data); if (!lmh_data->domain) { dev_err(dev, "Error adding irq_domain\n"); return -EINVAL; diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index a81e7d6e865f..f39ca0ddd17b 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/err.h> @@ -16,31 +18,51 @@ #include "../thermal_hwmon.h" +#define QPNP_TM_REG_DIG_MINOR 0x00 #define QPNP_TM_REG_DIG_MAJOR 0x01 #define QPNP_TM_REG_TYPE 0x04 #define QPNP_TM_REG_SUBTYPE 0x05 #define QPNP_TM_REG_STATUS 0x08 +#define QPNP_TM_REG_IRQ_STATUS 0x10 #define QPNP_TM_REG_SHUTDOWN_CTRL1 0x40 #define QPNP_TM_REG_ALARM_CTRL 0x46 +/* TEMP_DAC_STGx registers are only present for TEMP_GEN2 v2.0 */ +#define QPNP_TM_REG_TEMP_DAC_STG1 0x47 +#define QPNP_TM_REG_TEMP_DAC_STG2 0x48 +#define QPNP_TM_REG_TEMP_DAC_STG3 0x49 +#define QPNP_TM_REG_LITE_TEMP_CFG1 0x50 +#define QPNP_TM_REG_LITE_TEMP_CFG2 0x51 + #define QPNP_TM_TYPE 0x09 #define QPNP_TM_SUBTYPE_GEN1 0x08 #define QPNP_TM_SUBTYPE_GEN2 0x09 +#define QPNP_TM_SUBTYPE_LITE 0xC0 #define STATUS_GEN1_STAGE_MASK GENMASK(1, 0) #define STATUS_GEN2_STATE_MASK GENMASK(6, 4) -#define STATUS_GEN2_STATE_SHIFT 4 -#define SHUTDOWN_CTRL1_OVERRIDE_S2 BIT(6) +/* IRQ status only needed for TEMP_ALARM_LITE */ +#define IRQ_STATUS_MASK BIT(0) + +#define SHUTDOWN_CTRL1_OVERRIDE_STAGE2 BIT(6) #define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0) #define SHUTDOWN_CTRL1_RATE_25HZ BIT(3) #define ALARM_CTRL_FORCE_ENABLE BIT(7) +#define LITE_TEMP_CFG_THRESHOLD_MASK GENMASK(3, 2) + #define THRESH_COUNT 4 #define STAGE_COUNT 3 +enum overtemp_stage { + STAGE1 = 0, + STAGE2, + STAGE3, +}; + /* Over-temperature trip point values in mC */ static const long temp_map_gen1[THRESH_COUNT][STAGE_COUNT] = { { 105000, 125000, 145000 }, @@ -63,24 +85,68 @@ static const long temp_map_gen2_v1[THRESH_COUNT][STAGE_COUNT] = { #define TEMP_STAGE_HYSTERESIS 2000 +/* + * For TEMP_GEN2 v2.0, TEMP_DAC_STG1/2/3 registers are used to set the threshold + * for each stage independently. + * TEMP_DAC_STG* = 0 --> 80 C + * Each 8 step increase in TEMP_DAC_STG* value corresponds to 5 C (5000 mC). + */ +#define TEMP_DAC_MIN 80000 +#define TEMP_DAC_SCALE_NUM 8 +#define TEMP_DAC_SCALE_DEN 5000 + +#define TEMP_DAC_TEMP_TO_REG(temp) \ + (((temp) - TEMP_DAC_MIN) * TEMP_DAC_SCALE_NUM / TEMP_DAC_SCALE_DEN) +#define TEMP_DAC_REG_TO_TEMP(reg) \ + (TEMP_DAC_MIN + (reg) * TEMP_DAC_SCALE_DEN / TEMP_DAC_SCALE_NUM) + +static const long temp_dac_max[STAGE_COUNT] = { + 119375, 159375, 159375 +}; + +/* + * TEMP_ALARM_LITE has two stages: warning and shutdown with independently + * configured threshold temperatures. 
+ */ + +static const long temp_lite_warning_map[THRESH_COUNT] = { + 115000, 125000, 135000, 145000 +}; + +static const long temp_lite_shutdown_map[THRESH_COUNT] = { + 135000, 145000, 160000, 175000 +}; + /* Temperature in Milli Celsius reported during stage 0 if no ADC is present */ #define DEFAULT_TEMP 37000 +struct qpnp_tm_chip; + +struct spmi_temp_alarm_data { + const struct thermal_zone_device_ops *ops; + const long (*temp_map)[THRESH_COUNT][STAGE_COUNT]; + int (*sync_thresholds)(struct qpnp_tm_chip *chip); + int (*get_temp_stage)(struct qpnp_tm_chip *chip); + int (*configure_trip_temps)(struct qpnp_tm_chip *chip); +}; + struct qpnp_tm_chip { struct regmap *map; struct device *dev; struct thermal_zone_device *tz_dev; + const struct spmi_temp_alarm_data *data; unsigned int subtype; long temp; - unsigned int thresh; unsigned int stage; unsigned int base; + unsigned int ntrips; /* protects .thresh, .stage and chip registers */ struct mutex lock; bool initialized; + bool require_stage2_shutdown; + long temp_thresh_map[STAGE_COUNT]; struct iio_channel *adc; - const long (*temp_map)[THRESH_COUNT][STAGE_COUNT]; }; /* This array maps from GEN2 alarm state to GEN1 alarm stage */ @@ -114,34 +180,66 @@ static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data) */ static long qpnp_tm_decode_temp(struct qpnp_tm_chip *chip, unsigned int stage) { - if (!chip->temp_map || chip->thresh >= THRESH_COUNT || stage == 0 || - stage > STAGE_COUNT) + if (stage == 0 || stage > STAGE_COUNT) return 0; - return (*chip->temp_map)[chip->thresh][stage - 1]; + return chip->temp_thresh_map[stage - 1]; } /** - * qpnp_tm_get_temp_stage() - return over-temperature stage + * qpnp_tm_gen1_get_temp_stage() - return over-temperature stage * @chip: Pointer to the qpnp_tm chip * - * Return: stage (GEN1) or state (GEN2) on success, or errno on failure. + * Return: stage on success, or errno on failure. */ -static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip) +static int qpnp_tm_gen1_get_temp_stage(struct qpnp_tm_chip *chip) { int ret; - u8 reg = 0; + u8 reg; ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg); if (ret < 0) return ret; - if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) - ret = reg & STATUS_GEN1_STAGE_MASK; - else - ret = (reg & STATUS_GEN2_STATE_MASK) >> STATUS_GEN2_STATE_SHIFT; + return FIELD_GET(STATUS_GEN1_STAGE_MASK, reg); } - return ret; +/** + * qpnp_tm_gen2_get_temp_stage() - return over-temperature stage + * @chip: Pointer to the qpnp_tm chip + * + * Return: stage on success, or errno on failure. + */ +static int qpnp_tm_gen2_get_temp_stage(struct qpnp_tm_chip *chip) +{ + int ret; + u8 reg; + + ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg); + if (ret < 0) + return ret; + + ret = FIELD_GET(STATUS_GEN2_STATE_MASK, reg); + + return alarm_state_map[ret]; +} + +/** + * qpnp_tm_lite_get_temp_stage() - return over-temperature stage + * @chip: Pointer to the qpnp_tm chip + * + * Return: alarm interrupt state on success, or errno on failure. 
+ */ +static int qpnp_tm_lite_get_temp_stage(struct qpnp_tm_chip *chip) +{ + u8 reg = 0; + int ret; + + ret = qpnp_tm_read(chip, QPNP_TM_REG_IRQ_STATUS, &reg); + if (ret < 0) + return ret; + + return FIELD_GET(IRQ_STATUS_MASK, reg); } /* @@ -150,23 +248,16 @@ static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip) */ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip) { - unsigned int stage, stage_new, stage_old; + unsigned int stage_new, stage_old; int ret; WARN_ON(!mutex_is_locked(&chip->lock)); - ret = qpnp_tm_get_temp_stage(chip); + ret = chip->data->get_temp_stage(chip); if (ret < 0) return ret; - stage = ret; - - if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) { - stage_new = stage; - stage_old = chip->stage; - } else { - stage_new = alarm_state_map[stage]; - stage_old = alarm_state_map[chip->stage]; - } + stage_new = ret; + stage_old = chip->stage; if (stage_new > stage_old) { /* increasing stage, use lower bound */ @@ -178,7 +269,7 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip) - TEMP_STAGE_HYSTERESIS; } - chip->stage = stage; + chip->stage = stage_new; return 0; } @@ -218,35 +309,35 @@ static int qpnp_tm_get_temp(struct thermal_zone_device *tz, int *temp) static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, int temp) { - long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1]; - long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1]; - bool disable_s2_shutdown = false; - u8 reg; + long stage2_threshold_min = (*chip->data->temp_map)[THRESH_MIN][STAGE2]; + long stage2_threshold_max = (*chip->data->temp_map)[THRESH_MAX][STAGE2]; + bool disable_stage2_shutdown = false; + u8 reg, threshold; WARN_ON(!mutex_is_locked(&chip->lock)); /* - * Default: S2 and S3 shutdown enabled, thresholds at + * Default: Stage 2 and Stage 3 shutdown enabled, thresholds at * lowest threshold set, monitoring at 25Hz */ reg = SHUTDOWN_CTRL1_RATE_25HZ; if (temp == THERMAL_TEMP_INVALID || temp < stage2_threshold_min) { - chip->thresh = THRESH_MIN; + threshold = THRESH_MIN; goto skip; } if (temp <= stage2_threshold_max) { - chip->thresh = THRESH_MAX - + threshold = THRESH_MAX - ((stage2_threshold_max - temp) / TEMP_THRESH_STEP); - disable_s2_shutdown = true; + disable_stage2_shutdown = true; } else { - chip->thresh = THRESH_MAX; + threshold = THRESH_MAX; if (chip->adc) - disable_s2_shutdown = true; + disable_stage2_shutdown = true; else dev_warn(chip->dev, "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! 
Configuring stage 2 shutdown at %ld mC.\n", @@ -254,9 +345,11 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, } skip: - reg |= chip->thresh; - if (disable_s2_shutdown) - reg |= SHUTDOWN_CTRL1_OVERRIDE_S2; + memcpy(chip->temp_thresh_map, chip->data->temp_map[threshold], + sizeof(chip->temp_thresh_map)); + reg |= threshold; + if (disable_stage2_shutdown && !chip->require_stage2_shutdown) + reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2; return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg); } @@ -282,6 +375,146 @@ static const struct thermal_zone_device_ops qpnp_tm_sensor_ops = { .set_trip_temp = qpnp_tm_set_trip_temp, }; +static int qpnp_tm_gen2_rev2_set_temp_thresh(struct qpnp_tm_chip *chip, unsigned int trip, int temp) +{ + int ret, temp_cfg; + u8 reg; + + WARN_ON(!mutex_is_locked(&chip->lock)); + + if (trip >= STAGE_COUNT) { + dev_err(chip->dev, "invalid TEMP_DAC trip = %d\n", trip); + return -EINVAL; + } else if (temp < TEMP_DAC_MIN || temp > temp_dac_max[trip]) { + dev_err(chip->dev, "invalid TEMP_DAC temp = %d\n", temp); + return -EINVAL; + } + + reg = TEMP_DAC_TEMP_TO_REG(temp); + temp_cfg = TEMP_DAC_REG_TO_TEMP(reg); + + ret = qpnp_tm_write(chip, QPNP_TM_REG_TEMP_DAC_STG1 + trip, reg); + if (ret < 0) { + dev_err(chip->dev, "TEMP_DAC_STG write failed, ret=%d\n", ret); + return ret; + } + + chip->temp_thresh_map[trip] = temp_cfg; + + return 0; +} + +static int qpnp_tm_gen2_rev2_set_trip_temp(struct thermal_zone_device *tz, + const struct thermal_trip *trip, int temp) +{ + unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv); + struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz); + int ret; + + mutex_lock(&chip->lock); + ret = qpnp_tm_gen2_rev2_set_temp_thresh(chip, trip_index, temp); + mutex_unlock(&chip->lock); + + return ret; +} + +static const struct thermal_zone_device_ops qpnp_tm_gen2_rev2_sensor_ops = { + .get_temp = qpnp_tm_get_temp, + .set_trip_temp = qpnp_tm_gen2_rev2_set_trip_temp, +}; + +static int qpnp_tm_lite_set_temp_thresh(struct qpnp_tm_chip *chip, unsigned int trip, int temp) +{ + int ret, temp_cfg, i; + const long *temp_map; + u8 reg, thresh; + u16 addr; + + WARN_ON(!mutex_is_locked(&chip->lock)); + + if (trip >= STAGE_COUNT) { + dev_err(chip->dev, "invalid TEMP_LITE trip = %d\n", trip); + return -EINVAL; + } + + switch (trip) { + case 0: + temp_map = temp_lite_warning_map; + addr = QPNP_TM_REG_LITE_TEMP_CFG1; + break; + case 1: + /* + * The second trip point is purely in software to facilitate + * a controlled shutdown after the warning threshold is crossed + * but before the automatic hardware shutdown threshold is + * crossed. 
+ */ + return 0; + case 2: + temp_map = temp_lite_shutdown_map; + addr = QPNP_TM_REG_LITE_TEMP_CFG2; + break; + default: + return 0; + } + + if (temp < temp_map[THRESH_MIN] || temp > temp_map[THRESH_MAX]) { + dev_err(chip->dev, "invalid TEMP_LITE temp = %d\n", temp); + return -EINVAL; + } + + thresh = 0; + temp_cfg = temp_map[thresh]; + for (i = THRESH_MAX; i >= THRESH_MIN; i--) { + if (temp >= temp_map[i]) { + thresh = i; + temp_cfg = temp_map[i]; + break; + } + } + + if (temp_cfg == chip->temp_thresh_map[trip]) + return 0; + + ret = qpnp_tm_read(chip, addr, ®); + if (ret < 0) { + dev_err(chip->dev, "LITE_TEMP_CFG read failed, ret=%d\n", ret); + return ret; + } + + reg &= ~LITE_TEMP_CFG_THRESHOLD_MASK; + reg |= FIELD_PREP(LITE_TEMP_CFG_THRESHOLD_MASK, thresh); + + ret = qpnp_tm_write(chip, addr, reg); + if (ret < 0) { + dev_err(chip->dev, "LITE_TEMP_CFG write failed, ret=%d\n", ret); + return ret; + } + + chip->temp_thresh_map[trip] = temp_cfg; + + return 0; +} + +static int qpnp_tm_lite_set_trip_temp(struct thermal_zone_device *tz, + const struct thermal_trip *trip, int temp) +{ + unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv); + struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz); + int ret; + + mutex_lock(&chip->lock); + ret = qpnp_tm_lite_set_temp_thresh(chip, trip_index, temp); + mutex_unlock(&chip->lock); + + return ret; +} + +static const struct thermal_zone_device_ops qpnp_tm_lite_sensor_ops = { + .get_temp = qpnp_tm_get_temp, + .set_trip_temp = qpnp_tm_lite_set_trip_temp, +}; + static irqreturn_t qpnp_tm_isr(int irq, void *data) { struct qpnp_tm_chip *chip = data; @@ -291,49 +524,227 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data) return IRQ_HANDLED; } -/* - * This function initializes the internal temp value based on only the - * current thermal stage and threshold. Setup threshold control and - * disable shutdown override. - */ -static int qpnp_tm_init(struct qpnp_tm_chip *chip) +/* Read the hardware default stage threshold temperatures */ +static int qpnp_tm_sync_thresholds(struct qpnp_tm_chip *chip) { - unsigned int stage; + u8 reg, threshold; int ret; + + ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, ®); + if (ret < 0) + return ret; + + threshold = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK; + memcpy(chip->temp_thresh_map, chip->data->temp_map[threshold], + sizeof(chip->temp_thresh_map)); + + return ret; +} + +static int qpnp_tm_configure_trip_temp(struct qpnp_tm_chip *chip) +{ + int crit_temp, ret; + + ret = thermal_zone_get_crit_temp(chip->tz_dev, &crit_temp); + if (ret) + crit_temp = THERMAL_TEMP_INVALID; + + mutex_lock(&chip->lock); + ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp); + mutex_unlock(&chip->lock); + + return ret; +} + +/* Configure TEMP_DAC registers based on DT thermal_zone trips */ +static int qpnp_tm_gen2_rev2_configure_trip_temps_cb(struct thermal_trip *trip, void *data) +{ + struct qpnp_tm_chip *chip = data; + int ret; + + mutex_lock(&chip->lock); + trip->priv = THERMAL_INT_TO_TRIP_PRIV(chip->ntrips); + ret = qpnp_tm_gen2_rev2_set_temp_thresh(chip, chip->ntrips, trip->temperature); + chip->ntrips++; + mutex_unlock(&chip->lock); + + return ret; +} + +static int qpnp_tm_gen2_rev2_configure_trip_temps(struct qpnp_tm_chip *chip) +{ + int ret, i; + + ret = thermal_zone_for_each_trip(chip->tz_dev, + qpnp_tm_gen2_rev2_configure_trip_temps_cb, chip); + if (ret < 0) + return ret; + + /* Verify that trips are strictly increasing. 
*/ + for (i = 1; i < STAGE_COUNT; i++) { + if (chip->temp_thresh_map[i] <= chip->temp_thresh_map[i - 1]) { + dev_err(chip->dev, "Threshold %d=%ld <= threshold %d=%ld\n", + i, chip->temp_thresh_map[i], i - 1, + chip->temp_thresh_map[i - 1]); + return -EINVAL; + } + } + + return 0; +} + +/* Read the hardware default TEMP_DAC stage threshold temperatures */ +static int qpnp_tm_gen2_rev2_sync_thresholds(struct qpnp_tm_chip *chip) +{ + int ret, i; u8 reg = 0; - int crit_temp; + + for (i = 0; i < STAGE_COUNT; i++) { + ret = qpnp_tm_read(chip, QPNP_TM_REG_TEMP_DAC_STG1 + i, ®); + if (ret < 0) + return ret; + + chip->temp_thresh_map[i] = TEMP_DAC_REG_TO_TEMP(reg); + } + + return 0; +} + +/* Configure TEMP_LITE registers based on DT thermal_zone trips */ +static int qpnp_tm_lite_configure_trip_temps_cb(struct thermal_trip *trip, void *data) +{ + struct qpnp_tm_chip *chip = data; + int ret; mutex_lock(&chip->lock); + trip->priv = THERMAL_INT_TO_TRIP_PRIV(chip->ntrips); + ret = qpnp_tm_lite_set_temp_thresh(chip, chip->ntrips, trip->temperature); + chip->ntrips++; + mutex_unlock(&chip->lock); - ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, ®); + return ret; +} + +static int qpnp_tm_lite_configure_trip_temps(struct qpnp_tm_chip *chip) +{ + int ret; + + ret = thermal_zone_for_each_trip(chip->tz_dev, qpnp_tm_lite_configure_trip_temps_cb, chip); + if (ret < 0) + return ret; + + /* Verify that trips are strictly increasing. */ + if (chip->temp_thresh_map[2] <= chip->temp_thresh_map[0]) { + dev_err(chip->dev, "Threshold 2=%ld <= threshold 0=%ld\n", + chip->temp_thresh_map[2], chip->temp_thresh_map[0]); + return -EINVAL; + } + + return 0; +} + +/* Read the hardware default TEMP_LITE stage threshold temperatures */ +static int qpnp_tm_lite_sync_thresholds(struct qpnp_tm_chip *chip) +{ + int ret, thresh; + u8 reg = 0; + + /* + * Store the warning trip temp in temp_thresh_map[0] and the shutdown trip + * temp in temp_thresh_map[2]. The second trip point is purely in software + * to facilitate a controlled shutdown after the warning threshold is + * crossed but before the automatic hardware shutdown threshold is + * crossed. Thus, there is no register to read for the second trip + * point. + */ + ret = qpnp_tm_read(chip, QPNP_TM_REG_LITE_TEMP_CFG1, ®); if (ret < 0) - goto out; + return ret; - chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK; - chip->temp = DEFAULT_TEMP; + thresh = FIELD_GET(LITE_TEMP_CFG_THRESHOLD_MASK, reg); + chip->temp_thresh_map[0] = temp_lite_warning_map[thresh]; - ret = qpnp_tm_get_temp_stage(chip); + ret = qpnp_tm_read(chip, QPNP_TM_REG_LITE_TEMP_CFG2, ®); if (ret < 0) - goto out; - chip->stage = ret; + return ret; - stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1 - ? 
chip->stage : alarm_state_map[chip->stage]; + thresh = FIELD_GET(LITE_TEMP_CFG_THRESHOLD_MASK, reg); + chip->temp_thresh_map[2] = temp_lite_shutdown_map[thresh]; - if (stage) - chip->temp = qpnp_tm_decode_temp(chip, stage); + return 0; +} - mutex_unlock(&chip->lock); +static const struct spmi_temp_alarm_data spmi_temp_alarm_data = { + .ops = &qpnp_tm_sensor_ops, + .temp_map = &temp_map_gen1, + .sync_thresholds = qpnp_tm_sync_thresholds, + .configure_trip_temps = qpnp_tm_configure_trip_temp, + .get_temp_stage = qpnp_tm_gen1_get_temp_stage, +}; - ret = thermal_zone_get_crit_temp(chip->tz_dev, &crit_temp); - if (ret) - crit_temp = THERMAL_TEMP_INVALID; +static const struct spmi_temp_alarm_data spmi_temp_alarm_gen2_data = { + .ops = &qpnp_tm_sensor_ops, + .temp_map = &temp_map_gen1, + .sync_thresholds = qpnp_tm_sync_thresholds, + .configure_trip_temps = qpnp_tm_configure_trip_temp, + .get_temp_stage = qpnp_tm_gen2_get_temp_stage, +}; - mutex_lock(&chip->lock); +static const struct spmi_temp_alarm_data spmi_temp_alarm_gen2_rev1_data = { + .ops = &qpnp_tm_sensor_ops, + .temp_map = &temp_map_gen2_v1, + .sync_thresholds = qpnp_tm_sync_thresholds, + .configure_trip_temps = qpnp_tm_configure_trip_temp, + .get_temp_stage = qpnp_tm_gen2_get_temp_stage, +}; - ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp); +static const struct spmi_temp_alarm_data spmi_temp_alarm_gen2_rev2_data = { + .ops = &qpnp_tm_gen2_rev2_sensor_ops, + .sync_thresholds = qpnp_tm_gen2_rev2_sync_thresholds, + .configure_trip_temps = qpnp_tm_gen2_rev2_configure_trip_temps, + .get_temp_stage = qpnp_tm_gen2_get_temp_stage, +}; + +static const struct spmi_temp_alarm_data spmi_temp_alarm_lite_data = { + .ops = &qpnp_tm_lite_sensor_ops, + .sync_thresholds = qpnp_tm_lite_sync_thresholds, + .configure_trip_temps = qpnp_tm_lite_configure_trip_temps, + .get_temp_stage = qpnp_tm_lite_get_temp_stage, +}; + +/* + * This function initializes the internal temp value based on only the + * current thermal stage and threshold. + */ +static int qpnp_tm_threshold_init(struct qpnp_tm_chip *chip) +{ + int ret; + + ret = chip->data->sync_thresholds(chip); + if (ret < 0) + return ret; + + ret = chip->data->get_temp_stage(chip); + if (ret < 0) + return ret; + chip->stage = ret; + chip->temp = DEFAULT_TEMP; + + if (chip->stage) + chip->temp = qpnp_tm_decode_temp(chip, chip->stage); + + return ret; +} + +/* This function initializes threshold control and disables shutdown override. */ +static int qpnp_tm_init(struct qpnp_tm_chip *chip) +{ + int ret; + u8 reg; + + ret = chip->data->configure_trip_temps(chip); if (ret < 0) - goto out; + return ret; /* Enable the thermal alarm PMIC module in always-on mode. 
*/ reg = ALARM_CTRL_FORCE_ENABLE; @@ -341,8 +752,6 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip) chip->initialized = true; -out: - mutex_unlock(&chip->lock); return ret; } @@ -350,8 +759,8 @@ static int qpnp_tm_probe(struct platform_device *pdev) { struct qpnp_tm_chip *chip; struct device_node *node; - u8 type, subtype, dig_major; - u32 res; + u8 type, subtype, dig_major, dig_minor; + u32 res, dig_revision; int ret, irq; node = pdev->dev.of_node; @@ -402,18 +811,53 @@ static int qpnp_tm_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, ret, "could not read dig_major\n"); + ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MINOR, &dig_minor); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not read dig_minor\n"); + if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1 - && subtype != QPNP_TM_SUBTYPE_GEN2)) { + && subtype != QPNP_TM_SUBTYPE_GEN2 + && subtype != QPNP_TM_SUBTYPE_LITE)) { dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n", type, subtype); return -ENODEV; } chip->subtype = subtype; - if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 1) - chip->temp_map = &temp_map_gen2_v1; + if (subtype == QPNP_TM_SUBTYPE_GEN1) + chip->data = &spmi_temp_alarm_data; + else if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major == 0) + chip->data = &spmi_temp_alarm_gen2_data; + else if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major == 1) + chip->data = &spmi_temp_alarm_gen2_rev1_data; + else if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 2) + chip->data = &spmi_temp_alarm_gen2_rev2_data; + else if (subtype == QPNP_TM_SUBTYPE_LITE) + chip->data = &spmi_temp_alarm_lite_data; else - chip->temp_map = &temp_map_gen1; + return -ENODEV; + + if (chip->subtype == QPNP_TM_SUBTYPE_GEN2) { + dig_revision = (dig_major << 8) | dig_minor; + /* + * Check if stage 2 automatic partial shutdown must remain + * enabled to avoid potential repeated faults upon reaching + * over-temperature stage 3. + */ + switch (dig_revision) { + case 0x0001: + case 0x0002: + case 0x0100: + case 0x0101: + chip->require_stage2_shutdown = true; + break; + } + } + + ret = qpnp_tm_threshold_init(chip); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "threshold init failed\n"); /* * Register the sensor before initializing the hardware to be able to @@ -421,7 +865,7 @@ static int qpnp_tm_probe(struct platform_device *pdev) * before the hardware initialization is completed. 
*/ chip->tz_dev = devm_thermal_of_zone_register( - &pdev->dev, 0, chip, &qpnp_tm_sensor_ops); + &pdev->dev, 0, chip, chip->data->ops); if (IS_ERR(chip->tz_dev)) return dev_err_probe(&pdev->dev, PTR_ERR(chip->tz_dev), "failed to register sensor\n"); diff --git a/drivers/thermal/renesas/rcar_thermal.c b/drivers/thermal/renesas/rcar_thermal.c index 00a66ee0a5b0..fdd7afdc4ff6 100644 --- a/drivers/thermal/renesas/rcar_thermal.c +++ b/drivers/thermal/renesas/rcar_thermal.c @@ -277,7 +277,7 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) return rcar_thermal_get_current_temp(priv, temp); } -static struct thermal_zone_device_ops rcar_thermal_zone_ops = { +static const struct thermal_zone_device_ops rcar_thermal_zone_ops = { .get_temp = rcar_thermal_get_temp, }; diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index a8ad85feb68f..3beff9b6fac3 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/nvmem-consumer.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -69,16 +70,18 @@ struct chip_tsadc_table { * struct rockchip_tsadc_chip - hold the private data of tsadc chip * @chn_offset: the channel offset of the first channel * @chn_num: the channel number of tsadc chip - * @tshut_temp: the hardware-controlled shutdown temperature value + * @trim_slope: used to convert the trim code to a temperature in millicelsius + * @tshut_temp: the hardware-controlled shutdown temperature value, with no trim * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO) * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH) * @initialize: SoC special initialize tsadc controller method * @irq_ack: clear the interrupt * @control: enable/disable method for the tsadc controller - * @get_temp: get the temperature + * @get_temp: get the raw temperature, unadjusted by trim * @set_alarm_temp: set the high temperature interrupt * @set_tshut_temp: set the hardware-controlled shutdown temperature * @set_tshut_mode: set the hardware-controlled shutdown mode + * @get_trim_code: convert a hardware temperature code to one adjusted for by trim * @table: the chip-specific conversion table */ struct rockchip_tsadc_chip { @@ -86,6 +89,9 @@ struct rockchip_tsadc_chip { int chn_offset; int chn_num; + /* Used to convert trim code to trim temp */ + int trim_slope; + /* The hardware-controlled tshut property */ int tshut_temp; enum tshut_mode tshut_mode; @@ -105,6 +111,8 @@ struct rockchip_tsadc_chip { int (*set_tshut_temp)(const struct chip_tsadc_table *table, int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); + int (*get_trim_code)(const struct chip_tsadc_table *table, + int code, int trim_base, int trim_base_frac); /* Per-table methods */ struct chip_tsadc_table table; @@ -114,12 +122,16 @@ struct rockchip_tsadc_chip { * struct rockchip_thermal_sensor - hold the information of thermal sensor * @thermal: pointer to the platform/configuration data * @tzd: pointer to a thermal zone + * @of_node: pointer to the device_node representing this sensor, if any * @id: identifier of the thermal sensor + * @trim_temp: per-sensor trim temperature value */ struct rockchip_thermal_sensor { struct rockchip_thermal_data *thermal; struct thermal_zone_device *tzd; + struct device_node *of_node; int id; + int trim_temp; }; /** @@ 
-132,7 +144,11 @@ struct rockchip_thermal_sensor { * @pclk: the advanced peripherals bus clock * @grf: the general register file will be used to do static set by software * @regs: the base address of tsadc controller - * @tshut_temp: the hardware-controlled shutdown temperature value + * @trim_base: major component of sensor trim value, in Celsius + * @trim_base_frac: minor component of sensor trim value, in Decicelsius + * @trim: fallback thermal trim value for each channel + * @tshut_temp: the hardware-controlled shutdown temperature value, with no trim + * @trim_temp: the fallback trim temperature for the whole sensor * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO) * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH) */ @@ -149,7 +165,12 @@ struct rockchip_thermal_data { struct regmap *grf; void __iomem *regs; + int trim_base; + int trim_base_frac; + int trim; + int tshut_temp; + int trim_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; }; @@ -249,6 +270,9 @@ struct rockchip_thermal_data { #define GRF_CON_TSADC_CH_INV (0x10001 << 1) + +#define RK_MAX_TEMP (180000) + /** * struct tsadc_table - code to temperature conversion table * @code: the value of adc channel @@ -1045,7 +1069,7 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, writel_relaxed(val, regs + TSADCV2_INT_EN); } -static void rk_tsadcv3_tshut_mode(int chn, void __iomem *regs, +static void rk_tsadcv4_tshut_mode(int chn, void __iomem *regs, enum tshut_mode mode) { u32 val_gpio, val_cru; @@ -1061,6 +1085,15 @@ static void rk_tsadcv3_tshut_mode(int chn, void __iomem *regs, writel_relaxed(val_cru, regs + TSADCV3_HSHUT_CRU_INT_EN); } +static int rk_tsadcv2_get_trim_code(const struct chip_tsadc_table *table, + int code, int trim_base, int trim_base_frac) +{ + int temp = trim_base * 1000 + trim_base_frac * 100; + u32 base_code = rk_tsadcv2_temp_to_code(table, temp); + + return code - base_code; +} + static const struct rockchip_tsadc_chip px30_tsadc_data = { /* cpu, gpu */ .chn_offset = 0, @@ -1284,6 +1317,30 @@ static const struct rockchip_tsadc_chip rk3568_tsadc_data = { }, }; +static const struct rockchip_tsadc_chip rk3576_tsadc_data = { + /* top, big_core, little_core, ddr, npu, gpu */ + .chn_offset = 0, + .chn_num = 6, /* six channels for tsadc */ + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ + .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ + .tshut_temp = 95000, + .initialize = rk_tsadcv8_initialize, + .irq_ack = rk_tsadcv4_irq_ack, + .control = rk_tsadcv4_control, + .get_temp = rk_tsadcv4_get_temp, + .set_alarm_temp = rk_tsadcv3_alarm_temp, + .set_tshut_temp = rk_tsadcv3_tshut_temp, + .set_tshut_mode = rk_tsadcv4_tshut_mode, + .get_trim_code = rk_tsadcv2_get_trim_code, + .trim_slope = 923, + .table = { + .id = rk3588_code_table, + .length = ARRAY_SIZE(rk3588_code_table), + .data_mask = TSADCV4_DATA_MASK, + .mode = ADC_INCREMENT, + }, +}; + static const struct rockchip_tsadc_chip rk3588_tsadc_data = { /* top, big_core0, big_core1, little_core, center, gpu, npu */ .chn_offset = 0, @@ -1297,7 +1354,7 @@ static const struct rockchip_tsadc_chip rk3588_tsadc_data = { .get_temp = rk_tsadcv4_get_temp, .set_alarm_temp = rk_tsadcv3_alarm_temp, .set_tshut_temp = rk_tsadcv3_tshut_temp, - .set_tshut_mode = rk_tsadcv3_tshut_mode, + .set_tshut_mode = rk_tsadcv4_tshut_mode, .table = { .id = rk3588_code_table, .length = ARRAY_SIZE(rk3588_code_table), @@ -1343,6 +1400,10 @@ static const struct of_device_id 
of_rockchip_thermal_match[] = { .data = (void *)&rk3568_tsadc_data, }, { + .compatible = "rockchip,rk3576-tsadc", + .data = (void *)&rk3576_tsadc_data, + }, + { .compatible = "rockchip,rk3588-tsadc", .data = (void *)&rk3588_tsadc_data, }, @@ -1387,7 +1448,7 @@ static int rockchip_thermal_set_trips(struct thermal_zone_device *tz, int low, i __func__, sensor->id, low, high); return tsadc->set_alarm_temp(&tsadc->table, - sensor->id, thermal->regs, high); + sensor->id, thermal->regs, high + sensor->trim_temp); } static int rockchip_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp) @@ -1399,6 +1460,8 @@ static int rockchip_thermal_get_temp(struct thermal_zone_device *tz, int *out_te retval = tsadc->get_temp(&tsadc->table, sensor->id, thermal->regs, out_temp); + *out_temp -= sensor->trim_temp; + return retval; } @@ -1407,6 +1470,104 @@ static const struct thermal_zone_device_ops rockchip_of_thermal_ops = { .set_trips = rockchip_thermal_set_trips, }; +/** + * rockchip_get_efuse_value - read an OTP cell from a device node + * @np: pointer to the device node with the nvmem-cells property + * @cell_name: name of cell that should be read + * @value: pointer to where the read value will be placed + * + * Return: Negative errno on failure, during which *value will not be touched, + * or 0 on success. + */ +static int rockchip_get_efuse_value(struct device_node *np, const char *cell_name, + int *value) +{ + struct nvmem_cell *cell; + int ret = 0; + size_t len; + u8 *buf; + int i; + + cell = of_nvmem_cell_get(np, cell_name); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + buf = nvmem_cell_read(cell, &len); + + nvmem_cell_put(cell); + + if (IS_ERR(buf)) + return PTR_ERR(buf); + + if (len > sizeof(*value)) { + ret = -ERANGE; + goto exit; + } + + /* Copy with implicit endian conversion */ + *value = 0; + for (i = 0; i < len; i++) + *value |= (int) buf[i] << (8 * i); + +exit: + kfree(buf); + return ret; +} + +static int rockchip_get_trim_configuration(struct device *dev, struct device_node *np, + struct rockchip_thermal_data *thermal) +{ + const struct rockchip_tsadc_chip *tsadc = thermal->chip; + int trim_base = 0, trim_base_frac = 0, trim = 0; + int trim_code; + int ret; + + thermal->trim_base = 0; + thermal->trim_base_frac = 0; + thermal->trim = 0; + + if (!tsadc->get_trim_code) + return 0; + + ret = rockchip_get_efuse_value(np, "trim_base", &trim_base); + if (ret < 0) { + if (ret == -ENOENT) { + trim_base = 30; + dev_dbg(dev, "trim_base is absent, defaulting to 30\n"); + } else { + dev_err(dev, "failed reading nvmem value of trim_base: %pe\n", + ERR_PTR(ret)); + return ret; + } + } + ret = rockchip_get_efuse_value(np, "trim_base_frac", &trim_base_frac); + if (ret < 0) { + if (ret == -ENOENT) { + dev_dbg(dev, "trim_base_frac is absent, defaulting to 0\n"); + } else { + dev_err(dev, "failed reading nvmem value of trim_base_frac: %pe\n", + ERR_PTR(ret)); + return ret; + } + } + thermal->trim_base = trim_base; + thermal->trim_base_frac = trim_base_frac; + + /* + * If the tsadc node contains the trim property, then it is used in the + * absence of per-channel trim values + */ + if (!rockchip_get_efuse_value(np, "trim", &trim)) + thermal->trim = trim; + if (trim) { + trim_code = tsadc->get_trim_code(&tsadc->table, trim, + trim_base, trim_base_frac); + thermal->trim_temp = thermal->chip->trim_slope * trim_code; + } + + return 0; +} + static int rockchip_configure_from_dt(struct device *dev, struct device_node *np, struct rockchip_thermal_data *thermal) @@ -1467,6 +1628,8 @@ static int 
rockchip_configure_from_dt(struct device *dev, if (IS_ERR(thermal->grf)) dev_warn(dev, "Missing rockchip,grf property\n"); + rockchip_get_trim_configuration(dev, np, thermal); + return 0; } @@ -1477,23 +1640,50 @@ rockchip_thermal_register_sensor(struct platform_device *pdev, int id) { const struct rockchip_tsadc_chip *tsadc = thermal->chip; + struct device *dev = &pdev->dev; + int trim = thermal->trim; + int trim_code, tshut_temp; + int trim_temp = 0; int error; + if (thermal->trim_temp) + trim_temp = thermal->trim_temp; + + if (tsadc->get_trim_code && sensor->of_node) { + error = rockchip_get_efuse_value(sensor->of_node, "trim", &trim); + if (error < 0 && error != -ENOENT) { + dev_err(dev, "failed reading trim of sensor %d: %pe\n", + id, ERR_PTR(error)); + return error; + } + if (trim) { + trim_code = tsadc->get_trim_code(&tsadc->table, trim, + thermal->trim_base, + thermal->trim_base_frac); + trim_temp = thermal->chip->trim_slope * trim_code; + } + } + + sensor->trim_temp = trim_temp; + + dev_dbg(dev, "trim of sensor %d is %d\n", id, sensor->trim_temp); + + tshut_temp = min(thermal->tshut_temp + sensor->trim_temp, RK_MAX_TEMP); + tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, - thermal->tshut_temp); + error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, tshut_temp); if (error) - dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", - __func__, thermal->tshut_temp, error); + dev_err(dev, "%s: invalid tshut=%d, error=%d\n", + __func__, tshut_temp, error); sensor->thermal = thermal; sensor->id = id; - sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, id, sensor, + sensor->tzd = devm_thermal_of_zone_register(dev, id, sensor, &rockchip_of_thermal_ops); if (IS_ERR(sensor->tzd)) { error = PTR_ERR(sensor->tzd); - dev_err(&pdev->dev, "failed to register sensor %d: %d\n", + dev_err(dev, "failed to register sensor %d: %d\n", id, error); return error; } @@ -1516,9 +1706,11 @@ static int rockchip_thermal_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct rockchip_thermal_data *thermal; + struct device_node *child; int irq; int i; int error; + u32 chn; irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -1569,6 +1761,18 @@ static int rockchip_thermal_probe(struct platform_device *pdev) thermal->chip->initialize(thermal->grf, thermal->regs, thermal->tshut_polarity); + for_each_available_child_of_node(np, child) { + if (!of_property_read_u32(child, "reg", &chn)) { + if (chn < thermal->chip->chn_num) + thermal->sensors[chn].of_node = child; + else + dev_warn(&pdev->dev, + "sensor address (%d) too large, ignoring its trim\n", + chn); + } + + } + for (i = 0; i < thermal->chip->chn_num; i++) { error = rockchip_thermal_register_sensor(pdev, thermal, &thermal->sensors[i], @@ -1638,8 +1842,11 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev) static int __maybe_unused rockchip_thermal_resume(struct device *dev) { struct rockchip_thermal_data *thermal = dev_get_drvdata(dev); - int i; + const struct rockchip_tsadc_chip *tsadc = thermal->chip; + struct rockchip_thermal_sensor *sensor; + int tshut_temp; int error; + int i; error = clk_enable(thermal->clk); if (error) @@ -1653,21 +1860,23 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) rockchip_thermal_reset_controller(thermal->reset); - thermal->chip->initialize(thermal->grf, thermal->regs, - thermal->tshut_polarity); + tsadc->initialize(thermal->grf, thermal->regs, 
thermal->tshut_polarity); for (i = 0; i < thermal->chip->chn_num; i++) { - int id = thermal->sensors[i].id; + sensor = &thermal->sensors[i]; + + tshut_temp = min(thermal->tshut_temp + sensor->trim_temp, + RK_MAX_TEMP); - thermal->chip->set_tshut_mode(id, thermal->regs, + tsadc->set_tshut_mode(sensor->id, thermal->regs, thermal->tshut_mode); - error = thermal->chip->set_tshut_temp(&thermal->chip->table, - id, thermal->regs, - thermal->tshut_temp); + error = tsadc->set_tshut_temp(&thermal->chip->table, + sensor->id, thermal->regs, + tshut_temp); if (error) dev_err(dev, "%s: invalid tshut=%d, error=%d\n", - __func__, thermal->tshut_temp, error); + __func__, tshut_temp, error); } thermal->chip->control(thermal->regs, true); diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c index bb96be947521..603dadcd3df5 100644 --- a/drivers/thermal/spear_thermal.c +++ b/drivers/thermal/spear_thermal.c @@ -41,7 +41,7 @@ static inline int thermal_get_temp(struct thermal_zone_device *thermal, return 0; } -static struct thermal_zone_device_ops ops = { +static const struct thermal_zone_device_ops ops = { .get_temp = thermal_get_temp, }; diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c index a14a37d54698..1470ca519def 100644 --- a/drivers/thermal/st/st_thermal.c +++ b/drivers/thermal/st/st_thermal.c @@ -132,7 +132,7 @@ static int st_thermal_get_temp(struct thermal_zone_device *th, int *temperature) return 0; } -static struct thermal_zone_device_ops st_tz_ops = { +static const struct thermal_zone_device_ops st_tz_ops = { .get_temp = st_thermal_get_temp, }; diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index 926f1052e6de..53a5c649f4b1 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -1206,7 +1206,7 @@ static const struct irq_domain_ops soctherm_oc_domain_ops = { /** * soctherm_oc_int_init() - Initial enabling of the over * current interrupts - * @np: The devicetree node for soctherm + * @fwnode: The devicetree node for soctherm * @num_irqs: The number of new interrupt requests * * Sets the over current interrupt request chip data @@ -1215,7 +1215,7 @@ static const struct irq_domain_ops soctherm_oc_domain_ops = { * -ENOMEM (out of memory), or irq_base if the function failed to * allocate the irqs */ -static int soctherm_oc_int_init(struct device_node *np, int num_irqs) +static int soctherm_oc_int_init(struct fwnode_handle *fwnode, int num_irqs) { if (!num_irqs) { pr_info("%s(): OC interrupts are not enabled\n", __func__); @@ -1234,10 +1234,8 @@ static int soctherm_oc_int_init(struct device_node *np, int num_irqs) soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type; soc_irq_cdata.irq_chip.irq_set_wake = NULL; - soc_irq_cdata.domain = irq_domain_create_linear(of_fwnode_handle(np), num_irqs, - &soctherm_oc_domain_ops, - &soc_irq_cdata); - + soc_irq_cdata.domain = irq_domain_create_linear(fwnode, num_irqs, &soctherm_oc_domain_ops, + &soc_irq_cdata); if (!soc_irq_cdata.domain) { pr_err("%s: Failed to create IRQ domain\n", __func__); return -ENOMEM; @@ -1968,10 +1966,9 @@ static void tegra_soctherm_throttle(struct device *dev) static int soctherm_interrupts_init(struct platform_device *pdev, struct tegra_soctherm *tegra) { - struct device_node *np = pdev->dev.of_node; int ret; - ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX); + ret = soctherm_oc_int_init(dev_fwnode(&pdev->dev), TEGRA_SOC_OC_IRQ_MAX); if (ret < 0) { dev_err(&pdev->dev, "soctherm_oc_int_init failed\n"); return 
ret; diff --git a/drivers/thermal/testing/zone.c b/drivers/thermal/testing/zone.c index 1f4e450100e2..4257d813d572 100644 --- a/drivers/thermal/testing/zone.c +++ b/drivers/thermal/testing/zone.c @@ -381,7 +381,7 @@ static int tt_zone_get_temp(struct thermal_zone_device *tz, int *temp) return 0; } -static struct thermal_zone_device_ops tt_zone_ops = { +static const struct thermal_zone_device_ops tt_zone_ops = { .get_temp = tt_zone_get_temp, }; diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 24b9055a0b6c..d80612506a33 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -40,10 +40,13 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf) ret = thermal_zone_get_temp(tz, &temperature); - if (ret) - return ret; + if (!ret) + return sprintf(buf, "%d\n", temperature); - return sprintf(buf, "%d\n", temperature); + if (ret == -EAGAIN) + return -ENODATA; + + return ret; } static ssize_t diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 0213381fa358..d16c207a1a9b 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -399,7 +399,7 @@ static int serdev_drv_probe(struct device *dev) const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver); int ret; - ret = dev_pm_domain_attach(dev, true); + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (ret) return ret; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 2dea9e42a0f8..ea5f0af1e8d2 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -2369,8 +2369,7 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) for (; count; --count, ++epfile) { BUG_ON(mutex_is_locked(&epfile->mutex)); if (epfile->dentry) { - d_delete(epfile->dentry); - dput(epfile->dentry); + simple_recursive_removal(epfile->dentry, NULL); epfile->dentry = NULL; } } diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index fcce84a726f2..b51e132b0cd2 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1561,7 +1561,6 @@ static void destroy_ep_files (struct dev_data *dev) spin_lock_irq (&dev->lock); while (!list_empty(&dev->epfiles)) { struct ep_data *ep; - struct inode *parent; struct dentry *dentry; /* break link to FS */ @@ -1571,7 +1570,6 @@ static void destroy_ep_files (struct dev_data *dev) dentry = ep->dentry; ep->dentry = NULL; - parent = d_inode(dentry->d_parent); /* break link to controller */ mutex_lock(&ep->lock); @@ -1586,10 +1584,7 @@ static void destroy_ep_files (struct dev_data *dev) put_ep (ep); /* break link to dcache */ - inode_lock(parent); - d_delete (dentry); - dput (dentry); - inode_unlock(parent); + simple_recursive_removal(dentry, NULL); spin_lock_irq (&dev->lock); } diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c index f5d09a91e554..b97fb7b0cb2c 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.c +++ b/drivers/usb/gadget/udc/pxa25x_udc.c @@ -2348,15 +2348,14 @@ static int pxa25x_udc_probe(struct platform_device *pdev) dev->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); if (gpio_is_valid(dev->mach->gpio_pullup)) { - retval = devm_gpio_request(&pdev->dev, dev->mach->gpio_pullup, - "pca25x_udc GPIO PULLUP"); + retval = devm_gpio_request_one(&pdev->dev, dev->mach->gpio_pullup, + GPIOF_OUT_INIT_LOW, "pca25x_udc GPIO PULLUP"); if (retval) { dev_dbg(&pdev->dev, "can't get pullup gpio %d, 
err: %d\n", dev->mach->gpio_pullup, retval); goto err; } - gpio_direction_output(dev->mach->gpio_pullup, 0); } timer_setup(&dev->timer, udc_watchdog, 0); diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c index 1864f9f80617..5946c5abeae8 100644 --- a/drivers/virt/coco/efi_secret/efi_secret.c +++ b/drivers/virt/coco/efi_secret/efi_secret.c @@ -31,8 +31,6 @@ struct efi_secret { struct dentry *secrets_dir; - struct dentry *fs_dir; - struct dentry *fs_files[EFI_SECRET_NUM_FILES]; void __iomem *secret_data; u64 secret_data_len; }; @@ -119,10 +117,8 @@ static void wipe_memory(void *addr, size_t size) static int efi_secret_unlink(struct inode *dir, struct dentry *dentry) { - struct efi_secret *s = efi_secret_get(); struct inode *inode = d_inode(dentry); struct secret_entry *e = (struct secret_entry *)inode->i_private; - int i; if (e) { /* Zero out the secret data */ @@ -132,19 +128,7 @@ static int efi_secret_unlink(struct inode *dir, struct dentry *dentry) inode->i_private = NULL; - for (i = 0; i < EFI_SECRET_NUM_FILES; i++) - if (s->fs_files[i] == dentry) - s->fs_files[i] = NULL; - - /* - * securityfs_remove tries to lock the directory's inode, but we reach - * the unlink callback when it's already locked - */ - inode_unlock(dir); - securityfs_remove(dentry); - inode_lock(dir); - - return 0; + return simple_unlink(inode, dentry); } static const struct inode_operations efi_secret_dir_inode_operations = { @@ -194,15 +178,6 @@ unmap: static void efi_secret_securityfs_teardown(struct platform_device *dev) { struct efi_secret *s = efi_secret_get(); - int i; - - for (i = (EFI_SECRET_NUM_FILES - 1); i >= 0; i--) { - securityfs_remove(s->fs_files[i]); - s->fs_files[i] = NULL; - } - - securityfs_remove(s->fs_dir); - s->fs_dir = NULL; securityfs_remove(s->secrets_dir); s->secrets_dir = NULL; @@ -217,7 +192,7 @@ static int efi_secret_securityfs_setup(struct platform_device *dev) unsigned char *ptr; struct secret_header *h; struct secret_entry *e; - struct dentry *dent; + struct dentry *dent, *dir; char guid_str[EFI_VARIABLE_GUID_LEN + 1]; ptr = (void __force *)s->secret_data; @@ -240,8 +215,6 @@ static int efi_secret_securityfs_setup(struct platform_device *dev) } s->secrets_dir = NULL; - s->fs_dir = NULL; - memset(s->fs_files, 0, sizeof(s->fs_files)); dent = securityfs_create_dir("secrets", NULL); if (IS_ERR(dent)) { @@ -251,14 +224,13 @@ static int efi_secret_securityfs_setup(struct platform_device *dev) } s->secrets_dir = dent; - dent = securityfs_create_dir("coco", s->secrets_dir); - if (IS_ERR(dent)) { + dir = securityfs_create_dir("coco", s->secrets_dir); + if (IS_ERR(dir)) { dev_err(&dev->dev, "Error creating coco securityfs directory entry err=%ld\n", - PTR_ERR(dent)); - return PTR_ERR(dent); + PTR_ERR(dir)); + return PTR_ERR(dir); } - d_inode(dent)->i_op = &efi_secret_dir_inode_operations; - s->fs_dir = dent; + d_inode(dir)->i_op = &efi_secret_dir_inode_operations; bytes_left = h->len - sizeof(*h); ptr += sizeof(*h); @@ -274,15 +246,14 @@ static int efi_secret_securityfs_setup(struct platform_device *dev) if (efi_guidcmp(e->guid, NULL_GUID)) { efi_guid_to_str(&e->guid, guid_str); - dent = securityfs_create_file(guid_str, 0440, s->fs_dir, (void *)e, + dent = securityfs_create_file(guid_str, 0440, dir, (void *)e, &efi_secret_bin_file_fops); if (IS_ERR(dent)) { dev_err(&dev->dev, "Error creating efi_secret securityfs entry\n"); ret = PTR_ERR(dent); goto err_cleanup; } - - s->fs_files[i++] = dent; + i++; } ptr += e->len; bytes_left -= e->len; diff --git 
a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 1f60c9d5cb18..a7b297dae489 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -329,20 +329,21 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
 	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
 		unsigned int this_vecs = affd->set_size[i];
+		unsigned int nr_masks;
 		int j;
-		struct cpumask *result = group_cpus_evenly(this_vecs);
+		struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
 
 		if (!result) {
 			kfree(masks);
 			return NULL;
 		}
 
-		for (j = 0; j < this_vecs; j++)
+		for (j = 0; j < nr_masks; j++)
 			cpumask_copy(&masks[curvec + j], &result[j]);
 		kfree(result);
 
-		curvec += this_vecs;
-		usedvecs += this_vecs;
+		curvec += nr_masks;
+		usedvecs += nr_masks;
 	}
 
 	/* Fill out vectors at the end that don't need affinity */
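
The TEMP_LITE handling added to qcom-spmi-temp-alarm.c above programs a trip point by picking the highest entry in the relevant threshold map that does not exceed the requested temperature. Below is a minimal standalone sketch of that selection logic, using the temp_lite_warning_map values from the patch; the register read-modify-write, locking, and error codes are omitted.

#include <stdio.h>

#define THRESH_COUNT	4
#define THRESH_MIN	0
#define THRESH_MAX	(THRESH_COUNT - 1)

static const long temp_lite_warning_map[THRESH_COUNT] = {
	115000, 125000, 135000, 145000
};

/* Return the selected threshold index and its temperature via *temp_cfg. */
static int pick_lite_threshold(long temp, long *temp_cfg)
{
	int i, thresh = 0;

	if (temp < temp_lite_warning_map[THRESH_MIN] ||
	    temp > temp_lite_warning_map[THRESH_MAX])
		return -1;	/* out of range, as in the driver's -EINVAL path */

	*temp_cfg = temp_lite_warning_map[thresh];
	for (i = THRESH_MAX; i >= THRESH_MIN; i--) {
		if (temp >= temp_lite_warning_map[i]) {
			thresh = i;
			*temp_cfg = temp_lite_warning_map[i];
			break;
		}
	}

	return thresh;
}

int main(void)
{
	long cfg = 0;
	int thresh = pick_lite_threshold(130000, &cfg);

	/* 130000 mC selects index 1 (125000 mC), the nearest value not above it. */
	printf("thresh=%d temp_cfg=%ld\n", thresh, cfg);
	return 0;
}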
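
The Rockchip trim support above derives a per-sensor offset by converting the OTP trim code into a delta against the ADC code for the trim base temperature and scaling it by trim_slope (923 for rk3576). The sketch below reproduces only that arithmetic as a standalone program; temp_to_code() is a hypothetical stand-in for rk_tsadcv2_temp_to_code() and the numeric inputs are invented for illustration.

#include <stdio.h>

/*
 * Hypothetical linear code/temperature relation, standing in for
 * rk_tsadcv2_temp_to_code() and its conversion table.
 */
static int temp_to_code(long temp_mc)
{
	return 500 + (int)(temp_mc / 200);	/* invented values */
}

int main(void)
{
	int trim_slope = 923;		/* rk3576_tsadc_data.trim_slope in the patch */
	int trim_base = 30;		/* Celsius; default when the OTP cell is absent */
	int trim_base_frac = 0;		/* decicelsius */
	int otp_trim = 655;		/* example per-sensor OTP trim code */

	long base_temp = trim_base * 1000 + trim_base_frac * 100;
	int trim_code = otp_trim - temp_to_code(base_temp);	/* as in rk_tsadcv2_get_trim_code() */
	int trim_temp = trim_slope * trim_code;			/* millicelsius offset */

	/* Readings subtract trim_temp; alarm and tshut limits add it. */
	printf("trim_code=%d trim_temp=%d mC\n", trim_code, trim_temp);
	return 0;
}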
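
The virtio_vdpa hunk above switches the affinity-mask loop from the requested set size to the nr_masks count reported by group_cpus_evenly(), presumably because the helper can produce fewer masks than requested. A self-contained sketch of that caller pattern follows, with group_evenly_stub() standing in for the kernel API so the example can run in user space.

#include <stdio.h>
#include <stdlib.h>

/* Pretend the grouping helper can only ever form up to 4 groups. */
static int *group_evenly_stub(unsigned int want, unsigned int *nr_masks)
{
	unsigned int n = want < 4 ? want : 4;
	int *masks = calloc(n, sizeof(*masks));

	if (!masks)
		return NULL;
	*nr_masks = n;
	return masks;
}

int main(void)
{
	unsigned int set_size[] = { 2, 6 };	/* example interrupt sets */
	unsigned int curvec = 0, usedvecs = 0, nr_masks, i, j;

	for (i = 0; i < 2; i++) {
		int *result = group_evenly_stub(set_size[i], &nr_masks);

		if (!result)
			return 1;
		for (j = 0; j < nr_masks; j++)	/* not set_size[i] */
			result[j] = 1;		/* stands in for cpumask_copy() */
		free(result);
		curvec += nr_masks;		/* advance by what was actually produced */
		usedvecs += nr_masks;
	}

	printf("used %u vectors, next free index %u\n", usedvecs, curvec);
	return 0;
}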