Diffstat (limited to 'drivers/nvmem/core.c')
-rw-r--r--  drivers/nvmem/core.c  268
1 files changed, 131 insertions, 137 deletions
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 980123fb4dde..fd2a9698d1c9 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -47,9 +47,6 @@ struct nvmem_cell {
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);
-static DEFINE_MUTEX(nvmem_cell_mutex);
-static LIST_HEAD(nvmem_cell_tables);
-
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);
@@ -179,18 +176,41 @@ static ssize_t type_show(struct device *dev,
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
- return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
+ return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}
static DEVICE_ATTR_RO(type);
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return sysfs_emit(buf, "%d\n", nvmem->read_only);
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+ int ret = kstrtobool(buf, &nvmem->read_only);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(force_ro);
+
static struct attribute *nvmem_attrs[] = {
+ &dev_attr_force_ro.attr,
&dev_attr_type.attr,
NULL,
};
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
@@ -203,19 +223,12 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
- /* Stop the user from reading */
- if (pos >= nvmem->size)
- return 0;
-
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_read)
@@ -230,7 +243,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
}
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
@@ -243,22 +256,15 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
- /* Stop the user from writing */
- if (pos >= nvmem->size)
- return -EFBIG;
-
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
count = round_down(count, nvmem->word_size);
- if (!nvmem->reg_write)
+ if (!nvmem->reg_write || nvmem->read_only)
return -EPERM;
rc = nvmem_reg_write(nvmem, pos, buf, count);
@@ -289,21 +295,49 @@ static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
}
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr, int i)
+ const struct bin_attribute *attr,
+ int i)
{
struct device *dev = kobj_to_dev(kobj);
struct nvmem_device *nvmem = to_nvmem_device(dev);
- attr->size = nvmem->size;
-
return nvmem_bin_attr_get_umode(nvmem);
}
+static size_t nvmem_bin_attr_size(struct kobject *kobj,
+ const struct bin_attribute *attr,
+ int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return nvmem->size;
+}
+
+static umode_t nvmem_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ /*
+ * If the device has no .reg_write operation, do not allow
+ * configuration as read-write.
+ * If the device is set as read-only by configuration, it
+ * can be forced into read-write mode using the 'force_ro'
+ * attribute.
+ */
+ if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
+ return 0; /* Attribute not visible */
+
+ return attr->mode;
+}
+
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
const char *id, int index);
static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct nvmem_cell_entry *entry;
@@ -337,29 +371,26 @@ destroy_cell:
}
/* default read/write permissions */
-static struct bin_attribute bin_attr_rw_nvmem = {
+static const struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
.name = "nvmem",
.mode = 0644,
},
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
+ .read_new = bin_attr_nvmem_read,
+ .write_new = bin_attr_nvmem_write,
};
-static struct bin_attribute *nvmem_bin_attributes[] = {
+static const struct bin_attribute *const nvmem_bin_attributes[] = {
&bin_attr_rw_nvmem,
NULL,
};
static const struct attribute_group nvmem_bin_group = {
- .bin_attrs = nvmem_bin_attributes,
+ .bin_attrs_new = nvmem_bin_attributes,
.attrs = nvmem_attrs,
.is_bin_visible = nvmem_bin_attr_is_visible,
-};
-
-/* Cell attributes will be dynamically allocated */
-static struct attribute_group nvmem_cells_group = {
- .name = "cells",
+ .bin_size = nvmem_bin_attr_size,
+ .is_visible = nvmem_attr_is_visible,
};
static const struct attribute_group *nvmem_dev_groups[] = {
@@ -367,17 +398,12 @@ static const struct attribute_group *nvmem_dev_groups[] = {
NULL,
};
-static const struct attribute_group *nvmem_cells_groups[] = {
- &nvmem_cells_group,
- NULL,
-};
-
-static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
+static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
},
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
+ .read_new = bin_attr_nvmem_read,
+ .write_new = bin_attr_nvmem_write,
};
/*
@@ -396,10 +422,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
- if (config->type == NVMEM_TYPE_FRAM)
- bin_attr_nvmem_eeprom_compat.attr.name = "fram";
-
nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
+ if (config->type == NVMEM_TYPE_FRAM)
+ nvmem->eeprom.attr.name = "fram";
nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -429,23 +454,25 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
- struct bin_attribute **cells_attrs, *attrs;
+ struct attribute_group group = {
+ .name = "cells",
+ };
struct nvmem_cell_entry *entry;
+ const struct bin_attribute **pattrs;
+ struct bin_attribute *attrs;
unsigned int ncells = 0, i = 0;
int ret = 0;
mutex_lock(&nvmem_mutex);
- if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
- nvmem_cells_group.bin_attrs = NULL;
+ if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
goto unlock_mutex;
- }
/* Allocate an array of attributes with a sentinel */
ncells = list_count_nodes(&nvmem->cells);
- cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
- sizeof(struct bin_attribute *), GFP_KERNEL);
- if (!cells_attrs) {
+ pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
+ sizeof(struct bin_attribute *), GFP_KERNEL);
+ if (!pattrs) {
ret = -ENOMEM;
goto unlock_mutex;
}
@@ -460,24 +487,25 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
list_for_each_entry(entry, &nvmem->cells, node) {
sysfs_bin_attr_init(&attrs[i]);
attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
- "%s@%x", entry->name,
- entry->offset);
- attrs[i].attr.mode = 0444;
+ "%s@%x,%x", entry->name,
+ entry->offset,
+ entry->bit_offset);
+ attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
attrs[i].size = entry->bytes;
- attrs[i].read = &nvmem_cell_attr_read;
+ attrs[i].read_new = &nvmem_cell_attr_read;
attrs[i].private = entry;
if (!attrs[i].attr.name) {
ret = -ENOMEM;
goto unlock_mutex;
}
- cells_attrs[i] = &attrs[i];
+ pattrs[i] = &attrs[i];
i++;
}
- nvmem_cells_group.bin_attrs = cells_attrs;
+ group.bin_attrs_new = pattrs;
- ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
+ ret = device_add_group(&nvmem->dev, &group);
if (ret)
goto unlock_mutex;
@@ -563,9 +591,11 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
cell->nbits = info->nbits;
cell->np = info->np;
- if (cell->nbits)
+ if (cell->nbits) {
cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
BITS_PER_BYTE);
+ cell->raw_len = ALIGN(cell->bytes, nvmem->word_size);
+ }
if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
dev_err(&nvmem->dev,
@@ -574,6 +604,18 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
return -EINVAL;
}
+ if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
+ dev_err(&nvmem->dev,
+ "cell %s raw len %zd unaligned to nvmem word size %d\n",
+ cell->name ?: "<unknown>", cell->raw_len,
+ nvmem->word_size);
+
+ if (info->raw_len)
+ return -EINVAL;
+
+ cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size);
+ }
+
return 0;
}
@@ -674,41 +716,6 @@ int nvmem_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
-static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
-{
- const struct nvmem_cell_info *info;
- struct nvmem_cell_table *table;
- struct nvmem_cell_entry *cell;
- int rval = 0, i;
-
- mutex_lock(&nvmem_cell_mutex);
- list_for_each_entry(table, &nvmem_cell_tables, node) {
- if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
- for (i = 0; i < table->ncells; i++) {
- info = &table->cells[i];
-
- cell = kzalloc(sizeof(*cell), GFP_KERNEL);
- if (!cell) {
- rval = -ENOMEM;
- goto out;
- }
-
- rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
- if (rval) {
- kfree(cell);
- goto out;
- }
-
- nvmem_cell_entry_add(cell);
- }
- }
- }
-
-out:
- mutex_unlock(&nvmem_cell_mutex);
- return rval;
-}
-
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
@@ -806,6 +813,13 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
if (addr && len == (2 * sizeof(u32))) {
info.bit_offset = be32_to_cpup(addr++);
info.nbits = be32_to_cpup(addr);
+ if (info.bit_offset >= BITS_PER_BYTE * info.bytes ||
+ info.nbits < 1 ||
+ info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) {
+ dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
+ of_node_put(child);
+ return -EINVAL;
+ }
}
info.np = of_node_get(child);
@@ -988,10 +1002,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
goto err_remove_cells;
}
- rval = nvmem_add_cells_from_table(nvmem);
- if (rval)
- goto err_remove_cells;
-
if (config->add_legacy_fixed_of_cells) {
rval = nvmem_add_cells_from_legacy_of(nvmem);
if (rval)
@@ -1224,7 +1234,7 @@ static void devm_nvmem_device_release(struct device *dev, void *res)
}
/**
- * devm_nvmem_device_put() - put alredy got nvmem device
+ * devm_nvmem_device_put() - put already got nvmem device
*
* @dev: Device that uses the nvmem device.
* @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
@@ -1242,7 +1252,7 @@ void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
/**
- * nvmem_device_put() - put alredy got nvmem device
+ * nvmem_device_put() - put already got nvmem device
*
* @nvmem: pointer to nvmem device that needs to be released.
*/
@@ -1253,13 +1263,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
+ * devm_nvmem_device_get() - Get nvmem device of device form a given id
*
* @dev: Device that requests the nvmem device.
* @id: name id for the requested nvmem device.
*
- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
- * on success. The nvmem_cell will be freed by the automatically once the
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success. The nvmem_device will be freed by the automatically once the
* device is freed.
*/
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
@@ -1594,21 +1604,29 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
u8 *p, *b;
- int i, extra, bit_offset = cell->bit_offset;
+ int i, extra, bytes_offset;
+ int bit_offset = cell->bit_offset;
p = b = buf;
- if (bit_offset) {
+
+ bytes_offset = bit_offset / BITS_PER_BYTE;
+ b += bytes_offset;
+ bit_offset %= BITS_PER_BYTE;
+
+ if (bit_offset % BITS_PER_BYTE) {
/* First shift */
- *b++ >>= bit_offset;
+ *p = *b++ >> bit_offset;
/* setup rest of the bytes if any */
for (i = 1; i < cell->bytes; i++) {
/* Get bits from next byte and shift them towards msb */
- *p |= *b << (BITS_PER_BYTE - bit_offset);
+ *p++ |= *b << (BITS_PER_BYTE - bit_offset);
- p = b;
- *b++ >>= bit_offset;
+ *p = *b++ >> bit_offset;
}
+ } else if (p != b) {
+ memmove(p, b, cell->bytes - bytes_offset);
+ p += cell->bytes - 1;
} else {
/* point to the msb */
p += cell->bytes - 1;
@@ -1757,6 +1775,8 @@ static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, si
return -EINVAL;
if (cell->bit_offset || cell->nbits) {
+ if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
+ return -EINVAL;
buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
if (IS_ERR(buf))
return PTR_ERR(buf);
@@ -2090,32 +2110,6 @@ int nvmem_device_write(struct nvmem_device *nvmem,
EXPORT_SYMBOL_GPL(nvmem_device_write);
/**
- * nvmem_add_cell_table() - register a table of cell info entries
- *
- * @table: table of cell info entries
- */
-void nvmem_add_cell_table(struct nvmem_cell_table *table)
-{
- mutex_lock(&nvmem_cell_mutex);
- list_add_tail(&table->node, &nvmem_cell_tables);
- mutex_unlock(&nvmem_cell_mutex);
-}
-EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
-
-/**
- * nvmem_del_cell_table() - remove a previously registered cell info table
- *
- * @table: table of cell info entries
- */
-void nvmem_del_cell_table(struct nvmem_cell_table *table)
-{
- mutex_lock(&nvmem_cell_mutex);
- list_del(&table->node);
- mutex_unlock(&nvmem_cell_mutex);
-}
-EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
-
-/**
* nvmem_add_cell_lookups() - register a list of cell lookup entries
*
* @entries: array of cell lookup entries