Diffstat (limited to 'drivers/iio/industrialio-core.c')
-rw-r--r--	drivers/iio/industrialio-core.c	179
1 file changed, 123 insertions(+), 56 deletions(-)
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 4302093b92c7..5ffda104d4b2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -11,6 +11,7 @@
#include <linux/anon_inodes.h>
#include <linux/cdev.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,6 +26,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
+#include <linux/wordpart.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
@@ -94,6 +96,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_DELTA_VELOCITY] = "deltavelocity",
[IIO_COLORTEMP] = "colortemp",
[IIO_CHROMATICITY] = "chromaticity",
+ [IIO_ATTENTION] = "attention",
};
static const char * const iio_modifier_names[] = {
@@ -408,11 +411,15 @@ static ssize_t iio_debugfs_write_reg(struct file *file,
char buf[80];
int ret;
- count = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf,
+ count);
+ if (ret < 0)
+ return ret;
- buf[count] = 0;
+ buf[ret] = '\0';
ret = sscanf(buf, "%i %i", &reg, &val);
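
Unlike the open-coded copy_from_user() it replaces, simple_write_to_buffer() bounds the copy to the destination size, honours *ppos and returns the number of bytes copied (or a negative errno), so buf[ret] = '\0' always lands inside the buffer. The handler backs the per-device debugfs file direct_reg_access; a user-space sketch of exercising the write path (device index and register values are illustrative only):

/* Illustrative only: write "<reg> <val>" to the IIO debugfs register file. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/iio/iio:device0/direct_reg_access", O_WRONLY);
	if (fd < 0)
		return 1;

	const char *cmd = "0x10 0x2a";	/* register 0x10, value 0x2a */
	ssize_t ret = write(fd, cmd, strlen(cmd));

	close(fd);
	return ret < 0;
}
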
@@ -666,7 +673,6 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
vals[1]);
case IIO_VAL_FRACTIONAL:
tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
- tmp1 = vals[1];
tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
if ((tmp2 < 0) && (tmp0 == 0))
return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
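
The dropped tmp1 = vals[1] assignment was dead code: div_s64_rem() overwrites tmp1 with the remainder before it is ever read. A worked example of the retained path, assuming vals = {-1, 4} (i.e. -1/4):

	s64 tmp2 = div_s64((s64)-1 * 1000000000LL, 4);		/* -250000000 */
	int tmp1;
	int tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);	/* tmp0 = 0, tmp1 = -250000000 */
	/* tmp2 < 0 and tmp0 == 0, so the value is emitted as "-0.250000000" */
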
@@ -726,22 +732,27 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
}
EXPORT_SYMBOL_GPL(iio_format_value);
-static ssize_t iio_read_channel_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ssize_t do_iio_read_channel_label(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *c,
+ char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
if (indio_dev->info->read_label)
- return indio_dev->info->read_label(indio_dev, this_attr->c, buf);
+ return indio_dev->info->read_label(indio_dev, c, buf);
- if (this_attr->c->extend_name)
- return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);
+ if (c->extend_name)
+ return sysfs_emit(buf, "%s\n", c->extend_name);
return -EINVAL;
}
+static ssize_t iio_read_channel_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return do_iio_read_channel_label(dev_to_iio_dev(dev),
+ to_iio_dev_attr(attr)->c, buf);
+}
+
static ssize_t iio_read_channel_info(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -757,9 +768,11 @@ static ssize_t iio_read_channel_info(struct device *dev,
INDIO_MAX_RAW_ELEMENTS,
vals, &val_len,
this_attr->address);
- else
+ else if (indio_dev->info->read_raw)
ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
&vals[0], &vals[1], this_attr->address);
+ else
+ return -EINVAL;
if (ret < 0)
return ret;
@@ -841,6 +854,9 @@ static ssize_t iio_read_channel_info_avail(struct device *dev,
int length;
int type;
+ if (!indio_dev->info->read_avail)
+ return -EINVAL;
+
ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
&vals, &type, &length,
this_attr->address);
@@ -954,8 +970,10 @@ static ssize_t iio_write_channel_info(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, fract_mult = 100000;
int integer, fract = 0;
+ long long integer64;
bool is_char = false;
bool scale_db = false;
+ bool is_64bit = false;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
@@ -979,6 +997,9 @@ static ssize_t iio_write_channel_info(struct device *dev,
case IIO_VAL_CHAR:
is_char = true;
break;
+ case IIO_VAL_INT_64:
+ is_64bit = true;
+ break;
default:
return -EINVAL;
}
@@ -989,6 +1010,13 @@ static ssize_t iio_write_channel_info(struct device *dev,
if (sscanf(buf, "%c", &ch) != 1)
return -EINVAL;
integer = ch;
+ } else if (is_64bit) {
+ ret = kstrtoll(buf, 0, &integer64);
+ if (ret)
+ return ret;
+
+ fract = upper_32_bits(integer64);
+ integer = lower_32_bits(integer64);
} else {
ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
scale_db);
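
For IIO_VAL_INT_64 writes, the core parses a single 64-bit value with kstrtoll() and hands it to the driver through the two existing int parameters of write_raw(): val carries the lower 32 bits and val2 the upper 32 bits. A minimal sketch of a driver reassembling it (the function names are hypothetical):

static int foo_write_raw(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val, int val2, long mask)
{
	/* Rebuild the 64-bit value the core split into lower/upper halves. */
	s64 value = (s64)(((u64)(u32)val2 << 32) | (u32)val);

	return foo_hw_write(indio_dev, chan, value);	/* hypothetical helper */
}
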
@@ -1643,19 +1671,20 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
struct iio_dev *indio_dev;
size_t alloc_size;
- alloc_size = sizeof(struct iio_dev_opaque);
- if (sizeof_priv) {
- alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN);
- alloc_size += sizeof_priv;
- }
+ if (sizeof_priv)
+ alloc_size = ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN) + sizeof_priv;
+ else
+ alloc_size = sizeof(*iio_dev_opaque);
iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
if (!iio_dev_opaque)
return NULL;
indio_dev = &iio_dev_opaque->indio_dev;
- indio_dev->priv = (char *)iio_dev_opaque +
- ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
+
+ if (sizeof_priv)
+ ACCESS_PRIVATE(indio_dev, priv) = (char *)iio_dev_opaque +
+ ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);
indio_dev->dev.parent = parent;
indio_dev->dev.type = &iio_device_type;
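
The private data area now starts at the first IIO_DMA_MINALIGN-aligned offset past struct iio_dev_opaque, and indio_dev->priv is left NULL when no private data was requested. iio_priv() returns that aligned pointer, so a buffer placed at the end of the private struct can be made DMA-safe. Typical driver usage, with illustrative names:

struct foo_state {				/* illustrative driver state */
	struct spi_device *spi;
	u8 rx_buf[16] __aligned(IIO_DMA_MINALIGN);	/* DMA-safe tail buffer */
};

static int foo_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct foo_state *st;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);	/* first IIO_DMA_MINALIGN-aligned byte past the core struct */
	st->spi = spi;

	return devm_iio_device_register(&spi->dev, indio_dev);
}
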
@@ -1809,31 +1838,24 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct iio_dev *indio_dev = ib->indio_dev;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_ioctl_handler *h;
- int ret = -ENODEV;
-
- mutex_lock(&iio_dev_opaque->info_exist_lock);
+ int ret;
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
/*
* The NULL check here is required to prevent crashing when a device
* is being removed while userspace would still have open file handles
* to try to access this device.
*/
if (!indio_dev->info)
- goto out_unlock;
+ return -ENODEV;
list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
ret = h->ioctl(indio_dev, filp, cmd, arg);
if (ret != IIO_IOCTL_UNHANDLED)
- break;
+ return ret;
}
- if (ret == IIO_IOCTL_UNHANDLED)
- ret = -ENODEV;
-
-out_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return -ENODEV;
}
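
guard(mutex)(...) comes from the newly included <linux/cleanup.h>: it locks the mutex and arranges for it to be unlocked automatically when the guard variable goes out of scope, which is what lets the out_unlock label and both explicit mutex_unlock() calls disappear. A minimal sketch of the pattern, with illustrative names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int foo_read_state(struct foo *foo)
{
	guard(mutex)(&foo->lock);	/* locked from here on */

	if (!foo->ready)
		return -ENODEV;		/* unlocked automatically on this return ... */

	return foo->state;		/* ... and on this one */
}
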
static const struct file_operations iio_buffer_fileops = {
@@ -1907,7 +1929,7 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
int i;
av_masks = indio_dev->available_scan_masks;
- masklength = indio_dev->masklength;
+ masklength = iio_get_masklength(indio_dev);
longs_per_mask = BITS_TO_LONGS(masklength);
/*
@@ -1960,6 +1982,49 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
}
}
+/**
+ * iio_active_scan_mask_index - Get index of the active scan mask inside the
+ * available scan masks array
+ * @indio_dev: the IIO device containing the active and available scan masks
+ *
+ * Returns: the index, or -EINVAL if active_scan_mask is not set or is not
+ * found among the available scan masks
+ */
+int iio_active_scan_mask_index(struct iio_dev *indio_dev)
+{
+ const unsigned long *av_masks;
+ unsigned int masklength = iio_get_masklength(indio_dev);
+ int i = 0;
+
+ if (!indio_dev->active_scan_mask)
+ return -EINVAL;
+
+ /*
+ * As in iio_scan_mask_match() and iio_sanity_check_avail_scan_masks(),
+ * the condition here does not handle multi-long masks correctly.
+ * It only checks whether the first long is zero, and will use such a
+ * mask as a terminator even if there are bits set after the first long.
+ *
+ * This should be fine since available_scan_masks has already been
+ * sanity checked by iio_sanity_check_avail_scan_masks().
+ *
+ * See iio_scan_mask_match() and iio_sanity_check_avail_scan_masks()
+ * for more details.
+ */
+ av_masks = indio_dev->available_scan_masks;
+ while (*av_masks) {
+ if (indio_dev->active_scan_mask == av_masks)
+ return i;
+ av_masks += BITS_TO_LONGS(masklength);
+ i++;
+ }
+
+ dev_warn(indio_dev->dev.parent,
+ "active scan mask is not part of the available scan masks\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iio_active_scan_mask_index);
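
A driver would typically use the new helper where it has to tell the hardware which of its predefined scan masks userspace enabled, for example from an update_scan_mode() callback (the names below are illustrative):

static int foo_update_scan_mode(struct iio_dev *indio_dev,
				const unsigned long *scan_mask)
{
	int idx = iio_active_scan_mask_index(indio_dev);

	if (idx < 0)
		return idx;

	/* Hypothetical: each entry in available_scan_masks maps to one burst mode. */
	return foo_set_burst_mode(indio_dev, idx);
}
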
+
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -2061,18 +2126,16 @@ void iio_device_unregister(struct iio_dev *indio_dev)
cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);
- mutex_lock(&iio_dev_opaque->info_exist_lock);
-
- iio_device_unregister_debugfs(indio_dev);
+ scoped_guard(mutex, &iio_dev_opaque->info_exist_lock) {
+ iio_device_unregister_debugfs(indio_dev);
- iio_disable_all_buffers(indio_dev);
+ iio_disable_all_buffers(indio_dev);
- indio_dev->info = NULL;
+ indio_dev->info = NULL;
- iio_device_wakeup_eventset(indio_dev);
- iio_buffer_wakeup_poll(indio_dev);
-
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
+ iio_device_wakeup_eventset(indio_dev);
+ iio_buffer_wakeup_poll(indio_dev);
+ }
iio_buffers_free_sysfs_and_mask(indio_dev);
}
@@ -2097,17 +2160,19 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
- * iio_device_claim_direct_mode - Keep device in direct mode
+ * __iio_device_claim_direct - Keep device in direct mode
* @indio_dev: the iio_dev associated with the device
*
* If the device is in direct mode it is guaranteed to stay
- * that way until iio_device_release_direct_mode() is called.
+ * that way until __iio_device_release_direct() is called.
*
- * Use with iio_device_release_direct_mode()
+ * Use with __iio_device_release_direct().
*
- * Returns: 0 on success, -EBUSY on failure.
+ * Drivers should only call iio_device_claim_direct().
+ *
+ * Returns: true on success, false on failure.
*/
-int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
+bool __iio_device_claim_direct(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -2115,26 +2180,28 @@ int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
if (iio_buffer_enabled(indio_dev)) {
mutex_unlock(&iio_dev_opaque->mlock);
- return -EBUSY;
+ return false;
}
- return 0;
+ return true;
}
-EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
+EXPORT_SYMBOL_GPL(__iio_device_claim_direct);
/**
- * iio_device_release_direct_mode - releases claim on direct mode
+ * __iio_device_release_direct - releases claim on direct mode
* @indio_dev: the iio_dev associated with the device
*
* Release the claim. Device is no longer guaranteed to stay
* in direct mode.
*
- * Use with iio_device_claim_direct_mode()
+ * Drivers should only call iio_device_release_direct().
+ *
+ * Use with __iio_device_claim_direct()
*/
-void iio_device_release_direct_mode(struct iio_dev *indio_dev)
+void __iio_device_release_direct(struct iio_dev *indio_dev)
{
mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
-EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
+EXPORT_SYMBOL_GPL(__iio_device_release_direct);
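
The double-underscore helpers are not meant to be called directly; per the updated kernel-doc, drivers use the iio_device_claim_direct()/iio_device_release_direct() wrappers, with the claim now reporting success as a bool rather than 0/-EBUSY. A sketch of the intended driver pattern (foo_read_channel() is hypothetical):

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	int ret;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	ret = foo_read_channel(indio_dev, chan, val);
	iio_device_release_direct(indio_dev);

	return ret ? ret : IIO_VAL_INT;
}
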
/**
* iio_device_claim_buffer_mode - Keep device in buffer mode