Diffstat (limited to 'net/rfkill/core.c')
-rw-r--r--  net/rfkill/core.c | 114
1 file changed, 74 insertions(+), 40 deletions(-)
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 5b1927d66f0d..7d3e82e4c2fc 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -48,6 +48,7 @@ struct rfkill {
bool persistent;
bool polling_paused;
bool suspended;
+ bool need_sync;
const struct rfkill_ops *ops;
void *data;
@@ -78,6 +79,7 @@ struct rfkill_data {
struct mutex mtx;
wait_queue_head_t read_wait;
bool input_handler;
+ u8 max_size;
};
@@ -367,6 +369,17 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
rfkill_event(rfkill);
}
+static void rfkill_sync(struct rfkill *rfkill)
+{
+ lockdep_assert_held(&rfkill_global_mutex);
+
+ if (!rfkill->need_sync)
+ return;
+
+ rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
+ rfkill->need_sync = false;
+}
+
static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
int i;
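
Note: rfkill_sync() is the new lazy-sync helper; it applies the pending global state exactly once per device and must be called with rfkill_global_mutex held. A minimal sketch of the read-side pattern the sysfs hunks below adopt (the helper name is hypothetical, and this only works inside core.c where rfkill_global_mutex is visible):

	static bool example_soft_blocked(struct rfkill *rfkill)
	{
		bool blocked;

		mutex_lock(&rfkill_global_mutex);
		rfkill_sync(rfkill);	/* no-op unless need_sync is still set */
		blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
		mutex_unlock(&rfkill_global_mutex);

		return blocked;
	}
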
@@ -526,18 +539,14 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
#endif
bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
- bool blocked, unsigned long reason)
+ bool blocked,
+ enum rfkill_hard_block_reasons reason)
{
unsigned long flags;
bool ret, prev;
BUG_ON(!rfkill);
- if (WARN(reason &
- ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
- "hw_state reason not supported: 0x%lx", reason))
- return blocked;
-
spin_lock_irqsave(&rfkill->lock, flags);
prev = !!(rfkill->hard_block_reasons & reason);
if (blocked) {
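
Note: the reason argument is now typed as enum rfkill_hard_block_reasons instead of a bare unsigned long, and the WARN() that rejected unknown reason bits is dropped. A hedged caller sketch (the driver context and priv pointer are hypothetical):

	/* e.g. from a driver's hardware kill-switch notification path */
	bool blocked = rfkill_set_hw_state_reason(priv->rfkill, true,
						  RFKILL_HARD_BLOCK_SIGNAL);
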
@@ -684,7 +693,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%s\n", rfkill->name);
+ return sysfs_emit(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);
@@ -693,7 +702,7 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
+ return sysfs_emit(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);
@@ -702,7 +711,7 @@ static ssize_t index_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", rfkill->idx);
+ return sysfs_emit(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);
@@ -711,7 +720,7 @@ static ssize_t persistent_show(struct device *dev,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", rfkill->persistent);
+ return sysfs_emit(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);
@@ -720,7 +729,7 @@ static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
+ return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);
@@ -729,7 +738,11 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
+
+ return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}
static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
@@ -750,6 +763,7 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
rfkill_set_block(rfkill, state);
mutex_unlock(&rfkill_global_mutex);
@@ -763,7 +777,7 @@ static ssize_t hard_block_reasons_show(struct device *dev,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons);
+ return sysfs_emit(buf, "0x%lx\n", rfkill->hard_block_reasons);
}
static DEVICE_ATTR_RO(hard_block_reasons);
@@ -782,7 +796,11 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
+
+ return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
@@ -804,6 +822,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
mutex_unlock(&rfkill_global_mutex);
@@ -831,7 +850,7 @@ static void rfkill_release(struct device *dev)
kfree(rfkill);
}
-static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int rfkill_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long flags;
@@ -1031,14 +1050,10 @@ static void rfkill_uevent_work(struct work_struct *work)
static void rfkill_sync_work(struct work_struct *work)
{
- struct rfkill *rfkill;
- bool cur;
-
- rfkill = container_of(work, struct rfkill, sync_work);
+ struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
mutex_lock(&rfkill_global_mutex);
- cur = rfkill_global_states[rfkill->type].cur;
- rfkill_set_block(rfkill, cur);
+ rfkill_sync(rfkill);
mutex_unlock(&rfkill_global_mutex);
}
@@ -1086,6 +1101,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
round_jiffies_relative(POLL_INTERVAL));
if (!rfkill->persistent || rfkill_epo_lock_active) {
+ rfkill->need_sync = true;
schedule_work(&rfkill->sync_work);
} else {
#ifdef CONFIG_RFKILL_INPUT
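
Note: rfkill_register() now only arms need_sync and queues sync_work; the initial rfkill_set_block() happens later, in rfkill_sync_work() or in the first sysfs/chardev path that reaches rfkill_sync(). A hedged driver-side sketch (the device name, ops and priv structure are hypothetical):

	/* hypothetical driver probe: the initial soft-block state is not
	 * applied here, it is deferred until the first rfkill_sync() */
	priv->rfkill = rfkill_alloc("example-wlan", dev, RFKILL_TYPE_WLAN,
				    &example_rfkill_ops, priv);
	if (!priv->rfkill)
		return -ENOMEM;

	err = rfkill_register(priv->rfkill);
	if (err) {
		rfkill_destroy(priv->rfkill);
		return err;
	}
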
@@ -1153,12 +1169,13 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
if (!data)
return -ENOMEM;
+ data->max_size = RFKILL_EVENT_SIZE_V1;
+
INIT_LIST_HEAD(&data->events);
mutex_init(&data->mtx);
init_waitqueue_head(&data->read_wait);
mutex_lock(&rfkill_global_mutex);
- mutex_lock(&data->mtx);
/*
* start getting events from elsewhere but hold mtx to get
* startup events added first
@@ -1168,11 +1185,13 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
goto free;
+ rfkill_sync(rfkill);
rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+ mutex_lock(&data->mtx);
list_add_tail(&ev->list, &data->events);
+ mutex_unlock(&data->mtx);
}
list_add(&data->list, &rfkill_fds);
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
file->private_data = data;
@@ -1180,7 +1199,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
return stream_open(inode, file);
free:
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
mutex_destroy(&data->mtx);
list_for_each_entry_safe(ev, tmp, &data->events, list)
@@ -1235,6 +1253,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
list);
sz = min_t(unsigned long, sizeof(ev->ev), count);
+ sz = min_t(unsigned long, sz, data->max_size);
ret = sz;
if (copy_to_user(buf, &ev->ev, sz))
ret = -EFAULT;
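
Note: each read is now clamped to min(sizeof(ev->ev), count, data->max_size), and max_size starts at RFKILL_EVENT_SIZE_V1, so clients that never issue the new ioctl keep receiving 8-byte v1 events. A hedged userspace sketch of such a legacy reader (fd setup omitted):

	char buf[64];
	ssize_t n = read(fd, buf, sizeof(buf));
	/* without the RFKILL_IOCTL_MAX_SIZE opt-in shown further below, n is
	 * at most RFKILL_EVENT_SIZE_V1 (8) even though the buffer is larger */
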
@@ -1249,6 +1268,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
+ struct rfkill_data *data = file->private_data;
struct rfkill *rfkill;
struct rfkill_event_ext ev;
int ret;
@@ -1263,6 +1283,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
* our API version even in a write() call, if it cares.
*/
count = min(count, sizeof(ev));
+ count = min_t(size_t, count, data->max_size);
if (copy_from_user(&ev, buf, count))
return -EFAULT;
@@ -1322,31 +1343,47 @@ static int rfkill_fop_release(struct inode *inode, struct file *file)
return 0;
}
-#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct rfkill_data *data = file->private_data;
+ int ret = -ENOTTY;
+ u32 size;
if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
- return -ENOSYS;
-
- if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
- return -ENOSYS;
+ return -ENOTTY;
mutex_lock(&data->mtx);
-
- if (!data->input_handler) {
- if (atomic_inc_return(&rfkill_input_disabled) == 1)
- printk(KERN_DEBUG "rfkill: input handler disabled\n");
- data->input_handler = true;
+ switch (_IOC_NR(cmd)) {
+#ifdef CONFIG_RFKILL_INPUT
+ case RFKILL_IOC_NOINPUT:
+ if (!data->input_handler) {
+ if (atomic_inc_return(&rfkill_input_disabled) == 1)
+ printk(KERN_DEBUG "rfkill: input handler disabled\n");
+ data->input_handler = true;
+ }
+ ret = 0;
+ break;
+#endif
+ case RFKILL_IOC_MAX_SIZE:
+ if (get_user(size, (__u32 __user *)arg)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
+ ret = -EINVAL;
+ break;
+ }
+ data->max_size = size;
+ ret = 0;
+ break;
+ default:
+ break;
}
-
mutex_unlock(&data->mtx);
- return 0;
+ return ret;
}
-#endif
static const struct file_operations rfkill_fops = {
.owner = THIS_MODULE,
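
Note: the ioctl handler is now built unconditionally and returns -ENOTTY (rather than -ENOSYS) for unknown requests; RFKILL_IOC_NOINPUT stays behind CONFIG_RFKILL_INPUT, and the new RFKILL_IOC_MAX_SIZE lets a client opt in to events larger than the v1 size. A hedged userspace sketch, assuming the uapi header exposes an RFKILL_IOCTL_MAX_SIZE request built from RFKILL_IOC_MAX_SIZE:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rfkill.h>

	int fd = open("/dev/rfkill", O_RDONLY);
	__u32 want = sizeof(struct rfkill_event_ext);

	/* opt in to extended events; kernels without this change reject it */
	if (fd >= 0 && ioctl(fd, RFKILL_IOCTL_MAX_SIZE, &want) == 0) {
		struct rfkill_event_ext ev;
		ssize_t n = read(fd, &ev, sizeof(ev));

		/* fields past the v1 size (e.g. hard_block_reasons) are only
		 * valid if n shows they were actually copied */
	}
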
@@ -1355,11 +1392,8 @@ static const struct file_operations rfkill_fops = {
.write = rfkill_fop_write,
.poll = rfkill_fop_poll,
.release = rfkill_fop_release,
-#ifdef CONFIG_RFKILL_INPUT
.unlocked_ioctl = rfkill_fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
-#endif
- .llseek = no_llseek,
};
#define RFKILL_NAME "rfkill"