path: root/drivers/char/mem.c
author	Robin Murphy <robin.murphy@arm.com>	2016-06-01 19:21:42 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-08-31 15:21:18 +0200
commit	148a1bc84398039e2b96ff78678c4d9a67f81452 (patch)
tree	7c477963ee32e086a8d374b70b0015b93ffc5601	/drivers/char/mem.c
parent	d61f3088ea79c1f11fc8fe869d6f90a4cc97a452 (diff)
drivers: char: mem: Check {read,write}_kmem() addresses
Arriving at read_kmem() with an offset representing a bogus kernel address (e.g. 0 from a simple "cat /dev/kmem") leads to copy_to_user faulting on the kernel-side read.

x86_64 happens to get away with this since the optimised implementation uses "rep movs*", so the user write (which is allowed to fault) and the kernel read are the same instruction: the kernel-side fault falls into the user-side fixup handler, and the chain of events which transpires ends up returning an error as one might expect, even if it's an inappropriate -EFAULT. On other architectures, though, the read is not covered by the fixup entry for the write, and we get a big scary "Unable to handle kernel paging request..." dump.

The more typical use-case of mmap_kmem() has always (within living memory at least) returned -EIO for addresses which don't satisfy pfn_valid(), so let's make that consistent across {read,write}_kmem() too.

Reported-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
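For reference, a minimal sketch of what the added guard does, pulled out into a standalone helper. The name kmem_offset_ok() is made up for illustration and is not part of mem.c; the check itself mirrors the hunks below.

/*
 * Illustrative only: a hypothetical helper showing the shape of the
 * check added to read_kmem()/write_kmem() below.  kmem_offset_ok()
 * does not exist in mem.c.
 */
#include <linux/errno.h>
#include <linux/mm.h>		/* pfn_valid() */
#include <linux/pfn.h>		/* PFN_DOWN() */

static int kmem_offset_ok(unsigned long p)
{
	/*
	 * p is the file offset, treated as a kernel address.  PFN_DOWN()
	 * shifts it down to a page frame number, and pfn_valid() checks
	 * that the frame is covered by the kernel's memory map, the same
	 * test mmap_kmem() has long applied.  A bogus address fails here
	 * and the caller bails out with -EIO before attempting any copy.
	 */
	if (!pfn_valid(PFN_DOWN(p)))
		return -EIO;

	return 0;
}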
Diffstat (limited to 'drivers/char/mem.c')
-rw-r--r--	drivers/char/mem.c	6
1 file changed, 6 insertions, 0 deletions
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a33163dbb913..5bb1985ec484 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -381,6 +381,9 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
 	int err = 0;
 
+	if (!pfn_valid(PFN_DOWN(p)))
+		return -EIO;
+
 	read = 0;
 	if (p < (unsigned long) high_memory) {
 		low_count = count;
@@ -509,6 +512,9 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
 	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
 	int err = 0;
 
+	if (!pfn_valid(PFN_DOWN(p)))
+		return -EIO;
+
 	if (p < (unsigned long) high_memory) {
 		unsigned long to_write = min_t(unsigned long, count,
 					       (unsigned long)high_memory - p);
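As a rough user-space check of the behaviour described above (assuming a kernel built with CONFIG_DEVKMEM=y and root privileges; the exact errno depends on the architecture and memory layout), a read from /dev/kmem at a bogus offset should now fail cleanly rather than triggering the paging-request oops. This test program is hypothetical and not part of the patch.

/* Hypothetical test program.  With the check in place, a read at an
 * invalid kernel address is expected to return an error (-EIO where
 * pfn_valid() rejects it) instead of oopsing the kernel. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int fd = open("/dev/kmem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/kmem");
		return 1;
	}

	/* Offset 0 is the "cat /dev/kmem" case from the commit message. */
	if (read(fd, buf, sizeof(buf)) < 0)
		printf("read failed as expected: %s\n", strerror(errno));
	else
		printf("read unexpectedly succeeded\n");

	close(fd);
	return 0;
}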