Diffstat (limited to 'drivers/video/fbdev/core/fb_defio.c')
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 104
1 file changed, 60 insertions(+), 44 deletions(-)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 806ecd32219b..4fc93f253e06 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -23,33 +23,63 @@
#include <linux/rmap.h>
#include <linux/pagemap.h>
-static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
+static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
- void *screen_base = (void __force *) info->screen_base;
- struct page *page;
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ const void *screen_buffer = info->screen_buffer;
+ struct page *page = NULL;
+
+ if (fbdefio->get_page)
+ return fbdefio->get_page(info, offs);
- if (is_vmalloc_addr(screen_base + offs))
- page = vmalloc_to_page(screen_base + offs);
- else
+ if (is_vmalloc_addr(screen_buffer + offs))
+ page = vmalloc_to_page(screen_buffer + offs);
+ else if (info->fix.smem_start)
page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);
+ if (page)
+ get_page(page);
+
return page;
}
+static struct fb_deferred_io_pageref *fb_deferred_io_pageref_lookup(struct fb_info *info,
+ unsigned long offset,
+ struct page *page)
+{
+ unsigned long pgoff = offset >> PAGE_SHIFT;
+ struct fb_deferred_io_pageref *pageref;
+
+ if (fb_WARN_ON_ONCE(info, pgoff >= info->npagerefs))
+ return NULL; /* incorrect allocation size */
+
+ /* 1:1 mapping between pageref and page offset */
+ pageref = &info->pagerefs[pgoff];
+
+ if (pageref->page)
+ goto out;
+
+ pageref->page = page;
+ pageref->offset = pgoff << PAGE_SHIFT;
+ INIT_LIST_HEAD(&pageref->list);
+
+out:
+ if (fb_WARN_ON_ONCE(info, pageref->page != page))
+ return NULL; /* inconsistent state */
+ return pageref;
+}
+
static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
unsigned long offset,
struct page *page)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct list_head *pos = &fbdefio->pagereflist;
- unsigned long pgoff = offset >> PAGE_SHIFT;
struct fb_deferred_io_pageref *pageref, *cur;
- if (WARN_ON_ONCE(pgoff >= info->npagerefs))
- return NULL; /* incorrect allocation size */
-
- /* 1:1 mapping between pageref and page offset */
- pageref = &info->pagerefs[pgoff];
+ pageref = fb_deferred_io_pageref_lookup(info, offset, page);
+ if (!pageref)
+ return NULL;
/*
* This check is to catch the case where a new process could start
@@ -60,9 +90,6 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info
if (!list_empty(&pageref->list))
goto pageref_already_added;
- pageref->page = page;
- pageref->offset = pgoff << PAGE_SHIFT;
-
if (unlikely(fbdefio->sort_pagereflist)) {
/*
* We loop through the list of pagerefs before adding in
@@ -101,19 +128,14 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
if (offset >= info->fix.smem_len)
return VM_FAULT_SIGBUS;
- page = fb_deferred_io_page(info, offset);
+ page = fb_deferred_io_get_page(info, offset);
if (!page)
return VM_FAULT_SIGBUS;
- get_page(page);
+ if (!vmf->vma->vm_file)
+ fb_err(info, "no mapping available\n");
- if (vmf->vma->vm_file)
- page->mapping = vmf->vma->vm_file->f_mapping;
- else
- printk(KERN_ERR "no mapping available\n");
-
- BUG_ON(!page->mapping);
- page->index = vmf->pgoff; /* for page_mkclean() */
+ BUG_ON(!info->fbdefio->mapping);
vmf->page = page;
return 0;
@@ -161,9 +183,9 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
/*
* We want the page to remain locked from ->page_mkwrite until
- * the PTE is marked dirty to avoid page_mkclean() being called
- * before the PTE is updated, which would leave the page ignored
- * by defio.
+ * the PTE is marked dirty to avoid mapping_wrprotect_range()
+ * being called before the PTE is updated, which would leave
+ * the page ignored by defio.
* Do this by locking the page here and informing the caller
* about it with VM_FAULT_LOCKED.
*/
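
The comment above spells out the locking contract: the page must stay locked from ->page_mkwrite until the PTE has been marked dirty. A minimal sketch of that pattern, assuming the fb_defio helpers shown in this patch; this is illustrative and not the literal body of fb_deferred_io_track_page:

/*
 * Sketch only: lock the page before queueing it for deferred I/O and
 * return VM_FAULT_LOCKED so the MM core keeps it locked until the PTE
 * has been marked dirty.
 */
static vm_fault_t track_page_sketch(struct fb_info *info, struct page *page,
				    unsigned long offset)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;

	lock_page(page);

	mutex_lock(&fbdefio->lock);
	pageref = fb_deferred_io_pageref_get(info, offset, page);
	mutex_unlock(&fbdefio->lock);

	if (!pageref) {
		unlock_page(page);
		return VM_FAULT_OOM;	/* hypothetical error handling */
	}

	/* (re)arm the delayed flush worker */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);

	return VM_FAULT_LOCKED;
}
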
@@ -241,14 +263,17 @@ static void fb_deferred_io_work(struct work_struct *work)
struct fb_deferred_io_pageref *pageref, *next;
struct fb_deferred_io *fbdefio = info->fbdefio;
- /* here we mkclean the pages, then do all deferred IO */
+ /* here we wrprotect the page's mappings, then do all deferred IO. */
mutex_lock(&fbdefio->lock);
+#ifdef CONFIG_MMU
list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
- struct page *cur = pageref->page;
- lock_page(cur);
- page_mkclean(cur);
- unlock_page(cur);
+ struct page *page = pageref->page;
+ pgoff_t pgoff = pageref->offset >> PAGE_SHIFT;
+
+ mapping_wrprotect_range(fbdefio->mapping, pgoff,
+ page_to_pfn(page), 1);
}
+#endif
/* driver's callback with pagereflist */
fbdefio->deferred_io(info, &fbdefio->pagereflist);
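
After write-protecting the tracked pages, fb_deferred_io_work() hands fbdefio->pagereflist to the driver's deferred_io callback. A hedged sketch of what such a callback typically looks like; example_defio_flush and example_hw_write are illustrative names, not part of this patch:

static void example_defio_flush(struct fb_info *info, struct list_head *pagereflist)
{
	struct fb_deferred_io_pageref *pageref;

	/* each pageref records the byte offset of one dirty page */
	list_for_each_entry(pageref, pagereflist, list) {
		/* hypothetical helper that copies one page to the hardware */
		example_hw_write(info, pageref->offset, PAGE_SIZE);
	}
}
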
@@ -264,7 +289,7 @@ int fb_deferred_io_init(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct fb_deferred_io_pageref *pagerefs;
- unsigned long npagerefs, i;
+ unsigned long npagerefs;
int ret;
BUG_ON(!fbdefio);
@@ -286,8 +311,6 @@ int fb_deferred_io_init(struct fb_info *info)
ret = -ENOMEM;
goto err;
}
- for (i = 0; i < npagerefs; ++i)
- INIT_LIST_HEAD(&pagerefs[i].list);
info->npagerefs = npagerefs;
info->pagerefs = pagerefs;
@@ -305,6 +328,7 @@ void fb_deferred_io_open(struct fb_info *info,
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ fbdefio->mapping = file->f_mapping;
file->f_mapping->a_ops = &fb_deferred_io_aops;
fbdefio->open_count++;
}
@@ -312,16 +336,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
static void fb_deferred_io_lastclose(struct fb_info *info)
{
- struct page *page;
- int i;
-
flush_delayed_work(&info->deferred_work);
-
- /* clear out the mapping that we setup */
- for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
- page = fb_deferred_io_page(info, i);
- page->mapping = NULL;
- }
}
void fb_deferred_io_release(struct fb_info *info)
@@ -341,5 +356,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
+ fbdefio->mapping = NULL;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
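
For drivers whose shadow buffer is neither vmalloc()-backed nor described by fix.smem_start, this patch adds the fbdefio->get_page hook queried first in fb_deferred_io_get_page(). A sketch of a driver wiring it up, assuming a hypothetical per-device array of shadow pages; because fb_deferred_io_get_page() returns the callback's result as-is, the callback must return a page with a reference already taken:

/* hypothetical driver-private state, one entry per page of the shadow buffer */
struct example_device {
	struct page **shadow_pages;
};

static struct page *example_defio_get_page(struct fb_info *info,
					   unsigned long offset)
{
	struct example_device *edev = info->par;
	struct page *page = edev->shadow_pages[offset >> PAGE_SHIFT];

	get_page(page);		/* caller expects a referenced page */
	return page;
}

static struct fb_deferred_io example_defio = {
	.delay       = HZ / 20,			/* flush at most every 50 ms */
	.deferred_io = example_defio_flush,	/* see the sketch above */
	.get_page    = example_defio_get_page,
};

The driver would then point info->fbdefio at this structure before calling fb_deferred_io_init(info).
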