author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-21 09:40:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-21 09:40:26 -0700
commit     9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree       7cf6d24d6b076c8db8571494984924cac03703a2 /drivers/gpu
parent     69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent     317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic()
  [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
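For reference, a minimal sketch of the call-site conversion this series performs, built around a hypothetical helper copy_one_page() that is not part of this tree. With the per-slot KM_* argument gone, kmap_atomic() manages atomic mappings as a stack, so pages are simply unmapped in reverse order of mapping:

    /* Hypothetical example, not from this merge: old two-argument form
     * shown in comments next to the new one-argument form. */
    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_one_page(struct page *dst, struct page *src)
    {
    	void *from, *to;

    	/* Old form: from = kmap_atomic(src, KM_USER0);
    	 *           to   = kmap_atomic(dst, KM_USER1); */
    	from = kmap_atomic(src);
    	to = kmap_atomic(dst);
    	memcpy(to, from, PAGE_SIZE);
    	/* Unmap in reverse order; atomic mappings nest like a stack. */
    	kunmap_atomic(to);
    	kunmap_atomic(from);
    }

The ttm_tt.c hunks below follow exactly this pattern; the drm_cache.c and gma500 hunks map a single page at a time, so only the dropped argument changes.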
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_cache.c          |  8
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c         | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c         | 16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c  |  6
4 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 592865381c6e..4b8653b932f9 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -41,10 +41,10 @@ drm_clflush_page(struct page *page)
if (unlikely(page == NULL))
return;
- page_virtual = kmap_atomic(page, KM_USER0);
+ page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(page_virtual + i);
- kunmap_atomic(page_virtual, KM_USER0);
+ kunmap_atomic(page_virtual);
}
static void drm_cache_flush_clflush(struct page *pages[],
@@ -87,10 +87,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
if (unlikely(page == NULL))
continue;
- page_virtual = kmap_atomic(page, KM_USER0);
+ page_virtual = kmap_atomic(page);
flush_dcache_range((unsigned long)page_virtual,
(unsigned long)page_virtual + PAGE_SIZE);
- kunmap_atomic(page_virtual, KM_USER0);
+ kunmap_atomic(page_virtual);
}
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index c904d73b1de3..e80ee82f6caf 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
int i;
uint8_t *clf;
- clf = kmap_atomic(page, KM_USER0);
+ clf = kmap_atomic(page);
mb();
for (i = 0; i < clflush_count; ++i) {
psb_clflush(clf);
clf += clflush_add;
}
mb();
- kunmap_atomic(clf, KM_USER0);
+ kunmap_atomic(clf);
}
static void psb_pages_clflush(struct psb_mmu_driver *driver,
@@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
spin_lock(lock);
- v = kmap_atomic(pt->p, KM_USER0);
+ v = kmap_atomic(pt->p);
clf = (uint8_t *) v;
ptes = (uint32_t *) v;
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
mb();
}
- kunmap_atomic(v, KM_USER0);
+ kunmap_atomic(v);
spin_unlock(lock);
pt->count = 0;
@@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
continue;
}
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
pd->tables[index] = pt;
v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
pt->index = index;
- kunmap_atomic((void *) v, KM_USER0);
+ kunmap_atomic((void *) v);
if (pd->hw_context != -1) {
psb_mmu_clflush(pd->driver, (void *) &v[index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
}
- pt->v = kmap_atomic(pt->p, KM_USER0);
+ pt->v = kmap_atomic(pt->p);
return pt;
}
@@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
spin_unlock(lock);
return NULL;
}
- pt->v = kmap_atomic(pt->p, KM_USER0);
+ pt->v = kmap_atomic(pt->p);
return pt;
}
@@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
struct psb_mmu_pd *pd = pt->pd;
uint32_t *v;
- kunmap_atomic(pt->v, KM_USER0);
+ kunmap_atomic(pt->v);
if (pt->count == 0) {
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
v[pt->index] = pd->invalid_pde;
pd->tables[pt->index] = NULL;
@@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
(void *) &v[pt->index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
- kunmap_atomic(pt->v, KM_USER0);
+ kunmap_atomic(pt->v);
spin_unlock(&pd->driver->lock);
psb_mmu_free_pt(pt);
return;
@@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
down_read(&driver->sem);
spin_lock(&driver->lock);
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
v += start;
while (gtt_pages--) {
@@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
/*ttm_tt_cache_flush(&pd->p, num_pages);*/
psb_pages_clflush(pd->driver, &pd->p, num_pages);
- kunmap_atomic(v, KM_USER0);
+ kunmap_atomic(v);
spin_unlock(&driver->lock);
if (pd->hw_context != -1)
@@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
uint32_t *v;
spin_lock(lock);
- v = kmap_atomic(pd->p, KM_USER0);
+ v = kmap_atomic(pd->p);
tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v, KM_USER0);
+ kunmap_atomic(v);
spin_unlock(lock);
if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 2f75d203a2bf..c10cf5e2443a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -309,11 +309,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;
preempt_disable();
- from_virtual = kmap_atomic(from_page, KM_USER0);
- to_virtual = kmap_atomic(to_page, KM_USER1);
+ from_virtual = kmap_atomic(from_page);
+ to_virtual = kmap_atomic(to_page);
memcpy(to_virtual, from_virtual, PAGE_SIZE);
- kunmap_atomic(to_virtual, KM_USER1);
- kunmap_atomic(from_virtual, KM_USER0);
+ kunmap_atomic(to_virtual);
+ kunmap_atomic(from_virtual);
preempt_enable();
page_cache_release(from_page);
}
@@ -365,11 +365,11 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
goto out_err;
}
preempt_disable();
- from_virtual = kmap_atomic(from_page, KM_USER0);
- to_virtual = kmap_atomic(to_page, KM_USER1);
+ from_virtual = kmap_atomic(from_page);
+ to_virtual = kmap_atomic(to_page);
memcpy(to_virtual, from_virtual, PAGE_SIZE);
- kunmap_atomic(to_virtual, KM_USER1);
- kunmap_atomic(from_virtual, KM_USER0);
+ kunmap_atomic(to_virtual);
+ kunmap_atomic(from_virtual);
preempt_enable();
set_page_dirty(to_page);
mark_page_accessed(to_page);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index f4e7763a7694..51c9ba5cd2fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -136,10 +136,10 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
if (likely(page_virtual != NULL)) {
desc_virtual->ppn = page_to_pfn(page);
- kunmap_atomic(page_virtual, KM_USER0);
+ kunmap_atomic(page_virtual);
}
- page_virtual = kmap_atomic(page, KM_USER0);
+ page_virtual = kmap_atomic(page);
desc_virtual = page_virtual - 1;
prev_pfn = ~(0UL);
@@ -169,7 +169,7 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
}
if (likely(page_virtual != NULL))
- kunmap_atomic(page_virtual, KM_USER0);
+ kunmap_atomic(page_virtual);
return 0;
out_err: