author    Thomas Zimmermann <tzimmermann@suse.de>    2021-10-15 10:40:53 +0200
committer Thomas Zimmermann <tzimmermann@suse.de>    2021-10-19 10:38:54 +0200
commit    f2d061ed01b3f0e0c7a4daebed91e26f54de4086 (patch)
tree      2100f2c9ea2dd25111e063cc2b73e8e256cd4711 /drivers/gpu/drm/gma500/gem.c
parent    e1f80341e312088f0e6c46107db7098e30e6d764 (diff)
drm/gma500: Rename struct gtt_range to struct psb_gem_object
struct gtt_range represents a GEM object. Rename the structure to
struct psb_gem_object and update all users. No functional changes.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211015084053.13708-11-tzimmermann@suse.de
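For readers following the rename, the shape of the new type can be reconstructed from the accessors used in this file: struct psb_gem_object embeds its struct drm_gem_object as the member 'base' (previously 'gem' in struct gtt_range), and to_psb_gem_object() converts back from the embedded GEM object. A minimal sketch follows; the real declaration lives in the driver's gem.h, which is outside this diff, so the field types and ordering here are approximated from their use in gem.c.

#include <linux/ioport.h>	/* struct resource */
#include <drm/drm_gem.h>	/* struct drm_gem_object, drm_gem_*() helpers */

struct psb_gem_object {
	struct drm_gem_object base;	/* embedded GEM object, was .gem in struct gtt_range */

	struct resource resource;	/* GTT resource of the allocation */
	u32 offset;			/* GTT offset of the object */
	int in_gart;			/* GART pin count */
	bool stolen;			/* backed by stolen (reserved) memory */
	bool mmapping;			/* pinned on first CPU mmap fault */
	struct page **pages;		/* backing pages while pinned */
	int npage;			/* number of backing pages */
};

/* Upcast from the embedded GEM object, as used throughout the diff below. */
static inline struct psb_gem_object *to_psb_gem_object(struct drm_gem_object *obj)
{
	return container_of(obj, struct psb_gem_object, base);
}

Embedding the DRM GEM object as 'base' and converting back with container_of() is the usual pattern for DRM drivers, which is why the diff below is almost entirely a mechanical substitution of 'gt' accesses with 'pobj' accesses plus a local 'obj = &pobj->base' alias in each function.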
Diffstat (limited to 'drivers/gpu/drm/gma500/gem.c')
-rw-r--r--   drivers/gpu/drm/gma500/gem.c | 106
1 file changed, 54 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index c93d09e1921e..8d65af80bb08 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -21,9 +21,10 @@
#include "gem.h"
#include "psb_drv.h"
-int psb_gem_pin(struct gtt_range *gt)
+int psb_gem_pin(struct psb_gem_object *pobj)
{
- struct drm_device *dev = gt->gem.dev;
+ struct drm_gem_object *obj = &pobj->base;
+ struct drm_device *dev = obj->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
struct page **pages;
@@ -32,29 +33,29 @@ int psb_gem_pin(struct gtt_range *gt)
mutex_lock(&dev_priv->gtt_mutex);
- if (gt->in_gart || gt->stolen)
+ if (pobj->in_gart || pobj->stolen)
goto out; /* already mapped */
- pages = drm_gem_get_pages(&gt->gem);
+ pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto err_mutex_unlock;
}
- npages = gt->gem.size / PAGE_SIZE;
+ npages = obj->size / PAGE_SIZE;
set_pages_array_wc(pages, npages);
- psb_gtt_insert_pages(dev_priv, &gt->resource, pages);
+ psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
- (gpu_base + gt->offset), npages, 0, 0,
+ (gpu_base + pobj->offset), npages, 0, 0,
PSB_MMU_CACHED_MEMORY);
- gt->npage = npages;
- gt->pages = pages;
+ pobj->npage = npages;
+ pobj->pages = pages;
out:
- ++gt->in_gart;
+ ++pobj->in_gart;
mutex_unlock(&dev_priv->gtt_mutex);
return 0;
@@ -64,31 +65,32 @@ err_mutex_unlock:
return ret;
}
-void psb_gem_unpin(struct gtt_range *gt)
+void psb_gem_unpin(struct psb_gem_object *pobj)
{
- struct drm_device *dev = gt->gem.dev;
+ struct drm_gem_object *obj = &pobj->base;
+ struct drm_device *dev = obj->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
- WARN_ON(!gt->in_gart);
+ WARN_ON(!pobj->in_gart);
- --gt->in_gart;
+ --pobj->in_gart;
- if (gt->in_gart || gt->stolen)
+ if (pobj->in_gart || pobj->stolen)
goto out;
psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
- (gpu_base + gt->offset), gt->npage, 0, 0);
- psb_gtt_remove_pages(dev_priv, &gt->resource);
+ (gpu_base + pobj->offset), pobj->npage, 0, 0);
+ psb_gtt_remove_pages(dev_priv, &pobj->resource);
/* Reset caching flags */
- set_pages_array_wb(gt->pages, gt->npage);
+ set_pages_array_wb(pobj->pages, pobj->npage);
- drm_gem_put_pages(&gt->gem, gt->pages, true, false);
- gt->pages = NULL;
- gt->npage = 0;
+ drm_gem_put_pages(obj, pobj->pages, true, false);
+ pobj->pages = NULL;
+ pobj->npage = 0;
out:
mutex_unlock(&dev_priv->gtt_mutex);
@@ -98,18 +100,18 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
static void psb_gem_free_object(struct drm_gem_object *obj)
{
- struct gtt_range *gt = to_gtt_range(obj);
+ struct psb_gem_object *pobj = to_psb_gem_object(obj);
drm_gem_object_release(obj);
/* Undo the mmap pin if we are destroying the object */
- if (gt->mmapping)
- psb_gem_unpin(gt);
+ if (pobj->mmapping)
+ psb_gem_unpin(pobj);
- WARN_ON(gt->in_gart && !gt->stolen);
+ WARN_ON(pobj->in_gart && !pobj->stolen);
- release_resource(&gt->resource);
- kfree(gt);
+ release_resource(&pobj->resource);
+ kfree(pobj);
}
static const struct vm_operations_struct psb_gem_vm_ops = {
@@ -123,31 +125,31 @@ static const struct drm_gem_object_funcs psb_gem_object_funcs = {
.vm_ops = &psb_gem_vm_ops,
};
-struct gtt_range *
+struct psb_gem_object *
psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct gtt_range *gt;
+ struct psb_gem_object *pobj;
struct drm_gem_object *obj;
int ret;
size = roundup(size, PAGE_SIZE);
- gt = kzalloc(sizeof(*gt), GFP_KERNEL);
- if (!gt)
+ pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
+ if (!pobj)
return ERR_PTR(-ENOMEM);
- obj = &gt->gem;
+ obj = &pobj->base;
/* GTT resource */
- ret = psb_gtt_allocate_resource(dev_priv, &gt->resource, name, size, align, stolen,
- &gt->offset);
+ ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
+ &pobj->offset);
if (ret)
goto err_kfree;
if (stolen) {
- gt->stolen = true;
- gt->in_gart = 1;
+ pobj->stolen = true;
+ pobj->in_gart = 1;
}
/* GEM object */
@@ -165,12 +167,12 @@ psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen,
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
}
- return gt;
+ return pobj;
err_release_resource:
- release_resource(&gt->resource);
+ release_resource(&pobj->resource);
err_kfree:
- kfree(gt);
+ kfree(pobj);
return ERR_PTR(ret);
}
@@ -188,7 +190,7 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
size_t pitch, size;
- struct gtt_range *gt;
+ struct psb_gem_object *pobj;
struct drm_gem_object *obj;
u32 handle;
int ret;
@@ -201,10 +203,10 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
if (!size)
return -EINVAL;
- gt = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
- if (IS_ERR(gt))
- return PTR_ERR(gt);
- obj = &gt->gem;
+ pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
+ if (IS_ERR(pobj))
+ return PTR_ERR(pobj);
+ obj = &pobj->base;
ret = drm_gem_handle_create(file, obj, &handle);
if (ret)
@@ -243,7 +245,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;
- struct gtt_range *r;
+ struct psb_gem_object *pobj;
int err;
vm_fault_t ret;
unsigned long pfn;
@@ -255,7 +257,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
dev = obj->dev;
dev_priv = to_drm_psb_private(dev);
- r = to_gtt_range(obj);
+ pobj = to_psb_gem_object(obj);
/* Make sure we don't parallel update on a fault, nor move or remove
something from beneath our feet */
@@ -263,14 +265,14 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
/* For now the mmap pins the object and it stays pinned. As things
stand that will do us no harm */
- if (r->mmapping == 0) {
- err = psb_gem_pin(r);
+ if (pobj->mmapping == 0) {
+ err = psb_gem_pin(pobj);
if (err < 0) {
dev_err(dev->dev, "gma500: pin failed: %d\n", err);
ret = vmf_error(err);
goto fail;
}
- r->mmapping = 1;
+ pobj->mmapping = 1;
}
/* Page relative to the VMA start - we must calculate this ourselves
@@ -278,10 +280,10 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/* CPU view of the page, don't go via the GART for CPU writes */
- if (r->stolen)
- pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+ if (pobj->stolen)
+ pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
else
- pfn = page_to_pfn(r->pages[page_offset]);
+ pfn = page_to_pfn(pobj->pages[page_offset]);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);