author     Thomas Zimmermann <tzimmermann@suse.de>    2019-11-07 10:43:06 +0100
committer  Thomas Zimmermann <tzimmermann@suse.de>    2019-11-08 12:30:07 +0100
commit     08b22f65b309649057edfbae1d8772b04210b486 (patch)
tree       235cfcc142e557bfeba276b716e236714196e845 /drivers/gpu/drm/udl
parent     1d48b9e988a8874225e38fc1ac9967666e7eafd7 (diff)
drm/udl: Switch to SHMEM
Udl's GEM code and the generic SHMEM helpers are almost identical. Replace
the former with SHMEM. The dmabuf support in udl is replaced with the
generic GEM PRIME functions.

The main difference is in the caching flags for mmap'ed pages. By default,
SHMEM always sets (uncached) write combining. In udl's memory management
code, only imported buffers use write combining; memory pages of locally
created buffer objects are mmap'ed with caching enabled. To keep this
optimization, udl provides its own mmap function for GEM objects, where it
fixes up the mapping flags.

v3:
	- restore udl vmap that enables caching
v2:
	- remove obsolete code in a separate patch

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191107094307.19870-4-tzimmermann@suse.de
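For orientation, here is a minimal sketch of the mmap fixup the message
describes; it mirrors the udl_gem_object_mmap() added in the udl_gem.c hunk
further below, with comments added here for illustration. The helper
signatures (drm_gem_shmem_mmap() taking the GEM object and VMA) are as used
in this patch; the _sketch suffix is only to mark this as an illustration,
not driver code.

	#include <drm/drm_gem.h>
	#include <drm/drm_gem_shmem_helper.h>
	#include <linux/mm.h>

	static int udl_gem_object_mmap_sketch(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
	{
		int ret;

		/* Let the SHMEM helper set up the mapping; by default it
		 * maps pages with (uncached) write combining.
		 */
		ret = drm_gem_shmem_mmap(obj, vma);
		if (ret)
			return ret;

		/* Locally created buffer objects get cached pages again... */
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		/* ...while imported dmabufs keep write combining. */
		if (obj->import_attach)
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

		return 0;
	}

The hook is installed via the .mmap member of struct drm_gem_object_funcs,
so only udl's buffers get the cached mapping; everything else stays with the
SHMEM defaults.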
Diffstat (limited to 'drivers/gpu/drm/udl')
-rw-r--r--  drivers/gpu/drm/udl/Kconfig   |   1
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c |  29
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h |   1
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c  |  66
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 101
5 files changed, 138 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index b4d179b87f01..145b2a95ce58 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -5,6 +5,7 @@ config DRM_UDL
depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
select USB
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 778a0b652f64..563cc5809e56 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -8,6 +8,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
@@ -32,23 +33,7 @@ static int udl_usb_resume(struct usb_interface *interface)
return 0;
}
-static const struct vm_operations_struct udl_gem_vm_ops = {
- .fault = udl_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static const struct file_operations udl_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = udl_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static void udl_driver_release(struct drm_device *dev)
{
@@ -63,18 +48,10 @@ static struct drm_driver driver = {
.release = udl_driver_release,
/* gem hooks */
- .gem_free_object_unlocked = udl_gem_free_object,
.gem_create_object = udl_driver_gem_create_object,
- .gem_vm_ops = &udl_gem_vm_ops,
- .dumb_create = udl_dumb_create,
- .dumb_map_offset = udl_gem_mmap,
.fops = &udl_driver_fops,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = udl_gem_prime_export,
- .gem_prime_import = udl_gem_prime_import,
+ DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index fc312e791d18..630e64abc986 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -85,6 +85,7 @@ struct udl_gem_object {
struct udl_framebuffer {
struct drm_framebuffer base;
struct udl_gem_object *obj;
+ struct drm_gem_shmem_object *shmem;
bool active_16; /* active on the 16-bit channel */
};
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index ef3504d06343..f8153b726343 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -15,6 +15,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper.h>
#include "udl_drv.h"
@@ -94,16 +95,14 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
if (!fb->active_16)
return 0;
- if (!fb->obj->vmapping) {
- ret = udl_gem_vmap(fb->obj);
- if (ret == -ENOMEM) {
+ if (!fb->shmem->vaddr) {
+ void *vaddr;
+
+ vaddr = drm_gem_shmem_vmap(&fb->shmem->base);
+ if (IS_ERR(vaddr)) {
DRM_ERROR("failed to vmap fb\n");
return 0;
}
- if (!fb->obj->vmapping) {
- DRM_ERROR("failed to vmapping\n");
- return 0;
- }
}
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
@@ -127,7 +126,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
const int byte_offset = line_offset + (x << log_bpp);
const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
if (udl_render_hline(dev, log_bpp, &urb,
- (char *) fb->obj->vmapping,
+ (char *) fb->shmem->vaddr,
&cmd, byte_offset, dev_byte_offset,
width << log_bpp,
&bytes_identical, &bytes_sent))
@@ -281,6 +280,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
unsigned num_clips)
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
+ struct dma_buf_attachment *import_attach;
int i;
int ret = 0;
@@ -289,8 +289,10 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (!ufb->active_16)
goto unlock;
- if (ufb->obj->base.import_attach) {
- ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
+ import_attach = ufb->shmem->base.import_attach;
+
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
DMA_FROM_DEVICE);
if (ret)
goto unlock;
@@ -304,10 +306,9 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
break;
}
- if (ufb->obj->base.import_attach) {
- ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+ if (import_attach)
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf,
DMA_FROM_DEVICE);
- }
unlock:
drm_modeset_unlock_all(fb->dev);
@@ -319,8 +320,8 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
- if (ufb->obj)
- drm_gem_object_put_unlocked(&ufb->obj->base);
+ if (ufb->shmem)
+ drm_gem_object_put_unlocked(&ufb->shmem->base);
drm_framebuffer_cleanup(fb);
kfree(ufb);
@@ -336,11 +337,11 @@ static int
udl_framebuffer_init(struct drm_device *dev,
struct udl_framebuffer *ufb,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct udl_gem_object *obj)
+ struct drm_gem_shmem_object *shmem)
{
int ret;
- ufb->obj = obj;
+ ufb->shmem = shmem;
drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
return ret;
@@ -356,7 +357,8 @@ static int udlfb_create(struct drm_fb_helper *helper,
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
- struct udl_gem_object *obj;
+ struct drm_gem_shmem_object *shmem;
+ void *vaddr;
uint32_t size;
int ret = 0;
@@ -373,12 +375,15 @@ static int udlfb_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- obj = udl_gem_alloc_object(dev, size);
- if (!obj)
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
goto out;
+ }
- ret = udl_gem_vmap(obj);
- if (ret) {
+ vaddr = drm_gem_shmem_vmap(&shmem->base);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
DRM_ERROR("failed to vmap fb\n");
goto out_gfree;
}
@@ -389,7 +394,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
goto out_gfree;
}
- ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
+ ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, shmem);
if (ret)
goto out_gfree;
@@ -397,20 +402,20 @@ static int udlfb_create(struct drm_fb_helper *helper,
ufbdev->helper.fb = fb;
- info->screen_base = ufbdev->ufb.obj->vmapping;
+ info->screen_base = vaddr;
info->fix.smem_len = size;
- info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
+ info->fix.smem_start = (unsigned long)vaddr;
info->fbops = &udlfb_ops;
drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
fb->width, fb->height,
- ufbdev->ufb.obj->vmapping);
+ ufbdev->ufb.shmem->vaddr);
return ret;
out_gfree:
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.shmem->base);
out:
return ret;
}
@@ -424,10 +429,10 @@ static void udl_fbdev_destroy(struct drm_device *dev,
{
drm_fb_helper_unregister_fbi(&ufbdev->helper);
drm_fb_helper_fini(&ufbdev->helper);
- if (ufbdev->ufb.obj) {
+ if (ufbdev->ufb.shmem) {
drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.shmem->base);
}
}
@@ -518,7 +523,8 @@ udl_fb_user_fb_create(struct drm_device *dev,
if (ufb == NULL)
return ERR_PTR(-ENOMEM);
- ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
+ ret = udl_framebuffer_init(dev, ufb, mode_cmd,
+ to_drm_gem_shmem_obj(obj));
if (ret) {
kfree(ufb);
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 628749cc1143..9762265edfcf 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -7,25 +7,118 @@
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_prime.h>
#include "udl_drv.h"
/*
+ * GEM object funcs
+ */
+
+static void udl_gem_object_free_object(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ /* Fbdev emulation vmaps the buffer. Unmap it here for consistency
+ * with the original udl GEM code.
+ *
+ * TODO: Switch to generic fbdev emulation and release the
+ * GEM object with drm_gem_shmem_free_object().
+ */
+ if (shmem->vaddr)
+ drm_gem_shmem_vunmap(obj, shmem->vaddr);
+
+ drm_gem_shmem_free_object(obj);
+}
+
+static int udl_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_shmem_mmap(obj, vma);
+ if (ret)
+ return ret;
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ if (obj->import_attach)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+ return 0;
+}
+
+static void *udl_gem_object_vmap(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ int ret;
+
+ ret = mutex_lock_interruptible(&shmem->vmap_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (shmem->vmap_use_count++ > 0)
+ goto out;
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ goto err_zero_use;
+
+ if (obj->import_attach)
+ shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ else
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, PAGE_KERNEL);
+
+ if (!shmem->vaddr) {
+ DRM_DEBUG_KMS("Failed to vmap pages\n");
+ ret = -ENOMEM;
+ goto err_put_pages;
+ }
+
+out:
+ mutex_unlock(&shmem->vmap_lock);
+ return shmem->vaddr;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+ shmem->vmap_use_count = 0;
+ mutex_unlock(&shmem->vmap_lock);
+ return ERR_PTR(ret);
+}
+
+static const struct drm_gem_object_funcs udl_gem_object_funcs = {
+ .free = udl_gem_object_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = udl_gem_object_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = udl_gem_object_mmap,
+};
+
+/*
* Helpers for struct drm_driver
*/
struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
size_t size)
{
- struct udl_gem_object *obj;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
return NULL;
- return &obj->base;
+ obj = &shmem->base;
+ obj->funcs = &udl_gem_object_funcs;
+
+ return obj;
}
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,