Diffstat (limited to 'drivers/gpu/drm/msm/msm_drv.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_drv.c	848
1 file changed, 321 insertions(+), 527 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4bd028fa7500..7e977fec4100 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -7,30 +7,20 @@
 #include <linux/dma-mapping.h>
 #include <linux/fault-inject.h>
-#include <linux/kthread.h>
+#include <linux/debugfs.h>
 #include <linux/of_address.h>
-#include <linux/sched/mm.h>
 #include <linux/uaccess.h>
-#include <uapi/linux/sched/types.h>
 
-#include <drm/drm_aperture.h>
-#include <drm/drm_bridge.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
-#include <drm/drm_prime.h>
 #include <drm/drm_of.h>
-#include <drm/drm_vblank.h>
 
-#include "disp/msm_disp_snapshot.h"
 #include "msm_drv.h"
 #include "msm_debugfs.h"
-#include "msm_fence.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
-#include "msm_mmu.h"
-#include "adreno/adreno_gpu.h"
 
 /*
  * MSM driver version:
@@ -49,27 +39,13 @@
  * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
  * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
  * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
+ * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
+ * - 1.13.0 - Add VM_BIND
  */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	10
+#define MSM_VERSION_MINOR	13
 #define MSM_VERSION_PATCHLEVEL	0
 
-static void msm_deinit_vram(struct drm_device *ddev);
-
-static const struct drm_mode_config_funcs mode_config_funcs = {
-	.fb_create = msm_framebuffer_create,
-	.atomic_check = msm_atomic_check,
-	.atomic_commit = drm_atomic_helper_commit,
-};
-
-static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
-	.atomic_commit_tail = msm_atomic_commit_tail,
-};
-
-static char *vram = "16m";
-MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
-module_param(vram, charp, 0);
-
 bool dumpstate;
 MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
 module_param(dumpstate, bool, 0600);
@@ -78,130 +54,23 @@ static bool modeset = true;
 MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
 module_param(modeset, bool, 0600);
 
-#ifdef CONFIG_FAULT_INJECTION
+static bool separate_gpu_kms;
+MODULE_PARM_DESC(separate_gpu_drm, "Use separate DRM device for the GPU (0=single DRM device for both GPU and display (default), 1=two DRM devices)");
+module_param(separate_gpu_kms, bool, 0400);
+
 DECLARE_FAULT_ATTR(fail_gem_alloc);
 DECLARE_FAULT_ATTR(fail_gem_iova);
-#endif
 
-static irqreturn_t msm_irq(int irq, void *arg)
+bool msm_gpu_no_components(void)
 {
-	struct drm_device *dev = arg;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	BUG_ON(!kms);
-
-	return kms->funcs->irq(kms);
-}
-
-static void msm_irq_preinstall(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	BUG_ON(!kms);
-
-	kms->funcs->irq_preinstall(kms);
-}
-
-static int msm_irq_postinstall(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	BUG_ON(!kms);
-
-	if (kms->funcs->irq_postinstall)
-		return kms->funcs->irq_postinstall(kms);
-
-	return 0;
-}
-
-static int msm_irq_install(struct drm_device *dev, unsigned int irq)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-	int ret;
-
-	if (irq == IRQ_NOTCONNECTED)
-		return -ENOTCONN;
-
-	msm_irq_preinstall(dev);
-
-	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
-	if (ret)
-		return ret;
-
-	kms->irq_requested = true;
-
-	ret = msm_irq_postinstall(dev);
-	if (ret) {
-		free_irq(irq, dev);
-		return ret;
-	}
-
-	return 0;
+	return separate_gpu_kms;
 }
 
-static void msm_irq_uninstall(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	kms->funcs->irq_uninstall(kms);
-	if (kms->irq_requested)
-		free_irq(kms->irq, dev);
-}
-
-struct msm_vblank_work {
-	struct work_struct work;
-	struct drm_crtc *crtc;
-	bool enable;
-	struct msm_drm_private *priv;
-};
-
-static void vblank_ctrl_worker(struct work_struct *work)
-{
-	struct msm_vblank_work *vbl_work = container_of(work,
-						struct msm_vblank_work, work);
-	struct msm_drm_private *priv = vbl_work->priv;
-	struct msm_kms *kms = priv->kms;
-
-	if (vbl_work->enable)
-		kms->funcs->enable_vblank(kms, vbl_work->crtc);
-	else
-		kms->funcs->disable_vblank(kms, vbl_work->crtc);
-
-	kfree(vbl_work);
-}
-
-static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
-					struct drm_crtc *crtc, bool enable)
-{
-	struct msm_vblank_work *vbl_work;
-
-	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
-	if (!vbl_work)
-		return -ENOMEM;
-
-	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
-
-	vbl_work->crtc = crtc;
-	vbl_work->enable = enable;
-	vbl_work->priv = priv;
-
-	queue_work(priv->wq, &vbl_work->work);
-
-	return 0;
-}
-
-static int msm_drm_uninit(struct device *dev)
+static int msm_drm_uninit(struct device *dev, const struct component_ops *gpu_ops)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct msm_drm_private *priv = platform_get_drvdata(pdev);
 	struct drm_device *ddev = priv->dev;
-	struct msm_kms *kms = priv->kms;
-	int i;
 
 	/*
 	 * Shutdown the hw if we're far enough along where things might be on.
@@ -212,202 +81,34 @@ static int msm_drm_uninit(struct device *dev)
 	 */
 	if (ddev->registered) {
 		drm_dev_unregister(ddev);
-		drm_atomic_helper_shutdown(ddev);
-	}
-
-	/* We must cancel and cleanup any pending vblank enable/disable
-	 * work before msm_irq_uninstall() to avoid work re-enabling an
-	 * irq after uninstall has disabled it.
-	 */
-
-	flush_workqueue(priv->wq);
-
-	/* clean up event worker threads */
-	for (i = 0; i < priv->num_crtcs; i++) {
-		if (priv->event_thread[i].worker)
-			kthread_destroy_worker(priv->event_thread[i].worker);
+		if (priv->kms)
+			msm_drm_kms_unregister(dev);
 	}
 
 	msm_gem_shrinker_cleanup(ddev);
 
-	drm_kms_helper_poll_fini(ddev);
-
 	msm_perf_debugfs_cleanup(priv);
 	msm_rd_debugfs_cleanup(priv);
 
-	if (kms)
-		msm_disp_snapshot_destroy(ddev);
+	if (priv->kms)
+		msm_drm_kms_uninit(dev);
 
-	drm_mode_config_cleanup(ddev);
-
-	for (i = 0; i < priv->num_bridges; i++)
-		drm_bridge_remove(priv->bridges[i]);
-	priv->num_bridges = 0;
-
-	if (kms) {
-		pm_runtime_get_sync(dev);
-		msm_irq_uninstall(ddev);
-		pm_runtime_put_sync(dev);
-	}
-
-	if (kms && kms->funcs)
-		kms->funcs->destroy(kms);
-
-	msm_deinit_vram(ddev);
-
-	component_unbind_all(dev, ddev);
+	if (gpu_ops)
+		gpu_ops->unbind(dev, dev, NULL);
+	else
+		component_unbind_all(dev, ddev);
 
 	ddev->dev_private = NULL;
 	drm_dev_put(ddev);
 
-	destroy_workqueue(priv->wq);
-
 	return 0;
 }
 
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
-{
-	struct msm_gem_address_space *aspace;
-	struct msm_mmu *mmu;
-	struct device *mdp_dev = dev->dev;
-	struct device *mdss_dev = mdp_dev->parent;
-	struct device *iommu_dev;
-
-	/*
-	 * IOMMUs can be a part of MDSS device tree binding, or the
-	 * MDP/DPU device.
-	 */
-	if (device_iommu_mapped(mdp_dev))
-		iommu_dev = mdp_dev;
-	else
-		iommu_dev = mdss_dev;
-
-	mmu = msm_iommu_new(iommu_dev, 0);
-	if (IS_ERR(mmu))
-		return ERR_CAST(mmu);
-
-	if (!mmu) {
-		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
-		return NULL;
-	}
-
-	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
-		0x1000, 0x100000000 - 0x1000);
-	if (IS_ERR(aspace)) {
-		dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
-		mmu->funcs->destroy(mmu);
-	}
-
-	return aspace;
-}
-
-bool msm_use_mmu(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-
-	/*
-	 * a2xx comes with its own MMU
-	 * On other platforms IOMMU can be declared specified either for the
-	 * MDP/DPU device or for its parent, MDSS device.
-	 */
-	return priv->is_a2xx ||
-		device_iommu_mapped(dev->dev) ||
-		device_iommu_mapped(dev->dev->parent);
-}
-
-static int msm_init_vram(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct device_node *node;
-	unsigned long size = 0;
-	int ret = 0;
-
-	/* In the device-tree world, we could have a 'memory-region'
-	 * phandle, which gives us a link to our "vram".  Allocating
-	 * is all nicely abstracted behind the dma api, but we need
-	 * to know the entire size to allocate it all in one go. There
-	 * are two cases:
-	 *  1) device with no IOMMU, in which case we need exclusive
-	 *     access to a VRAM carveout big enough for all gpu
-	 *     buffers
-	 *  2) device with IOMMU, but where the bootloader puts up
-	 *     a splash screen.  In this case, the VRAM carveout
-	 *     need only be large enough for fbdev fb.  But we need
-	 *     exclusive access to the buffer to avoid the kernel
-	 *     using those pages for other purposes (which appears
-	 *     as corruption on screen before we have a chance to
-	 *     load and do initial modeset)
-	 */
-
-	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
-	if (node) {
-		struct resource r;
-		ret = of_address_to_resource(node, 0, &r);
-		of_node_put(node);
-		if (ret)
-			return ret;
-		size = r.end - r.start + 1;
-		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
-
-		/* if we have no IOMMU, then we need to use carveout allocator.
-		 * Grab the entire DMA chunk carved out in early startup in
-		 * mach-msm:
-		 */
-	} else if (!msm_use_mmu(dev)) {
-		DRM_INFO("using %s VRAM carveout\n", vram);
-		size = memparse(vram, NULL);
-	}
-
-	if (size) {
-		unsigned long attrs = 0;
-		void *p;
-
-		priv->vram.size = size;
-
-		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
-		spin_lock_init(&priv->vram.lock);
-
-		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
-		attrs |= DMA_ATTR_WRITE_COMBINE;
-
-		/* note that for no-kernel-mapping, the vaddr returned
-		 * is bogus, but non-null if allocation succeeded:
-		 */
-		p = dma_alloc_attrs(dev->dev, size,
-				&priv->vram.paddr, GFP_KERNEL, attrs);
-		if (!p) {
-			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
-			priv->vram.paddr = 0;
-			return -ENOMEM;
-		}
-
-		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
-				(uint32_t)priv->vram.paddr,
-				(uint32_t)(priv->vram.paddr + size));
-	}
-
-	return ret;
-}
-
-static void msm_deinit_vram(struct drm_device *ddev)
-{
-	struct msm_drm_private *priv = ddev->dev_private;
-	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
-
-	if (!priv->vram.paddr)
-		return;
-
-	drm_mm_takedown(&priv->vram.mm);
-	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
-		       attrs);
-}
-
-static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+static int msm_drm_init(struct device *dev, const struct drm_driver *drv,
+			const struct component_ops *gpu_ops)
 {
 	struct msm_drm_private *priv = dev_get_drvdata(dev);
 	struct drm_device *ddev;
-	struct msm_kms *kms;
-	struct drm_crtc *crtc;
 	int ret;
 
 	if (drm_firmware_drivers_only())
@@ -421,12 +122,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 	ddev->dev_private = priv;
 	priv->dev = ddev;
 
-	priv->wq = alloc_ordered_workqueue("msm", 0);
-	if (!priv->wq) {
-		ret = -ENOMEM;
-		goto err_put_dev;
-	}
-
 	INIT_LIST_HEAD(&priv->objects);
 	mutex_init(&priv->obj_lock);
 
@@ -439,127 +134,59 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
 	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
 
+	/* Initialize stall-on-fault */
+	spin_lock_init(&priv->fault_stall_lock);
+	priv->stall_enabled = true;
+
 	/* Teach lockdep about lock ordering wrt. shrinker: */
 	fs_reclaim_acquire(GFP_KERNEL);
 	might_lock(&priv->lru.lock);
 	fs_reclaim_release(GFP_KERNEL);
 
-	drm_mode_config_init(ddev);
-
-	ret = msm_init_vram(ddev);
-	if (ret)
-		goto err_cleanup_mode_config;
+	if (priv->kms_init) {
+		ret = drmm_mode_config_init(ddev);
+		if (ret)
+			goto err_put_dev;
+	}
 
 	dma_set_max_seg_size(dev, UINT_MAX);
 
 	/* Bind all our sub-components: */
-	ret = component_bind_all(dev, ddev);
+	if (gpu_ops)
+		ret = gpu_ops->bind(dev, dev, NULL);
+	else
+		ret = component_bind_all(dev, ddev);
 	if (ret)
-		goto err_deinit_vram;
+		goto err_put_dev;
 
-	/* the fw fb could be anywhere in memory */
-	ret = drm_aperture_remove_framebuffers(drv);
+	ret = msm_gem_shrinker_init(ddev);
 	if (ret)
 		goto err_msm_uninit;
 
-	msm_gem_shrinker_init(ddev);
-
 	if (priv->kms_init) {
-		ret = priv->kms_init(ddev);
-		if (ret) {
-			DRM_DEV_ERROR(dev, "failed to load kms\n");
-			priv->kms = NULL;
-			goto err_msm_uninit;
-		}
-		kms = priv->kms;
-	} else {
-		/* valid only for the dummy headless case, where of_node=NULL */
-		WARN_ON(dev->of_node);
-		kms = NULL;
-	}
-
-	/* Enable normalization of plane zpos */
-	ddev->mode_config.normalize_zpos = true;
-
-	if (kms) {
-		kms->dev = ddev;
-		ret = kms->funcs->hw_init(kms);
-		if (ret) {
-			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
-			goto err_msm_uninit;
-		}
-	}
-
-	drm_helper_move_panel_connectors_to_head(ddev);
-
-	ddev->mode_config.funcs = &mode_config_funcs;
-	ddev->mode_config.helper_private = &mode_config_helper_funcs;
-
-	drm_for_each_crtc(crtc, ddev) {
-		struct msm_drm_thread *ev_thread;
-
-		/* initialize event thread */
-		ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
-		ev_thread->dev = ddev;
-		ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
-		if (IS_ERR(ev_thread->worker)) {
-			ret = PTR_ERR(ev_thread->worker);
-			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
-			ev_thread->worker = NULL;
-			goto err_msm_uninit;
-		}
-
-		sched_set_fifo(ev_thread->worker->task);
-	}
-
-	ret = drm_vblank_init(ddev, priv->num_crtcs);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
-		goto err_msm_uninit;
-	}
-
-	if (kms) {
-		pm_runtime_get_sync(dev);
-		ret = msm_irq_install(ddev, kms->irq);
-		pm_runtime_put_sync(dev);
-		if (ret < 0) {
-			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
+		ret = msm_drm_kms_init(dev, drv);
+		if (ret)
 			goto err_msm_uninit;
-		}
 	}
 
 	ret = drm_dev_register(ddev, 0);
 	if (ret)
 		goto err_msm_uninit;
 
-	if (kms) {
-		ret = msm_disp_snapshot_init(ddev);
-		if (ret)
-			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-	}
-	drm_mode_config_reset(ddev);
-
 	ret = msm_debugfs_late_init(ddev);
 	if (ret)
 		goto err_msm_uninit;
 
-	drm_kms_helper_poll_init(ddev);
-
-	if (kms)
-		msm_fbdev_setup(ddev);
+	if (priv->kms_init)
+		msm_drm_kms_post_init(dev);
 
 	return 0;
 
 err_msm_uninit:
-	msm_drm_uninit(dev);
+	msm_drm_uninit(dev, gpu_ops);
 
 	return ret;
 
-err_deinit_vram:
-	msm_deinit_vram(ddev);
-err_cleanup_mode_config:
-	drm_mode_config_cleanup(ddev);
-	destroy_workqueue(priv->wq);
 err_put_dev:
 	drm_dev_put(ddev);
 
@@ -583,11 +210,42 @@ static void load_gpu(struct drm_device *dev)
 	mutex_unlock(&init_lock);
 }
 
+/**
+ * msm_context_vm - lazily create the context's VM
+ *
+ * @dev: the drm device
+ * @ctx: the context
+ *
+ * The VM is lazily created, so that userspace has a chance to opt-in to having
+ * a userspace managed VM before the VM is created.
+ *
+ * Note that this does not return a reference to the VM.  Once the VM is created,
+ * it exists for the lifetime of the context.
+ */
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
+{
+	static DEFINE_MUTEX(init_lock);
+	struct msm_drm_private *priv = dev->dev_private;
+
+	/* Once ctx->vm is created it is valid for the lifetime of the context: */
+	if (ctx->vm)
+		return ctx->vm;
+
+	mutex_lock(&init_lock);
+	if (!ctx->vm) {
+		ctx->vm = msm_gpu_create_private_vm(
+			priv->gpu, current, !ctx->userspace_managed_vm);

+	}
+	mutex_unlock(&init_lock);
+
+	return ctx->vm;
+}
+
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
 	static atomic_t ident = ATOMIC_INIT(0);
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_file_private *ctx;
+	struct msm_context *ctx;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -599,7 +257,6 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
 	kref_init(&ctx->ref);
 	msm_submitqueue_init(dev, ctx);
 
-	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
 	file->driver_priv = ctx;
 
 	ctx->seqno = atomic_inc_return(&ident);
@@ -617,49 +274,28 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
 	return context_init(dev, file);
 }
 
-static void context_close(struct msm_file_private *ctx)
+static void context_close(struct msm_context *ctx)
 {
+	ctx->closed = true;
 	msm_submitqueue_close(ctx);
-	msm_file_private_put(ctx);
+	msm_context_put(ctx);
 }
 
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_context *ctx = file->driver_priv;
 
 	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
 	if (priv->gpu)
-		msm_file_private_set_sysprof(ctx, priv->gpu, 0);
+		msm_context_set_sysprof(ctx, priv->gpu, 0);
 
 	context_close(ctx);
 }
 
-int msm_crtc_enable_vblank(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-	if (!kms)
-		return -ENXIO;
-	drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
-	return vblank_ctrl_queue_work(priv, crtc, true);
-}
-
-void msm_crtc_disable_vblank(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-	if (!kms)
-		return;
-	drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
-	vblank_ctrl_queue_work(priv, crtc, false);
-}
-
 /*
  * DRM ioctls:
  */
@@ -787,11 +423,14 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 		uint64_t *iova)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_context *ctx = file->driver_priv;
 
 	if (!priv->gpu)
 		return -EINVAL;
 
+	if (msm_context_is_vmbind(ctx))
+		return UERR(EINVAL, dev, "VM_BIND is enabled");
+
 	if (should_fail(&fail_gem_iova, obj->size))
 		return -ENOMEM;
 
@@ -799,7 +438,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
-	return msm_gem_get_iova(obj, ctx->aspace, iova);
+	return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
 }
 
 static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
@@ -807,19 +446,109 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
 		uint64_t iova)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_context *ctx = file->driver_priv;
+	struct drm_gpuvm *vm = msm_context_vm(dev, ctx);
 
 	if (!priv->gpu)
 		return -EINVAL;
 
+	if (msm_context_is_vmbind(ctx))
+		return UERR(EINVAL, dev, "VM_BIND is enabled");
+
 	/* Only supported if per-process address space is supported: */
-	if (priv->gpu->aspace == ctx->aspace)
-		return -EOPNOTSUPP;
+	if (priv->gpu->vm == vm)
+		return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
 
 	if (should_fail(&fail_gem_iova, obj->size))
 		return -ENOMEM;
 
-	return msm_gem_set_iova(obj, ctx->aspace, iova);
+	return msm_gem_set_iova(obj, vm, iova);
+}
+
+static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
+					   __user void *metadata,
+					   u32 metadata_size)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	void *new_metadata;
+	void *buf;
+	int ret;
+
+	/* Impose a moderate upper bound on metadata size: */
+	if (metadata_size > 128) {
+		return -EOVERFLOW;
+	}
+
+	/* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
+	buf = memdup_user(metadata, metadata_size);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	ret = msm_gem_lock_interruptible(obj);
+	if (ret)
+		goto out;
+
+	new_metadata =
+		krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
+	if (!new_metadata) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	msm_obj->metadata = new_metadata;
+	msm_obj->metadata_size = metadata_size;
+	memcpy(msm_obj->metadata, buf, metadata_size);
+
+	msm_gem_unlock(obj);
+
+out:
+	kfree(buf);
+
+	return ret;
+}
+
+static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
+					   __user void *metadata,
+					   u32 *metadata_size)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	void *buf;
+	int ret, len;
+
+	if (!metadata) {
+		/*
+		 * Querying the size is inherently racey, but
+		 * EXT_external_objects expects the app to confirm
+		 * via device and driver UUIDs that the exporter and
+		 * importer versions match.  All we can do from the
+		 * kernel side is check the length under obj lock
+		 * when userspace tries to retrieve the metadata
+		 */
+		*metadata_size = msm_obj->metadata_size;
+		return 0;
+	}
+
+	ret = msm_gem_lock_interruptible(obj);
+	if (ret)
+		return ret;
+
+	/* Avoid copy_to_user() under gem obj lock: */
+	len = msm_obj->metadata_size;
+	buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);
+
+	msm_gem_unlock(obj);
+
+	if (*metadata_size < len) {
+		ret = -ETOOSMALL;
+	} else if (copy_to_user(metadata, buf, len)) {
+		ret = -EFAULT;
+	} else {
+		*metadata_size = len;
+	}
+
+	kfree(buf);
+
+	return ret;
 }
 
 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
@@ -844,6 +573,8 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 		break;
 	case MSM_INFO_SET_NAME:
 	case MSM_INFO_GET_NAME:
+	case MSM_INFO_SET_METADATA:
+	case MSM_INFO_GET_METADATA:
 		break;
 	default:
 		return -EINVAL;
@@ -866,7 +597,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
 		break;
 	case MSM_INFO_GET_FLAGS:
-		if (obj->import_attach) {
+		if (drm_gem_is_imported(obj)) {
 			ret = -EINVAL;
 			break;
 		}
@@ -896,7 +627,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 		break;
 	case MSM_INFO_GET_NAME:
 		if (args->value && (args->len < strlen(msm_obj->name))) {
-			ret = -EINVAL;
+			ret = -ETOOSMALL;
 			break;
 		}
 		args->len = strlen(msm_obj->name);
@@ -906,6 +637,14 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 			ret = -EFAULT;
 		}
 		break;
+	case MSM_INFO_SET_METADATA:
+		ret = msm_ioctl_gem_info_set_metadata(
+			obj, u64_to_user_ptr(args->value), args->len);
+		break;
+	case MSM_INFO_GET_METADATA:
+		ret = msm_ioctl_gem_info_get_metadata(
+			obj, u64_to_user_ptr(args->value), &args->len);
+		break;
 	}
 
 	drm_gem_object_put(obj);
@@ -1055,6 +794,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_VM_BIND,           msm_ioctl_vm_bind,           DRM_RENDER_ALLOW),
 };
 
 static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
@@ -1076,57 +816,81 @@ static const struct file_operations fops = {
	.show_fdinfo = drm_show_fdinfo,
 };
 
+#define DRIVER_FEATURES_GPU ( \
+		DRIVER_GEM | \
+		DRIVER_GEM_GPUVA | \
+		DRIVER_RENDER | \
+		DRIVER_SYNCOBJ | \
+		DRIVER_SYNCOBJ_TIMELINE | \
+		0 )
+
+#define DRIVER_FEATURES_KMS ( \
+		DRIVER_GEM | \
+		DRIVER_GEM_GPUVA | \
+		DRIVER_ATOMIC | \
+		DRIVER_MODESET | \
+		0 )
+
 static const struct drm_driver msm_driver = {
-	.driver_features    = DRIVER_GEM |
-				DRIVER_RENDER |
-				DRIVER_ATOMIC |
-				DRIVER_MODESET |
-				DRIVER_SYNCOBJ,
+	.driver_features    = DRIVER_FEATURES_GPU | DRIVER_FEATURES_KMS,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
+	.gem_prime_import   = msm_gem_prime_import,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
 #ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
 #endif
+	MSM_FBDEV_DRIVER_OPS,
	.show_fdinfo        = msm_show_fdinfo,
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
-	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
 };
 
-int msm_pm_prepare(struct device *dev)
-{
-	struct msm_drm_private *priv = dev_get_drvdata(dev);
-	struct drm_device *ddev = priv ? priv->dev : NULL;
-
-	if (!priv || !priv->kms)
-		return 0;
-
-	return drm_mode_config_helper_suspend(ddev);
-}
-
-void msm_pm_complete(struct device *dev)
-{
-	struct msm_drm_private *priv = dev_get_drvdata(dev);
-	struct drm_device *ddev = priv ? priv->dev : NULL;
-
-	if (!priv || !priv->kms)
-		return;
-
-	drm_mode_config_helper_resume(ddev);
-}
+static const struct drm_driver msm_kms_driver = {
+	.driver_features    = DRIVER_FEATURES_KMS,
+	.open               = msm_open,
+	.postclose          = msm_postclose,
+	.dumb_create        = msm_gem_dumb_create,
+	.dumb_map_offset    = msm_gem_dumb_map_offset,
+	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init       = msm_debugfs_init,
+#endif
+	MSM_FBDEV_DRIVER_OPS,
+	.show_fdinfo        = msm_show_fdinfo,
+	.fops               = &fops,
+	.name               = "msm-kms",
+	.desc               = "MSM Snapdragon DRM",
+	.major              = MSM_VERSION_MAJOR,
+	.minor              = MSM_VERSION_MINOR,
+	.patchlevel         = MSM_VERSION_PATCHLEVEL,
+};
 
-static const struct dev_pm_ops msm_pm_ops = {
-	.prepare = msm_pm_prepare,
-	.complete = msm_pm_complete,
+static const struct drm_driver msm_gpu_driver = {
+	.driver_features    = DRIVER_FEATURES_GPU,
+	.open               = msm_open,
+	.postclose          = msm_postclose,
+	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init       = msm_debugfs_init,
+#endif
+	.show_fdinfo        = msm_show_fdinfo,
+	.ioctls             = msm_ioctls,
+	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
+	.fops               = &fops,
+	.name               = "msm",
+	.desc               = "MSM Snapdragon DRM",
+	.major              = MSM_VERSION_MAJOR,
+	.minor              = MSM_VERSION_MINOR,
+	.patchlevel         = MSM_VERSION_PATCHLEVEL,
 };
 
 /*
@@ -1139,7 +903,7 @@ static const struct dev_pm_ops msm_pm_ops = {
  * is no external component that we need to add since LVDS is within MDP4
  * itself.
  */
-static int add_components_mdp(struct device *master_dev,
+static int add_mdp_components(struct device *master_dev,
			      struct component_match **matchptr)
 {
	struct device_node *np = master_dev->of_node;
@@ -1184,6 +948,43 @@ static int add_mdp_components(struct device *master_dev,
 	return 0;
 }
 
+#if !IS_REACHABLE(CONFIG_DRM_MSM_MDP5) || !IS_REACHABLE(CONFIG_DRM_MSM_DPU)
+bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
+{
+	/* If just a single driver is enabled, use it no matter what */
+	return true;
+}
+#else
+
+static bool prefer_mdp5 = true;
+MODULE_PARM_DESC(prefer_mdp5, "Select whether MDP5 or DPU driver should be preferred");
+module_param(prefer_mdp5, bool, 0444);
+
+/* list all platforms supported by both mdp5 and dpu drivers */
+static const char *const msm_mdp5_dpu_migration[] = {
+	"qcom,msm8917-mdp5",
+	"qcom,msm8937-mdp5",
+	"qcom,msm8953-mdp5",
+	"qcom,msm8996-mdp5",
+	"qcom,sdm630-mdp5",
+	"qcom,sdm660-mdp5",
+	NULL,
+};
+
+bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
+{
+	/* If it is not an MDP5 device, do not try MDP5 driver */
+	if (!of_device_is_compatible(dev->of_node, "qcom,mdp5"))
+		return dpu_driver;
+
+	/* If it is not in the migration list, use MDP5 */
+	if (!of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migration))
+		return !dpu_driver;
+
+	return prefer_mdp5 ? !dpu_driver : dpu_driver;
+}
+#endif
+
 /*
  * We don't know what's the best binding to link the gpu with the drm device.
  * Fow now, we just hunt for all the possible gpus that we support, and add them
@@ -1206,7 +1007,7 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	if (of_device_is_available(np))
+	if (of_device_is_available(np) && adreno_has_gpu(np))
 		drm_of_component_match_add(dev, matchptr, component_compare_of, np);
 
 	of_node_put(np);
@@ -1216,12 +1017,16 @@ static int add_gpu_components(struct device *dev,
 
 static int msm_drm_bind(struct device *dev)
 {
-	return msm_drm_init(dev, &msm_driver);
+	return msm_drm_init(dev,
+			    msm_gpu_no_components() ?
+				    &msm_kms_driver :
+				    &msm_driver,
+			    NULL);
 }
 
 static void msm_drm_unbind(struct device *dev)
 {
-	msm_drm_uninit(dev);
+	msm_drm_uninit(dev, NULL);
 }
 
 const struct component_master_ops msm_drm_ops = {
@@ -1230,7 +1035,8 @@
 };
 
 int msm_drv_probe(struct device *master_dev,
-		  int (*kms_init)(struct drm_device *dev))
+		  int (*kms_init)(struct drm_device *dev),
+		  struct msm_kms *kms)
 {
 	struct msm_drm_private *priv;
 	struct component_match *match = NULL;
@@ -1240,19 +1046,22 @@ int msm_drv_probe(struct device *master_dev,
 	if (!priv)
 		return -ENOMEM;
 
+	priv->kms = kms;
 	priv->kms_init = kms_init;
 	dev_set_drvdata(master_dev, priv);
 
 	/* Add mdp components if we have KMS. */
 	if (kms_init) {
-		ret = add_components_mdp(master_dev, &match);
+		ret = add_mdp_components(master_dev, &match);
 		if (ret)
 			return ret;
 	}
 
-	ret = add_gpu_components(master_dev, &match);
-	if (ret)
-		return ret;
+	if (!msm_gpu_no_components()) {
+		ret = add_gpu_components(master_dev, &match);
+		if (ret)
+			return ret;
+	}
 
	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
@@ -1268,48 +1077,33 @@ int msm_drv_probe(struct device *master_dev,
 	return 0;
 }
 
-/*
- * Platform driver:
- * Used only for headlesss GPU instances
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
-{
-	return msm_drv_probe(&pdev->dev, NULL);
-}
-
-static int msm_pdev_remove(struct platform_device *pdev)
+int msm_gpu_probe(struct platform_device *pdev,
+		  const struct component_ops *ops)
 {
-	component_master_del(&pdev->dev, &msm_drm_ops);
+	struct msm_drm_private *priv;
+	int ret;
 
-	return 0;
-}
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
 
-void msm_drv_shutdown(struct platform_device *pdev)
-{
-	struct msm_drm_private *priv = platform_get_drvdata(pdev);
-	struct drm_device *drm = priv ? priv->dev : NULL;
+	platform_set_drvdata(pdev, priv);
 
-	/*
-	 * Shutdown the hw if we're far enough along where things might be on.
-	 * If we run this too early, we'll end up panicking in any variety of
-	 * places. Since we don't register the drm device until late in
-	 * msm_drm_init, drm_dev->registered is used as an indicator that the
-	 * shutdown will be successful.
+	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
-	if (drm && drm->registered && priv->kms)
-		drm_atomic_helper_shutdown(drm);
+	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+	if (ret)
+		return ret;
+
+	return msm_drm_init(&pdev->dev, &msm_gpu_driver, ops);
 }
 
-static struct platform_driver msm_platform_driver = {
-	.probe      = msm_pdev_probe,
-	.remove     = msm_pdev_remove,
-	.shutdown   = msm_drv_shutdown,
-	.driver     = {
-		.name   = "msm",
-		.pm     = &msm_pm_ops,
-	},
-};
+void msm_gpu_remove(struct platform_device *pdev,
		    const struct component_ops *ops)
+{
+	msm_drm_uninit(&pdev->dev, ops);
+}
 
 static int __init msm_drm_register(void)
 {
@@ -1325,13 +1119,13 @@ static int __init msm_drm_register(void)
 	adreno_register();
 	msm_mdp4_register();
 	msm_mdss_register();
-	return platform_driver_register(&msm_platform_driver);
+
+	return 0;
 }
 
 static void __exit msm_drm_unregister(void)
 {
 	DBG("fini");
-	platform_driver_unregister(&msm_platform_driver);
 	msm_mdss_unregister();
 	msm_mdp4_unregister();
 	msm_dp_unregister();
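
The new msm_gpu_probe()/msm_gpu_remove() entry points above let a GPU platform driver create a stand-alone DRM device (msm_gpu_driver) when the separate_gpu_kms module parameter is set, instead of binding into the combined KMS+GPU device through the component framework. A minimal sketch of a caller is below; "example_gpu_ops", its bind/unbind stubs, and the probe/remove functions are hypothetical illustrations (the patch itself does not show the GPU-driver side), only msm_gpu_probe(), msm_gpu_remove(), and msm_gpu_no_components() come from this diff, assumed declared in msm_drv.h as elsewhere in the series.

/*
 * Illustrative sketch only -- not part of the patch above.
 */
#include <linux/component.h>
#include <linux/platform_device.h>

#include "msm_drv.h"

static int example_gpu_bind(struct device *dev, struct device *master, void *data)
{
	/* GPU init would go here */
	return 0;
}

static void example_gpu_unbind(struct device *dev, struct device *master, void *data)
{
	/* GPU teardown would go here */
}

static const struct component_ops example_gpu_ops = {
	.bind   = example_gpu_bind,
	.unbind = example_gpu_unbind,
};

static int example_gpu_probe(struct platform_device *pdev)
{
	/* Stand-alone GPU DRM device when KMS and GPU are split: */
	if (msm_gpu_no_components())
		return msm_gpu_probe(pdev, &example_gpu_ops);

	/* Otherwise bind as a component of the single KMS+GPU device: */
	return component_add(&pdev->dev, &example_gpu_ops);
}

static void example_gpu_remove(struct platform_device *pdev)
{
	if (msm_gpu_no_components())
		msm_gpu_remove(pdev, &example_gpu_ops);
	else
		component_del(&pdev->dev, &example_gpu_ops);
}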
