Diffstat (limited to 'drivers/gpu/drm/drm_gem_atomic_helper.c')
-rw-r--r--   drivers/gpu/drm/drm_gem_atomic_helper.c | 270
1 file changed, 158 insertions, 112 deletions
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index a27135084ae5..569d41a65a0b 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -1,9 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <linux/dma-resv.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/export.h>
 
 #include <drm/drm_atomic_state_helper.h>
 #include <drm/drm_atomic_uapi.h>
+#include <drm/drm_framebuffer.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
@@ -24,11 +27,8 @@
  * call drm_gem_plane_helper_prepare_fb() from their implementation of
  * struct &drm_plane_helper.prepare_fb . It sets the plane's fence from
  * the framebuffer so that the DRM core can synchronize access automatically.
- *
  * drm_gem_plane_helper_prepare_fb() can also be used directly as
- * implementation of prepare_fb. For drivers based on
- * struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb()
- * provides equivalent functionality.
+ * implementation of prepare_fb.
  *
  * .. code-block:: c
  *
@@ -39,11 +39,6 @@
  *		. prepare_fb = drm_gem_plane_helper_prepare_fb,
  *	};
  *
- *	struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
- *		...,
- *		. prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
- *	};
- *
  * A driver using a shadow buffer copies the content of the shadow buffers
  * into the HW's framebuffer memory during an atomic update. This requires
  * a mapping of the shadow buffer into kernel address space. The mappings
@@ -52,7 +47,7 @@
  *
  * The helpers for shadow-buffered planes establish and release mappings,
  * and provide struct drm_shadow_plane_state, which stores the plane's mapping
- * for commit-tail functons.
+ * for commit-tail functions.
  *
  * Shadow-buffered planes can easily be enabled by using the provided macros
  * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
@@ -135,49 +130,105 @@
  * GEM based framebuffer drivers which have their buffers always pinned in
  * memory.
  *
- * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
- * explicit fencing in atomic modeset updates.
+ * This function is the default implementation for GEM drivers of
+ * &drm_plane_helper_funcs.prepare_fb if no callback is provided.
  */
-int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
+                                    struct drm_plane_state *state)
 {
-        struct drm_gem_object *obj;
-        struct dma_fence *fence;
+        struct dma_fence *fence = dma_fence_get(state->fence);
+        enum dma_resv_usage usage;
+        size_t i;
+        int ret;
 
         if (!state->fb)
                 return 0;
 
-        obj = drm_gem_fb_get_obj(state->fb, 0);
-        fence = dma_resv_get_excl_unlocked(obj->resv);
-        drm_atomic_set_fence_for_plane(state, fence);
+        /*
+         * Only add the kernel fences here if there is already a fence set via
+         * explicit fencing interfaces on the atomic ioctl.
+         *
+         * This way explicit fencing can be used to overrule implicit fencing,
+         * which is important to make explicit fencing use-cases work: One
+         * example is using one buffer for 2 screens with different refresh
+         * rates. Implicit fencing will clamp rendering to the refresh rate of
+         * the slower screen, whereas explicit fence allows 2 independent
+         * render and display loops on a single buffer. If a driver allows
+         * obeys both implicit and explicit fences for plane updates, then it
+         * will break all the benefits of explicit fencing.
+         */
+        usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE;
+
+        for (i = 0; i < state->fb->format->num_planes; ++i) {
+                struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
+                struct dma_fence *new;
+
+                if (!obj) {
+                        ret = -EINVAL;
+                        goto error;
+                }
+
+                ret = dma_resv_get_singleton(obj->resv, usage, &new);
+                if (ret)
+                        goto error;
+
+                if (new && fence) {
+                        struct dma_fence_chain *chain = dma_fence_chain_alloc();
+                        if (!chain) {
+                                ret = -ENOMEM;
+                                goto error;
+                        }
+
+                        dma_fence_chain_init(chain, fence, new, 1);
+                        fence = &chain->base;
+
+                } else if (new) {
+                        fence = new;
+                }
+        }
+
+        dma_fence_put(state->fence);
+        state->fence = fence;
 
         return 0;
+
+error:
+        dma_fence_put(fence);
+        return ret;
 }
 EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
 
+/*
+ * Shadow-buffered Planes
+ */
+
 /**
- * drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe
- * @pipe: Simple display pipe
- * @plane_state: Plane state
- *
- * This function uses drm_gem_plane_helper_prepare_fb() to extract the exclusive fence
- * from &drm_gem_object.resv and attaches it to plane state for the atomic
- * helper to wait on. This is necessary to correctly implement implicit
- * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
- * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
- *
- * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
- * explicit fencing in atomic modeset updates.
+ * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
+ * @plane: the plane
+ * @new_shadow_plane_state: the new shadow-buffered plane state
+ *
+ * This function duplicates shadow-buffered plane state. This is helpful for drivers
+ * that subclass struct drm_shadow_plane_state.
+ *
+ * The function does not duplicate existing mappings of the shadow buffers.
+ * Mappings are maintained during the atomic commit by the plane's prepare_fb
+ * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
+ * for corresponding helpers.
  */
-int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
-                                           struct drm_plane_state *plane_state)
+void
+__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
+                                       struct drm_shadow_plane_state *new_shadow_plane_state)
 {
-        return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state);
-}
-EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb);
+        struct drm_plane_state *plane_state = plane->state;
+        struct drm_shadow_plane_state *shadow_plane_state =
+                to_drm_shadow_plane_state(plane_state);
 
-/*
- * Shadow-buffered Planes
- */
+        __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
+
+        drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+                                   &shadow_plane_state->fmtcnv_state);
+}
+EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
 
 /**
  * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
@@ -208,13 +259,27 @@ drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
         new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
         if (!new_shadow_plane_state)
                 return NULL;
-        __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
+        __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
 
         return &new_shadow_plane_state->base;
 }
 EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
 
 /**
+ * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state
+ * @shadow_plane_state: the shadow-buffered plane state
+ *
+ * This function cleans up shadow-buffered plane state. Helpful for drivers that
+ * subclass struct drm_shadow_plane_state.
+ */
+void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
+{
+        drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state);
+        __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
+}
+EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);
+
+/**
  * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
  * @plane: the plane
  * @plane_state: the plane state of type struct drm_shadow_plane_state
@@ -229,12 +294,32 @@ void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
         struct drm_shadow_plane_state *shadow_plane_state =
                 to_drm_shadow_plane_state(plane_state);
 
-        __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
+        __drm_gem_destroy_shadow_plane_state(shadow_plane_state);
         kfree(shadow_plane_state);
 }
 EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
 
 /**
+ * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane
+ * @plane: the plane
+ * @shadow_plane_state: the shadow-buffered plane state
+ *
+ * This function resets state for shadow-buffered planes. Helpful
+ * for drivers that subclass struct drm_shadow_plane_state.
+ */
+void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
+                                  struct drm_shadow_plane_state *shadow_plane_state)
+{
+        if (shadow_plane_state) {
+                __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+                drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+        } else {
+                __drm_atomic_helper_plane_reset(plane, NULL);
+        }
+}
+EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
+
+/**
  * drm_gem_reset_shadow_plane - resets a shadow-buffered plane
  * @plane: the plane
  *
@@ -253,135 +338,96 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane)
         }
 
         shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
-        if (!shadow_plane_state)
-                return;
-        __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+        __drm_gem_reset_shadow_plane(plane, shadow_plane_state);
 }
 EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
 
 /**
- * drm_gem_prepare_shadow_fb - prepares shadow framebuffers
+ * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
  * @plane: the plane
  * @plane_state: the plane state of type struct drm_shadow_plane_state
  *
- * This function implements struct &drm_plane_helper_funcs.prepare_fb. It
+ * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
  * maps all buffer objects of the plane's framebuffer into kernel address
- * space and stores them in &struct drm_shadow_plane_state.map. The
- * framebuffer will be synchronized as part of the atomic commit.
+ * space and stores them in struct &drm_shadow_plane_state.map. The first data
+ * bytes are available in struct &drm_shadow_plane_state.data.
  *
- * See drm_gem_cleanup_shadow_fb() for cleanup.
+ * See drm_gem_end_shadow_fb_access() for cleanup.
  *
  * Returns:
  * 0 on success, or a negative errno code otherwise.
  */
-int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
+int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
 {
         struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
         struct drm_framebuffer *fb = plane_state->fb;
-        struct drm_gem_object *obj;
-        struct dma_buf_map map;
-        int ret;
-        size_t i;
 
         if (!fb)
                 return 0;
 
-        ret = drm_gem_plane_helper_prepare_fb(plane, plane_state);
-        if (ret)
-                return ret;
-
-        for (i = 0; i < ARRAY_SIZE(shadow_plane_state->map); ++i) {
-                obj = drm_gem_fb_get_obj(fb, i);
-                if (!obj)
-                        continue;
-                ret = drm_gem_vmap(obj, &map);
-                if (ret)
-                        goto err_drm_gem_vunmap;
-                shadow_plane_state->map[i] = map;
-        }
-
-        return 0;
-
-err_drm_gem_vunmap:
-        while (i) {
-                --i;
-                obj = drm_gem_fb_get_obj(fb, i);
-                if (!obj)
-                        continue;
-                drm_gem_vunmap(obj, &shadow_plane_state->map[i]);
-        }
-        return ret;
+        return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
 }
-EXPORT_SYMBOL(drm_gem_prepare_shadow_fb);
+EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);
 
 /**
- * drm_gem_cleanup_shadow_fb - releases shadow framebuffers
+ * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
  * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
  *
- * This function implements struct &drm_plane_helper_funcs.cleanup_fb.
- * This function unmaps all buffer objects of the plane's framebuffer.
+ * This function implements struct &drm_plane_helper_funcs.end_fb_access. It
+ * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
 *
- * See drm_gem_prepare_shadow_fb() for more inforamtion.
+ * See drm_gem_begin_shadow_fb_access() for more information.
  */
-void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
+void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
 {
         struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
         struct drm_framebuffer *fb = plane_state->fb;
-        size_t i = ARRAY_SIZE(shadow_plane_state->map);
-        struct drm_gem_object *obj;
 
         if (!fb)
                 return;
 
-        while (i) {
-                --i;
-                obj = drm_gem_fb_get_obj(fb, i);
-                if (!obj)
-                        continue;
-                drm_gem_vunmap(obj, &shadow_plane_state->map[i]);
-        }
+        drm_gem_fb_vunmap(fb, shadow_plane_state->map);
 }
-EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb);
+EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);
 
 /**
- * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers
+ * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
  * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
  *
- * This function implements struct drm_simple_display_funcs.prepare_fb. It
- * maps all buffer objects of the plane's framebuffer into kernel address
- * space and stores them in struct drm_shadow_plane_state.map. The
- * framebuffer will be synchronized as part of the atomic commit.
+ * This function implements struct drm_simple_display_funcs.begin_fb_access.
 *
- * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
+ * See drm_gem_begin_shadow_fb_access() for details and
+ * drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
  *
  * Returns:
  * 0 on success, or a negative errno code otherwise.
  */
-int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
-                                         struct drm_plane_state *plane_state)
+int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
                                              struct drm_plane_state *plane_state)
 {
-        return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state);
+        return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
 }
-EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb);
+EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);
 
 /**
- * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers
+ * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
  * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
  *
- * This function implements struct drm_simple_display_funcs.cleanup_fb.
- * This function unmaps all buffer objects of the plane's framebuffer.
+ * This function implements struct drm_simple_display_funcs.end_fb_access.
+ * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
+ * reverse order.
 *
- * See drm_gem_simple_kms_prepare_shadow_fb().
+ * See drm_gem_simple_kms_begin_shadow_fb_access().
  */
-void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
                                          struct drm_plane_state *plane_state)
+void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
                                             struct drm_plane_state *plane_state)
 {
-        drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state);
+        drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
 }
-EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb);
+EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);
 
 /**
  * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
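
As a reading aid, here is a minimal sketch (not part of the patch) of how a driver with a shadow-buffered plane might hook up the helpers touched above, using the DRM_GEM_SHADOW_PLANE_FUNCS and DRM_GEM_SHADOW_PLANE_HELPER_FUNCS macros mentioned in the overview comment. All my_* names are hypothetical.

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane.h>

/* Hypothetical commit-tail update that consumes the vmap'ed shadow buffer. */
static void my_plane_atomic_update(struct drm_plane *plane,
                                   struct drm_atomic_state *state)
{
        struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
        struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);

        /*
         * shadow_plane_state->map[0] is the CPU mapping established by
         * drm_gem_begin_shadow_fb_access(); a real driver copies from it
         * into the hardware's framebuffer memory here.
         */
        (void)shadow_plane_state;
}

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
        DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, /* begin_fb_access/end_fb_access */
        .atomic_update = my_plane_atomic_update,
};

static const struct drm_plane_funcs my_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = drm_plane_cleanup,
        DRM_GEM_SHADOW_PLANE_FUNCS, /* reset/duplicate/destroy of shadow-plane state */
};

After this series the helper-funcs macro wires up drm_gem_begin_shadow_fb_access() and drm_gem_end_shadow_fb_access(), so the driver only touches the mapped shadow buffer from its commit-tail code, while implicit/explicit fencing is handled by the default drm_gem_plane_helper_prepare_fb() path shown in the first hunk.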
