Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_kms.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1658
1 file changed, 434 insertions, 1224 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 04d66592f605..d32ce1cb579e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1,48 +1,39 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * **************************************************************************/ +#include "vmwgfx_kms.h" + +#include "vmwgfx_bo.h" +#include "vmwgfx_resource_priv.h" +#include "vmwgfx_vkms.h" +#include "vmw_surface_cache.h" + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_plane_helper.h> #include <drm/drm_rect.h> #include <drm/drm_sysfs.h> -#include <drm/drm_vblank.h> - -#include "vmwgfx_kms.h" +#include <drm/drm_edid.h> -/* Might need a hrtimer here? */ -#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? 
HZ / 60 : 1) +void vmw_du_init(struct vmw_display_unit *du) +{ + vmw_vkms_crtc_init(&du->crtc); +} void vmw_du_cleanup(struct vmw_display_unit *du) { + struct vmw_private *dev_priv = vmw_priv(du->primary.dev); + + vmw_vkms_crtc_cleanup(&du->crtc); drm_plane_cleanup(&du->primary); - drm_plane_cleanup(&du->cursor); + if (vmw_cmd_supported(dev_priv)) + drm_plane_cleanup(&du->cursor.base); drm_connector_unregister(&du->connector); drm_crtc_cleanup(&du->crtc); @@ -50,239 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du) drm_connector_cleanup(&du->connector); } -/* - * Display Unit Cursor functions - */ - -static int vmw_cursor_update_image(struct vmw_private *dev_priv, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - struct { - u32 cmd; - SVGAFifoCmdDefineAlphaCursor cursor; - } *cmd; - u32 image_size = width * height * 4; - u32 cmd_size = sizeof(*cmd) + image_size; - - if (!image) - return -EINVAL; - - cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size); - if (unlikely(cmd == NULL)) - return -ENOMEM; - - memset(cmd, 0, sizeof(*cmd)); - - memcpy(&cmd[1], image, image_size); - - cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR; - cmd->cursor.id = 0; - cmd->cursor.width = width; - cmd->cursor.height = height; - cmd->cursor.hotspotX = hotspotX; - cmd->cursor.hotspotY = hotspotY; - - vmw_fifo_commit_flush(dev_priv, cmd_size); - - return 0; -} - -static int vmw_cursor_update_bo(struct vmw_private *dev_priv, - struct vmw_buffer_object *bo, - u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - struct ttm_bo_kmap_obj map; - unsigned long kmap_offset; - unsigned long kmap_num; - void *virtual; - bool dummy; - int ret; - - kmap_offset = 0; - kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; - - ret = ttm_bo_reserve(&bo->base, true, false, NULL); - if (unlikely(ret != 0)) { - DRM_ERROR("reserve failed\n"); - return -EINVAL; - } - - ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map); - if (unlikely(ret != 0)) - goto err_unreserve; - - virtual = ttm_kmap_obj_virtual(&map, &dummy); - ret = vmw_cursor_update_image(dev_priv, virtual, width, height, - hotspotX, hotspotY); - - ttm_bo_kunmap(&map); -err_unreserve: - ttm_bo_unreserve(&bo->base); - - return ret; -} - - -static void vmw_cursor_update_position(struct vmw_private *dev_priv, - bool show, int x, int y) -{ - u32 *fifo_mem = dev_priv->mmio_virt; - uint32_t count; - - spin_lock(&dev_priv->cursor_lock); - vmw_mmio_write(show ? 
1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); - vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X); - vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y); - count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT); - vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); - spin_unlock(&dev_priv->cursor_lock); -} - - -void vmw_kms_cursor_snoop(struct vmw_surface *srf, - struct ttm_object_file *tfile, - struct ttm_buffer_object *bo, - SVGA3dCmdHeader *header) -{ - struct ttm_bo_kmap_obj map; - unsigned long kmap_offset; - unsigned long kmap_num; - SVGA3dCopyBox *box; - unsigned box_count; - void *virtual; - bool dummy; - struct vmw_dma_cmd { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA dma; - } *cmd; - int i, ret; - - cmd = container_of(header, struct vmw_dma_cmd, header); - - /* No snooper installed */ - if (!srf->snooper.image) - return; - - if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { - DRM_ERROR("face and mipmap for cursors should never != 0\n"); - return; - } - - if (cmd->header.size < 64) { - DRM_ERROR("at least one full copy box must be given\n"); - return; - } - - box = (SVGA3dCopyBox *)&cmd[1]; - box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / - sizeof(SVGA3dCopyBox); - - if (cmd->dma.guest.ptr.offset % PAGE_SIZE || - box->x != 0 || box->y != 0 || box->z != 0 || - box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || - box->d != 1 || box_count != 1) { - /* TODO handle none page aligned offsets */ - /* TODO handle more dst & src != 0 */ - /* TODO handle more then one copy */ - DRM_ERROR("Cant snoop dma request for cursor!\n"); - DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", - box->srcx, box->srcy, box->srcz, - box->x, box->y, box->z, - box->w, box->h, box->d, box_count, - cmd->dma.guest.ptr.offset); - return; - } - - kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; - kmap_num = (64*64*4) >> PAGE_SHIFT; - - ret = ttm_bo_reserve(bo, true, false, NULL); - if (unlikely(ret != 0)) { - DRM_ERROR("reserve failed\n"); - return; - } - - ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); - if (unlikely(ret != 0)) - goto err_unreserve; - - virtual = ttm_kmap_obj_virtual(&map, &dummy); - - if (box->w == 64 && cmd->dma.guest.pitch == 64*4) { - memcpy(srf->snooper.image, virtual, 64*64*4); - } else { - /* Image is unsigned pointer. */ - for (i = 0; i < box->h; i++) - memcpy(srf->snooper.image + i * 64, - virtual + i * cmd->dma.guest.pitch, - box->w * 4); - } - - srf->snooper.age++; - - ttm_bo_kunmap(&map); -err_unreserve: - ttm_bo_unreserve(bo); -} - -/** - * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots - * - * @dev_priv: Pointer to the device private struct. - * - * Clears all legacy hotspots. 
- */ -void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - - drm_modeset_lock_all(dev); - drm_for_each_crtc(crtc, dev) { - du = vmw_crtc_to_du(crtc); - - du->hotspot_x = 0; - du->hotspot_y = 0; - } - drm_modeset_unlock_all(dev); -} - -void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - - mutex_lock(&dev->mode_config.mutex); - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - du = vmw_crtc_to_du(crtc); - if (!du->cursor_surface || - du->cursor_age == du->cursor_surface->snooper.age) - continue; - - du->cursor_age = du->cursor_surface->snooper.age; - vmw_cursor_update_image(dev_priv, - du->cursor_surface->snooper.image, - 64, 64, - du->hotspot_x + du->core_hotspot_x, - du->hotspot_y + du->core_hotspot_y); - } - - mutex_unlock(&dev->mode_config.mutex); -} - - -void vmw_du_cursor_plane_destroy(struct drm_plane *plane) -{ - vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); - - drm_plane_cleanup(plane); -} - void vmw_du_primary_plane_destroy(struct drm_plane *plane) { @@ -293,31 +51,25 @@ void vmw_du_primary_plane_destroy(struct drm_plane *plane) /** - * vmw_du_vps_unpin_surf - unpins resource associated with a framebuffer surface + * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface * * @vps: plane state associated with the display surface - * @unreference: true if we also want to unreference the display. */ -void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps, - bool unreference) +void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps) { - if (vps->surf) { + struct vmw_surface *surf = vmw_user_object_surface(&vps->uo); + + if (surf) { if (vps->pinned) { - vmw_resource_unpin(&vps->surf->res); + vmw_resource_unpin(&surf->res); vps->pinned--; } - - if (unreference) { - if (vps->pinned) - DRM_ERROR("Surface still pinned\n"); - vmw_surface_unreference(&vps->surf); - } } } /** - * vmw_du_plane_cleanup_fb - Unpins the cursor + * vmw_du_plane_cleanup_fb - Unpins the plane surface * * @plane: display plane * @old_state: Contains the FB to clean up @@ -332,99 +84,7 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane, { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - vmw_du_plane_unpin_surf(vps, false); -} - - -/** - * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it - * - * @plane: display plane - * @new_state: info on the new plane state, including the FB - * - * Returns 0 on success - */ -int -vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct drm_framebuffer *fb = new_state->fb; - struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); - - - if (vps->surf) - vmw_surface_unreference(&vps->surf); - - if (vps->bo) - vmw_bo_unreference(&vps->bo); - - if (fb) { - if (vmw_framebuffer_to_vfb(fb)->bo) { - vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer; - vmw_bo_reference(vps->bo); - } else { - vps->surf = vmw_framebuffer_to_vfbs(fb)->surface; - vmw_surface_reference(vps->surf); - } - } - - return 0; -} - - -void -vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; - struct vmw_private *dev_priv = vmw_priv(crtc->dev); - struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - struct vmw_plane_state *vps = 
vmw_plane_state_to_vps(plane->state); - s32 hotspot_x, hotspot_y; - int ret = 0; - - - hotspot_x = du->hotspot_x; - hotspot_y = du->hotspot_y; - - if (plane->state->fb) { - hotspot_x += plane->state->fb->hot_x; - hotspot_y += plane->state->fb->hot_y; - } - - du->cursor_surface = vps->surf; - du->cursor_bo = vps->bo; - - if (vps->surf) { - du->cursor_age = du->cursor_surface->snooper.age; - - ret = vmw_cursor_update_image(dev_priv, - vps->surf->snooper.image, - 64, 64, hotspot_x, - hotspot_y); - } else if (vps->bo) { - ret = vmw_cursor_update_bo(dev_priv, vps->bo, - plane->state->crtc_w, - plane->state->crtc_h, - hotspot_x, hotspot_y); - } else { - vmw_cursor_update_position(dev_priv, false, 0, 0); - return; - } - - if (!ret) { - du->cursor_x = plane->state->crtc_x + du->set_gui_x; - du->cursor_y = plane->state->crtc_y + du->set_gui_y; - - vmw_cursor_update_position(dev_priv, true, - du->cursor_x + hotspot_x, - du->cursor_y + hotspot_y); - - du->core_hotspot_x = hotspot_x - du->hotspot_x; - du->core_hotspot_y = hotspot_y - du->hotspot_y; - } else { - DRM_ERROR("Failed to update cursor image\n"); - } + vmw_du_plane_unpin_surf(vps); } @@ -441,97 +101,54 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, * Returns 0 on success */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct drm_crtc_state *crtc_state = NULL; - struct drm_framebuffer *new_fb = state->fb; + struct drm_framebuffer *new_fb = new_state->fb; + struct drm_framebuffer *old_fb = old_state->fb; int ret; - if (state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); - - ret = drm_atomic_helper_check_plane_state(state, crtc_state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - false, true); - - if (!ret && new_fb) { - struct drm_crtc *crtc = state->crtc; - struct vmw_connector_state *vcs; - struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - - vcs = vmw_connector_state_to_vcs(du->connector.state); - } - - - return ret; -} - - -/** - * vmw_du_cursor_plane_atomic_check - check if the new state is okay - * - * @plane: cursor plane - * @state: info on the new plane state - * - * This is a chance to fail if the new cursor state does not fit - * our requirements. - * - * Returns 0 on success - */ -int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - int ret = 0; - struct drm_crtc_state *crtc_state = NULL; - struct vmw_surface *surface = NULL; - struct drm_framebuffer *fb = new_state->fb; + /* + * Ignore damage clips if the framebuffer attached to the plane's state + * has changed since the last plane update (page-flip). In this case, a + * full plane update should happen because uploads are done per-buffer. 
+ */ + if (old_fb != new_fb) + new_state->ignore_damage_clips = true; if (new_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true); - if (ret) - return ret; - - /* Turning off */ - if (!fb) - return 0; - - /* A lot of the code assumes this */ - if (new_state->crtc_w != 64 || new_state->crtc_h != 64) { - DRM_ERROR("Invalid cursor dimensions (%d, %d)\n", - new_state->crtc_w, new_state->crtc_h); - ret = -EINVAL; - } - - if (!vmw_framebuffer_to_vfb(fb)->bo) - surface = vmw_framebuffer_to_vfbs(fb)->surface; - - if (surface && !surface->snooper.image) { - DRM_ERROR("surface not suitable for cursor\n"); - ret = -EINVAL; - } - + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, true); return ret; } - int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *new_state) + struct drm_atomic_state *state) { + struct vmw_private *vmw = vmw_priv(crtc->dev); + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, + crtc); struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc); int connector_mask = drm_connector_mask(&du->connector); bool has_primary = new_state->plane_mask & drm_plane_mask(crtc->primary); - /* We always want to have an active plane with an active CRTC */ - if (has_primary != new_state->enable) - return -EINVAL; + /* + * This is fine in general, but broken userspace might expect + * some actual rendering so give a clue as why it's blank. + */ + if (new_state->enable && !has_primary) + drm_dbg_driver(&vmw->drm, + "CRTC without a primary plane will be blank.\n"); if (new_state->connector_mask != connector_mask && @@ -552,26 +169,11 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ -} - - -void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { - struct drm_pending_vblank_event *event = crtc->state->event; - - if (event) { - crtc->state->event = NULL; - - spin_lock_irq(&crtc->dev->event_lock); - drm_crtc_send_vblank_event(crtc, event); - spin_unlock_irq(&crtc->dev->event_lock); - } + vmw_vkms_crtc_atomic_begin(crtc, state); } - /** * vmw_du_crtc_duplicate_state - duplicate crtc state * @crtc: DRM crtc @@ -629,8 +231,7 @@ void vmw_du_crtc_reset(struct drm_crtc *crtc) return; } - crtc->state = &vcs->base; - crtc->state->crtc = crtc; + __drm_atomic_helper_crtc_reset(crtc, &vcs->base); } @@ -673,13 +274,10 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) vps->pinned = 0; vps->cpp = 0; - /* Each ref counted resource needs to be acquired again */ - if (vps->surf) - (void) vmw_surface_reference(vps->surf); - - if (vps->bo) - (void) vmw_bo_reference(vps->bo); + vps->cursor.mob = NULL; + /* Each ref counted resource needs to be acquired again */ + vmw_user_object_ref(&vps->uo); state = &vps->base; __drm_atomic_helper_plane_duplicate_state(plane, state); @@ -699,7 +297,6 @@ void vmw_du_plane_reset(struct drm_plane *plane) { struct vmw_plane_state *vps; - if (plane->state) vmw_du_plane_destroy_state(plane, plane->state); @@ -728,13 +325,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, { struct vmw_plane_state *vps = vmw_plane_state_to_vps(state); - /* Should have been freed by cleanup_fb */ - if (vps->surf) - 
vmw_surface_unreference(&vps->surf); - - if (vps->bo) - vmw_bo_unreference(&vps->bo); + vmw_user_object_unref(&vps->uo); drm_atomic_helper_plane_destroy_state(plane, state); } @@ -827,11 +419,22 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); + struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo); + struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo); + if (bo) { + vmw_bo_dirty_release(bo); + /* + * bo->dirty is reference counted so it being NULL + * means that the surface wasn't coherent to begin + * with and so we have to free the dirty tracker + * in the vmw_resource + */ + if (!bo->dirty && surf && surf->res.dirty) + surf->res.func->dirty_free(&surf->res); + } drm_framebuffer_cleanup(framebuffer); - vmw_surface_unreference(&vfbs->surface); - if (vfbs->base.user_obj) - ttm_base_object_unref(&vfbs->base.user_obj); + vmw_user_object_unref(&vfbs->uo); kfree(vfbs); } @@ -865,9 +468,9 @@ int vmw_kms_readback(struct vmw_private *dev_priv, user_fence_rep, vclips, num_clips, NULL); case vmw_du_screen_target: - return vmw_kms_stdu_dma(dev_priv, file_priv, vfb, - user_fence_rep, NULL, vclips, num_clips, - 1, false, true, NULL); + return vmw_kms_stdu_readback(dev_priv, file_priv, vfb, + user_fence_rep, NULL, vclips, num_clips, + 1, NULL); default: WARN_ONCE(true, "Readback called with invalid display system.\n"); @@ -876,34 +479,56 @@ int vmw_kms_readback(struct vmw_private *dev_priv, return -ENOSYS; } +static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb); + struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo); + + if (WARN_ON(!bo)) + return -EINVAL; + return drm_gem_handle_create(file_priv, &bo->tbo.base, handle); +} static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { + .create_handle = vmw_framebuffer_surface_create_handle, .destroy = vmw_framebuffer_surface_destroy, .dirty = drm_atomic_helper_dirtyfb, }; static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, - struct vmw_surface *surface, + struct vmw_user_object *uo, struct vmw_framebuffer **out, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 - *mode_cmd, - bool is_bo_proxy) + *mode_cmd) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_surface *vfbs; - enum SVGA3dSurfaceFormat format; + struct vmw_surface *surface; int ret; - struct drm_format_name_buf format_name; /* 3D is only supported on HWv8 and newer hosts */ if (dev_priv->active_display_unit == vmw_du_legacy) return -ENOSYS; + surface = vmw_user_object_surface(uo); + /* * Sanity checks. */ + if (!drm_any_plane_has_format(&dev_priv->drm, + mode_cmd->pixel_format, + mode_cmd->modifier[0])) { + drm_dbg(&dev_priv->drm, + "unsupported pixel format %p4cc / modifier 0x%llx\n", + &mode_cmd->pixel_format, mode_cmd->modifier[0]); + return -EINVAL; + } + /* Surface must be marked as a scanout. 
*/ if (unlikely(!surface->metadata.scanout)) return -EINVAL; @@ -918,44 +543,18 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, return -EINVAL; } - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_ARGB8888: - format = SVGA3D_A8R8G8B8; - break; - case DRM_FORMAT_XRGB8888: - format = SVGA3D_X8R8G8B8; - break; - case DRM_FORMAT_RGB565: - format = SVGA3D_R5G6B5; - break; - case DRM_FORMAT_XRGB1555: - format = SVGA3D_A1R5G5B5; - break; - default: - DRM_ERROR("Invalid pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - return -EINVAL; - } - - /* - * For DX, surface format validation is done when surface->scanout - * is set. - */ - if (!has_sm4_context(dev_priv) && format != surface->metadata.format) { - DRM_ERROR("Invalid surface format for requested mode.\n"); - return -EINVAL; - } - vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); if (!vfbs) { ret = -ENOMEM; goto out_err1; } - drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); - vfbs->surface = vmw_surface_reference(surface); - vfbs->base.user_handle = mode_cmd->handles[0]; - vfbs->is_bo_proxy = is_bo_proxy; + drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd); + memcpy(&vfbs->uo, uo, sizeof(vfbs->uo)); + vmw_user_object_ref(&vfbs->uo); + + if (vfbs->uo.buffer) + vfbs->base.base.obj[0] = &vfbs->uo.buffer->tbo.base; *out = &vfbs->base; @@ -967,7 +566,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, return 0; out_err2: - vmw_surface_unreference(&surface); + vmw_user_object_unref(&vfbs->uo); kfree(vfbs); out_err1: return ret; @@ -977,268 +576,60 @@ out_err1: * Buffer-object framebuffer code */ -static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) +static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) { struct vmw_framebuffer_bo *vfbd = - vmw_framebuffer_to_vfbd(framebuffer); - - drm_framebuffer_cleanup(framebuffer); - vmw_bo_unreference(&vfbd->buffer); - if (vfbd->base.user_obj) - ttm_base_object_unref(&vfbd->base.user_obj); - - kfree(vfbd); + vmw_framebuffer_to_vfbd(fb); + return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle); } -static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer, - struct drm_file *file_priv, - unsigned int flags, unsigned int color, - struct drm_clip_rect *clips, - unsigned int num_clips) +static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) { - struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); struct vmw_framebuffer_bo *vfbd = vmw_framebuffer_to_vfbd(framebuffer); - struct drm_clip_rect norect; - int ret, increment = 1; - - drm_modeset_lock_all(dev_priv->dev); - - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) { - drm_modeset_unlock_all(dev_priv->dev); - return ret; - } - - if (!num_clips) { - num_clips = 1; - clips = &norect; - norect.x1 = norect.y1 = 0; - norect.x2 = framebuffer->width; - norect.y2 = framebuffer->height; - } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { - num_clips /= 2; - increment = 2; - } - - switch (dev_priv->active_display_unit) { - case vmw_du_legacy: - ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0, - clips, num_clips, increment); - break; - default: - ret = -EINVAL; - WARN_ONCE(true, "Dirty called with invalid display system.\n"); - break; - } - - vmw_fifo_flush(dev_priv, false); - ttm_read_unlock(&dev_priv->reservation_sem); - - drm_modeset_unlock_all(dev_priv->dev); 
- return ret; -} - -static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer, - struct drm_file *file_priv, - unsigned int flags, unsigned int color, - struct drm_clip_rect *clips, - unsigned int num_clips) -{ - struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); - - if (dev_priv->active_display_unit == vmw_du_legacy) - return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags, - color, clips, num_clips); + vmw_bo_dirty_release(vfbd->buffer); + drm_framebuffer_cleanup(framebuffer); + vmw_bo_unreference(&vfbd->buffer); - return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color, - clips, num_clips); + kfree(vfbd); } static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { + .create_handle = vmw_framebuffer_bo_create_handle, .destroy = vmw_framebuffer_bo_destroy, - .dirty = vmw_framebuffer_bo_dirty_ext, + .dirty = drm_atomic_helper_dirtyfb, }; -/** - * Pin the bofer in a location suitable for access by the - * display system. - */ -static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) -{ - struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_buffer_object *buf; - struct ttm_placement *placement; - int ret; - - buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : - vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; - - if (!buf) - return 0; - - switch (dev_priv->active_display_unit) { - case vmw_du_legacy: - vmw_overlay_pause_all(dev_priv); - ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false); - vmw_overlay_resume_all(dev_priv); - break; - case vmw_du_screen_object: - case vmw_du_screen_target: - if (vfb->bo) { - if (dev_priv->capabilities & SVGA_CAP_3D) { - /* - * Use surface DMA to get content to - * sreen target surface. - */ - placement = &vmw_vram_gmr_placement; - } else { - /* Use CPU blit. */ - placement = &vmw_sys_placement; - } - } else { - /* Use surface / image update */ - placement = &vmw_mob_placement; - } - - return vmw_bo_pin_in_placement(dev_priv, buf, placement, false); - default: - return -EINVAL; - } - - return ret; -} - -static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) -{ - struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_buffer_object *buf; - - buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : - vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; - - if (WARN_ON(!buf)) - return 0; - - return vmw_bo_unpin(dev_priv, buf, false); -} - -/** - * vmw_create_bo_proxy - create a proxy surface for the buffer object - * - * @dev: DRM device - * @mode_cmd: parameters for the new surface - * @bo_mob: MOB backing the buffer object - * @srf_out: newly created surface - * - * When the content FB is a buffer object, we create a surface as a proxy to the - * same buffer. This way we can do a surface copy rather than a surface DMA. 
- * This is a more efficient approach - * - * RETURNS: - * 0 on success, error code otherwise - */ -static int vmw_create_bo_proxy(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct vmw_buffer_object *bo_mob, - struct vmw_surface **srf_out) -{ - struct vmw_surface_metadata metadata = {0}; - uint32_t format; - struct vmw_resource *res; - unsigned int bytes_pp; - struct drm_format_name_buf format_name; - int ret; - - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_XRGB8888: - format = SVGA3D_X8R8G8B8; - bytes_pp = 4; - break; - - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB1555: - format = SVGA3D_R5G6B5; - bytes_pp = 2; - break; - - case 8: - format = SVGA3D_P8; - bytes_pp = 1; - break; - - default: - DRM_ERROR("Invalid framebuffer format %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - return -EINVAL; - } - - metadata.format = format; - metadata.mip_levels[0] = 1; - metadata.num_sizes = 1; - metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp; - metadata.base_size.height = mode_cmd->height; - metadata.base_size.depth = 1; - metadata.scanout = true; - - ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out); - if (ret) { - DRM_ERROR("Failed to allocate proxy content buffer\n"); - return ret; - } - - res = &(*srf_out)->res; - - /* Reserve and switch the backing mob. */ - mutex_lock(&res->dev_priv->cmdbuf_mutex); - (void) vmw_resource_reserve(res, false, true); - vmw_bo_unreference(&res->backup); - res->backup = vmw_bo_reference(bo_mob); - res->backup_offset = 0; - vmw_resource_unreserve(res, false, false, false, NULL, 0); - mutex_unlock(&res->dev_priv->cmdbuf_mutex); - - return 0; -} - - - static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, - struct vmw_buffer_object *bo, + struct vmw_bo *bo, struct vmw_framebuffer **out, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_bo *vfbd; unsigned int requested_size; - struct drm_format_name_buf format_name; int ret; requested_size = mode_cmd->height * mode_cmd->pitches[0]; - if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) { + if (unlikely(requested_size > bo->tbo.base.size)) { DRM_ERROR("Screen buffer object size is too small " "for requested mode.\n"); return -EINVAL; } - /* Limited framebuffer color depth support for screen objects */ - if (dev_priv->active_display_unit == vmw_du_screen_object) { - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_RGB565: - break; - default: - DRM_ERROR("Invalid pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - return -EINVAL; - } + if (!drm_any_plane_has_format(&dev_priv->drm, + mode_cmd->pixel_format, + mode_cmd->modifier[0])) { + drm_dbg(&dev_priv->drm, + "unsupported pixel format %p4cc / modifier 0x%llx\n", + &mode_cmd->pixel_format, mode_cmd->modifier[0]); + return -EINVAL; } vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); @@ -1247,10 +638,10 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, goto out_err1; } - drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); + vfbd->base.base.obj[0] = &bo->tbo.base; + drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd); vfbd->base.bo = true; vfbd->buffer = vmw_bo_reference(bo); - vfbd->base.user_handle = mode_cmd->handles[0]; *out = 
&vfbd->base; ret = drm_framebuffer_init(dev, &vfbd->base.base, @@ -1271,6 +662,7 @@ out_err1: /** * vmw_kms_srf_ok - check if a surface can be created * + * @dev_priv: Pointer to device private struct. * @width: requested width * @height: requested height * @@ -1290,57 +682,27 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height) * vmw_kms_new_framebuffer - Create a new framebuffer. * * @dev_priv: Pointer to device private struct. - * @bo: Pointer to buffer object to wrap the kms framebuffer around. - * Either @bo or @surface must be NULL. - * @surface: Pointer to a surface to wrap the kms framebuffer around. - * Either @bo or @surface must be NULL. - * @only_2d: No presents will occur to this buffer object based framebuffer. - * This helps the code to do some important optimizations. + * @uo: Pointer to user object to wrap the kms framebuffer around. + * Either the buffer or surface inside the user object must be NULL. + * @info: pixel format information. * @mode_cmd: Frame-buffer metadata. */ struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, - struct vmw_buffer_object *bo, - struct vmw_surface *surface, - bool only_2d, + struct vmw_user_object *uo, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vmw_framebuffer *vfb = NULL; - bool is_bo_proxy = false; int ret; - /* - * We cannot use the SurfaceDMA command in an non-accelerated VM, - * therefore, wrap the buffer object in a surface so we can use the - * SurfaceCopy command. - */ - if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && - bo && only_2d && - mode_cmd->width > 64 && /* Don't create a proxy for cursor */ - dev_priv->active_display_unit == vmw_du_screen_target) { - ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd, - bo, &surface); - if (ret) - return ERR_PTR(ret); - - is_bo_proxy = true; - } - /* Create the new framebuffer depending one what we have */ - if (surface) { - ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, - mode_cmd, - is_bo_proxy); - - /* - * vmw_create_bo_proxy() adds a reference that is no longer - * needed - */ - if (is_bo_proxy) - vmw_surface_unreference(&surface); - } else if (bo) { - ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb, - mode_cmd); + if (vmw_user_object_surface(uo)) { + ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb, + info, mode_cmd); + } else if (uo->buffer) { + ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb, + info, mode_cmd); } else { BUG(); } @@ -1348,9 +710,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, if (ret) return ERR_PTR(ret); - vfb->pin = vmw_framebuffer_pin; - vfb->unpin = vmw_framebuffer_unpin; - return vfb; } @@ -1360,73 +719,60 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vmw_private *dev_priv = vmw_priv(dev); - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_framebuffer *vfb = NULL; - struct vmw_surface *surface = NULL; - struct vmw_buffer_object *bo = NULL; - struct ttm_base_object *user_obj; + struct vmw_user_object uo = {0}; + struct vmw_bo *bo; + struct vmw_surface *surface; int ret; - /* - * Take a reference on the user object of the resource - * backing the kms fb. 
This ensures that user-space handle - * lookups on that resource will always work as long as - * it's registered with a kms framebuffer. This is important, - * since vmw_execbuf_process identifies resources in the - * command stream using user-space handles. - */ - - user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]); - if (unlikely(user_obj == NULL)) { - DRM_ERROR("Could not locate requested kms frame buffer.\n"); - return ERR_PTR(-ENOENT); - } - - /** - * End conditioned code. - */ - /* returns either a bo or surface */ - ret = vmw_user_lookup_handle(dev_priv, tfile, - mode_cmd->handles[0], - &surface, &bo); - if (ret) + ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0], + &uo); + if (ret) { + DRM_ERROR("Invalid buffer object handle %u (0x%x).\n", + mode_cmd->handles[0], mode_cmd->handles[0]); goto err_out; + } - if (!bo && + if (vmw_user_object_surface(&uo) && !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) { - DRM_ERROR("Surface size cannot exceed %dx%d", + DRM_ERROR("Surface size cannot exceed %dx%d\n", dev_priv->texture_max_width, dev_priv->texture_max_height); + ret = -EINVAL; goto err_out; } - vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface, - !(dev_priv->capabilities & SVGA_CAP_3D), - mode_cmd); + vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd); if (IS_ERR(vfb)) { ret = PTR_ERR(vfb); goto err_out; - } + } err_out: - /* vmw_user_lookup_handle takes one ref so does new_fb */ - if (bo) - vmw_bo_unreference(&bo); - if (surface) - vmw_surface_unreference(&surface); + bo = vmw_user_object_buffer(&uo); + surface = vmw_user_object_surface(&uo); + /* vmw_user_object_lookup takes one ref so does new_fb */ + vmw_user_object_unref(&uo); if (ret) { DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); - ttm_base_object_unref(&user_obj); return ERR_PTR(ret); - } else - vfb->user_obj = user_obj; + } + + ttm_bo_reserve(&bo->tbo, false, false, NULL); + ret = vmw_bo_dirty_add(bo); + if (!ret && surface && surface->res.func->dirty_alloc) { + surface->res.coherent = true; + ret = surface->res.func->dirty_alloc(&surface->res); + } + ttm_bo_unreserve(&bo->tbo); return &vfb->base; } @@ -1482,7 +828,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is * limit on primary bounding box */ - if (pixel_mem > dev_priv->prim_bb_mem) { + if (pixel_mem > dev_priv->max_primary_mem) { VMW_DEBUG_KMS("Combined output size too large.\n"); return -EINVAL; } @@ -1492,7 +838,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) { bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4; - if (bb_mem > dev_priv->prim_bb_mem) { + if (bb_mem > dev_priv->max_primary_mem) { VMW_DEBUG_KMS("Topology is beyond supported limits.\n"); return -EINVAL; } @@ -1767,7 +1113,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, if (ret) return ret; - vmw_fifo_flush(dev_priv, false); + vmw_cmd_flush(dev_priv, false); return 0; } @@ -1779,19 +1125,45 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv) return; dev_priv->hotplug_mode_update_property = - drm_property_create_range(dev_priv->dev, + drm_property_create_range(&dev_priv->drm, DRM_MODE_PROP_IMMUTABLE, "hotplug_mode_update", 0, 1); +} - if (!dev_priv->hotplug_mode_update_property) - return; +static void +vmw_atomic_commit_tail(struct drm_atomic_state *old_state) +{ + struct vmw_private *vmw = vmw_priv(old_state->dev); + struct drm_crtc 
*crtc; + struct drm_crtc_state *old_crtc_state; + int i; + drm_atomic_helper_commit_tail(old_state); + + if (vmw->vkms_enabled) { + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + (void)old_crtc_state; + flush_work(&du->vkms.crc_generator_work); + } + } } +static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = { + .atomic_commit_tail = vmw_atomic_commit_tail, +}; + int vmw_kms_init(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int ret; + static const char *display_unit_names[] = { + "Invalid", + "Legacy", + "Screen Object", + "Screen Target", + "Invalid (max)" + }; drm_mode_config_init(dev); dev->mode_config.funcs = &vmw_kms_funcs; @@ -1799,6 +1171,8 @@ int vmw_kms_init(struct vmw_private *dev_priv) dev->mode_config.min_height = 1; dev->mode_config.max_width = dev_priv->texture_max_width; dev->mode_config.max_height = dev_priv->texture_max_height; + dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32; + dev->mode_config.helper_private = &vmw_mode_config_helpers; drm_mode_create_suggested_offset_properties(dev); vmw_kms_create_hotplug_mode_update_property(dev_priv); @@ -1809,6 +1183,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) if (ret) /* Fallback */ ret = vmw_kms_ldu_init_display(dev_priv); } + BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1)); + drm_info(&dev_priv->drm, "%s display unit initialized\n", + display_unit_names[dev_priv->active_display_unit]); return ret; } @@ -1822,52 +1199,13 @@ int vmw_kms_close(struct vmw_private *dev_priv) * but since it destroys encoders and our destructor calls * drm_encoder_cleanup which takes the lock we deadlock. 
*/ - drm_mode_config_cleanup(dev_priv->dev); + drm_mode_config_cleanup(&dev_priv->drm); if (dev_priv->active_display_unit == vmw_du_legacy) ret = vmw_kms_ldu_close_display(dev_priv); return ret; } -int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_cursor_bypass_arg *arg = data; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - int ret = 0; - - - mutex_lock(&dev->mode_config.mutex); - if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - du = vmw_crtc_to_du(crtc); - du->hotspot_x = arg->xhot; - du->hotspot_y = arg->yhot; - } - - mutex_unlock(&dev->mode_config.mutex); - return 0; - } - - crtc = drm_crtc_find(dev, file_priv, arg->crtc_id); - if (!crtc) { - ret = -ENOENT; - goto out; - } - - du = vmw_crtc_to_du(crtc); - - du->hotspot_x = arg->xhot; - du->hotspot_y = arg->yhot; - -out: - mutex_unlock(&dev->mode_config.mutex); - - return ret; -} - int vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bpp, unsigned depth) @@ -1875,11 +1213,11 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv, if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); else if (vmw_fifo_have_pitchlock(vmw_priv)) - vmw_mmio_write(pitch, vmw_priv->mmio_virt + - SVGA_FIFO_PITCHLOCK); + vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch); vmw_write(vmw_priv, SVGA_REG_WIDTH, width); vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); - vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); + if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0) + vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) { DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n", @@ -1890,37 +1228,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv, return 0; } +static bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, - uint32_t pitch, - uint32_t height) -{ - return ((u64) pitch * (u64) height) < (u64) - ((dev_priv->active_display_unit == vmw_du_screen_target) ? - dev_priv->prim_bb_mem : dev_priv->vram_size); -} - - -/** - * Function called by DRM code called with vbl_lock held. - */ -u32 vmw_get_vblank_counter(struct drm_crtc *crtc) -{ - return 0; -} - -/** - * Function called by DRM code called with vbl_lock held. - */ -int vmw_enable_vblank(struct drm_crtc *crtc) -{ - return -EINVAL; -} - -/** - * Function called by DRM code called with vbl_lock held. 
- */ -void vmw_disable_vblank(struct drm_crtc *crtc) + u64 pitch, + u64 height) { + return (pitch * height) < (u64)dev_priv->vram_size; } /** @@ -1933,7 +1246,7 @@ void vmw_disable_vblank(struct drm_crtc *crtc) static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned int num_rects, struct drm_rect *rects) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_display_unit *du; struct drm_connector *con; struct drm_connector_list_iter conn_iter; @@ -1951,7 +1264,7 @@ retry: if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); goto retry; - } + } goto out_fini; } } @@ -1966,8 +1279,8 @@ retry: du->gui_x = rects[du->unit].x1; du->gui_y = rects[du->unit].y1; } else { - du->pref_width = 800; - du->pref_height = 600; + du->pref_width = VMWGFX_MIN_INITIAL_WIDTH; + du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT; du->pref_active = false; du->gui_x = 0; du->gui_y = 0; @@ -1994,13 +1307,13 @@ retry: } con->status = vmw_du_connector_detect(con, true); } - - drm_sysfs_hotplug_event(dev); out_fini: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); mutex_unlock(&dev->mode_config.mutex); - + + drm_sysfs_hotplug_event(dev); + return 0; } @@ -2043,88 +1356,11 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) connector_status_connected : connector_status_disconnected); } -static struct drm_display_mode vmw_kms_connector_builtin[] = { - /* 640x480@60Hz */ - { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, - 752, 800, 0, 480, 489, 492, 525, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 800x600@60Hz */ - { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, - 968, 1056, 0, 600, 601, 605, 628, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1024x768@60Hz */ - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, - 1184, 1344, 0, 768, 771, 777, 806, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1152x864@75Hz */ - { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, - 1344, 1600, 0, 864, 865, 868, 900, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1280x768@60Hz */ - { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, - 1472, 1664, 0, 768, 771, 778, 798, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1280x800@60Hz */ - { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, - 1480, 1680, 0, 800, 803, 809, 831, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1280x960@60Hz */ - { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, - 1488, 1800, 0, 960, 961, 964, 1000, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1280x1024@60Hz */ - { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, - 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1360x768@60Hz */ - { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, - 1536, 1792, 0, 768, 771, 777, 795, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1440x1050@60Hz */ - { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, - 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1440x900@60Hz */ - { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, - 1672, 1904, 0, 900, 903, 909, 934, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1600x1200@60Hz */ - { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, - 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, - DRM_MODE_FLAG_PHSYNC | 
DRM_MODE_FLAG_PVSYNC) }, - /* 1680x1050@60Hz */ - { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, - 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1792x1344@60Hz */ - { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, - 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1853x1392@60Hz */ - { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, - 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1200@60Hz */ - { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, - 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1440@60Hz */ - { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, - 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 2560x1600@60Hz */ - { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, - 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* Terminate */ - { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, -}; - /** * vmw_guess_mode_timing - Provide fake timings for a * 60Hz vrefresh mode. * - * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay + * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay * members filled in. */ void vmw_guess_mode_timing(struct drm_display_mode *mode) @@ -2138,92 +1374,9 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode) mode->vtotal = mode->vsync_end + 50; mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6; - mode->vrefresh = drm_mode_vrefresh(mode); } -int vmw_du_connector_fill_modes(struct drm_connector *connector, - uint32_t max_width, uint32_t max_height) -{ - struct vmw_display_unit *du = vmw_connector_to_du(connector); - struct drm_device *dev = connector->dev; - struct vmw_private *dev_priv = vmw_priv(dev); - struct drm_display_mode *mode = NULL; - struct drm_display_mode *bmode; - struct drm_display_mode prefmode = { DRM_MODE("preferred", - DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) - }; - int i; - u32 assumed_bpp = 4; - - if (dev_priv->assume_16bpp) - assumed_bpp = 2; - - max_width = min(max_width, dev_priv->texture_max_width); - max_height = min(max_height, dev_priv->texture_max_height); - - /* - * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/ - * HEIGHT registers. 
- */ - if (dev_priv->active_display_unit == vmw_du_screen_target) { - max_width = min(max_width, dev_priv->stdu_max_width); - max_height = min(max_height, dev_priv->stdu_max_height); - } - - /* Add preferred mode */ - mode = drm_mode_duplicate(dev, &prefmode); - if (!mode) - return 0; - mode->hdisplay = du->pref_width; - mode->vdisplay = du->pref_height; - vmw_guess_mode_timing(mode); - - if (vmw_kms_validate_mode_vram(dev_priv, - mode->hdisplay * assumed_bpp, - mode->vdisplay)) { - drm_mode_probed_add(connector, mode); - } else { - drm_mode_destroy(dev, mode); - mode = NULL; - } - - if (du->pref_mode) { - list_del_init(&du->pref_mode->head); - drm_mode_destroy(dev, du->pref_mode); - } - - /* mode might be null here, this is intended */ - du->pref_mode = mode; - - for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { - bmode = &vmw_kms_connector_builtin[i]; - if (bmode->hdisplay > max_width || - bmode->vdisplay > max_height) - continue; - - if (!vmw_kms_validate_mode_vram(dev_priv, - bmode->hdisplay * assumed_bpp, - bmode->vdisplay)) - continue; - - mode = drm_mode_duplicate(dev, bmode); - if (!mode) - return 0; - mode->vrefresh = drm_mode_vrefresh(mode); - - drm_mode_probed_add(connector, mode); - } - - drm_connector_list_update(connector); - /* Move the prefered mode first, help apps pick the right mode. */ - drm_mode_sort(&connector->modes); - - return 1; -} - /** * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl * @dev: drm device for the ioctl @@ -2250,19 +1403,20 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_mode_config *mode_config = &dev->mode_config; struct drm_vmw_update_layout_arg *arg = (struct drm_vmw_update_layout_arg *)data; - void __user *user_rects; + const void __user *user_rects; struct drm_vmw_rect *rects; struct drm_rect *drm_rects; unsigned rects_size; int ret, i; if (!arg->num_outputs) { - struct drm_rect def_rect = {0, 0, 800, 600}; - VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n", - def_rect.x1, def_rect.y1, - def_rect.x2, def_rect.y2); + struct drm_rect def_rect = {0, 0, + VMWGFX_MIN_INITIAL_WIDTH, + VMWGFX_MIN_INITIAL_HEIGHT}; vmw_du_update_layout(dev_priv, 1, &def_rect); return 0; + } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) { + return -E2BIG; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); @@ -2367,7 +1521,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, if (dirty->crtc) { units[num_units++] = vmw_crtc_to_du(dirty->crtc); } else { - list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, + list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list, head) { struct drm_plane *plane = crtc->primary; @@ -2387,7 +1541,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, dirty->unit = unit; if (dirty->fifo_reserve_size > 0) { - dirty->cmd = VMW_FIFO_RESERVE(dev_priv, + dirty->cmd = VMW_CMD_RESERVE(dev_priv, dirty->fifo_reserve_size); if (!dirty->cmd) return -ENOMEM; @@ -2482,7 +1636,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, if (file_priv) vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); if (out_fence) *out_fence = fence; else @@ -2490,135 +1644,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, } /** - * vmw_kms_update_proxy - Helper function to update a proxy surface from - * its backing MOB. - * - * @res: Pointer to the surface resource - * @clips: Clip rects in framebuffer (surface) space. 
- * @num_clips: Number of clips in @clips. - * @increment: Integer with which to increment the clip counter when looping. - * Used to skip a predetermined number of clip rects. - * - * This function makes sure the proxy surface is updated from its backing MOB - * using the region given by @clips. The surface resource @res and its backing - * MOB needs to be reserved and validated on call. - */ -int vmw_kms_update_proxy(struct vmw_resource *res, - const struct drm_clip_rect *clips, - unsigned num_clips, - int increment) -{ - struct vmw_private *dev_priv = res->dev_priv; - struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size; - struct { - SVGA3dCmdHeader header; - SVGA3dCmdUpdateGBImage body; - } *cmd; - SVGA3dBox *box; - size_t copy_size = 0; - int i; - - if (!clips) - return 0; - - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips); - if (!cmd) - return -ENOMEM; - - for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) { - box = &cmd->body.box; - - cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; - cmd->header.size = sizeof(cmd->body); - cmd->body.image.sid = res->id; - cmd->body.image.face = 0; - cmd->body.image.mipmap = 0; - - if (clips->x1 > size->width || clips->x2 > size->width || - clips->y1 > size->height || clips->y2 > size->height) { - DRM_ERROR("Invalid clips outsize of framebuffer.\n"); - return -EINVAL; - } - - box->x = clips->x1; - box->y = clips->y1; - box->z = 0; - box->w = clips->x2 - clips->x1; - box->h = clips->y2 - clips->y1; - box->d = 1; - - copy_size += sizeof(*cmd); - } - - vmw_fifo_commit(dev_priv, copy_size); - - return 0; -} - -int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, - unsigned unit, - u32 max_width, - u32 max_height, - struct drm_connector **p_con, - struct drm_crtc **p_crtc, - struct drm_display_mode **p_mode) -{ - struct drm_connector *con; - struct vmw_display_unit *du; - struct drm_display_mode *mode; - int i = 0; - int ret = 0; - - mutex_lock(&dev_priv->dev->mode_config.mutex); - list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, - head) { - if (i == unit) - break; - - ++i; - } - - if (i != unit) { - DRM_ERROR("Could not find initial display unit.\n"); - ret = -EINVAL; - goto out_unlock; - } - - if (list_empty(&con->modes)) - (void) vmw_du_connector_fill_modes(con, max_width, max_height); - - if (list_empty(&con->modes)) { - DRM_ERROR("Could not find initial display mode.\n"); - ret = -EINVAL; - goto out_unlock; - } - - du = vmw_connector_to_du(con); - *p_con = con; - *p_crtc = &du->crtc; - - list_for_each_entry(mode, &con->modes, head) { - if (mode->type & DRM_MODE_TYPE_PREFERRED) - break; - } - - if (mode->type & DRM_MODE_TYPE_PREFERRED) - *p_mode = mode; - else { - WARN_ONCE(true, "Could not find initial preferred mode.\n"); - *p_mode = list_first_entry(&con->modes, - struct drm_display_mode, - head); - } - - out_unlock: - mutex_unlock(&dev_priv->dev->mode_config.mutex); - - return ret; -} - -/** - * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement + * vmw_kms_create_implicit_placement_property - Set up the implicit placement * property. * * @dev_priv: Pointer to a device private struct. 
@@ -2632,7 +1658,7 @@ vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv) return; dev_priv->implicit_placement_property = - drm_property_create_range(dev_priv->dev, + drm_property_create_range(&dev_priv->drm, DRM_MODE_PROP_IMMUTABLE, "implicit_placement", 0, 1); } @@ -2734,13 +1760,26 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) struct vmw_framebuffer_bo *vfbbo = container_of(update->vfb, typeof(*vfbbo), base); - ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false, - update->cpu_blit); + /* + * For screen targets we want a mappable bo, for everything else we want + * accelerated i.e. host backed (vram or gmr) bo. If the display unit + * is not screen target then mob's shouldn't be available. + */ + if (update->dev_priv->active_display_unit == vmw_du_screen_target) { + vmw_bo_placement_set(vfbbo->buffer, + VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR, + VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR); + } else { + WARN_ON(update->dev_priv->has_mob); + vmw_bo_placement_set_default_accelerated(vfbbo->buffer); + } + ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer); } else { struct vmw_framebuffer_surface *vfbs = container_of(update->vfb, typeof(*vfbs), base); + struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo); - ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res, + ret = vmw_validation_add_resource(&val_ctx, &surf->res, 0, VMW_RES_DIRTY_NONE, NULL, NULL); } @@ -2753,7 +1792,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) goto out_unref; reserved_size = update->calc_fifo_size(update, num_hits); - cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size); + cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size); if (!cmd_start) { ret = -ENOMEM; goto out_revert; @@ -2802,7 +1841,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) if (reserved_size < submit_size) submit_size = 0; - vmw_fifo_commit(update->dev_priv, submit_size); + vmw_cmd_commit(update->dev_priv, submit_size); vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx, update->out_fence, NULL); @@ -2815,3 +1854,174 @@ out_unref: vmw_validation_unref_lists(&val_ctx); return ret; } + +/** + * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback + * + * @connector: the drm connector, part of a DU container + * @mode: drm mode to check + * + * Returns MODE_OK on success, or a drm_mode_status error code. + */ +enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + enum drm_mode_status ret; + struct drm_device *dev = connector->dev; + struct vmw_private *dev_priv = vmw_priv(dev); + u32 assumed_cpp = 4; + + if (dev_priv->assume_16bpp) + assumed_cpp = 2; + + ret = drm_mode_validate_size(mode, dev_priv->texture_max_width, + dev_priv->texture_max_height); + if (ret != MODE_OK) + return ret; + + if (!vmw_kms_validate_mode_vram(dev_priv, + mode->hdisplay * assumed_cpp, + mode->vdisplay)) + return MODE_MEM; + + return MODE_OK; +} + +/** + * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback + * + * @connector: the drm connector, part of a DU container + * + * Returns the number of added modes. 
+ */ +int vmw_connector_get_modes(struct drm_connector *connector) +{ + struct vmw_display_unit *du = vmw_connector_to_du(connector); + struct drm_device *dev = connector->dev; + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_display_mode *mode = NULL; + struct drm_display_mode prefmode = { DRM_MODE("preferred", + DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) + }; + u32 max_width; + u32 max_height; + u32 num_modes; + + /* Add preferred mode */ + mode = drm_mode_duplicate(dev, &prefmode); + if (!mode) + return 0; + + mode->hdisplay = du->pref_width; + mode->vdisplay = du->pref_height; + vmw_guess_mode_timing(mode); + drm_mode_set_name(mode); + + drm_mode_probed_add(connector, mode); + drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); + + /* Probe connector for all modes not exceeding our geom limits */ + max_width = dev_priv->texture_max_width; + max_height = dev_priv->texture_max_height; + + if (dev_priv->active_display_unit == vmw_du_screen_target) { + max_width = min(dev_priv->stdu_max_width, max_width); + max_height = min(dev_priv->stdu_max_height, max_height); + } + + num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height); + + return num_modes; +} + +struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo) +{ + if (uo->buffer) + vmw_user_bo_ref(uo->buffer); + else if (uo->surface) + vmw_surface_reference(uo->surface); + return uo; +} + +void vmw_user_object_unref(struct vmw_user_object *uo) +{ + if (uo->buffer) + vmw_user_bo_unref(&uo->buffer); + else if (uo->surface) + vmw_surface_unreference(&uo->surface); +} + +struct vmw_bo * +vmw_user_object_buffer(struct vmw_user_object *uo) +{ + if (uo->buffer) + return uo->buffer; + else if (uo->surface) + return uo->surface->res.guest_memory_bo; + return NULL; +} + +struct vmw_surface * +vmw_user_object_surface(struct vmw_user_object *uo) +{ + if (uo->buffer) + return uo->buffer->dumb_surface; + return uo->surface; +} + +void *vmw_user_object_map(struct vmw_user_object *uo) +{ + struct vmw_bo *bo = vmw_user_object_buffer(uo); + + WARN_ON(!bo); + return vmw_bo_map_and_cache(bo); +} + +void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size) +{ + struct vmw_bo *bo = vmw_user_object_buffer(uo); + + WARN_ON(!bo); + return vmw_bo_map_and_cache_size(bo, size); +} + +void vmw_user_object_unmap(struct vmw_user_object *uo) +{ + struct vmw_bo *bo = vmw_user_object_buffer(uo); + int ret; + + WARN_ON(!bo); + + /* Fence the mob creation so we are guarateed to have the mob */ + ret = ttm_bo_reserve(&bo->tbo, false, false, NULL); + if (ret != 0) + return; + + vmw_bo_unmap(bo); + vmw_bo_pin_reserved(bo, false); + + ttm_bo_unreserve(&bo->tbo); +} + +bool vmw_user_object_is_mapped(struct vmw_user_object *uo) +{ + struct vmw_bo *bo; + + if (!uo || vmw_user_object_is_null(uo)) + return false; + + bo = vmw_user_object_buffer(uo); + + if (WARN_ON(!bo)) + return false; + + WARN_ON(bo->map.bo && !bo->map.virtual); + return bo->map.virtual; +} + +bool vmw_user_object_is_null(struct vmw_user_object *uo) +{ + return !uo->buffer && !uo->surface; +} |
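
Note: the largest conceptual change in this diff is that framebuffer and plane state stop carrying separate vmw_surface/vmw_bo pointers and instead use the vmw_user_object wrapper whose accessors (vmw_user_object_buffer(), vmw_user_object_surface(), vmw_user_object_is_null()) are added at the end of the file. The short standalone C sketch below only mirrors that dispatch pattern outside the kernel; the demo_* names and simplified struct layouts are illustrative assumptions, not part of the driver.

/* Simplified userspace analogue of the vmw_user_object accessors added by
 * this patch; demo_* types and names are illustrative only. */
#include <stdio.h>
#include <stddef.h>

struct demo_bo      { int handle; };
struct demo_surface { struct demo_bo *backing; };

/* Wrapper that holds either a buffer or a surface (never both). */
struct demo_user_object {
	struct demo_bo      *buffer;
	struct demo_surface *surface;
};

/* Mirrors vmw_user_object_buffer(): prefer the explicit buffer, otherwise
 * fall back to the surface's backing buffer. */
static struct demo_bo *demo_user_object_buffer(struct demo_user_object *uo)
{
	if (uo->buffer)
		return uo->buffer;
	if (uo->surface)
		return uo->surface->backing;
	return NULL;
}

/* Mirrors vmw_user_object_is_null(): true when neither member is set. */
static int demo_user_object_is_null(const struct demo_user_object *uo)
{
	return !uo->buffer && !uo->surface;
}

int main(void)
{
	struct demo_bo bo = { .handle = 42 };
	struct demo_surface surf = { .backing = &bo };
	struct demo_user_object uo = { .buffer = NULL, .surface = &surf };

	if (!demo_user_object_is_null(&uo))
		printf("backing bo handle: %d\n",
		       demo_user_object_buffer(&uo)->handle);
	return 0;
}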
