Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_drv.h')
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h (-rw-r--r--) | 1025
 1 file changed, 567 insertions(+), 458 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index cd607ba9c2fe..f2abaf1bda6a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,60 +1,60 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  **************************************************************************/

 #ifndef _VMWGFX_DRV_H_
 #define _VMWGFX_DRV_H_

-#include "vmwgfx_validation.h"
-#include "vmwgfx_reg.h"
-#include <drm/drmP.h>
-#include <drm/vmwgfx_drm.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_auth.h>
 #include <linux/suspend.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/sync_file.h>
+#include <linux/hashtable.h>
+
+#include <drm/drm_auth.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+#include <drm/drm_rect.h>
+
 #include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
-#include "vmwgfx_fence.h"
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo.h>
+
 #include "ttm_object.h"
-#include "ttm_lock.h"
-#include <linux/sync_file.h>
+
+#include "vmwgfx_fence.h"
+#include "vmwgfx_reg.h"
+#include "vmwgfx_validation.h"
+
+/*
+ * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
+ * uapi headers should not depend on header files outside uapi/.
+ */
+#include <drm/vmwgfx_drm.h>
+
 #define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180704"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_MINOR 21
 #define VMWGFX_DRIVER_PATCHLEVEL 0
-#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_VALIDATIONS 2048
-#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_NUM_DISPLAY_UNITS 8
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
+
+#define VMWGFX_MIN_INITIAL_WIDTH 1280
+#define VMWGFX_MIN_INITIAL_HEIGHT 800
+
+#define VMWGFX_PCI_ID_SVGA2 0x0405
+#define VMWGFX_PCI_ID_SVGA3 0x0406
+
+/*
+ * This has to match get_count_order(SVGA_IRQFLAG_MAX)
+ */
+#define VMWGFX_MAX_NUM_IRQS 6

 /*
  * Perhaps we should have sysfs entries for these.
@@ -62,7 +62,7 @@
 #define VMWGFX_NUM_GB_CONTEXT 256
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
-#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
 #define VMWGFX_NUM_DXCONTEXT 256
 #define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
@@ -70,92 +70,82 @@
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

-#define VMW_PL_GMR (TTM_PL_PRIV + 0)
-#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
-#define VMW_PL_MOB (TTM_PL_PRIV + 1)
-#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)

 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
+#define VMW_RES_HT_ORDER 12
+
+#define MKSSTAT_CAPACITY_LOG2 5U
+#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)

 struct vmw_fpriv {
-	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
 };

-struct vmw_buffer_object {
-	struct ttm_buffer_object base;
-	struct list_head res_list;
-	s32 pin_count;
-	/* Not ref-counted. Protected by binding_mutex */
-	struct vmw_resource *dx_query_ctx;
-	/* Protected by reservation */
-	struct ttm_bo_kmap_obj map;
-};
-
-/**
- * struct vmw_validate_buffer - Carries validation info about buffers.
- *
- * @base: Validation info for TTM.
- * @hash: Hash entry for quick lookup of the TTM buffer object.
- *
- * This structure contains also driver private validation info
- * on top of the info needed by TTM.
- */
-struct vmw_validate_buffer {
-	struct ttm_validate_buffer base;
-	struct drm_hash_item hash;
-	bool validate_as_mob;
+struct vmwgfx_hash_item {
+	struct hlist_node head;
+	unsigned long key;
 };

 struct vmw_res_func;
-
 /**
  * struct vmw-resource - base class for hardware resources
  *
  * @kref: For refcounting.
  * @dev_priv: Pointer to the device private for this resource. Immutable.
  * @id: Device id. Protected by @dev_priv::resource_lock.
- * @backup_size: Backup buffer size. Immutable.
- * @res_dirty: Resource contains data not yet in the backup buffer. Protected
- * by resource reserved.
- * @backup_dirty: Backup buffer contains data not yet in the HW resource.
- * Protecte by resource reserved.
- * @backup: The backup buffer if any. Protected by resource reserved.
- * @backup_offset: Offset into the backup buffer if any. Protected by resource
- * reserved. Note that only a few resource types can have a @backup_offset
- * different from zero.
+ * @guest_memory_size: Guest memory buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the guest memory buffer.
+ * Protected by resource reserved.
+ * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
+ * resource. Protected by resource reserved.
+ * @coherent: Emulate coherency by tracking vm accesses.
+ * @guest_memory_bo: The guest memory buffer if any. Protected by resource
+ * reserved.
+ * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
+ * by resource reserved. Note that only a few resource types can have a
+ * @guest_memory_offset different from zero.
  * @pin_count: The pin count for this resource. A pinned resource has a
  * pin-count greater than zero. It is not on the resource LRU lists and its
- * backup buffer is pinned. Hence it can't be evicted.
+ * guest memory buffer is pinned. Hence it can't be evicted.
  * @func: Method vtable for this resource. Immutable.
+ * @mob_node; Node for the MOB guest memory rbtree. Protected by
+ * @guest_memory_bo reserved.
  * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
- * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
  * @binding_head: List head for the context binding list. Protected by
  * the @dev_priv::binding_mutex
  * @res_free: The resource destructor.
  * @hw_destroy: Callback to destroy the resource on the device, as part of
  * resource destruction.
  */
+struct vmw_bo;
+struct vmw_bo;
+struct vmw_resource_dirty;
 struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
-	unsigned long backup_size;
-	bool res_dirty;
-	bool backup_dirty;
-	struct vmw_buffer_object *backup;
-	unsigned long backup_offset;
+	u32 used_prio;
+	unsigned long guest_memory_size;
+	u32 res_dirty : 1;
+	u32 guest_memory_dirty : 1;
+	u32 coherent : 1;
+	struct vmw_bo *guest_memory_bo;
+	unsigned long guest_memory_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
+	struct rb_node mob_node;
	struct list_head lru_head;
-	struct list_head mob_head;
	struct list_head binding_head;
+	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
 };
@@ -172,6 +162,7 @@ enum vmw_res_type {
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
+	vmw_res_streamoutput,
	vmw_res_max
 };

@@ -180,44 +171,72 @@ enum vmw_res_type {
  */
 enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
-	vmw_cmdbuf_res_view
+	vmw_cmdbuf_res_view,
+	vmw_cmdbuf_res_streamoutput
 };

 struct vmw_cmdbuf_res_manager;

 struct vmw_cursor_snooper {
-	size_t age;
+	size_t id;
	uint32_t *image;
 };

 struct vmw_framebuffer;
 struct vmw_surface_offset;

-struct vmw_surface {
-	struct vmw_resource res;
-	SVGA3dSurfaceAllFlags flags;
-	uint32_t format;
-	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+/**
+ * struct vmw_surface_metadata - Metadata describing a surface.
+ *
+ * @flags: Device flags.
+ * @format: Surface SVGA3D_x format.
+ * @mip_levels: Mip level for each face. For GB first index is used only.
+ * @multisample_count: Sample count.
+ * @multisample_pattern: Sample patterns.
+ * @quality_level: Quality level.
+ * @autogen_filter: Filter for automatically generated mipmaps.
+ * @array_size: Number of array elements for a 1D/2D texture. For cubemap
+                texture number of faces * array_size. This should be 0 for pre
+                SM4 device.
+ * @buffer_byte_stride: Buffer byte stride.
+ * @num_sizes: Size of @sizes. For GB surface this should always be 1.
+ * @base_size: Surface dimension.
+ * @sizes: Array representing mip sizes. Legacy only.
+ * @scanout: Whether this surface will be used for scanout.
+ *
+ * This tracks metadata for both legacy and guest backed surface.
+ */
+struct vmw_surface_metadata {
+	u64 flags;
+	u32 format;
+	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+	u32 multisample_count;
+	u32 multisample_pattern;
+	u32 quality_level;
+	u32 autogen_filter;
+	u32 array_size;
+	u32 num_sizes;
+	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
-	uint32_t num_sizes;
	bool scanout;
-	uint32_t array_size;
-	/* TODO so far just a extra pointer */
+};
+
+/**
+ * struct vmw_surface: Resource structure for a surface.
+ *
+ * @res: The base resource for this surface.
+ * @metadata: Metadata for this surface resource.
+ * @snooper: Cursor data. Legacy surface only.
+ * @offsets: Legacy surface only.
+ * @view_list: List of views bound to this surface.
+ */
+struct vmw_surface {
+	struct vmw_resource res;
+	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
-	SVGA3dTextureFilter autogen_filter;
-	uint32_t multisample_count;
	struct list_head view_list;
-	SVGA3dMSPattern multisample_pattern;
-	SVGA3dMSQualityLevel quality_level;
-};
-
-struct vmw_marker_queue {
-	struct list_head head;
-	u64 lag;
-	u64 lag_time;
-	spinlock_t lock;
 };

 struct vmw_fifo_state {
@@ -229,8 +248,6 @@ struct vmw_fifo_state {
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
-	struct vmw_marker_queue marker_queue;
-	bool dx;
 };

 /**
@@ -257,7 +274,6 @@ struct vmw_res_cache_entry {
  * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
  */
 enum vmw_dma_map_mode {
-	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
@@ -276,7 +292,6 @@ struct vmw_sg_table {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
-	unsigned long num_regions;
	unsigned long num_pages;
 };

@@ -297,12 +312,24 @@ struct vmw_sg_table {
 struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
-	struct sg_page_iter iter;
+	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
-	struct page *(*page)(struct vmw_piter *);
+};
+
+
+struct vmw_ttm_tt {
+	struct ttm_tt dma_ttm;
+	struct vmw_private *dev_priv;
+	int gmr_id;
+	struct vmw_mob *mob;
+	int mem_type;
+	struct sg_table sgt;
+	struct vmw_sg_table vsgt;
+	bool mapped;
+	bool bound;
 };

 /*
@@ -312,7 +339,8 @@ enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
-	vmw_du_screen_target
+	vmw_du_screen_target,
+	vmw_du_max
 };

 struct vmw_validation_context;
@@ -350,13 +378,13 @@ struct vmw_ctx_validation_info;
  * @ctx: The validation context
  */
 struct vmw_sw_context{
-	struct drm_open_hash res_ht;
-	bool res_ht_initialized;
+	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
	bool kernel;
	struct vmw_fpriv *fp;
+	struct drm_file *filp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
-	struct vmw_buffer_object *cur_query_bo;
+	struct vmw_bo *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
@@ -368,7 +396,7 @@ struct vmw_sw_context{
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
-	struct vmw_buffer_object *dx_query_mob;
+	struct vmw_bo *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
@@ -377,19 +405,6 @@ struct vmw_sw_context{
 struct vmw_legacy_display;
 struct vmw_overlay;

-struct vmw_master {
-	struct ttm_lock lock;
-};
-
-struct vmw_vga_topology_state {
-	uint32_t width;
-	uint32_t height;
-	uint32_t primary;
-	uint32_t pos_x;
-	uint32_t pos_y;
-};
-
-
 /*
  * struct vmw_otable - Guest Memory OBject table metadata
  *
@@ -406,7 +421,7 @@ struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
-	struct ttm_buffer_object *otable_bo;
+	struct vmw_bo *otable_bo;
 };

 enum {
@@ -415,19 +430,36 @@
	VMW_IRQTHREAD_MAX
 };

+/**
+ * enum vmw_sm_type - Graphics context capability supported by device.
+ * @VMW_SM_LEGACY: Pre DX context.
+ * @VMW_SM_4: Context support upto SM4.
+ * @VMW_SM_4_1: Context support upto SM4_1.
+ * @VMW_SM_5: Context support up to SM5.
+ * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
+ * @VMW_SM_MAX: Should be the last.
+ */
+enum vmw_sm_type {
+	VMW_SM_LEGACY = 0,
+	VMW_SM_4,
+	VMW_SM_4_1,
+	VMW_SM_5,
+	VMW_SM_5_1X,
+	VMW_SM_MAX
+};
+
 struct vmw_private {
-	struct ttm_bo_device bdev;
-
-	struct vmw_fifo_state fifo;
-
-	struct drm_device *dev;
-	unsigned long vmw_chipset;
-	unsigned int io_start;
-	uint32_t vram_start;
-	uint32_t vram_size;
-	uint32_t prim_bb_mem;
-	uint32_t mmio_start;
-	uint32_t mmio_size;
+	struct drm_device drm;
+	struct ttm_device bdev;
+
+	u32 pci_id;
+	resource_size_t io_start;
+	resource_size_t vram_start;
+	resource_size_t vram_size;
+	resource_size_t max_primary_mem;
+	u32 __iomem *rmmio;
+	u32 *fifo_mem;
+	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
@@ -436,7 +468,6 @@ struct vmw_private {
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
-	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
@@ -447,35 +478,21 @@ struct vmw_private {
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
-	spinlock_t cap_lock;
-	bool has_dx;
	bool assume_16bpp;
-	bool has_sm4_1;
+	u32 irqs[VMWGFX_MAX_NUM_IRQS];
+	u32 num_irq_vectors;

-	/*
-	 * VGA registers.
-	 */
-
-	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
-	uint32_t vga_width;
-	uint32_t vga_height;
-	uint32_t vga_bpp;
-	uint32_t vga_bpl;
-	uint32_t vga_pitchlock;
-
-	uint32_t num_displays;
+	enum vmw_sm_type sm_type;

	/*
	 * Framebuffer info.
	 */

-	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
-	struct mutex global_kms_state_mutex;
	spinlock_t cursor_lock;

	struct drm_atomic_state *suspend_state;
@@ -485,11 +502,6 @@ struct vmw_private {
	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

-	/*
-	 * Block lastclose from racing with firstopen.
-	 */
-
-	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
@@ -511,7 +523,7 @@ struct vmw_private {
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
-	uint32_t last_read_seqno;
+	atomic_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */
@@ -535,38 +547,21 @@ struct vmw_private {
	struct mutex binding_mutex;

	/**
-	 * Operating mode.
+	 * PM management.
	 */
-
-	bool stealth;
-	bool enable_fb;
-	spinlock_t svga_lock;
-
-	/**
-	 * Master management.
-	 */
-
-	struct vmw_master *active_master;
-	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

-	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
-	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
-	 */
-	struct ttm_lock reservation_sem;
-
-	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

-	struct vmw_buffer_object *dummy_query_bo;
-	struct vmw_buffer_object *pinned_bo;
+	struct vmw_bo *dummy_query_bo;
+	struct vmw_bo *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;
@@ -591,11 +586,26 @@ struct vmw_private {
	 */
	struct vmw_otable_batch otable_batch;

+	struct vmw_fifo_state *fifo;
	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

-	/* Validation memory reservation */
-	struct vmw_validation_mem vvm;
+	uint32 *devcaps;
+
+	bool vkms_enabled;
+	struct workqueue_struct *crc_workq;
+
+	/*
+	 * mksGuestStat instance-descriptor and pid arrays
+	 */
+	struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
+	atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];
+
+#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
+	struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
+	u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
+	atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
+#endif
 };

 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -605,7 +615,12 @@ static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)

 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
-	return (struct vmw_private *)dev->dev_private;
+	return container_of(dev, struct vmw_private, drm);
+}
+
+static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
+{
+	return container_of(bdev, struct vmw_private, bdev);
 }

 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
@@ -613,9 +628,12 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
	return (struct vmw_fpriv *)file_priv->driver_priv;
 }

-static inline struct vmw_master *vmw_master(struct drm_master *master)
+/*
+ * SVGA v3 has mmio register access and lacks fifo cmds
+ */
+static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
 {
-	return (struct vmw_master *) master->driver_priv;
+	return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
 }

 /*
@@ -628,10 +646,14 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
 {
-	spin_lock(&dev_priv->hw_lock);
-	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
-	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock(&dev_priv->hw_lock);
+	if (vmw_is_svga_v3(dev_priv)) {
+		iowrite32(value, dev_priv->rmmio + offset);
+	} else {
+		spin_lock(&dev_priv->hw_lock);
+		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
+		spin_unlock(&dev_priv->hw_lock);
+	}
 }

@@ -639,16 +661,72 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
 {
	u32 val;

-	spin_lock(&dev_priv->hw_lock);
-	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
-	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock(&dev_priv->hw_lock);
+	if (vmw_is_svga_v3(dev_priv)) {
+		val = ioread32(dev_priv->rmmio + offset);
+	} else {
+		spin_lock(&dev_priv->hw_lock);
+		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+		val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
+		spin_unlock(&dev_priv->hw_lock);
+	}

	return val;
 }

+/**
+ * has_sm4_context - Does the device support SM4 context.
+ * @dev_priv: Device private.
+ *
+ * Return: Bool value if device support SM4 context or not.
+ */
+static inline bool has_sm4_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_4);
+}
+
+/**
+ * has_sm4_1_context - Does the device support SM4_1 context.
+ * @dev_priv: Device private.
+ *
+ * Return: Bool value if device support SM4_1 context or not.
+ */
+static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_4_1);
+}
+
+/**
+ * has_sm5_context - Does the device support SM5 context.
+ * @dev_priv: Device private.
+ *
+ * Return: Bool value if device support SM5 context or not.
+ */
+static inline bool has_sm5_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_5);
+}
+
+/**
+ * has_gl43_context - Does the device support GL43 context.
+ * @dev_priv: Device private.
+ *
+ * Return: Bool value if device support SM5 context or not.
+ */
+static inline bool has_gl43_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_5_1X);
+}
+
+
+static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
+{
+	return (has_gl43_context(dev_priv) ?
+		SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
+}
+
 extern void vmw_svga_enable(struct vmw_private *dev_priv);
 extern void vmw_svga_disable(struct vmw_private *dev_priv);
+bool vmwgfx_supported(struct vmw_private *vmw);


 /**
@@ -662,6 +740,26 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

 /**
+ * User handles
+ */
+struct vmw_user_object {
+	struct vmw_surface *surface;
+	struct vmw_bo *buffer;
+};
+
+int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
+			   u32 handle, struct vmw_user_object *uo);
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
+void vmw_user_object_unref(struct vmw_user_object *uo);
+bool vmw_user_object_is_null(struct vmw_user_object *uo);
+struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
+struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
+void *vmw_user_object_map(struct vmw_user_object *uo);
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
+void vmw_user_object_unmap(struct vmw_user_object *uo);
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
+
+/**
  * Resource utilities - vmwgfx_resource.c
  */
 struct vmw_user_resource_conv;
@@ -670,27 +768,18 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
+				 bool dirtying);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-				  struct ttm_object_file *tfile,
-				  uint32_t handle,
-				  struct vmw_surface **out_surf,
-				  struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
-extern struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-				      struct ttm_object_file *tfile,
-				      uint32_t handle,
-				      const struct vmw_user_resource_conv *
-				      converter);
+
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -700,92 +789,49 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
-				   bool switch_backup,
-				   struct vmw_buffer_object *new_backup,
-				   unsigned long new_backup_offset);
+				   bool dirty_set,
+				   bool dirty,
+				   bool switch_guest_memory,
+				   struct vmw_bo *new_guest_memory,
+				   unsigned long new_guest_memory_offset);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
-				  struct ttm_mem_reg *mem);
-extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
-extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+				  struct ttm_resource *old_mem,
+				  struct ttm_resource *new_mem);
+int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
+void vmw_resource_evict_all(struct vmw_private *dev_priv);
+void vmw_resource_unbind_list(struct vmw_bo *vbo);
+void vmw_resource_mob_attach(struct vmw_resource *res);
+void vmw_resource_mob_detach(struct vmw_resource *res);
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
+int vmw_resource_clean(struct vmw_resource *res);
+int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);

 /**
- * vmw_user_resource_noref_release - release a user resource pointer looked up
- * without reference
+ * vmw_resource_mob_attached - Whether a resource currently has a mob attached
+ * @res: The resource
+ *
+ * Return: true if the resource has a mob attached, false otherwise.
  */
-static inline void vmw_user_resource_noref_release(void)
+static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 {
-	ttm_base_object_noref_release();
+	return !RB_EMPTY_NODE(&res->mob_node);
 }

 /**
- * Buffer object helper functions - vmwgfx_bo.c
- */
-extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
-				   struct vmw_buffer_object *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
-			      struct vmw_buffer_object *buf,
-			      bool interruptible);
-extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_buffer_object *buf,
-				     bool interruptible);
-extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
-				       struct vmw_buffer_object *bo,
-				       bool interruptible);
-extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
-			struct vmw_buffer_object *bo,
-			bool interruptible);
-extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
-				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
-extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_bo_init(struct vmw_private *dev_priv,
-		       struct vmw_buffer_object *vmw_bo,
-		       size_t size, struct ttm_placement *placement,
-		       bool interuptable,
-		       void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
-				     struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t size,
-			     bool shareable,
-			     uint32_t *handle,
-			     struct vmw_buffer_object **p_dma_buf,
-			     struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
-				 struct vmw_buffer_object *dma_buf,
-				 uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
-			      uint32_t id, struct vmw_buffer_object **out,
-			      struct ttm_base_object **base);
-extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
-				struct vmw_fence_obj *fence);
-extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
-extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
-extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
-			       struct ttm_mem_reg *mem);
-extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
-
-/**
- * vmw_user_bo_noref_release - release a buffer object pointer looked up
- * without reference
+ * GEM related functionality - vmwgfx_gem.c
  */
-static inline void vmw_user_bo_noref_release(void)
-{
-	ttm_base_object_noref_release();
-}
-
+struct vmw_bo_params;
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
+extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
					     struct drm_file *filp,
					     uint32_t size,
					     uint32_t *handle,
					     struct vmw_bo **p_vbo);
+extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *filp);
+extern void vmw_debugfs_gem_init(struct vmw_private *vdev);

 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -799,66 +845,79 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
-extern __poll_t vmw_fops_poll(struct file *filp,
-			      struct poll_table_struct *wait);
-extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
-			     size_t count, loff_t *offset);

 /**
  * Fifo utilities - vmwgfx_fifo.c
  */
-extern int vmw_fifo_init(struct vmw_private *dev_priv,
-			 struct vmw_fifo_state *fifo);
-extern void vmw_fifo_release(struct vmw_private *dev_priv,
-			     struct vmw_fifo_state *fifo);
-extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
+extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
+extern bool vmw_cmd_supported(struct vmw_private *vmw);
 extern void *
-vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
-extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
-extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
-extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
-			       uint32_t *seqno);
-extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
+vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
+extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
+extern bool vmw_supports_3d(struct vmw_private *dev_priv);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
-extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-				     uint32_t cid);
-extern int vmw_fifo_flush(struct vmw_private *dev_priv,
-			  bool interruptible);
+extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
+extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);
+
+#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)			\
+({									\
+	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({		\
+		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",	\
+			  __func__, (unsigned int) __bytes);		\
+		NULL;							\
+	});								\
+})
+
+#define VMW_CMD_RESERVE(__priv, __bytes)				\
+	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
+

 /**
- * TTM glue - vmwgfx_ttm_glue.c
+ * vmw_fifo_caps - Returns the capabilities of the FIFO command
+ * queue or 0 if fifo memory isn't present.
+ * @dev_priv: The device private context
  */
+static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
+{
+	if (!dev_priv->fifo_mem || !dev_priv->fifo)
+		return 0;
+	return dev_priv->fifo->capabilities;
+}
+

-extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+/**
+ * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
+ * is enabled in the FIFO.
+ * @dev_priv: The device private context
+ */
+static inline bool
+vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
+{
+	return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
+}

-extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
-					size_t gran);

 /**
  * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */

 extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_ne_placement;
-extern struct ttm_placement vmw_vram_sys_placement;
-extern struct ttm_placement vmw_vram_gmr_placement;
-extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_sys_ne_placement;
-extern struct ttm_placement vmw_evictable_placement;
-extern struct ttm_placement vmw_srf_placement;
-extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_mob_ne_placement;
-extern struct ttm_placement vmw_nonfixed_placement;
-extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_dma_quiescent(struct drm_device *dev);
-extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
-extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern struct ttm_device_funcs vmw_bo_driver;
 extern const struct vmw_sg_table *
 vmw_bo_sg_table(struct ttm_buffer_object *bo);
+int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       size_t bo_size,
			       u32 domain,
			       struct vmw_bo **bo_p);
+
 extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);
@@ -896,15 +955,15 @@ static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
  */
 static inline struct page *vmw_piter_page(struct vmw_piter *viter)
 {
-	return viter->page(viter);
+	return viter->pages[viter->i];
 }

 /**
  * Command submission - vmwgfx_execbuf.c
  */

-extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
-			     struct drm_file *file_priv, size_t size);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
@@ -924,25 +983,21 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user *user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
-					int32_t out_fence_fd,
-					struct sync_file *sync_file);
+					int32_t out_fence_fd);

 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

 /**
  * IRQs and wating - vmwgfx_irq.c
  */

-extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-			  uint32_t seqno, bool interruptible,
-			  unsigned long timeout);
-extern int vmw_irq_install(struct drm_device *dev, int irq);
+extern int vmw_irq_install(struct vmw_private *dev_priv);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
@@ -952,39 +1007,14 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv,
-			     struct vmw_fifo_state *fifo_state);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
-				   int *waiter_count);
-extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
-				      u32 flag, int *waiter_count);
-
-/**
- * Rudimentary fence-like objects currently used only for throttling -
- * vmwgfx_marker.c
- */
-
-extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
-extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
-extern int vmw_marker_push(struct vmw_marker_queue *queue,
-			   uint32_t seqno);
-extern int vmw_marker_pull(struct vmw_marker_queue *queue,
-			   uint32_t signaled_seqno);
-extern int vmw_wait_lag(struct vmw_private *dev_priv,
-			struct vmw_marker_queue *queue, uint32_t us);
-
-/**
- * Kernel framebuffer - vmwgfx_fb.c
- */
-
-int vmw_fb_init(struct vmw_private *vmw_priv);
-int vmw_fb_close(struct vmw_private *dev_priv);
-int vmw_fb_off(struct vmw_private *vmw_priv);
-int vmw_fb_on(struct vmw_private *vmw_priv);
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
			    int *waiter_count);
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count);

 /**
  * Kernel modesetting - vmwgfx_kms.c
@@ -992,11 +1022,8 @@

 int vmw_kms_init(struct vmw_private *dev_priv);
 int vmw_kms_close(struct vmw_private *dev_priv);
-int vmw_kms_save_vga(struct vmw_private *vmw_priv);
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
@@ -1004,13 +1031,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
-void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
-				uint32_t pitch,
-				uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
 int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
@@ -1020,21 +1040,10 @@ int vmw_kms_present(struct vmw_private *dev_priv,
		    uint32_t num_clips);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
 int vmw_kms_suspend(struct drm_device *dev);
 int vmw_kms_resume(struct drm_device *dev);
 void vmw_kms_lost_device(struct drm_device *dev);

-int vmw_dumb_create(struct drm_file *file_priv,
-		    struct drm_device *dev,
-		    struct drm_mode_create_dumb *args);
-
-int vmw_dumb_map_offset(struct drm_file *file_priv,
-			struct drm_device *dev, uint32_t handle,
-			uint64_t *offset);
-int vmw_dumb_destroy(struct drm_file *file_priv,
-		     struct drm_device *dev,
-		     uint32_t handle);
 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
 extern void vmw_resource_unpin(struct vmw_resource *res);
 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1047,7 +1056,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv);
 int vmw_overlay_close(struct vmw_private *dev_priv);
 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
-int vmw_overlay_stop_all(struct vmw_private *dev_priv);
 int vmw_overlay_resume_all(struct vmw_private *dev_priv);
 int vmw_overlay_pause_all(struct vmw_private *dev_priv);
 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
@@ -1059,7 +1067,14 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
  * GMR Id manager
  */

-extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
+void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
+
+/**
+ * System memory manager
+ */
+int vmw_sys_man_init(struct vmw_private *dev_priv);
+void vmw_sys_man_fini(struct vmw_private *dev_priv);

 /**
  * Prime - vmwgfx_prime.c
@@ -1073,6 +1088,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *table);

 /*
  * MemoryOBject management - vmwgfx_mob.c
@@ -1094,10 +1112,6 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);

 extern const struct vmw_user_resource_conv *user_context_converter;

-extern int vmw_context_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     int id,
-			     struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
 extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
@@ -1109,15 +1123,14 @@ extern struct vmw_cmdbuf_res_manager *
 vmw_context_res_man(struct vmw_resource *ctx);
 extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
-extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 struct vmw_ctx_binding_state;
 extern struct vmw_ctx_binding_state *
 vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-				     struct vmw_buffer_object *mob);
-extern struct vmw_buffer_object *
+				     struct vmw_bo *mob);
+extern struct vmw_bo *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


@@ -1127,7 +1140,6 @@ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);

 extern const struct vmw_user_resource_conv *user_surface_converter;

-extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -1138,23 +1150,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
-				struct vmw_surface *srf);
-int vmw_surface_gb_priv_define(struct drm_device *dev,
-			       uint32_t user_accounting_size,
-			       SVGA3dSurfaceAllFlags svga3d_flags,
-			       SVGA3dSurfaceFormat format,
-			       bool for_scanout,
-			       uint32_t num_mip_levels,
-			       uint32_t multisample_count,
-			       uint32_t array_size,
-			       struct drm_vmw_size size,
-			       SVGA3dMSPattern multisample_pattern,
-			       SVGA3dMSQualityLevel quality_level,
-			       struct vmw_surface **srf_out);
 extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
@@ -1162,6 +1157,19 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

+int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
						  struct vmw_bo *bo,
						  u32 handle);
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
					 struct vmw_bo *bo,
					 u32 handle);
+int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
+
 /*
  * Shader management - vmwgfx_shader.c
  */
@@ -1195,13 +1203,30 @@ vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

 /*
+ * Streamoutput management
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key);
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx,
			    SVGA3dStreamOutputId user_key,
			    struct list_head *list);
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       SVGA3dStreamOutputId user_key,
			       struct list_head *list);
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback);
+
+/*
  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
  */

 extern struct vmw_cmdbuf_res_manager *
 vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
 extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
-extern size_t vmw_cmdbuf_res_man_size(void);
 extern struct vmw_resource *
 vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
@@ -1239,8 +1264,7 @@ struct vmw_cmdbuf_header;

 extern struct vmw_cmdbuf_man *
 vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
-extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
-				    size_t size, size_t default_size);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
 extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
 extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
 extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
@@ -1300,17 +1324,67 @@ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,

 void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *dst,
		    u32 dst_offset, u32 dst_stride,
-		    struct ttm_buffer_object *src,
+		    struct vmw_bo *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);

 /* Host messaging -vmwgfx_msg.c: */
+void vmw_disable_backdoor(void);
 int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
-int vmw_host_log(const char *log);
+__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);
+
+/* Host mksGuestStats -vmwgfx_msg.c: */
+int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);
+
+int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
+int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
+int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
+int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
+
+/* VMW logging */
+
+/**
+ * VMW_DEBUG_USER - Debug output for user-space debugging.
+ *
+ * @fmt: printf() like format string.
+ *
+ * This macro is for logging user-space error and debugging messages for e.g.
+ * command buffer execution errors due to malformed commands, invalid context,
+ * etc.
+ */
+#define VMW_DEBUG_USER(fmt, ...)					\
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
+/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
+void vmw_bo_dirty_scan(struct vmw_bo *vbo);
+int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
+void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
+void vmw_bo_dirty_clear_res(struct vmw_resource *res);
+void vmw_bo_dirty_release(struct vmw_bo *vbo);
+void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end);
+vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
+vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+
+
+/**
+ * VMW_DEBUG_KMS - Debug output for kernel mode-setting
+ *
+ * This macro is for debugging vmwgfx mode-setting code.
+ */
+#define VMW_DEBUG_KMS(fmt, ...)						\
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

 /**
  * Inline helper functions
@@ -1331,31 +1405,6 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
	return srf;
 }

-static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
-{
-	struct vmw_buffer_object *tmp_buf = *buf;
-
-	*buf = NULL;
-	if (tmp_buf != NULL) {
-		struct ttm_buffer_object *bo = &tmp_buf->base;
-
-		ttm_bo_unref(&bo);
-	}
-}
-
-static inline struct vmw_buffer_object *
-vmw_bo_reference(struct vmw_buffer_object *buf)
-{
-	if (ttm_bo_reference(&buf->base))
-		return buf;
-	return NULL;
-}
-
-static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
-{
-	return &ttm_mem_glob;
-}
-
 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
 {
	atomic_inc(&dev_priv->num_fifo_resources);
@@ -1367,28 +1416,88 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
 }

 /**
- * vmw_mmio_read - Perform a MMIO read from volatile memory
+ * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
  *
- * @addr: The address to read from
+ * @fifo_reg: The fifo register to read from
  *
  * This function is intended to be equivalent to ioread32() on
  * memremap'd memory, but without byteswapping.
  */
-static inline u32 vmw_mmio_read(u32 *addr)
+static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
 {
-	return READ_ONCE(*addr);
+	BUG_ON(vmw_is_svga_v3(vmw));
+	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
 }

 /**
- * vmw_mmio_write - Perform a MMIO write to volatile memory
+ * vmw_fifo_mem_write - Perform a MMIO write to volatile memory
  *
- * @addr: The address to write to
+ * @addr: The fifo register to write to
  *
  * This function is intended to be equivalent to iowrite32 on
  * memremap'd memory, but without byteswapping.
  */
-static inline void vmw_mmio_write(u32 value, u32 *addr)
+static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
 {
-	WRITE_ONCE(*addr, value);
+	BUG_ON(vmw_is_svga_v3(vmw));
+	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
 }
+
+static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
+{
+	u32 fence;
+	if (vmw_is_svga_v3(dev_priv))
+		fence = vmw_read(dev_priv, SVGA_REG_FENCE);
+	else
+		fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+	return fence;
+}
+
+static inline void vmw_fence_write(struct vmw_private *dev_priv,
				   u32 fence)
+{
+	BUG_ON(vmw_is_svga_v3(dev_priv));
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
+}
+
+static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
+{
+	u32 status;
+	if (vmw_is_svga_v3(vmw))
+		status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
+	else
+		status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);
+	return status;
+}
+
+static inline void vmw_irq_status_write(struct vmw_private *vmw,
					uint32 status)
+{
+	if (vmw_is_svga_v3(vmw))
+		vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
+	else
+		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
+}
+
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
		return true;
	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
+static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
					   u32 shader_type)
+{
+	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
+
+	if (shader_model >= VMW_SM_5)
+		max_allowed = SVGA3D_SHADERTYPE_MAX;
+	else if (shader_model >= VMW_SM_4)
+		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
+	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
+}
+
 #endif
