Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_drv.h')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 831
1 file changed, 362 insertions(+), 469 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3596f3923ea3..f2abaf1bda6a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,27 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
  *
  **************************************************************************/
@@ -30,18 +11,19 @@
 #include <linux/suspend.h>
 #include <linux/sync_file.h>
+#include <linux/hashtable.h>
 
 #include <drm/drm_auth.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_hashtab.h>
+#include <drm/drm_print.h>
 #include <drm/drm_rect.h>
 
-#include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo.h>
 
-#include "ttm_lock.h"
 #include "ttm_object.h"
 
 #include "vmwgfx_fence.h"
@@ -56,16 +38,23 @@
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20200114"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 18
+#define VMWGFX_DRIVER_MINOR 21
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_VALIDATIONS 2048
-#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_NUM_DISPLAY_UNITS 8
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
+
+#define VMWGFX_MIN_INITIAL_WIDTH 1280
+#define VMWGFX_MIN_INITIAL_HEIGHT 800
+
+#define VMWGFX_PCI_ID_SVGA2 0x0405
+#define VMWGFX_PCI_ID_SVGA3 0x0406
+
+/*
+ * This has to match get_count_order(SVGA_IRQFLAG_MAX)
+ */
+#define VMWGFX_MAX_NUM_IRQS 6
 
 /*
  * Perhaps we should have sysfs entries for these.
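The two PCI device IDs added above are what the rest of this header keys its register-access paths on: SVGA2 devices use the legacy index/value I/O ports, SVGA3 devices a memory-mapped register BAR (see vmw_is_svga_v3() further down). A hypothetical probe-time guard, assuming only the two defines from this hunk; the driver's real matching is done through its PCI device table in vmwgfx_drv.c:

/* Illustrative sketch only, not part of this patch. */
static bool vmw_pci_id_supported(u32 device_id)
{
	return device_id == VMWGFX_PCI_ID_SVGA2 ||
	       device_id == VMWGFX_PCI_ID_SVGA3;
}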
@@ -73,7 +62,7 @@
 #define VMWGFX_NUM_GB_CONTEXT 256
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
-#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
 #define VMWGFX_NUM_DXCONTEXT 256
 #define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
@@ -81,86 +70,55 @@
 			VMWGFX_NUM_GB_SURFACE +\
 			VMWGFX_NUM_GB_SCREEN_TARGET)
 
-#define VMW_PL_GMR (TTM_PL_PRIV + 0)
-#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
-#define VMW_PL_MOB (TTM_PL_PRIV + 1)
-#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
+#define VMW_RES_HT_ORDER 12
+
+#define MKSSTAT_CAPACITY_LOG2 5U
+#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
 
 struct vmw_fpriv {
 	struct ttm_object_file *tfile;
 	bool gb_aware; /* user-space is guest-backed aware */
 };
 
-/**
- * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
- * @base: The TTM buffer object
- * @res_tree: RB tree of resources using this buffer object as a backing MOB
- * @pin_count: pin depth
- * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
- * increased. May be decreased without reservation.
- * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
- * @map: Kmap object for semi-persistent mappings
- * @res_prios: Eviction priority counts for attached resources
- * @dirty: structure for user-space dirty-tracking
- */
-struct vmw_buffer_object {
-	struct ttm_buffer_object base;
-	struct rb_root res_tree;
-	s32 pin_count;
-	atomic_t cpu_writers;
-	/* Not ref-counted. Protected by binding_mutex */
-	struct vmw_resource *dx_query_ctx;
-	/* Protected by reservation */
-	struct ttm_bo_kmap_obj map;
-	u32 res_prios[TTM_MAX_BO_PRIORITY];
-	struct vmw_bo_dirty *dirty;
-};
-
-/**
- * struct vmw_validate_buffer - Carries validation info about buffers.
- *
- * @base: Validation info for TTM.
- * @hash: Hash entry for quick lookup of the TTM buffer object.
- *
- * This structure contains also driver private validation info
- * on top of the info needed by TTM.
- */
-struct vmw_validate_buffer {
-	struct ttm_validate_buffer base;
-	struct drm_hash_item hash;
-	bool validate_as_mob;
+struct vmwgfx_hash_item {
+	struct hlist_node head;
+	unsigned long key;
 };
 
 struct vmw_res_func;
-
 /**
  * struct vmw_resource - base class for hardware resources
  *
  * @kref: For refcounting.
  * @dev_priv: Pointer to the device private for this resource. Immutable.
  * @id: Device id. Protected by @dev_priv::resource_lock.
- * @backup_size: Backup buffer size. Immutable.
- * @res_dirty: Resource contains data not yet in the backup buffer. Protected
- * by resource reserved.
- * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * @guest_memory_size: Guest memory buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the guest memory buffer.
  * Protected by resource reserved.
+ * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
+ * resource. Protected by resource reserved.
  * @coherent: Emulate coherency by tracking vm accesses.
- * @backup: The backup buffer if any. Protected by resource reserved.
- * @backup_offset: Offset into the backup buffer if any. Protected by resource
- * reserved. Note that only a few resource types can have a @backup_offset
- * different from zero.
+ * @guest_memory_bo: The guest memory buffer if any. Protected by resource
+ * reserved.
+ * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
+ * by resource reserved. Note that only a few resource types can have a
+ * @guest_memory_offset different from zero.
  * @pin_count: The pin count for this resource. A pinned resource has a
  * pin-count greater than zero. It is not on the resource LRU lists and its
- * backup buffer is pinned. Hence it can't be evicted.
+ * guest memory buffer is pinned. Hence it can't be evicted.
  * @func: Method vtable for this resource. Immutable.
- * @mob_node; Node for the MOB backup rbtree. Protected by @backup reserved.
+ * @mob_node: Node for the MOB guest memory rbtree. Protected by
+ * @guest_memory_bo reserved.
  * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
  * @binding_head: List head for the context binding list. Protected by
  * the @dev_priv::binding_mutex
@@ -168,18 +126,20 @@ struct vmw_res_func;
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
+struct vmw_bo;
+struct vmw_bo;
 struct vmw_resource_dirty;
 struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
-	unsigned long backup_size;
+	unsigned long guest_memory_size;
	u32 res_dirty : 1;
-	u32 backup_dirty : 1;
+	u32 guest_memory_dirty : 1;
	u32 coherent : 1;
-	struct vmw_buffer_object *backup;
-	unsigned long backup_offset;
+	struct vmw_bo *guest_memory_bo;
+	unsigned long guest_memory_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
@@ -218,7 +178,7 @@ enum vmw_cmdbuf_res_type {
 struct vmw_cmdbuf_res_manager;
 
 struct vmw_cursor_snooper {
-	size_t age;
+	size_t id;
	uint32_t *image;
 };
 
@@ -279,13 +239,6 @@ struct vmw_surface {
	struct list_head view_list;
 };
 
-struct vmw_marker_queue {
-	struct list_head head;
-	u64 lag;
-	u64 lag_time;
-	spinlock_t lock;
-};
-
 struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
@@ -295,8 +248,6 @@ struct vmw_fifo_state {
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
-	struct vmw_marker_queue marker_queue;
-	bool dx;
 };
 
 /**
@@ -323,7 +274,6 @@ struct vmw_res_cache_entry {
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
 enum vmw_dma_map_mode {
-	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
@@ -342,7 +292,6 @@ struct vmw_sg_table {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
-	unsigned long num_regions;
	unsigned long num_pages;
 };
 
@@ -368,7 +317,19 @@ struct vmw_piter {
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
-	struct page *(*page)(struct vmw_piter *);
+};
+
+
+struct vmw_ttm_tt {
+	struct ttm_tt dma_ttm;
+	struct vmw_private *dev_priv;
+	int gmr_id;
+	struct vmw_mob *mob;
+	int mem_type;
+	struct sg_table sgt;
+	struct vmw_sg_table vsgt;
+	bool mapped;
+	bool bound;
 };
 
 /*
@@ -378,7 +339,8 @@
 enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
-	vmw_du_screen_target
+	vmw_du_screen_target,
+	vmw_du_max
 };
 
 struct vmw_validation_context;
@@ -416,13 +378,13 @@ struct vmw_ctx_validation_info;
 * @ctx: The validation context
 */
 struct vmw_sw_context{
-	struct drm_open_hash res_ht;
-	bool res_ht_initialized;
+	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
	bool kernel;
	struct vmw_fpriv *fp;
+	struct drm_file *filp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
-	struct vmw_buffer_object *cur_query_bo;
+	struct vmw_bo *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
@@ -434,7 +396,7 @@ struct vmw_sw_context{
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
-	struct vmw_buffer_object *dx_query_mob;
+	struct vmw_bo *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
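With drm_open_hash gone, vmw_sw_context now embeds a fixed-order kernel hashtable: DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER) gives 2^12 buckets keyed through struct vmwgfx_hash_item. A minimal sketch of a lookup over the new table, using only <linux/hashtable.h> helpers (the function itself is illustrative, not part of the patch):

static struct vmwgfx_hash_item *
vmw_ht_find(struct vmw_sw_context *sw_context, unsigned long key)
{
	struct vmwgfx_hash_item *hash;

	/* hash_for_each_possible() walks only the bucket @key maps to. */
	hash_for_each_possible(sw_context->res_ht, hash, head, key)
		if (hash->key == key)
			return hash;
	return NULL;
}

Insertion is the usual counterpart, hash_add(sw_context->res_ht, &item->head, item->key), which is why vmwgfx_hash_item needs only an hlist_node and the key.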
@@ -443,15 +405,6 @@ struct vmw_sw_context{
 };
 
 struct vmw_legacy_display;
 struct vmw_overlay;
 
-struct vmw_vga_topology_state {
-	uint32_t width;
-	uint32_t height;
-	uint32_t primary;
-	uint32_t pos_x;
-	uint32_t pos_y;
-};
-
-
 /*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
@@ -468,7 +421,7 @@
 struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
-	struct ttm_buffer_object *otable_bo;
+	struct vmw_bo *otable_bo;
 };
 
 enum {
@@ -483,6 +436,7 @@ enum {
 * @VMW_SM_4: Context support up to SM4.
 * @VMW_SM_4_1: Context support up to SM4_1.
 * @VMW_SM_5: Context support up to SM5.
+ * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
 * @VMW_SM_MAX: Should be the last.
 */
 enum vmw_sm_type {
@@ -490,23 +444,22 @@
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
+	VMW_SM_5_1X,
	VMW_SM_MAX
 };
 
 struct vmw_private {
-	struct ttm_bo_device bdev;
-
-	struct vmw_fifo_state fifo;
-
-	struct drm_device *dev;
-	struct drm_vma_offset_manager vma_manager;
-	unsigned long vmw_chipset;
-	unsigned int io_start;
-	uint32_t vram_start;
-	uint32_t vram_size;
-	uint32_t prim_bb_mem;
-	uint32_t mmio_start;
-	uint32_t mmio_size;
+	struct drm_device drm;
+	struct ttm_device bdev;
+
+	u32 pci_id;
+	resource_size_t io_start;
+	resource_size_t vram_start;
+	resource_size_t vram_size;
+	resource_size_t max_primary_mem;
+	u32 __iomem *rmmio;
+	u32 *fifo_mem;
+	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
@@ -515,7 +468,6 @@ struct vmw_private {
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
-	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
@@ -526,8 +478,9 @@ struct vmw_private {
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
-	spinlock_t cap_lock;
	bool assume_16bpp;
+	u32 irqs[VMWGFX_MAX_NUM_IRQS];
+	u32 num_irq_vectors;
 
	enum vmw_sm_type sm_type;
 
@@ -535,13 +488,11 @@ struct vmw_private {
	 * Framebuffer info.
	 */
 
-	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
-	struct mutex global_kms_state_mutex;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;
 
@@ -572,7 +523,7 @@ struct vmw_private {
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
-	uint32_t last_read_seqno;
+	atomic_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */
 
@@ -596,35 +547,21 @@ struct vmw_private {
	struct mutex binding_mutex;
 
	/**
-	 * Operating mode.
-	 */
-
-	bool stealth;
-	bool enable_fb;
-	spinlock_t svga_lock;
-
-	/**
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;
 
-	struct mutex release_mutex;
	atomic_t num_fifo_resources;
 
	/*
-	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
-	 */
-	struct ttm_lock reservation_sem;
-
-	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */
 
-	struct vmw_buffer_object *dummy_query_bo;
-	struct vmw_buffer_object *pinned_bo;
+	struct vmw_bo *dummy_query_bo;
+	struct vmw_bo *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;
@@ -649,11 +586,26 @@ struct vmw_private {
	 */
	struct vmw_otable_batch otable_batch;
 
+	struct vmw_fifo_state *fifo;
	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
 
-	/* Validation memory reservation */
-	struct vmw_validation_mem vvm;
+	uint32 *devcaps;
+
+	bool vkms_enabled;
+	struct workqueue_struct *crc_workq;
+
+	/*
+	 * mksGuestStat instance-descriptor and pid arrays
+	 */
+	struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
+	atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];
+
+#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
+	struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
+	u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
+	atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
+#endif
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -663,7 +615,12 @@ static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
-	return (struct vmw_private *)dev->dev_private;
+	return container_of(dev, struct vmw_private, drm);
+}
+
+static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
+{
+	return container_of(bdev, struct vmw_private, bdev);
 }
 
 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
@@ -672,6 +629,14 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
 }
 
 /*
+ * SVGA v3 has mmio register access and lacks fifo cmds
+ */
+static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
+{
+	return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
+}
+
+/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
@@ -681,10 +646,14 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
 static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
 {
-	spin_lock(&dev_priv->hw_lock);
-	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
-	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock(&dev_priv->hw_lock);
+	if (vmw_is_svga_v3(dev_priv)) {
+		iowrite32(value, dev_priv->rmmio + offset);
+	} else {
+		spin_lock(&dev_priv->hw_lock);
+		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
+		spin_unlock(&dev_priv->hw_lock);
+	}
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
@@ -692,10 +661,14 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 {
	u32 val;
 
-	spin_lock(&dev_priv->hw_lock);
-	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
-	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock(&dev_priv->hw_lock);
+	if (vmw_is_svga_v3(dev_priv)) {
+		val = ioread32(dev_priv->rmmio + offset);
+	} else {
+		spin_lock(&dev_priv->hw_lock);
+		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+		val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
+		spin_unlock(&dev_priv->hw_lock);
+	}
 
	return val;
 }
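vmw_write()/vmw_read() now hide the device-generation split: on SVGA3 the register file is memory-mapped through rmmio, while SVGA2 keeps the index/value port pair serialized by hw_lock. Callers stay generation-agnostic, e.g. (illustrative helper; SVGA_REG_VRAM_SIZE is a standard SVGA register, the function itself is not in this patch):

static u32 vmw_get_vram_size(struct vmw_private *dev_priv)
{
	/* The same call works on both SVGA v2 (port I/O) and v3 (MMIO). */
	return vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
}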
@@ -733,8 +706,27 @@
 static inline bool has_sm5_context(const struct vmw_private *dev_priv)
 {
	return (dev_priv->sm_type >= VMW_SM_5);
 }
 
+/**
+ * has_gl43_context - Does the device support GL43 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports a GL43 context.
+ */
+static inline bool has_gl43_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_5_1X);
+}
+
+
+static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
+{
+	return (has_gl43_context(dev_priv) ?
+		SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
+}
+
 extern void vmw_svga_enable(struct vmw_private *dev_priv);
 extern void vmw_svga_disable(struct vmw_private *dev_priv);
+bool vmwgfx_supported(struct vmw_private *vmw);
 
 
 /**
@@ -748,6 +740,26 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
 /**
+ * User handles
+ */
+struct vmw_user_object {
+	struct vmw_surface *surface;
+	struct vmw_bo *buffer;
+};
+
+int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
+			   u32 handle, struct vmw_user_object *uo);
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
+void vmw_user_object_unref(struct vmw_user_object *uo);
+bool vmw_user_object_is_null(struct vmw_user_object *uo);
+struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
+struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
+void *vmw_user_object_map(struct vmw_user_object *uo);
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
+void vmw_user_object_unmap(struct vmw_user_object *uo);
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
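struct vmw_user_object abstracts what a user-space handle resolves to: a surface, a buffer object, or both. A hedged sketch of the expected lookup/use/release pattern, built only from the declarations above (in particular it assumes a successful vmw_user_object_lookup() takes a reference that vmw_user_object_unref() drops):

/* Illustrative only, not part of this patch. */
static int vmw_handle_example(struct vmw_private *dev_priv,
			      struct drm_file *filp, u32 handle)
{
	struct vmw_user_object uo;
	struct vmw_bo *bo;
	int ret;

	ret = vmw_user_object_lookup(dev_priv, filp, handle, &uo);
	if (ret)
		return ret;

	bo = vmw_user_object_buffer(&uo); /* assumed NULL for surface-only handles */
	if (bo) {
		/* ... operate on the backing buffer ... */
	}

	vmw_user_object_unref(&uo);
	return 0;
}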
+
+/**
 * Resource utilities - vmwgfx_resource.c
 */
 struct vmw_user_resource_conv;
@@ -761,23 +773,13 @@ extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-				  struct ttm_object_file *tfile,
-				  uint32_t handle,
-				  struct vmw_surface **out_surf,
-				  struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
-extern struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-				      struct ttm_object_file *tfile,
-				      uint32_t handle,
-				      const struct vmw_user_resource_conv *
-				      converter);
+
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -789,19 +791,21 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
-				   bool switch_backup,
-				   struct vmw_buffer_object *new_backup,
-				   unsigned long new_backup_offset);
+				   bool switch_guest_memory,
+				   struct vmw_bo *new_guest_memory,
+				   unsigned long new_guest_memory_offset);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
-				  struct ttm_mem_reg *mem);
-extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
-extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+				  struct ttm_resource *old_mem,
+				  struct ttm_resource *new_mem);
+int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
+void vmw_resource_evict_all(struct vmw_private *dev_priv);
+void vmw_resource_unbind_list(struct vmw_bo *vbo);
 void vmw_resource_mob_attach(struct vmw_resource *res);
 void vmw_resource_mob_detach(struct vmw_resource *res);
 void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
-int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+int vmw_resource_clean(struct vmw_resource *res);
+int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);
 
 /**
@@ -816,130 +820,18 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 }
 
 /**
- * vmw_user_resource_noref_release - release a user resource pointer looked up
- * without reference
+ * GEM related functionality - vmwgfx_gem.c
 */
-static inline void vmw_user_resource_noref_release(void)
-{
-	ttm_base_object_noref_release();
-}
-
-/**
- * Buffer object helper functions - vmwgfx_bo.c
- */
-extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
-				   struct vmw_buffer_object *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
-			      struct vmw_buffer_object *buf,
-			      bool interruptible);
-extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_buffer_object *buf,
-				     bool interruptible);
-extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
-				       struct vmw_buffer_object *bo,
-				       bool interruptible);
-extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
-			struct vmw_buffer_object *bo,
-			bool interruptible);
-extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
-				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
-extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_bo_init(struct vmw_private *dev_priv,
-		       struct vmw_buffer_object *vmw_bo,
-		       size_t size, struct ttm_placement *placement,
-		       bool interruptible,
-		       void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
-				     struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t size,
-			     bool shareable,
-			     uint32_t *handle,
-			     struct vmw_buffer_object **p_dma_buf,
-			     struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
-				 struct vmw_buffer_object *dma_buf,
-				 uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
-			      uint32_t id, struct vmw_buffer_object **out,
-			      struct ttm_base_object **base);
-extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
-				struct vmw_fence_obj *fence);
-extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
-extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
-extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
-			       struct ttm_mem_reg *mem);
-extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
-
-/**
- * vmw_user_bo_noref_release - release a buffer object pointer looked up
- * without reference
- */
-static inline void vmw_user_bo_noref_release(void)
-{
-	ttm_base_object_noref_release();
-}
-
-/**
- * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
- * according to attached resources
- * @vbo: The struct vmw_buffer_object
- */
-static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
-{
-	int i = ARRAY_SIZE(vbo->res_prios);
-
-	while (i--) {
-		if (vbo->res_prios[i]) {
-			vbo->base.priority = i;
-			return;
-		}
-	}
-
-	vbo->base.priority = 3;
-}
-
-/**
- * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
- * eviction priority
- * @vbo: The struct vmw_buffer_object
- * @prio: The resource priority
- *
- * After being notified, the code assigns the highest resource eviction priority
- * to the backing buffer object (mob).
- */
-static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
-{
-	if (vbo->res_prios[prio]++ == 0)
-		vmw_bo_prio_adjust(vbo);
-}
-
-/**
- * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
- * priority being removed
- * @vbo: The struct vmw_buffer_object
- * @prio: The resource priority
- *
- * After being notified, the code assigns the highest resource eviction priority
- * to the backing buffer object (mob).
- */
-static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
-{
-	if (--vbo->res_prios[prio] == 0)
-		vmw_bo_prio_adjust(vbo);
-}
+struct vmw_bo_params;
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
+extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+					     struct drm_file *filp,
+					     uint32_t size,
+					     uint32_t *handle,
+					     struct vmw_bo **p_vbo);
+extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *filp);
+extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
 
 /**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -953,53 +845,63 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
-extern __poll_t vmw_fops_poll(struct file *filp,
-			      struct poll_table_struct *wait);
-extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
-			     size_t count, loff_t *offset);
 
 /**
 * Fifo utilities - vmwgfx_fifo.c
 */
-extern int vmw_fifo_init(struct vmw_private *dev_priv,
-			 struct vmw_fifo_state *fifo);
-extern void vmw_fifo_release(struct vmw_private *dev_priv,
-			     struct vmw_fifo_state *fifo);
+extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
+extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
+extern bool vmw_cmd_supported(struct vmw_private *vmw);
 extern void *
-vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
-extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
-extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
-extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
-			       uint32_t *seqno);
+vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
+extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
+extern bool vmw_supports_3d(struct vmw_private *dev_priv);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
-extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-				     uint32_t cid);
-extern int vmw_fifo_flush(struct vmw_private *dev_priv,
-			  bool interruptible);
+extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
+extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);
 
-#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)                        \
+#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)                        \
 ({                                                                            \
-	vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({                 \
+	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({                 \
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
			  __func__, (unsigned int) __bytes);                  \
		NULL;                                                         \
	});                                                                   \
 })
 
-#define VMW_FIFO_RESERVE(__priv, __bytes)                                     \
-	VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+#define VMW_CMD_RESERVE(__priv, __bytes)                                      \
+	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
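The vmw_fifo_* to vmw_cmd_* rename keeps the long-standing reserve/fill/commit submission pattern. A sketch of a caller built only from the declarations above plus the standard SVGA_CMD_UPDATE command from the SVGA headers (the helper itself is hypothetical):

static int vmw_emit_update_example(struct vmw_private *dev_priv)
{
	struct {
		u32 header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;	/* the macro already logged the failure */

	cmd->header = SVGA_CMD_UPDATE;
	cmd->body.x = 0;
	cmd->body.y = 0;
	cmd->body.width = 64;
	cmd->body.height = 64;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	return 0;
}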
+
 
 /**
- * TTM glue - vmwgfx_ttm_glue.c
+ * vmw_fifo_caps - Returns the capabilities of the FIFO command
+ * queue or 0 if fifo memory isn't present.
+ * @dev_priv: The device private context
 */
+static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
+{
+	if (!dev_priv->fifo_mem || !dev_priv->fifo)
+		return 0;
+	return dev_priv->fifo->capabilities;
+}
 
-extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
-extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
-					size_t gran);
+/**
+ * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
+ * is enabled in the FIFO.
+ * @dev_priv: The device private context
+ */
+static inline bool
+vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
+{
+	return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
+}
 
 /**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
@@ -1007,22 +909,15 @@
 
 extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_ne_placement;
-extern struct ttm_placement vmw_vram_sys_placement;
-extern struct ttm_placement vmw_vram_gmr_placement;
-extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_sys_ne_placement;
-extern struct ttm_placement vmw_evictable_placement;
-extern struct ttm_placement vmw_srf_placement;
-extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_mob_ne_placement;
-extern struct ttm_placement vmw_nonfixed_placement;
-extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
-extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern struct ttm_device_funcs vmw_bo_driver;
 extern const struct vmw_sg_table *
 vmw_bo_sg_table(struct ttm_buffer_object *bo);
+int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+			       size_t bo_size,
+			       u32 domain,
+			       struct vmw_bo **bo_p);
+
 extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);
@@ -1060,7 +955,7 @@ static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
 */
 static inline struct page *vmw_piter_page(struct vmw_piter *viter)
 {
-	return viter->page(viter);
+	return viter->pages[viter->i];
 }
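With the page() hook gone from struct vmw_piter, pages come straight from the cached array while DMA addresses still go through the per-mode next()/dma_address() callbacks. The usual iteration pattern, sketched from the declarations above (it assumes the existing vmw_piter_next() inline kept by this header, and that the iterator starts one step before the first entry, matching callers like vmw_gmr_bind(); the debug helper is illustrative):

static void vmw_print_dma_addrs(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;
	dma_addr_t addr;

	vmw_piter_start(&viter, vsgt, 0);
	/* Each vmw_piter_next() advances to the next valid entry. */
	while (vmw_piter_next(&viter)) {
		addr = vmw_piter_dma_addr(&viter);
		DRM_DEBUG_DRIVER("dma addr: %pad\n", &addr);
	}
}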
 
 /**
@@ -1088,25 +983,21 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user *user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
-					int32_t out_fence_fd,
-					struct sync_file *sync_file);
+					int32_t out_fence_fd);
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**
 * IRQs and waiting - vmwgfx_irq.c
 */
-extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-			  uint32_t seqno, bool interruptible,
-			  unsigned long timeout);
-extern int vmw_irq_install(struct drm_device *dev, int irq);
+extern int vmw_irq_install(struct vmw_private *dev_priv);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
@@ -1116,39 +1007,14 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv,
-			     struct vmw_fifo_state *fifo_state);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
-				   int *waiter_count);
-extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
-				      u32 flag, int *waiter_count);
-
-/**
- * Rudimentary fence-like objects currently used only for throttling -
- * vmwgfx_marker.c
- */
-
-extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
-extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
-extern int vmw_marker_push(struct vmw_marker_queue *queue,
-			   uint32_t seqno);
-extern int vmw_marker_pull(struct vmw_marker_queue *queue,
-			   uint32_t signaled_seqno);
-extern int vmw_wait_lag(struct vmw_private *dev_priv,
-			struct vmw_marker_queue *queue, uint32_t us);
-
-/**
- * Kernel framebuffer - vmwgfx_fb.c
- */
-
-int vmw_fb_init(struct vmw_private *vmw_priv);
-int vmw_fb_close(struct vmw_private *dev_priv);
-int vmw_fb_off(struct vmw_private *vmw_priv);
-int vmw_fb_on(struct vmw_private *vmw_priv);
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
			    int *waiter_count);
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count);
 
 /**
 * Kernel modesetting - vmwgfx_kms.c
@@ -1158,7 +1024,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
 int vmw_kms_close(struct vmw_private *dev_priv);
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
@@ -1166,12 +1031,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
-				uint32_t pitch,
-				uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
-int vmw_enable_vblank(struct drm_crtc *crtc);
-void vmw_disable_vblank(struct drm_crtc *crtc);
 int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
@@ -1181,21 +1040,10 @@ int vmw_kms_present(struct vmw_private *dev_priv,
		    uint32_t num_clips);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
 int vmw_kms_suspend(struct drm_device *dev);
 int vmw_kms_resume(struct drm_device *dev);
 void vmw_kms_lost_device(struct drm_device *dev);
 
-int vmw_dumb_create(struct drm_file *file_priv,
-		    struct drm_device *dev,
-		    struct drm_mode_create_dumb *args);
-
-int vmw_dumb_map_offset(struct drm_file *file_priv,
-			struct drm_device *dev, uint32_t handle,
-			uint64_t *offset);
-int vmw_dumb_destroy(struct drm_file *file_priv,
-		     struct drm_device *dev,
-		     uint32_t handle);
 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
 extern void vmw_resource_unpin(struct vmw_resource *res);
 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1219,7 +1067,14 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 * GMR Id manager
 */
 
-extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
+void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
+
+/**
+ * System memory manager
+ */
+int vmw_sys_man_init(struct vmw_private *dev_priv);
+void vmw_sys_man_fini(struct vmw_private *dev_priv);
 
 /**
 * Prime - vmwgfx_prime.c
@@ -1233,6 +1088,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+						 struct dma_buf_attachment *attach,
+						 struct sg_table *table);
 
 /*
 * MemoryOBject management - vmwgfx_mob.c
@@ -1265,15 +1123,14 @@ extern struct vmw_cmdbuf_res_manager *
 vmw_context_res_man(struct vmw_resource *ctx);
 extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
-extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 struct vmw_ctx_binding_state;
 extern struct vmw_ctx_binding_state *
 vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-				     struct vmw_buffer_object *mob);
-extern struct vmw_buffer_object *
+				     struct vmw_bo *mob);
+extern struct vmw_bo *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
 
 
@@ -1293,18 +1150,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
-int vmw_surface_gb_priv_define(struct drm_device *dev,
-			       uint32_t user_accounting_size,
-			       SVGA3dSurfaceAllFlags svga3d_flags,
-			       SVGA3dSurfaceFormat format,
-			       bool for_scanout,
-			       uint32_t num_mip_levels,
-			       uint32_t multisample_count,
-			       uint32_t array_size,
-			       struct drm_vmw_size size,
-			       SVGA3dMSPattern multisample_pattern,
-			       SVGA3dMSQualityLevel quality_level,
-			       struct vmw_surface **srf_out);
 extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
@@ -1313,9 +1158,17 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      struct drm_file *file_priv);
 
 int vmw_gb_surface_define(struct vmw_private *dev_priv,
-			  uint32_t user_accounting_size,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+						  struct vmw_bo *bo,
+						  u32 handle);
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+					 struct vmw_bo *bo,
+					 u32 handle);
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args);
 
 /*
 * Shader management - vmwgfx_shader.c
@@ -1374,7 +1227,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
 extern struct vmw_cmdbuf_res_manager *
 vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
 extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
-extern size_t vmw_cmdbuf_res_man_size(void);
 extern struct vmw_resource *
 vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
@@ -1412,8 +1264,7 @@ struct vmw_cmdbuf_header;
 
 extern struct vmw_cmdbuf_man *
 vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
-extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
-				    size_t size, size_t default_size);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
 extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
 extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
 extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
@@ -1473,20 +1324,32 @@ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
 
 void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
 
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *dst,
		    u32 dst_offset, u32 dst_stride,
-		    struct ttm_buffer_object *src,
+		    struct vmw_bo *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
 
 /* Host messaging -vmwgfx_msg.c: */
+void vmw_disable_backdoor(void);
 int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
-int vmw_host_log(const char *log);
+__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
 int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);
 
+/* Host mksGuestStats -vmwgfx_msg.c: */
+int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);
+
+int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
+
 /* VMW logging */
 
 /**
@@ -1502,26 +1365,18 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
 
 /* Resource dirtying - vmwgfx_page_dirty.c */
-void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
-int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
+void vmw_bo_dirty_scan(struct vmw_bo *vbo);
+int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
 void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
 void vmw_bo_dirty_clear_res(struct vmw_resource *res);
-void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
-void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+void vmw_bo_dirty_release(struct vmw_bo *vbo);
+void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end);
 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
-				enum page_entry_size pe_size);
-#endif
 
-/* Transparent hugepage support - vmwgfx_thp.c */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern const struct ttm_mem_type_manager_func vmw_thp_func;
-#else
-#define vmw_thp_func ttm_bo_manager_func
-#endif
 
 /**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
@@ -1550,28 +1405,6 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
	return srf;
 }
 
-static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
-{
-	struct vmw_buffer_object *tmp_buf = *buf;
-
-	*buf = NULL;
-	if (tmp_buf != NULL) {
-		ttm_bo_put(&tmp_buf->base);
-	}
-}
-
-static inline struct vmw_buffer_object *
-vmw_bo_reference(struct vmw_buffer_object *buf)
-{
-	ttm_bo_get(&buf->base);
-	return buf;
-}
-
-static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
-{
-	return &ttm_mem_glob;
-}
-
 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
 {
	atomic_inc(&dev_priv->num_fifo_resources);
@@ -1583,28 +1416,88 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
 }
 
 /**
- * vmw_mmio_read - Perform a MMIO read from volatile memory
+ * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
 *
- * @addr: The address to read from
+ * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
-static inline u32 vmw_mmio_read(u32 *addr)
+static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
 {
-	return READ_ONCE(*addr);
+	BUG_ON(vmw_is_svga_v3(vmw));
+	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
 }
 
 /**
- * vmw_mmio_write - Perform a MMIO write to volatile memory
+ * vmw_fifo_mem_write - Perform a MMIO write to volatile memory
 *
- * @addr: The address to write to
+ * @fifo_reg: The fifo register to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
-static inline void vmw_mmio_write(u32 value, u32 *addr)
+static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
+				       u32 value)
+{
+	BUG_ON(vmw_is_svga_v3(vmw));
+	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
+}
+
+static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
 {
-	WRITE_ONCE(*addr, value);
+	u32 fence;
+	if (vmw_is_svga_v3(dev_priv))
+		fence = vmw_read(dev_priv, SVGA_REG_FENCE);
+	else
+		fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+	return fence;
 }
+
+static inline void vmw_fence_write(struct vmw_private *dev_priv,
+				   u32 fence)
+{
+	BUG_ON(vmw_is_svga_v3(dev_priv));
+	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
+}
+
+static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
+{
+	u32 status;
+	if (vmw_is_svga_v3(vmw))
+		status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
+	else
+		status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);
+	return status;
+}
+
+static inline void vmw_irq_status_write(struct vmw_private *vmw,
+					uint32 status)
+{
+	if (vmw_is_svga_v3(vmw))
+		vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
+	else
+		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
+}
+
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
+		return true;
+	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
+static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
+					   u32 shader_type)
+{
+	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
+
+	if (shader_model >= VMW_SM_5)
+		max_allowed = SVGA3D_SHADERTYPE_MAX;
+	else if (shader_model >= VMW_SM_4)
+		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
+	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
+}
+
 #endif
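Finally, vmw_shadertype_is_valid() gives command validation a single clamp for shader types across SM levels. A caller-side sketch (hypothetical wrapper; the real checks live in the command parser in vmwgfx_execbuf.c, and VMW_DEBUG_USER comes from this header):

static int vmw_check_shader_type(struct vmw_private *dev_priv, u32 shader_type)
{
	if (!vmw_shadertype_is_valid(dev_priv->sm_type, shader_type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n", shader_type);
		return -EINVAL;
	}
	return 0;
}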
