Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_shader.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 205
1 file changed, 62 insertions(+), 143 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index e139fdfd1635..69dfe69ce0f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
 
 #include <drm/ttm/ttm_placement.h>
 
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
-#include "vmwgfx_binding.h"
 
 struct vmw_shader {
 	struct vmw_resource res;
@@ -53,10 +54,6 @@ struct vmw_dx_shader {
 	struct list_head cotable_head;
 };
 
-static uint64_t vmw_user_shader_size;
-static uint64_t vmw_shader_size;
-static size_t vmw_shader_dx_size;
-
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -79,7 +76,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
 					enum vmw_cmdbuf_res_state state);
 static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
 static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
-static uint64_t vmw_user_shader_size;
 
 static const struct vmw_user_resource_conv user_shader_conv = {
 	.object_type = VMW_RES_SHADER,
@@ -93,12 +89,13 @@ const struct vmw_user_resource_conv *user_shader_converter =
 
 static const struct vmw_res_func vmw_gb_shader_func = {
 	.res_type = vmw_res_shader,
-	.needs_backup = true,
+	.needs_guest_memory = true,
 	.may_evict = true,
 	.prio = 3,
 	.dirty_prio = 3,
 	.type_name = "guest backed shaders",
-	.backup_placement = &vmw_mob_placement,
+	.domain = VMW_BO_DOMAIN_MOB,
+	.busy_domain = VMW_BO_DOMAIN_MOB,
 	.create = vmw_gb_shader_create,
 	.destroy = vmw_gb_shader_destroy,
 	.bind = vmw_gb_shader_bind,
@@ -107,12 +104,13 @@ static const struct vmw_res_func vmw_gb_shader_func = {
 
 static const struct vmw_res_func vmw_dx_shader_func = {
 	.res_type = vmw_res_shader,
-	.needs_backup = true,
+	.needs_guest_memory = true,
 	.may_evict = true,
 	.prio = 3,
 	.dirty_prio = 3,
 	.type_name = "dx shaders",
-	.backup_placement = &vmw_mob_placement,
+	.domain = VMW_BO_DOMAIN_MOB,
+	.busy_domain = VMW_BO_DOMAIN_MOB,
 	.create = vmw_dx_shader_create,
 	/*
 	 * The destroy callback is only called with a committed resource on
@@ -125,7 +123,7 @@ static const struct vmw_res_func vmw_dx_shader_func = {
 	.commit_notify = vmw_dx_shader_commit_notify,
 };
 
-/**
+/*
  * Shader management:
  */
 
@@ -163,7 +161,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 			      SVGA3dShaderType type,
 			      uint8_t num_input_sig,
 			      uint8_t num_output_sig,
-			      struct vmw_buffer_object *byte_code,
+			      struct vmw_bo *byte_code,
 			      void (*res_free) (struct vmw_resource *res))
 {
 	struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -180,10 +178,10 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 		return ret;
 	}
 
-	res->backup_size = size;
+	res->guest_memory_size = size;
 	if (byte_code) {
-		res->backup = vmw_bo_reference(byte_code);
-		res->backup_offset = offset;
+		res->guest_memory_bo = vmw_user_bo_ref(byte_code);
+		res->guest_memory_offset = offset;
 	}
 	shader->size = size;
 	shader->type = type;
@@ -222,7 +220,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		ret = -ENOMEM;
 		goto out_no_fifo;
@@ -233,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
 	cmd->body.shid = res->id;
 	cmd->body.type = shader->type;
 	cmd->body.sizeInBytes = shader->size;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
@@ -254,19 +252,19 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
 	} *cmd;
 	struct ttm_buffer_object *bo = val_buf->bo;
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.shid = res->id;
-	cmd->body.mobid = bo->mem.start;
-	cmd->body.offsetInBytes = res->backup_offset;
-	res->backup_dirty = false;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	cmd->body.mobid = bo->resource->start;
+	cmd->body.offsetInBytes = res->guest_memory_offset;
+	res->guest_memory_dirty = false;
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
 }
@@ -282,9 +280,9 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 	} *cmd;
 	struct vmw_fence_obj *fence;
 
-	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+	BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -293,7 +291,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 	cmd->body.shid = res->id;
 	cmd->body.mobid = SVGA3D_INVALID_ID;
 	cmd->body.offsetInBytes = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	/*
 	 * Create a fence object and fence the backup buffer.
@@ -324,7 +322,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	mutex_lock(&dev_priv->binding_mutex);
 	vmw_binding_res_list_scrub(&res->binding_head);
 
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		mutex_unlock(&dev_priv->binding_mutex);
 		return -ENOMEM;
@@ -333,7 +331,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.shid = res->id;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
 	vmw_fifo_resource_dec(dev_priv);
@@ -394,7 +392,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
 	if (!list_empty(&shader->cotable_head) || !shader->committed)
 		return 0;
 
-	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), shader->ctx->id);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -402,9 +400,9 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = shader->ctx->id;
 	cmd->body.shid = shader->id;
-	cmd->body.mobid = res->backup->base.mem.start;
-	cmd->body.offsetInBytes = res->backup_offset;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
+	cmd->body.offsetInBytes = res->guest_memory_offset;
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
 
@@ -450,7 +448,7 @@ static int vmw_dx_shader_bind(struct vmw_resource *res,
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 	mutex_lock(&dev_priv->binding_mutex);
 	vmw_dx_shader_unscrub(res);
 	mutex_unlock(&dev_priv->binding_mutex);
@@ -481,7 +479,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
 		return 0;
 
 	WARN_ON_ONCE(!shader->committed);
-	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
@@ -491,7 +489,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
 	cmd->body.shid = res->id;
 	cmd->body.mobid = SVGA3D_INVALID_ID;
 	cmd->body.offsetInBytes = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 	res->id = -1;
 	list_del_init(&shader->cotable_head);
 
@@ -513,7 +511,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
 	struct vmw_fence_obj *fence;
 	int ret;
 
-	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+	BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	ret = vmw_dx_shader_scrub(res);
@@ -563,16 +561,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
  *
  * @res: The shader resource
  *
- * Frees the DX shader resource and updates memory accounting.
+ * Frees the DX shader resource.
  */
 static void vmw_dx_shader_res_free(struct vmw_resource *res)
 {
-	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
 
 	vmw_resource_unreference(&shader->cotable);
 	kfree(shader);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
 }
 
 /**
@@ -594,30 +590,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
 	struct vmw_dx_shader *shader;
 	struct vmw_resource *res;
 	struct vmw_private *dev_priv = ctx->dev_priv;
-	struct ttm_operation_ctx ttm_opt_ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
 	int ret;
 
-	if (!vmw_shader_dx_size)
-		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
-
 	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
-				   &ttm_opt_ctx);
-	if (ret) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for shader "
-				  "creation.\n");
-		return ret;
-	}
-
 	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
 	if (!shader) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
 		return -ENOMEM;
 	}
 
@@ -654,7 +633,7 @@ out_resource_init:
 
 
 
-/**
+/*
  * User-space shader management:
  */
 
@@ -669,24 +648,18 @@ static void vmw_user_shader_free(struct vmw_resource *res)
 {
 	struct vmw_user_shader *ushader =
 		container_of(res, struct vmw_user_shader, shader.res);
-	struct vmw_private *dev_priv = res->dev_priv;
 
 	ttm_base_object_kfree(ushader, base);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv),
-			    vmw_user_shader_size);
 }
 
 static void vmw_shader_free(struct vmw_resource *res)
 {
 	struct vmw_shader *shader = vmw_res_to_shader(res);
-	struct vmw_private *dev_priv = res->dev_priv;
 
 	kfree(shader);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv),
-			    vmw_shader_size);
 }
 
-/**
+/*
  * This function is called when user space has no more references on the
  * base object. It releases the base-object's reference on the resource object.
  */
@@ -706,12 +679,11 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 
-	return ttm_ref_object_base_unref(tfile, arg->handle,
-					 TTM_REF_USAGE);
+	return ttm_ref_object_base_unref(tfile, arg->handle);
 }
 
 static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
-				 struct vmw_buffer_object *buffer,
+				 struct vmw_bo *buffer,
 				 size_t shader_size,
 				 size_t offset,
 				 SVGA3dShaderType shader_type,
@@ -722,31 +694,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 {
 	struct vmw_user_shader *ushader;
 	struct vmw_resource *res, *tmp;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
 	int ret;
 
-	if (unlikely(vmw_user_shader_size == 0))
-		vmw_user_shader_size =
-			ttm_round_pot(sizeof(struct vmw_user_shader)) +
-			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_user_shader_size,
-				   &ctx);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for shader "
-				  "creation.\n");
-		goto out;
-	}
-
 	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
 	if (unlikely(!ushader)) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_user_shader_size);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -769,7 +720,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	tmp = vmw_resource_reference(res);
 	ret = ttm_base_object_init(tfile, &ushader->base, false,
 				   VMW_RES_SHADER,
-				   &vmw_user_shader_base_release, NULL);
+				   &vmw_user_shader_base_release);
 
 	if (unlikely(ret != 0)) {
 		vmw_resource_unreference(&tmp);
@@ -786,38 +737,17 @@ out:
 
 
 static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-					     struct vmw_buffer_object *buffer,
+					     struct vmw_bo *buffer,
 					     size_t shader_size,
 					     size_t offset,
 					     SVGA3dShaderType shader_type)
 {
 	struct vmw_shader *shader;
 	struct vmw_resource *res;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
 	int ret;
 
-	if (unlikely(vmw_shader_size == 0))
-		vmw_shader_size =
-			ttm_round_pot(sizeof(struct vmw_shader)) +
-			VMW_IDA_ACC_SIZE;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_shader_size,
-				   &ctx);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for shader "
-				  "creation.\n");
-		goto out_err;
-	}
-
 	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
 	if (unlikely(!shader)) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_shader_size);
 		ret = -ENOMEM;
 		goto out_err;
 	}
@@ -844,20 +774,18 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_buffer_object *buffer = NULL;
+	struct vmw_bo *buffer = NULL;
 	SVGA3dShaderType shader_type;
 	int ret;
 
 	if (buffer_handle != SVGA3D_INVALID_ID) {
-		ret = vmw_user_bo_lookup(tfile, buffer_handle,
-					 &buffer, NULL);
+		ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer);
 		if (unlikely(ret != 0)) {
 			VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
 			return ret;
 		}
 
-		if ((u64)buffer->base.num_pages * PAGE_SIZE <
-		    (u64)size + (u64)offset) {
+		if ((u64)buffer->tbo.base.size < (u64)size + (u64)offset) {
 			VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
 			ret = -EINVAL;
 			goto out_bad_arg;
@@ -877,17 +805,11 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 		goto out_bad_arg;
 	}
 
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0))
-		goto out_bad_arg;
-
 	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
 				    shader_type, num_input_sig,
 				    num_output_sig, tfile, shader_handle);
-
-	ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
-	vmw_bo_unreference(&buffer);
+	vmw_user_bo_unref(&buffer);
 	return ret;
 }
 
@@ -946,13 +868,13 @@ int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
  * vmw_compat_shader_add - Create a compat shader and stage it for addition
  * as a command buffer managed resource.
  *
+ * @dev_priv: Pointer to device private structure.
  * @man: Pointer to the compat shader manager identifying the shader namespace.
  * @user_key: The key that is used to identify the shader. The key is
  * unique to the shader type.
  * @bytecode: Pointer to the bytecode of the shader.
  * @shader_type: Shader type.
- * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
- * to be created with.
+ * @size: Command size.
  * @list: Caller's list of staged command buffer resource actions.
  *
  */
@@ -964,34 +886,31 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 			  struct list_head *list)
 {
 	struct ttm_operation_ctx ctx = { false, true };
-	struct vmw_buffer_object *buf;
+	struct vmw_bo *buf;
 	struct ttm_bo_kmap_obj map;
 	bool is_iomem;
 	int ret;
 	struct vmw_resource *res;
+	struct vmw_bo_params bo_params = {
+		.domain = VMW_BO_DOMAIN_SYS,
+		.busy_domain = VMW_BO_DOMAIN_SYS,
+		.bo_type = ttm_bo_type_device,
+		.size = size,
+		.pin = false,
+		.keep_resv = true,
+	};
 
 	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
-	/* Allocate and pin a DMA buffer */
-	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (unlikely(!buf))
-		return -ENOMEM;
-
-	ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
-			  true, vmw_bo_bo_free);
+	ret = vmw_bo_create(dev_priv, &bo_params, &buf);
 	if (unlikely(ret != 0))
 		goto out;
 
-	ret = ttm_bo_reserve(&buf->base, false, true, NULL);
-	if (unlikely(ret != 0))
-		goto no_reserve;
-
 	/* Map and copy shader bytecode. */
-	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
-			  &map);
+	ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
 	if (unlikely(ret != 0)) {
-		ttm_bo_unreserve(&buf->base);
+		ttm_bo_unreserve(&buf->tbo);
 		goto no_reserve;
 	}
 
@@ -999,9 +918,9 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	WARN_ON(is_iomem);
 
 	ttm_bo_kunmap(&map);
-	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
+	ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
 	WARN_ON(ret != 0);
-	ttm_bo_unreserve(&buf->base);
+	ttm_bo_unreserve(&buf->tbo);
 
 	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
 	if (unlikely(ret != 0))
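For reference, below is a condensed, illustrative sketch of how vmw_compat_shader_add() sets up the bytecode buffer after this change, assembled only from the '+' lines in the hunks above. The helper name vmw_compat_shader_buf_sketch() is hypothetical; error unwinding beyond the basics and the vmw_shader_alloc()/cmdbuf staging that follow in the real function are omitted.

/*
 * Sketch only: condensed from the '+' lines of the diff above.
 * The helper name is hypothetical and not part of the driver.
 */
static int vmw_compat_shader_buf_sketch(struct vmw_private *dev_priv,
					const void *bytecode, size_t size,
					struct vmw_bo **out_buf)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_bo_kmap_obj map;
	struct vmw_bo *buf;
	bool is_iomem;
	int ret;
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_SYS,	/* replaces &vmw_sys_ne_placement */
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.bo_type = ttm_bo_type_device,
		.size = size,
		.pin = false,
		.keep_resv = true,	/* buffer stays reserved, so no explicit ttm_bo_reserve() */
	};

	/* One call replaces the old kzalloc() + vmw_bo_init() + ttm_bo_reserve() chain. */
	ret = vmw_bo_create(dev_priv, &bo_params, &buf);
	if (ret)
		return ret;

	/* Map and copy the shader bytecode, then validate and drop the reservation. */
	ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
	if (ret) {
		ttm_bo_unreserve(&buf->tbo);
		return ret;
	}
	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
	ttm_bo_kunmap(&map);
	ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
	ttm_bo_unreserve(&buf->tbo);

	*out_buf = buf;
	return ret;
}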
