Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c	302
1 file changed, 110 insertions, 192 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 833e909706a9..a0fc0801abb0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -29,17 +29,19 @@
  * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  * Dave Airlie
  */
+
+#include <linux/io.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
+
 #include <drm/drm_cache.h>
+#include <drm/drm_prime.h>
+#include <drm/radeon_drm.h>
+
 #include "radeon.h"
 #include "radeon_trace.h"
+#include "radeon_ttm.h"
 
-
-int radeon_ttm_init(struct radeon_device *rdev);
-void radeon_ttm_fini(struct radeon_device *rdev);
 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 
 /*
@@ -47,44 +49,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */
 
-static void radeon_update_memory_usage(struct radeon_bo *bo,
-				       unsigned mem_type, int sign)
-{
-	struct radeon_device *rdev = bo->rdev;
-	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
-
-	switch (mem_type) {
-	case TTM_PL_TT:
-		if (sign > 0)
-			atomic64_add(size, &rdev->gtt_usage);
-		else
-			atomic64_sub(size, &rdev->gtt_usage);
-		break;
-	case TTM_PL_VRAM:
-		if (sign > 0)
-			atomic64_add(size, &rdev->vram_usage);
-		else
-			atomic64_sub(size, &rdev->vram_usage);
-		break;
-	}
-}
-
 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct radeon_bo *bo;
 
 	bo = container_of(tbo, struct radeon_bo, tbo);
 
-	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
-
 	mutex_lock(&bo->rdev->gem.mutex);
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
-	if (bo->gem_base.import_attach)
-		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
-	drm_gem_object_release(&bo->gem_base);
+	if (bo->tbo.base.import_attach)
+		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+	drm_gem_object_release(&bo->tbo.base);
 	kfree(bo);
 }
 
@@ -100,7 +78,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	u32 c = 0, i;
 
 	rbo->placement.placement = rbo->placements;
-	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
 		/* Try placing BOs which don't need CPU access outside of the
 		 * CPU accessible part of VRAM
@@ -109,66 +86,37 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
 			rbo->placements[c].fpfn =
 				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
-			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-						     TTM_PL_FLAG_UNCACHED |
-						     TTM_PL_FLAG_VRAM;
+			rbo->placements[c].mem_type = TTM_PL_VRAM;
+			rbo->placements[c++].flags = 0;
 		}
 
 		rbo->placements[c].fpfn = 0;
-		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-					     TTM_PL_FLAG_UNCACHED |
-					     TTM_PL_FLAG_VRAM;
+		rbo->placements[c].mem_type = TTM_PL_VRAM;
+		rbo->placements[c++].flags = 0;
 	}
 
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_TT;
-
-		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
-			   (rbo->rdev->flags & RADEON_IS_AGP)) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_TT;
-		} else {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
-				TTM_PL_FLAG_TT;
-		}
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_TT;
+		rbo->placements[c++].flags = 0;
 	}
 
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_SYSTEM;
-
-		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
-		    rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_SYSTEM;
-		} else {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
-				TTM_PL_FLAG_SYSTEM;
-		}
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		rbo->placements[c++].flags = 0;
 	}
 	if (!c) {
 		rbo->placements[c].fpfn = 0;
-		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
-					     TTM_PL_FLAG_SYSTEM;
+		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		rbo->placements[c++].flags = 0;
 	}
 
 	rbo->placement.num_placement = c;
-	rbo->placement.num_busy_placement = c;
 
 	for (i = 0; i < c; ++i) {
 		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
-		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
 		    !rbo->placements[i].fpfn)
 			rbo->placements[i].lpfn =
 				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
@@ -180,13 +128,12 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 int radeon_bo_create(struct radeon_device *rdev,
 		     unsigned long size, int byte_align, bool kernel,
 		     u32 domain, u32 flags, struct sg_table *sg,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-	size_t acc_size;
 	int r;
 
 	size = ALIGN(size, PAGE_SIZE);
@@ -200,13 +147,11 @@ int radeon_bo_create(struct radeon_device *rdev,
 	}
 	*bo_ptr = NULL;
 
-	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
-				       sizeof(struct radeon_bo));
-
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
-	drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
+	drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
+	bo->tbo.base.funcs = &radeon_gem_object_funcs;
 	bo->rdev = rdev;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
@@ -256,9 +201,9 @@ int radeon_bo_create(struct radeon_device *rdev,
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
-	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-			&bo->placement, page_align, !kernel, acc_size,
-			sg, resv, &radeon_ttm_bo_destroy);
+	r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
+				 &bo->placement, page_align, !kernel, sg, resv,
+				 &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
@@ -273,7 +218,12 @@ int radeon_bo_create(struct radeon_device *rdev,
 int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 {
 	bool is_iomem;
-	int r;
+	long r;
+
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+				  false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
+		return r;
 
 	if (bo->kptr) {
 		if (ptr) {
@@ -281,7 +231,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 	if (r) {
 		return r;
 	}
@@ -307,18 +257,15 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
 	if (bo == NULL)
 		return NULL;
 
-	ttm_bo_get(&bo->tbo);
+	drm_gem_object_get(&bo->tbo.base);
 	return bo;
 }
 
 void radeon_bo_unref(struct radeon_bo **bo)
 {
-	struct ttm_buffer_object *tbo;
-
 	if ((*bo) == NULL)
 		return;
-	tbo = &((*bo)->tbo);
-	ttm_bo_put(tbo);
+	drm_gem_object_put(&(*bo)->tbo.base);
 	*bo = NULL;
 }
 
@@ -328,11 +275,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
-	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
 		return -EPERM;
 
-	if (bo->pin_count) {
-		bo->pin_count++;
+	if (bo->tbo.pin_count) {
+		ttm_bo_pin(&bo->tbo);
 		if (gpu_addr)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 
@@ -357,20 +304,18 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 	radeon_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
-		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
 		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
 		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
 			bo->placements[i].lpfn =
 				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
 		else
 			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
-
-		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
-		bo->pin_count = 1;
+		ttm_bo_pin(&bo->tbo);
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 		if (domain == RADEON_GEM_DOMAIN_VRAM)
@@ -388,36 +333,22 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
 }
 
-int radeon_bo_unpin(struct radeon_bo *bo)
+void radeon_bo_unpin(struct radeon_bo *bo)
 {
-	struct ttm_operation_ctx ctx = { false, false };
-	int r, i;
-
-	if (!bo->pin_count) {
-		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
-		return 0;
-	}
-	bo->pin_count--;
-	if (bo->pin_count)
-		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++) {
-		bo->placements[i].lpfn = 0;
-		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (likely(r == 0)) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+	ttm_bo_unpin(&bo->tbo);
+	if (!bo->tbo.pin_count) {
+		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
 		else
 			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
-	} else {
-		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	}
-	return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
+	struct ttm_device *bdev = &rdev->mman.bdev;
+	struct ttm_resource_manager *man;
+
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
 #ifndef CONFIG_HIBERNATION
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -426,7 +357,10 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
 		return 0;
 	}
 #endif
-	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+	man = ttm_manager_type(bdev, TTM_PL_VRAM);
+	if (!man)
+		return 0;
+	return ttm_resource_manager_evict_all(bdev, man);
 }
 
 void radeon_bo_force_delete(struct radeon_device *rdev)
@@ -439,13 +373,13 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
-			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-			*((unsigned long *)&bo->gem_base.refcount));
+			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+			*((unsigned long *)&bo->tbo.base.refcount));
 		mutex_lock(&bo->rdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_put_unlocked(&bo->gem_base);
+		drm_gem_object_put(&bo->tbo.base);
 	}
 }
 
@@ -480,7 +414,9 @@ void radeon_bo_fini(struct radeon_device *rdev)
 static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
 {
 	u64 real_vram_size = rdev->mc.real_vram_size;
-	u64 vram_usage = atomic64_read(&rdev->vram_usage);
+	struct ttm_resource_manager *man =
+		ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
+	u64 vram_usage = ttm_resource_manager_usage(man);
 
 	/* This function is based on the current VRAM usage.
 	 *
@@ -528,29 +464,32 @@ static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
 }
 
 int radeon_bo_list_validate(struct radeon_device *rdev,
-			    struct ww_acquire_ctx *ticket,
+			    struct drm_exec *exec,
 			    struct list_head *head, int ring)
 {
 	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_bo_list *lobj;
-	struct list_head duplicates;
-	int r;
 	u64 bytes_moved = 0, initial_bytes_moved;
 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
+	int r;
 
-	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
-	if (unlikely(r != 0)) {
-		return r;
+	drm_exec_until_all_locked(exec) {
+		list_for_each_entry(lobj, head, list) {
+			r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base,
+						 1);
+			drm_exec_retry_on_contention(exec);
+			if (unlikely(r && r != -EALREADY))
+				return r;
+		}
 	}
 
-	list_for_each_entry(lobj, head, tv.head) {
+	list_for_each_entry(lobj, head, list) {
 		struct radeon_bo *bo = lobj->robj;
-		if (!bo->pin_count) {
+		if (!bo->tbo.pin_count) {
 			u32 domain = lobj->preferred_domains;
 			u32 allowed = lobj->allowed_domains;
 			u32 current_domain =
-				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+				radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
 
 			/* Check if this buffer will be moved and don't move it
 			 * if we have moved too many buffers for this IB already.
@@ -583,7 +522,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 					domain = lobj->allowed_domains;
 					goto retry;
 				}
-				ttm_eu_backoff_reservation(ticket, head);
 				return r;
 			}
 		}
@@ -591,11 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		lobj->tiling_flags = bo->tiling_flags;
 	}
 
-	list_for_each_entry(lobj, &duplicates, tv.head) {
-		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
-		lobj->tiling_flags = lobj->robj->tiling_flags;
-	}
-
 	return 0;
 }
 
@@ -607,13 +540,12 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 	int steal;
 	int i;
 
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!bo->tiling_flags)
 		return 0;
 
 	if (bo->surface_reg >= 0) {
-		reg = &rdev->surface_regs[bo->surface_reg];
 		i = bo->surface_reg;
 		goto out;
 	}
@@ -626,7 +558,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 			break;
 
 		old_object = reg->bo;
-		if (old_object->pin_count == 0)
+		if (old_object->tbo.pin_count == 0)
 			steal = i;
 	}
 
@@ -649,8 +581,8 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-			       bo->tbo.mem.start << PAGE_SHIFT,
-			       bo->tbo.num_pages << PAGE_SHIFT);
+			       bo->tbo.resource->start << PAGE_SHIFT,
+			       bo->tbo.base.size);
 	return 0;
 }
 
@@ -733,7 +665,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -745,7 +677,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop)
 {
 	if (!force_drop)
-		lockdep_assert_held(&bo->tbo.resv->lock.base);
+		dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
@@ -755,7 +687,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 		return 0;
 	}
 
-	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;
 
@@ -770,9 +702,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 	return radeon_bo_get_surface_reg(bo);
 }
 
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   bool evict,
-			   struct ttm_mem_reg *new_mem)
+void radeon_bo_move_notify(struct ttm_buffer_object *bo)
 {
 	struct radeon_bo *rbo;
 
@@ -782,16 +712,9 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 1);
 	radeon_vm_bo_invalidate(rbo->rdev, rbo);
-
-	/* update statistics */
-	if (!new_mem)
-		return;
-
-	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
-	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct radeon_device *rdev;
@@ -804,58 +727,47 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
 	rdev = rbo->rdev;
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	size = bo->mem.num_pages << PAGE_SHIFT;
-	offset = bo->mem.start << PAGE_SHIFT;
+	size = bo->resource->size;
+	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + size) <= rdev->mc.visible_vram_size)
 		return 0;
 
 	/* Can't move a pinned BO to visible VRAM */
-	if (rbo->pin_count > 0)
-		return -EINVAL;
+	if (rbo->tbo.pin_count > 0)
+		return VM_FAULT_SIGBUS;
 
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
 	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
 	for (i = 0; i < rbo->placement.num_placement; i++) {
 		/* Force into visible VRAM */
-		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
 		    (!rbo->placements[i].lpfn ||
 		     rbo->placements[i].lpfn > lpfn))
 			rbo->placements[i].lpfn = lpfn;
 	}
 	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-		return ttm_bo_validate(bo, &rbo->placement, &ctx);
-	} else if (unlikely(r != 0)) {
-		return r;
+		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+	} else if (likely(!r)) {
+		offset = bo->resource->start << PAGE_SHIFT;
+		/* this should never happen */
+		if ((offset + size) > rdev->mc.visible_vram_size)
+			return VM_FAULT_SIGBUS;
 	}
 
-	offset = bo->mem.start << PAGE_SHIFT;
-	/* this should never happen */
-	if ((offset + size) > rdev->mc.visible_vram_size)
-		return -EINVAL;
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;
 
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	return 0;
 }
 
-int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
-{
-	int r;
-
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-	if (unlikely(r != 0))
-		return r;
-	if (mem_type)
-		*mem_type = bo->tbo.mem.mem_type;
-
-	r = ttm_bo_wait(&bo->tbo, true, no_wait);
-	ttm_bo_unreserve(&bo->tbo);
-	return r;
-}
-
 /**
  * radeon_bo_fence - add fence to buffer object
  *
@@ -867,10 +779,16 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.resv;
+	struct dma_resv *resv = bo->tbo.base.resv;
+	int r;
+
+	r = dma_resv_reserve_fences(resv, 1);
+	if (r) {
+		/* As last resort on OOM we block for the fence */
+		dma_fence_wait(&fence->base, false);
+		return;
+	}
 
-	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
-	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+	dma_resv_add_fence(resv, &fence->base, shared ?
+			   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
 }
