 drivers/gpu/drm/xe/xe_bo.c   |  8 +++++---
 drivers/gpu/drm/xe/xe_bo.h   | 24 ++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_mmio.c |  7 ++-----
 drivers/gpu/drm/xe/xe_vm.c   |  8 +++++---
 4 files changed, 36 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 4693372ec82e..7a5118bf4dc0 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -251,9 +251,11 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
 	if (xe_tt->sg)
 		return 0;
 
-	ret = sg_alloc_table_from_pages(&xe_tt->sgt, tt->pages, num_pages,
-					0, (u64)num_pages << PAGE_SHIFT,
-					GFP_KERNEL);
+	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
+						num_pages, 0,
+						(u64)num_pages << PAGE_SHIFT,
+						xe_sg_segment_size(xe_tt->dev),
+						GFP_KERNEL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 7e111332c35a..2d08622f58a7 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -296,6 +296,30 @@ void xe_bo_put_commit(struct llist_head *deferred);
 
 struct sg_table *xe_bo_get_sg(struct xe_bo *bo);
 
+/*
+ * xe_sg_segment_size() - Provides upper limit for sg segment size.
+ * @dev: device pointer
+ *
+ * Returns the maximum segment size for the 'struct scatterlist'
+ * elements.
+ */
+static inline unsigned int xe_sg_segment_size(struct device *dev)
+{
+	struct scatterlist __maybe_unused sg;
+	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;
+
+	max = min_t(size_t, max, dma_max_mapping_size(dev));
+
+	/*
+	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
+	 * cross dma segment boundary. It does so by padding some sg elements.
+	 * This can cause overflow, ending up with sg->length being set to 0.
+	 * Avoid this by ensuring maximum segment size is half of 'max'
+	 * rounded down to PAGE_SIZE.
+	 */
+	return round_down(max / 2, PAGE_SIZE);
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 /**
  * xe_bo_is_mem_type - Whether the bo currently resides in the given
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index c7fbb1cc1f64..4c270a07136e 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -11,6 +11,7 @@
 #include "regs/xe_engine_regs.h"
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_regs.h"
+#include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_mcr.h"
@@ -26,11 +27,7 @@ static int xe_set_dma_info(struct xe_device *xe)
 	unsigned int mask_size = xe->info.dma_mask_size;
 	int err;
 
-	/*
-	 * We don't have a max segment size, so set it to the max so sg's
-	 * debugging layer doesn't complain
-	 */
-	dma_set_max_seg_size(xe->drm.dev, UINT_MAX);
+	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
 
 	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
 	if (err)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 40295beea3a2..25a61735aac8 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -117,9 +117,11 @@ mm_closed:
 	if (ret)
 		goto out;
 
-	ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
-					0, (u64)pinned << PAGE_SHIFT,
-					GFP_KERNEL);
+	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+						pinned, 0,
+						(u64)pinned << PAGE_SHIFT,
+						xe_sg_segment_size(xe->drm.dev),
+						GFP_KERNEL);
 	if (ret) {
 		vma->userptr.sg = NULL;
 		goto out;
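
For reference, the arithmetic in the new xe_sg_segment_size() helper can be exercised outside the kernel. The sketch below is a hypothetical userspace approximation, not driver code: it assumes 4 KiB pages, a 32-bit sg->length field, and dma_max_mapping_size() reporting no limit, and only mirrors the min / halve / round_down steps from the hunk above.

/* seg_size_sketch.c - userspace approximation of xe_sg_segment_size() */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL              /* assumed 4 KiB pages */

static uint64_t round_down_u64(uint64_t x, uint64_t align)
{
	return x - (x % align);
}

int main(void)
{
	/* sg->length is a 32-bit field, so the raw ceiling is UINT32_MAX */
	uint64_t field_max = (1ULL << (sizeof(uint32_t) * 8)) - 1;
	/* stand-in for dma_max_mapping_size() reporting "no limit" */
	uint64_t dma_max = UINT64_MAX;
	uint64_t max = field_max < dma_max ? field_max : dma_max;
	uint64_t seg = round_down_u64(max / 2, PAGE_SIZE);

	printf("segment size limit: %#llx (%llu MiB)\n",
	       (unsigned long long)seg, (unsigned long long)(seg >> 20));

	/* a segment padded by the IOMMU up to 2 * seg still fits in 32 bits */
	printf("padded worst case fits: %s\n",
	       2 * seg <= field_max ? "yes" : "no");
	return 0;
}

Under those assumptions the limit works out to 0x7ffff000 (just under 2 GiB), so even a segment padded by the IOMMU to roughly twice that size stays below the 32-bit sg->length ceiling instead of wrapping to 0, which is the overflow the in-diff comment describes.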