author	Christoph Hellwig <hch@lst.de>	2021-03-18 17:14:23 +0100
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2021-03-19 04:58:25 +0000
commit	2d29960af0bee8cc6731b9bd3964850c9e7a6840 (patch)
tree	ba8681e49c8ea749f5c2fe64af50a1aa3f457329 /drivers/xen/swiotlb-xen.c
parent	73f620951b2b594bdc38722c0d647c3b3312af7a (diff)
swiotlb: dynamically allocate io_tlb_default_mem
Instead of allocating ->list and ->orig_addr separately, just do one dynamic allocation for the actual io_tlb_mem structure. This simplifies a lot of the initialization code, and also allows us to just check io_tlb_default_mem to see if swiotlb is in use.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
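As a rough illustration of the pattern the message describes (a hedged user-space sketch, not the kernel code; the struct layout and the io_tlb_mem_sketch/sketch_init names are placeholders), the bookkeeping that previously lived in separately allocated ->list and ->orig_addr arrays can hang off a single dynamically allocated structure, and a NULL pointer then answers "is swiotlb in use?":

#include <stdio.h>
#include <stdlib.h>

/* Placeholder stand-in for struct io_tlb_mem: one allocation covers the
 * header and the per-slab bookkeeping via a flexible array member. */
struct io_tlb_mem_sketch {
	unsigned long nslabs;
	unsigned long start;
	unsigned long end;
	struct {
		unsigned long orig_addr;
		unsigned int list;
	} slots[];
};

/* NULL now means "swiotlb not in use". */
static struct io_tlb_mem_sketch *default_mem;

static int sketch_init(unsigned long nslabs)
{
	/* One allocation instead of separate ->list / ->orig_addr arrays. */
	default_mem = calloc(1, sizeof(*default_mem) +
				nslabs * sizeof(default_mem->slots[0]));
	if (!default_mem)
		return -1;
	default_mem->nslabs = nslabs;
	return 0;
}

static int sketch_in_use(void)
{
	return default_mem != NULL;
}

int main(void)
{
	if (sketch_init(32768))		/* 64 MiB worth of 2 KiB slabs, for example */
		return 1;
	printf("swiotlb in use: %d, nslabs: %lu\n",
	       sketch_in_use(), default_mem->nslabs);
	free(default_mem);
	return 0;
}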
Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--	drivers/xen/swiotlb-xen.c	22
1 file changed, 8 insertions(+), 14 deletions(-)
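The hunks below derive nslabs from a byte size instead of the other way around. A small standalone sketch of that arithmetic (IO_TLB_SHIFT is 11 in the kernel, i.e. 2 KiB per slab; the 64 MiB starting size is an assumption standing in for swiotlb_size_or_default()):

#include <stdio.h>

#define IO_TLB_SHIFT	11		/* 2 KiB per swiotlb slab */

int main(void)
{
	/* Assumed default size; the real code asks swiotlb_size_or_default(). */
	unsigned long bytes = 64UL << 20;
	unsigned long nslabs = bytes >> IO_TLB_SHIFT;

	/* The old code went the other way: nslabs first, then
	 * bytes = nslabs << IO_TLB_SHIFT recomputed on every retry. */
	printf("%luMB -> %lu slabs\n", bytes >> 20, nslabs);
	return 0;
}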
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 5329ad54a5f3..4c89afc0df62 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -158,17 +158,14 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
int __ref xen_swiotlb_init(void)
{
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
- unsigned long nslabs, bytes, order;
- unsigned int repeat = 3;
+ unsigned long bytes = swiotlb_size_or_default();
+ unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+ unsigned int order, repeat = 3;
int rc = -ENOMEM;
char *start;
- nslabs = swiotlb_nr_tbl();
- if (!nslabs)
- nslabs = DEFAULT_NSLABS;
retry:
m_ret = XEN_SWIOTLB_ENOMEM;
- bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
/*
@@ -221,19 +218,16 @@ error:
#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void)
{
- unsigned long nslabs, bytes;
+ unsigned long bytes = swiotlb_size_or_default();
+ unsigned long nslabs = bytes >> IO_TLB_SHIFT;
unsigned int repeat = 3;
char *start;
int rc;
- nslabs = swiotlb_nr_tbl();
- if (!nslabs)
- nslabs = DEFAULT_NSLABS;
retry:
/*
* Get IO TLB memory from any location.
*/
- bytes = nslabs << IO_TLB_SHIFT;
start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
if (!start)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -248,8 +242,8 @@ retry:
if (repeat--) {
/* Min is 2MB */
nslabs = max(1024UL, (nslabs >> 1));
- pr_info("Lowering to %luMB\n",
- (nslabs << IO_TLB_SHIFT) >> 20);
+ bytes = nslabs << IO_TLB_SHIFT;
+ pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
}
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
@@ -548,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
+ return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
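For reference, the retry path above halves the slab count with a floor of 1024 slabs; with IO_TLB_SHIFT = 11 that floor is 1024 << 11 = 2 MiB, which is what the "Min is 2MB" comment refers to. A standalone sketch of just that back-off arithmetic (the starting size and loop bound are illustrative):

#include <stdio.h>

#define IO_TLB_SHIFT	11

/* Mirror of max(1024UL, nslabs >> 1): never drop below 2 MiB of slabs. */
static unsigned long lower_nslabs(unsigned long nslabs)
{
	unsigned long halved = nslabs >> 1;

	return halved > 1024UL ? halved : 1024UL;
}

int main(void)
{
	unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;	/* assumed 64 MiB start */
	int repeat = 3;

	while (repeat--) {
		nslabs = lower_nslabs(nslabs);
		printf("Lowering to %luMB\n", (nslabs << IO_TLB_SHIFT) >> 20);
	}
	return 0;
}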