author	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-07 10:53:02 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-07 10:53:02 -0700
commit	e51418191f5a741b5f94764798c81bf69dec4806 (patch)
tree	227e06b51b75402744c4076b567a12cd72027416 /drivers
parent	25d8d4eecace9de5a6a2193e4df1917afbdd3052 (diff)
parent	d7b461caa6cc64dd190577b46b0ec892a8d5e7c0 (diff)
Merge tag 'for-linus-5.9-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:

 - two trivial comment fixes

 - a small series for the Xen balloon driver fixing some issues

 - a series for the Xen privcmd driver eliminating the use of
   get_user_pages*()

 - a series for the Xen swiotlb driver cleaning it up and adding support
   for letting the kernel run as dom0 on the Rpi4 (the new phys/dma
   translation layering is sketched just after the diffstat below)

* tag 'for-linus-5.9-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/arm: call dma_to_phys on the dma_addr_t parameter of dma_cache_maint
  xen/arm: introduce phys/dma translations in xen_dma_sync_for_*
  swiotlb-xen: introduce phys_to_dma/dma_to_phys translations
  swiotlb-xen: remove XEN_PFN_PHYS
  swiotlb-xen: add struct device * parameter to is_xen_swiotlb_buffer
  swiotlb-xen: add struct device * parameter to xen_dma_sync_for_device
  swiotlb-xen: add struct device * parameter to xen_dma_sync_for_cpu
  swiotlb-xen: add struct device * parameter to xen_bus_to_phys
  swiotlb-xen: add struct device * parameter to xen_phys_to_bus
  swiotlb-xen: remove start_dma_addr
  swiotlb-xen: use vmalloc_to_page on vmalloc virt addresses
  Revert "xen/balloon: Fix crash when ballooning on x86 32 bit PAE"
  xen/balloon: make the balloon wait interruptible
  xen/balloon: fix accounting in alloc_xenballooned_pages error path
  xen: hypercall.h: fix duplicated word
  xen/gntdev: gntdev.h: drop a duplicated word
  xen/privcmd: Convert get_user_pages*() to pin_user_pages*()
  xen/privcmd: Mark pages as dirty
  xen/privcmd: Corrected error handling path
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/xen/balloon.c		26
-rw-r--r--	drivers/xen/privcmd.c		32
-rw-r--r--	drivers/xen/swiotlb-xen.c	119
3 files changed, 96 insertions(+), 81 deletions(-)
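Before the per-file diffs, a note on the central idea of the swiotlb-xen series: what used to be a single xen_phys_to_bus() conversion is split into two explicit layers, a pfn-to-bfn translation into a Xen machine ("bus") address, followed by the architecture's phys_to_dma() hook into the device-visible DMA address. Keeping the layers separate is what lets dom0 run on the Rpi4, where the two address spaces differ. The following is a minimal standalone sketch of that layering, not the kernel code: the typedefs, pfn_to_bfn(), and the offset inside phys_to_dma() are simplified stand-ins.

#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

#define XEN_PAGE_SHIFT	12
#define XEN_PAGE_MASK	(~((1UL << XEN_PAGE_SHIFT) - 1))

/* Stand-in for pfn_to_bfn(): identity here; real Xen may remap frames. */
static unsigned long pfn_to_bfn(unsigned long pfn)
{
	return pfn;
}

/* Stand-in for the arch phys_to_dma(): a fixed bus offset, purely illustrative. */
static dma_addr_t phys_to_dma(void *dev, phys_addr_t paddr)
{
	(void)dev;
	return paddr + 0xc0000000ULL;
}

/* Layer 1: guest-physical address to Xen machine ("bus") address. */
static phys_addr_t xen_phys_to_bus(void *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(paddr >> XEN_PAGE_SHIFT);

	(void)dev;
	return ((phys_addr_t)bfn << XEN_PAGE_SHIFT) | (paddr & ~XEN_PAGE_MASK);
}

/* Layer 2: machine address to device-visible DMA address via the arch hook. */
static dma_addr_t xen_phys_to_dma(void *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

int main(void)
{
	/* With identity pfn_to_bfn(), only the arch offset is visible. */
	assert(xen_phys_to_dma(0, 0x1234) == 0x1234 + 0xc0000000ULL);
	return 0;
}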
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 77c57568e5d7..b1d8b028bf80 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -266,20 +266,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL;
}
-#ifdef CONFIG_SPARSEMEM
- {
- unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
- unsigned long pfn = res->start >> PAGE_SHIFT;
-
- if (pfn > limit) {
- pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
- pfn, limit);
- release_memory_resource(res);
- return NULL;
- }
- }
-#endif
-
return res;
}
@@ -568,11 +554,13 @@ static int add_ballooned_pages(int nr_pages)
if (xen_hotplug_unpopulated) {
st = reserve_additional_memory();
if (st != BP_ECANCELED) {
+ int rc;
+
mutex_unlock(&balloon_mutex);
- wait_event(balloon_wq,
+ rc = wait_event_interruptible(balloon_wq,
!list_empty(&ballooned_pages));
mutex_lock(&balloon_mutex);
- return 0;
+ return rc ? -ENOMEM : 0;
}
}
@@ -630,6 +618,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
out_undo:
mutex_unlock(&balloon_mutex);
free_xenballooned_pages(pgno, pages);
+ /*
+ * NB: free_xenballooned_pages will only subtract pgno pages, but since
+ * target_unpopulated is incremented with nr_pages at the start we need
+ * to remove the remaining ones also, or accounting will be screwed.
+ */
+ balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
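The alloc_xenballooned_pages() hunk above closes an accounting leak: balloon_stats.target_unpopulated is charged with nr_pages up front, but free_xenballooned_pages() on the error path un-charges only the pgno pages actually obtained, so the remainder must be subtracted by hand. A self-contained toy model of that invariant, with hypothetical names loosely mirroring the driver:

#include <assert.h>

static long target_unpopulated;

/* Models free_xenballooned_pages(): un-charges only what it frees. */
static void free_pages_model(int pgno)
{
	target_unpopulated -= pgno;
}

/* Models alloc_xenballooned_pages() failing after pgno of nr_pages. */
static int alloc_pages_model(int nr_pages, int pgno)
{
	target_unpopulated += nr_pages;	/* charged in full up front */
	if (pgno < nr_pages) {
		free_pages_model(pgno);
		/* the fix: also drop the pages that were never allocated */
		target_unpopulated -= nr_pages - pgno;
		return -1;
	}
	return 0;
}

int main(void)
{
	alloc_pages_model(8, 3);		/* fail after 3 of 8 pages */
	assert(target_unpopulated == 0);	/* accounting balanced again */
	return 0;
}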
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index a250d118144a..095d683ad574 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -580,13 +580,13 @@ out_unlock:
static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
- struct page *pages[], unsigned int nr_pages)
+ struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
unsigned int i;
for (i = 0; i < num; i++) {
unsigned int requested;
- int pinned;
+ int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
@@ -594,14 +594,15 @@ static int lock_pages(
if (requested > nr_pages)
return -ENOSPC;
- pinned = get_user_pages_fast(
+ page_count = pin_user_pages_fast(
(unsigned long) kbufs[i].uptr,
requested, FOLL_WRITE, pages);
- if (pinned < 0)
- return pinned;
+ if (page_count < 0)
+ return page_count;
- nr_pages -= pinned;
- pages += pinned;
+ *pinned += page_count;
+ nr_pages -= page_count;
+ pages += page_count;
}
return 0;
@@ -609,15 +610,7 @@ static int lock_pages(
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
- unsigned int i;
-
- if (!pages)
- return;
-
- for (i = 0; i < nr_pages; i++) {
- if (pages[i])
- put_page(pages[i]);
- }
+ unpin_user_pages_dirty_lock(pages, nr_pages, true);
}
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
@@ -630,6 +623,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
struct xen_dm_op_buf *xbufs = NULL;
unsigned int i;
long rc;
+ unsigned int pinned = 0;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
return -EFAULT;
@@ -683,9 +677,11 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
goto out;
}
- rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
- if (rc)
+ rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
+ if (rc < 0) {
+ nr_pages = pinned;
goto out;
+ }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
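The privcmd changes above pair pin_user_pages_fast() with unpin_user_pages_dirty_lock() and, crucially, make lock_pages() report its progress through the new *pinned argument, so a failure midway through the buffer loop can be unwound exactly. A toy model of that cleanup logic; pin_buf() and unpin_all() are hypothetical stand-ins, not kernel APIs:

#include <assert.h>

static int pinned_count;	/* global pin accounting for the model */

/* Models pin_user_pages_fast(): fails when the request exceeds the pool. */
static int pin_buf(int want, int avail)
{
	if (want > avail)
		return -1;
	pinned_count += want;
	return want;
}

/* Models unpin_user_pages_dirty_lock(): releases nr pinned pages. */
static void unpin_all(int nr)
{
	pinned_count -= nr;
}

/* Models lock_pages(): *pinned stays valid even on mid-loop failure. */
static int lock_pages_model(const int bufs[], int num, int avail, int *pinned)
{
	for (int i = 0; i < num; i++) {
		int got = pin_buf(bufs[i], avail);

		if (got < 0)
			return got;
		*pinned += got;
		avail -= got;
	}
	return 0;
}

int main(void)
{
	const int bufs[] = { 2, 3, 9 };	/* third buffer exceeds the pool */
	int pinned = 0;

	if (lock_pages_model(bufs, 3, 8, &pinned) < 0)
		unpin_all(pinned);	/* the fix: unwind only what was pinned */
	assert(pinned_count == 0);
	return 0;
}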
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b6d27762c6f8..39a0f2e0847c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -52,37 +52,39 @@ static unsigned long xen_io_tlb_nslabs;
* Quick lookup value of the bus address of the IOTLB.
*/
-static u64 start_dma_addr;
-
-/*
- * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
- * can be 32bit when dma_addr_t is 64bit leading to a loss in
- * information if the shift is done before casting to 64bit.
- */
-static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
- dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;
+ phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;
- dma |= paddr & ~XEN_PAGE_MASK;
+ baddr |= paddr & ~XEN_PAGE_MASK;
+ return baddr;
+}
- return dma;
+static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}
-static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(struct device *dev,
+ phys_addr_t baddr)
{
unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
- dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
- phys_addr_t paddr = dma;
-
- paddr |= baddr & ~XEN_PAGE_MASK;
+ phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
+ (baddr & ~XEN_PAGE_MASK);
return paddr;
}
-static inline dma_addr_t xen_virt_to_bus(void *address)
+static inline phys_addr_t xen_dma_to_phys(struct device *dev,
+ dma_addr_t dma_addr)
{
- return xen_phys_to_bus(virt_to_phys(address));
+ return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
+}
+
+static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
+{
+ return xen_phys_to_dma(dev, virt_to_phys(address));
}
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
@@ -99,11 +101,11 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
return 0;
}
-static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
+static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
- unsigned long bfn = XEN_PFN_DOWN(dma_addr);
+ unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
unsigned long xen_pfn = bfn_to_local_pfn(bfn);
- phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);
+ phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;
/* If the address is outside our domain, it CAN
* have the same virtual address as another address
@@ -241,7 +243,6 @@ retry:
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
- start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
if (early) {
if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
verbose))
@@ -307,12 +308,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
- /* At this point dma_handle is the physical address, next we are
+ /* At this point dma_handle is the dma address, next we are
* going to set it to the machine address.
* Do not use virt_to_phys(ret) because on ARM it doesn't correspond
* to *dma_handle. */
- phys = *dma_handle;
- dev_addr = xen_phys_to_bus(phys);
+ phys = dma_to_phys(hwdev, *dma_handle);
+ dev_addr = xen_phys_to_dma(hwdev, phys);
if (((dev_addr + size - 1 <= dma_mask)) &&
!range_straddles_page_boundary(phys, size))
*dma_handle = dev_addr;
@@ -322,6 +323,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
+ *dma_handle = phys_to_dma(hwdev, *dma_handle);
SetPageXenRemapped(virt_to_page(ret));
}
memset(ret, 0, size);
@@ -335,23 +337,30 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
int order = get_order(size);
phys_addr_t phys;
u64 dma_mask = DMA_BIT_MASK(32);
+ struct page *page;
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
/* do not use virt_to_phys because on ARM it doesn't return you the
* physical address */
- phys = xen_bus_to_phys(dev_addr);
+ phys = xen_dma_to_phys(hwdev, dev_addr);
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
+ if (is_vmalloc_addr(vaddr))
+ page = vmalloc_to_page(vaddr);
+ else
+ page = virt_to_page(vaddr);
+
if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
range_straddles_page_boundary(phys, size)) &&
- TestClearPageXenRemapped(virt_to_page(vaddr)))
+ TestClearPageXenRemapped(page))
xen_destroy_contiguous_region(phys, order);
- xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+ xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
+ attrs);
}
/*
@@ -367,7 +376,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = xen_phys_to_bus(phys);
+ dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE);
/*
@@ -386,13 +395,13 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
- size, size, dir, attrs);
+ map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
+ phys, size, size, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
phys = map;
- dev_addr = xen_phys_to_bus(map);
+ dev_addr = xen_phys_to_dma(dev, map);
/*
* Ensure that the address returned is DMA'ble
@@ -404,8 +413,12 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
}
done:
- if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_device(dev_addr, phys, size, dir);
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+ arch_sync_dma_for_device(phys, size, dir);
+ else
+ xen_dma_sync_for_device(dev, dev_addr, size, dir);
+ }
return dev_addr;
}
@@ -420,15 +433,19 @@ done:
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- phys_addr_t paddr = xen_bus_to_phys(dev_addr);
+ phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
- if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
+ if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+ arch_sync_dma_for_cpu(paddr, size, dir);
+ else
+ xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
+ }
/* NOTE: We use dev_addr here, not paddr! */
- if (is_xen_swiotlb_buffer(dev_addr))
+ if (is_xen_swiotlb_buffer(hwdev, dev_addr))
swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}
@@ -436,12 +453,16 @@ static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
- phys_addr_t paddr = xen_bus_to_phys(dma_addr);
+ phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
- if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
+ if (!dev_is_dma_coherent(dev)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+ arch_sync_dma_for_cpu(paddr, size, dir);
+ else
+ xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
+ }
- if (is_xen_swiotlb_buffer(dma_addr))
+ if (is_xen_swiotlb_buffer(dev, dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
@@ -449,13 +470,17 @@ static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
- phys_addr_t paddr = xen_bus_to_phys(dma_addr);
+ phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
- if (is_xen_swiotlb_buffer(dma_addr))
+ if (is_xen_swiotlb_buffer(dev, dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
- if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_device(dma_addr, paddr, size, dir);
+ if (!dev_is_dma_coherent(dev)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+ arch_sync_dma_for_device(paddr, size, dir);
+ else
+ xen_dma_sync_for_device(dev, dma_addr, size, dir);
+ }
}
/*
@@ -536,7 +561,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
+ return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
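A closing note on the pattern repeated through the map and sync paths above: pfn_valid() on the dma_to_phys() result decides whether a page is local to the domain (synced with the plain arch_sync_dma_for_*() cache maintenance) or foreign (routed through the Xen-specific xen_dma_sync_for_*()). A minimal sketch of that dispatch, with every helper stubbed rather than taken from the kernel:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

static const char *path_taken;

/* Stand-in for pfn_valid(PFN_DOWN(dma_to_phys(...))): an arbitrary cutoff. */
static bool is_local_pfn(phys_addr_t paddr)
{
	return paddr < 0x100000000ULL;
}

static void arch_sync_for_cpu(void) { path_taken = "arch"; }
static void xen_sync_for_cpu(void)  { path_taken = "xen"; }

/* Mirrors the xen_swiotlb_sync_single_for_cpu() shape on a non-coherent device. */
static void sync_for_cpu(phys_addr_t paddr)
{
	if (is_local_pfn(paddr))
		arch_sync_for_cpu();	/* page belongs to this domain */
	else
		xen_sync_for_cpu();	/* foreign page: Xen-specific path */
}

int main(void)
{
	sync_for_cpu(0x1000);		/* local page -> arch maintenance */
	assert(path_taken[0] == 'a');
	sync_for_cpu(0x200000000ULL);	/* foreign page -> Xen path */
	assert(path_taken[0] == 'x');
	return 0;
}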