author     Ross Zwisler <ross.zwisler@linux.intel.com>  2015-08-27 13:14:20 -0600
committer  Dan Williams <dan.j.williams@intel.com>      2015-08-27 19:38:28 -0400
commit     67a3e8fe90156d41cd480d3dfbb40f3bc007c262 (patch)
tree       fcd0fa657c6424ba874e46dfc5349cb55f630f6b /tools/testing/nvdimm
parent     e2e05394e4a3420dab96f728df4531893494e15d (diff)
nd_blk: change aperture mapping from WC to WB
This should result in a pretty sizeable performance gain for reads. For a rough
comparison I did some simple read testing using PMEM to compare reads of write
combining (WC) mappings vs write-back (WB). This was done on a random lab
machine.

PMEM reads from a write combining mapping:
	# dd of=/dev/null if=/dev/pmem0 bs=4096 count=100000
	100000+0 records in
	100000+0 records out
	409600000 bytes (410 MB) copied, 9.2855 s, 44.1 MB/s

PMEM reads from a write-back mapping:
	# dd of=/dev/null if=/dev/pmem0 bs=4096 count=1000000
	1000000+0 records in
	1000000+0 records out
	4096000000 bytes (4.1 GB) copied, 3.44034 s, 1.2 GB/s

To be able to safely support a write-back aperture I needed to add support
for the "read flush" _DSM flag, as outlined in the DSM spec:

	http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf

This flag tells the ND BLK driver that it needs to flush the cache lines
associated with the aperture after the aperture is moved but before any new
data is read.  This ensures that any stale cache lines from the previous
contents of the aperture will be discarded from the processor cache, and the
new data will be read properly from the DIMM.  We know that the cache lines
are clean and will be discarded without any writeback because either a) the
previous aperture operation was a read, and we never modified the contents of
the aperture, or b) the previous aperture operation was a write and we must
have written back the dirtied contents of the aperture to the DIMM before the
I/O was completed.

In order to add support for the "read flush" flag I needed to add a generic
routine to invalidate cache lines, mmio_flush_range().  This is protected by
the ARCH_HAS_MMIO_FLUSH Kconfig variable, and is currently only supported on
x86.

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
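[Editor's sketch] A minimal illustration of how a BLK read path can honor the
"read flush" flag described above.  This is not the driver's actual code:
do_aperture_read(), aperture, and read_flush are hypothetical names used only
for this sketch; mmio_flush_range() is the routine introduced by this series
and is only available when ARCH_HAS_MMIO_FLUSH is selected.

	/*
	 * Hypothetical sketch: after the aperture has been repositioned over
	 * the target DPA, invalidate the (known-clean) cache lines covering
	 * it, then read the new contents.
	 */
	static int do_aperture_read(void *aperture, void *iobuf, size_t len,
			bool read_flush)
	{
		if (read_flush)
			mmio_flush_range(aperture, len);

		memcpy(iobuf, aperture, len);
		return 0;
	}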
Diffstat (limited to 'tools/testing/nvdimm')
-rw-r--r--  tools/testing/nvdimm/Kbuild        |  2
-rw-r--r--  tools/testing/nvdimm/test/iomap.c  | 30
-rw-r--r--  tools/testing/nvdimm/test/nfit.c   | 10
3 files changed, 37 insertions(+), 5 deletions(-)
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 04c5fc09576d..94a5e0eda2d2 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -1,8 +1,10 @@
ldflags-y += --wrap=ioremap_wc
+ldflags-y += --wrap=memremap
ldflags-y += --wrap=devm_ioremap_nocache
ldflags-y += --wrap=devm_memremap
ldflags-y += --wrap=ioremap_nocache
ldflags-y += --wrap=iounmap
+ldflags-y += --wrap=memunmap
ldflags-y += --wrap=__devm_request_region
ldflags-y += --wrap=__request_region
ldflags-y += --wrap=__release_region
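[Editor's note] As background on the --wrap= lines above: ld resolves any
undefined reference to a wrapped symbol to __wrap_<symbol>, and any reference
to __real_<symbol> back to the original definition, which is what lets the
test harness interpose its own memremap()/memunmap() and friends.  A minimal
userspace illustration (not part of this patch; malloc is just a stand-in
symbol):

	/* build with: gcc main.c wrap.c -Wl,--wrap=malloc */
	#include <stddef.h>
	#include <stdio.h>

	void *__real_malloc(size_t size);	/* bound to the real malloc by the linker */

	void *__wrap_malloc(size_t size)
	{
		/* interpose: log the request, then fall through to the real allocator */
		fprintf(stderr, "malloc(%zu)\n", size);
		return __real_malloc(size);
	}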
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index ff1e00458864..179d2289f3a8 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -89,12 +89,25 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
nfit_res = get_nfit_res(offset);
rcu_read_unlock();
if (nfit_res)
- return (void __iomem *) nfit_res->buf + offset
- - nfit_res->res->start;
+ return nfit_res->buf + offset - nfit_res->res->start;
return devm_memremap(dev, offset, size, flags);
}
EXPORT_SYMBOL(__wrap_devm_memremap);
+void *__wrap_memremap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ struct nfit_test_resource *nfit_res;
+
+ rcu_read_lock();
+ nfit_res = get_nfit_res(offset);
+ rcu_read_unlock();
+ if (nfit_res)
+ return nfit_res->buf + offset - nfit_res->res->start;
+ return memremap(offset, size, flags);
+}
+EXPORT_SYMBOL(__wrap_memremap);
+
void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{
return __nfit_test_ioremap(offset, size, ioremap_nocache);
@@ -120,6 +133,19 @@ void __wrap_iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(__wrap_iounmap);
+void __wrap_memunmap(void *addr)
+{
+ struct nfit_test_resource *nfit_res;
+
+ rcu_read_lock();
+ nfit_res = get_nfit_res((unsigned long) addr);
+ rcu_read_unlock();
+ if (nfit_res)
+ return;
+ return memunmap(addr);
+}
+EXPORT_SYMBOL(__wrap_memunmap);
+
static struct resource *nfit_test_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name, int flags)
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 28dba918524e..021e6f97f33e 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1029,9 +1029,13 @@ static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
lane = nd_region_acquire_lane(nd_region);
if (rw)
- memcpy(mmio->base + dpa, iobuf, len);
- else
- memcpy(iobuf, mmio->base + dpa, len);
+ memcpy(mmio->addr.base + dpa, iobuf, len);
+ else {
+ memcpy(iobuf, mmio->addr.base + dpa, len);
+
+ /* give us some coverage of the mmio_flush_range() API */
+ mmio_flush_range(mmio->addr.base + dpa, len);
+ }
nd_region_release_lane(nd_region, lane);
return 0;