author		Christoph Hellwig <hch@lst.de>	2017-09-11 12:09:28 -0400
committer	Christoph Hellwig <hch@lst.de>	2017-09-11 12:29:40 -0400
commit		044a9df1a7cbb89f48fcc0e9e39997989342966b (patch)
tree		0bb6cdf7a6d92428d4198597d12950121caf72d7 /drivers
parent		9620cfba97a8b88ae91f0e275e8ff110b578bb6e (diff)
nvme-pci: implement the HMB entry number and size limitations
Adds support for the new Host Memory Buffer Minimum Descriptor Entry Size and Host Memory Maximum Descriptors Entries fields that were added in TP 4002 HMB Enhancements. These allow the controller to advertise limits for the usable number of segments in the host memory buffer, as well as a minimum usable per-segment size.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
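In rough terms, the two new Identify Controller fields feed directly into the HMB descriptor allocation path: HMMAXD caps how many descriptor entries the host may build, and HMMINDS (given in 4 KiB units) raises the floor on the per-descriptor chunk size. The standalone C sketch below only illustrates that behaviour; struct hmb_limits and the two helper names are made up for this note and are not part of the driver.

#include <stdint.h>

/* Hypothetical container for the two new Identify Controller fields. */
struct hmb_limits {
	uint32_t hmminds;	/* HMMINDS: minimum descriptor chunk size, in 4 KiB units (0 = no limit) */
	uint16_t hmmaxd;	/* HMMAXD: maximum number of descriptor entries (0 = no limit) */
};

/* Clamp the number of HMB descriptors to the controller's advertised maximum. */
uint32_t hmb_clamp_entries(const struct hmb_limits *l, uint32_t max_entries)
{
	if (l->hmmaxd && l->hmmaxd < max_entries)
		max_entries = l->hmmaxd;
	return max_entries;
}

/* Per-chunk size must satisfy both the controller minimum and the driver's own two-page floor. */
uint64_t hmb_min_chunk_bytes(const struct hmb_limits *l, uint64_t page_size)
{
	uint64_t hw_min = (uint64_t)l->hmminds * 4096;
	uint64_t sw_min = page_size * 2;

	return hw_min > sw_min ? hw_min : sw_min;
}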
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/nvme/host/core.c	2
-rw-r--r--	drivers/nvme/host/nvme.h	3
-rw-r--r--	drivers/nvme/host/pci.c		6
3 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8040fc14fd15..acc816b67582 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1897,6 +1897,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
		ctrl->cntlid = le16_to_cpu(id->cntlid);
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
+		ctrl->hmminds = le32_to_cpu(id->hmminds);
+		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	kfree(id);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b8ba7c85e61b..d3f3c4447515 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -181,8 +181,11 @@ struct nvme_ctrl {
	u64 ps_max_latency_us;
	bool apst_enabled;

+	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
+	u32 hmminds;
+	u16 hmmaxd;

	/* Fabrics only */
	u16 sqsize;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5ed12fbfaad6..4a2121335f48 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1625,6 +1625,10 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;
+
+	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
+		max_entries = dev->ctrl.hmmaxd;
+
	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
			&descs_dma, GFP_KERNEL);
	if (!descs)
@@ -1681,7 +1685,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
	/* start big and work our way down */
	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
-	     chunk_size >= PAGE_SIZE * 2;
+	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	     chunk_size /= 2) {
		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
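For reference, the chunk-size search that the second pci.c hunk modifies behaves roughly as sketched below. This is a standalone, simplified model (the names and constants are stand-ins, plain integers replace the kernel's min_t/max_t helpers), not the driver code itself: it starts from the preferred chunk size and halves it until it would drop below the larger of hmminds * 4096 and two pages.

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE	4096ULL		/* stand-in for PAGE_SIZE */
#define FAKE_MAX_CHUNK	(4ULL << 20)	/* stand-in for PAGE_SIZE * MAX_ORDER_NR_PAGES */

/* Simplified model of the descending chunk-size loop in nvme_alloc_host_mem(). */
void walk_chunk_sizes(uint64_t preferred, uint32_t hmminds)
{
	uint64_t chunk_size = preferred < FAKE_MAX_CHUNK ? preferred : FAKE_MAX_CHUNK;
	uint64_t floor = (uint64_t)hmminds * 4096;

	if (floor < FAKE_PAGE_SIZE * 2)
		floor = FAKE_PAGE_SIZE * 2;	/* never go below two pages */

	for (; chunk_size >= floor; chunk_size /= 2)
		printf("would try %llu-byte chunks\n", (unsigned long long)chunk_size);
}

int main(void)
{
	/* e.g. a controller preferring 1 MiB of HMB with HMMINDS = 8 (32 KiB) */
	walk_chunk_sizes(1ULL << 20, 8);
	return 0;
}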