author		Sebastian Ott <sebott@linux.vnet.ibm.com>	2014-12-05 16:41:47 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-12-08 09:42:44 +0100
commit		de88d0d28fe932637eb5b7ebf9e638256cf07979 (patch)
tree		832876b01901d8c29c09ea170bc69f8dacfec25d	/drivers/s390/block/scm_blk.c
parent		9d4df77fab7347a74a9938521ffad8d8fab2671d (diff)
s390/scm_block: allocate aidaw pages only when necessary
AOBs (the structure describing the HW request) need to be 4K aligned but
very little of that page is actually used. With this patch we place aidaws
at the end of the AOB page and only allocate a separate page for aidaws
when we have to (lists of aidaws must not cross page boundaries).

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
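The allocation strategy is easy to model outside the kernel. Below is a
minimal C sketch of the idea, not the driver code itself; the sizes are
illustrative assumptions (4K pages, 16-byte aidaw entries, one 4K block of
data addressed per aidaw, as the scm_aidaw_bytes() arithmetic in the diff
implies), and calloc() merely stands in for the kernel's mempool_alloc().
A request is served from the unused tail of the AOB page whenever enough
aidaw slots remain before the page boundary; only otherwise is a separate
zeroed page taken.

/*
 * Minimal sketch of the allocation strategy, not the kernel code.
 * Assumed for illustration: 4K pages, 16-byte aidaw entries, each
 * aidaw addressing one 4K block of request data.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ		4096UL
#define AIDAW_SZ	16UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Bytes addressable by the aidaw slots left before the page boundary. */
static unsigned long aidaw_bytes(unsigned long next_aidaw)
{
	unsigned long slack = ALIGN_UP(next_aidaw, PAGE_SZ) - next_aidaw;

	return (slack / AIDAW_SZ) * PAGE_SZ;
}

/* Reuse the tail of the AOB page when possible, else take a new page. */
static void *aidaw_fetch(unsigned long next_aidaw, unsigned long req_bytes)
{
	if (aidaw_bytes(next_aidaw) >= req_bytes)
		return (void *) next_aidaw;
	return calloc(1, PAGE_SZ);	/* stands in for mempool_alloc() */
}

int main(void)
{
	/* Pretend the first free aidaw slot sits 128 bytes into the page. */
	unsigned long next_aidaw = 0x10000 + 128;

	/* (4096 - 128) / 16 = 248 slots -> 248 * 4K bytes of data. */
	printf("%lu bytes reachable without an extra page\n",
	       aidaw_bytes(next_aidaw));
	printf("1 MiB request needs a new page: %s\n",
	       aidaw_fetch(next_aidaw, 1UL << 20) == (void *) next_aidaw ?
	       "no" : "yes");
	return 0;
}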
Diffstat (limited to 'drivers/s390/block/scm_blk.c')
-rw-r--r--	drivers/s390/block/scm_blk.c	32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 5b2abadea094..f5c369ce7e73 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -121,7 +121,8 @@ static void scm_request_done(struct scm_request *scmrq)
 	u64 aidaw = msb->data_addr;
 	unsigned long flags;
 
-	if ((msb->flags & MSB_FLAG_IDA) && aidaw)
+	if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+	    IS_ALIGNED(aidaw, PAGE_SIZE))
 		mempool_free(virt_to_page(aidaw), aidaw_pool);
 
 	spin_lock_irqsave(&list_lock, flags);
@@ -134,26 +135,47 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }
 
-struct aidaw *scm_aidaw_alloc(void)
+static inline struct aidaw *scm_aidaw_alloc(void)
 {
 	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
 
 	return page ? page_address(page) : NULL;
 }
 
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+	unsigned long _aidaw = (unsigned long) aidaw;
+	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+	struct aidaw *aidaw;
+
+	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+		return scmrq->next_aidaw;
+
+	aidaw = scm_aidaw_alloc();
+	if (aidaw)
+		memset(aidaw, 0, PAGE_SIZE);
+	return aidaw;
+}
+
 static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct aidaw *aidaw = scm_aidaw_alloc();
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(scmrq->request));
 	if (!aidaw)
 		return -ENOMEM;
 
-	memset(aidaw, 0, PAGE_SIZE);
 	msb->bs = MSB_BS_4K;
 	scmrq->aob->request.msb_count = 1;
 	msb->scm_addr = scmdev->address +
@@ -188,6 +210,8 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
+	/* We don't use all msbs - place aidaws at the end of the aob page. */
+	scmrq->next_aidaw = (void *) &aob->msb[1];
 	scm_request_cluster_init(scmrq);
 }
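Two details are worth spelling out, both following from the diff above
rather than stated in it. A page handed out by aidaw_pool is page aligned
(it is the start of its own page), while aidaws placed behind msb[0] in
the AOB page are not, which is what lets scm_request_done() use the
IS_ALIGNED(aidaw, PAGE_SIZE) test to free only the separately allocated
pages. And under the illustrative sizes assumed in the sketch above, the
tail of the AOB page holds a couple hundred aidaw slots (enough for
roughly 1 MB of request data), so the mempool page should only be needed
for unusually large requests.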