Diffstat (limited to 'crypto/async_tx/async_pq.c')
 -rw-r--r--  crypto/async_tx/async_pq.c | 298
 1 file changed, 172 insertions(+), 126 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 91d5d385899e..9e4bb7fbde25 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -1,23 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
  * Copyright(c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
  */
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
@@ -27,8 +11,8 @@
 #include <linux/async_tx.h>
 #include <linux/gfp.h>
 
-/**
- * pq_scribble_page - space to hold throwaway P or Q buffer for
+/*
+ * struct pq_scribble_page - space to hold throwaway P or Q buffer for
  * synchronous gen_syndrome
  */
 static struct page *pq_scribble_page;
@@ -42,53 +26,27 @@ static struct page *pq_scribble_page;
 #define P(b, d) (b[d-2])
 #define Q(b, d) (b[d-1])
 
-/**
+#define MAX_DISKS 255
+
+/*
  * do_async_gen_syndrome - asynchronously calculate P and/or Q
  */
 static __async_inline struct dma_async_tx_descriptor *
-do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
-		      const unsigned char *scfs, unsigned int offset, int disks,
-		      size_t len, dma_addr_t *dma_src,
+do_async_gen_syndrome(struct dma_chan *chan,
+		      const unsigned char *scfs, int disks,
+		      struct dmaengine_unmap_data *unmap,
+		      enum dma_ctrl_flags dma_flags,
 		      struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct dma_device *dma = chan->device;
-	enum dma_ctrl_flags dma_flags = 0;
 	enum async_tx_flags flags_orig = submit->flags;
 	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
 	dma_async_tx_callback cb_param_orig = submit->cb_param;
 	int src_cnt = disks - 2;
-	unsigned char coefs[src_cnt];
 	unsigned short pq_src_cnt;
 	dma_addr_t dma_dest[2];
 	int src_off = 0;
-	int idx;
-	int i;
-
-	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
-	if (P(blocks, disks))
-		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
-					   len, DMA_BIDIRECTIONAL);
-	else
-		dma_flags |= DMA_PREP_PQ_DISABLE_P;
-	if (Q(blocks, disks))
-		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
-					   len, DMA_BIDIRECTIONAL);
-	else
-		dma_flags |= DMA_PREP_PQ_DISABLE_Q;
-
-	/* convert source addresses being careful to collapse 'empty'
-	 * sources and update the coefficients accordingly
-	 */
-	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (blocks[i] == NULL)
-			continue;
-		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
-					    DMA_TO_DEVICE);
-		coefs[idx] = scfs[i];
-		idx++;
-	}
-	src_cnt = idx;
 
 	while (src_cnt > 0) {
 		submit->flags = flags_orig;
@@ -100,11 +58,9 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 		if (src_cnt > pq_src_cnt) {
 			submit->flags &= ~ASYNC_TX_ACK;
 			submit->flags |= ASYNC_TX_FENCE;
-			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
 			submit->cb_fn = NULL;
 			submit->cb_param = NULL;
 		} else {
-			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
 			submit->cb_fn = cb_fn_orig;
 			submit->cb_param = cb_param_orig;
 			if (cb_fn_orig)
@@ -113,15 +69,16 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
 
-		/* Since we have clobbered the src_list we are committed
-		 * to doing this asynchronously. Drivers force forward
-		 * progress in case they can not provide a descriptor
+		/* Drivers force forward progress in case they can not provide
+		 * a descriptor
 		 */
 		for (;;) {
+			dma_dest[0] = unmap->addr[disks - 2];
+			dma_dest[1] = unmap->addr[disks - 1];
 			tx = dma->device_prep_dma_pq(chan, dma_dest,
-						     &dma_src[src_off],
+						     &unmap->addr[src_off],
 						     pq_src_cnt,
-						     &coefs[src_off], len,
+						     &scfs[src_off], unmap->len,
 						     dma_flags);
 			if (likely(tx))
 				break;
@@ -129,6 +86,7 @@
 			dma_async_issue_pending(chan);
 		}
 
+		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
 		submit->depend_tx = tx;
 
@@ -142,15 +100,16 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	return tx;
 }
 
-/**
+/*
  * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
  */
 static void
-do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
 		     size_t len, struct async_submit_ctl *submit)
 {
 	void **srcs;
 	int i;
+	int start = -1, stop = disks - 3;
 
 	if (submit->scribble)
 		srcs = submit->scribble;
@@ -160,18 +119,43 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	for (i = 0; i < disks; i++) {
 		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = (void*)raid6_empty_zero_page;
-		} else
-			srcs[i] = page_address(blocks[i]) + offset;
+			srcs[i] = raid6_get_zero_page();
+		} else {
+			srcs[i] = page_address(blocks[i]) + offsets[i];
+
+			if (i < disks - 2) {
+				stop = i;
+				if (start == -1)
+					start = i;
+			}
+		}
 	}
-	raid6_call.gen_syndrome(disks, len, srcs);
+	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
+		BUG_ON(!raid6_call.xor_syndrome);
+		if (start >= 0)
+			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
+	} else
+		raid6_call.gen_syndrome(disks, len, srcs);
 	async_tx_sync_epilog(submit);
 }
 
+static inline bool
+is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
+		       int src_cnt, size_t len)
+{
+	int i;
+
+	for (i = 0; i < src_cnt; i++) {
+		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
+			return false;
+	}
+	return true;
+}
+
 /**
  * async_gen_syndrome - asynchronously calculate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: offset array into each block (src and dest) to start transaction
  * @disks: number of blocks (including missing P or Q, see below)
  * @len: length of operation in bytes
  * @submit: submission/completion modifiers
@@ -188,13 +172,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * set to NULL those buffers will be replaced with the raid6_zero_page
  * in the synchronous path and omitted in the hardware-asynchronous
  * path.
- *
- * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overwritten to perform address conversions
- * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
 		   size_t len, struct async_submit_ctl *submit)
 {
 	int src_cnt = disks - 2;
@@ -202,26 +182,72 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 				      &P(blocks, disks), 2,
 				      blocks, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	dma_addr_t *dma_src = NULL;
+	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
+	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
 
-	if (submit->scribble)
-		dma_src = submit->scribble;
-	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
-		dma_src = (dma_addr_t *) blocks;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
-	if (dma_src && device &&
+	/* XORing P/Q is only implemented in software */
+	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
 	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
-	    is_dma_pq_aligned(device, offset, 0, len)) {
+	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
+		struct dma_async_tx_descriptor *tx;
+		enum dma_ctrl_flags dma_flags = 0;
+		unsigned char coefs[MAX_DISKS];
+		int i, j;
+
 		/* run the p+q asynchronously */
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
-		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
-					     disks, len, dma_src, submit);
+
+		/* convert source addresses being careful to collapse 'empty'
+		 * sources and update the coefficients accordingly
+		 */
+		unmap->len = len;
+		for (i = 0, j = 0; i < src_cnt; i++) {
+			if (blocks[i] == NULL)
+				continue;
+			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
+						      offsets[i], len, DMA_TO_DEVICE);
+			coefs[j] = raid6_gfexp[i];
+			unmap->to_cnt++;
+			j++;
+		}
+
+		/*
+		 * DMAs use destinations as sources,
+		 * so use BIDIRECTIONAL mapping
+		 */
+		unmap->bidi_cnt++;
+		if (P(blocks, disks))
+			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
+							P(offsets, disks),
+							len, DMA_BIDIRECTIONAL);
+		else {
+			unmap->addr[j++] = 0;
+			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		}
+
+		unmap->bidi_cnt++;
+		if (Q(blocks, disks))
+			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
+							Q(offsets, disks),
+							len, DMA_BIDIRECTIONAL);
+		else {
+			unmap->addr[j++] = 0;
+			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		}
+
+		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
+		dmaengine_unmap_put(unmap);
+		return tx;
 	}
 
+	dmaengine_unmap_put(unmap);
+
 	/* run the pq synchronously */
 	pr_debug("%s: (sync) disks: %d len: %zu\n",
 		 __func__, disks, len);
@@ -230,13 +256,13 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 
 	if (!P(blocks, disks)) {
 		P(blocks, disks) = pq_scribble_page;
-		BUG_ON(len + offset > PAGE_SIZE);
+		P(offsets, disks) = 0;
 	}
 	if (!Q(blocks, disks)) {
 		Q(blocks, disks) = pq_scribble_page;
-		BUG_ON(len + offset > PAGE_SIZE);
+		Q(offsets, disks) = 0;
 	}
-	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
+	do_sync_gen_syndrome(blocks, offsets, disks, len, submit);
 
 	return NULL;
 }
@@ -255,11 +281,12 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: common offset into each block (src and dest) to start transaction
  * @disks: number of blocks (including missing P or Q, see below)
  * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
+ * @s_off: spare buffer page offset
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks',
@@ -268,59 +295,69 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
 * specified.
 */
 struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
-		   struct async_submit_ctl *submit)
+		   unsigned int s_off, struct async_submit_ctl *submit)
 {
 	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
-	unsigned char coefs[disks-2];
+	unsigned char coefs[MAX_DISKS];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
-	dma_addr_t *dma_src = NULL;
-	int src_cnt = 0;
+	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks < 4);
+	BUG_ON(disks < 4 || disks > MAX_DISKS);
 
-	if (submit->scribble)
-		dma_src = submit->scribble;
-	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
-		dma_src = (dma_addr_t *) blocks;
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
-	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
-	    is_dma_pq_aligned(device, offset, 0, len)) {
+	if (unmap && disks <= dma_maxpq(device, 0) &&
+	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
 		struct device *dev = device->dev;
-		dma_addr_t *pq = &dma_src[disks-2];
-		int i;
+		dma_addr_t pq[2];
+		int i, j = 0, src_cnt = 0;
 
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
-		if (!P(blocks, disks))
+
+		unmap->len = len;
+		for (i = 0; i < disks-2; i++)
+			if (likely(blocks[i])) {
+				unmap->addr[j] = dma_map_page(dev, blocks[i],
+							      offsets[i], len,
+							      DMA_TO_DEVICE);
+				coefs[j] = raid6_gfexp[i];
+				unmap->to_cnt++;
+				src_cnt++;
+				j++;
+			}
+
+		if (!P(blocks, disks)) {
+			pq[0] = 0;
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
-		else
+		} else {
 			pq[0] = dma_map_page(dev, P(blocks, disks),
-					     offset, len,
+					     P(offsets, disks), len,
 					     DMA_TO_DEVICE);
-		if (!Q(blocks, disks))
+			unmap->addr[j++] = pq[0];
+			unmap->to_cnt++;
+		}
+		if (!Q(blocks, disks)) {
+			pq[1] = 0;
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
-		else
+		} else {
 			pq[1] = dma_map_page(dev, Q(blocks, disks),
-					     offset, len,
+					     Q(offsets, disks), len,
 					     DMA_TO_DEVICE);
+			unmap->addr[j++] = pq[1];
+			unmap->to_cnt++;
+		}
 
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks-2; i++)
-			if (likely(blocks[i])) {
-				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
-								offset, len,
-								DMA_TO_DEVICE);
-				coefs[src_cnt] = raid6_gfexp[i];
-				src_cnt++;
-			}
-
 		for (;;) {
-			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
+			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
 							    src_cnt,
 							    coefs,
 							    len, pqres,
@@ -330,12 +367,14 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			async_tx_quiesce(&submit->depend_tx);
 			dma_async_issue_pending(chan);
 		}
-		async_tx_submit(chan, tx, submit);
-
-		return tx;
+
+		dma_set_unmap(tx, unmap);
+		async_tx_submit(chan, tx, submit);
 	} else {
 		struct page *p_src = P(blocks, disks);
+		unsigned int p_off = P(offsets, disks);
 		struct page *q_src = Q(blocks, disks);
+		unsigned int q_off = Q(offsets, disks);
 		enum async_tx_flags flags_orig = submit->flags;
 		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
 		void *scribble = submit->scribble;
@@ -361,35 +400,42 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		if (p_src) {
 			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 					  NULL, NULL, scribble);
-			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
+			tx = async_xor_offs(spare, s_off,
+					blocks, offsets, disks-2, len, submit);
 			async_tx_quiesce(&tx);
-			p = page_address(p_src) + offset;
-			s = page_address(spare) + offset;
+			p = page_address(p_src) + p_off;
+			s = page_address(spare) + s_off;
 			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
 		}
 
 		if (q_src) {
 			P(blocks, disks) = NULL;
 			Q(blocks, disks) = spare;
+			Q(offsets, disks) = s_off;
 			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
-			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
+			tx = async_gen_syndrome(blocks, offsets, disks,
					len, submit);
 			async_tx_quiesce(&tx);
-			q = page_address(q_src) + offset;
-			s = page_address(spare) + offset;
+			q = page_address(q_src) + q_off;
+			s = page_address(spare) + s_off;
 			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
 		}
 
 		/* restore P, Q and submit */
 		P(blocks, disks) = p_src;
+		P(offsets, disks) = p_off;
 		Q(blocks, disks) = q_src;
+		Q(offsets, disks) = q_off;
 
 		submit->cb_fn = cb_fn_orig;
 		submit->cb_param = cb_param_orig;
 		submit->flags = flags_orig;
 		async_tx_sync_epilog(submit);
-
-		return NULL;
+		tx = NULL;
 	}
+	dmaengine_unmap_put(unmap);
+
+	return tx;
 }
 EXPORT_SYMBOL_GPL(async_syndrome_val);
 
@@ -407,7 +453,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-	put_page(pq_scribble_page);
+	__free_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
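
Taken together, the hunks above change the exported calling convention: async_gen_syndrome() and async_syndrome_val() now take a per-block offset array ('offsets') instead of one offset shared by every page, async_syndrome_val() gains an 's_off' offset for its spare page, and DMA mappings travel in a struct dmaengine_unmap_data that the caller path drops with dmaengine_unmap_put() while dma_set_unmap() ties its release to descriptor completion. The sketch below is a minimal hypothetical caller under the new signatures, not code from this patch; NDISKS, gen_and_check(), and the pages/offs/spare parameters are illustrative names, and error handling is elided.

#include <linux/async_tx.h>
#include <linux/errno.h>

#define NDISKS 6 /* illustrative: 4 data blocks plus P and Q */

/* Hypothetical caller: generate P/Q for pages[], then validate them. */
static int gen_and_check(struct page **pages, unsigned int *offs,
			 struct page *spare, unsigned int s_off, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	enum sum_check_flags pqres = 0; /* val path ORs failure bits in */

	/* P lives at pages[NDISKS-2], Q at pages[NDISKS-1]; every source
	 * and destination now carries its own page offset in offs[].
	 */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
	tx = async_gen_syndrome(pages, offs, NDISKS, len, &submit);
	async_tx_quiesce(&tx); /* wait; NULL tx means it ran synchronously */

	/* Re-check the syndrome; the spare buffer gets its own offset
	 * (s_off), the parameter added by this diff.
	 */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
	tx = async_syndrome_val(pages, offs, NDISKS, len, &pqres,
				spare, s_off, &submit);
	async_tx_quiesce(&tx);

	return pqres ? -EIO : 0;
}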
