author    Mikulas Patocka <mpatocka@redhat.com>    2018-09-05 09:18:43 -0400
committer Herbert Xu <herbert@gondor.apana.org.au> 2018-09-14 14:08:53 +0800
commit    a788848116454d753b13a4888e0e31ada3c4d393 (patch)
tree      a1578d1f658cb0f7a69cdf86f40321325c936881 /arch/x86/crypto
parent    a7e7edfea23fa1f3c910eac897aac41ec9584c38 (diff)
crypto: aesni - don't use GFP_ATOMIC allocation if the request doesn't cross a page in gcm
This patch fixes gcmaes_crypt_by_sg so that it won't use memory allocation if the data doesn't cross a page boundary.

Authenticated encryption may be used by dm-crypt. If the encryption or decryption fails, it results in an I/O error and filesystem corruption. The function gcmaes_crypt_by_sg uses a GFP_ATOMIC allocation that can fail at any time. This patch fixes the logic so that it won't attempt the failing allocation if the data doesn't cross a page boundary.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index acbe7e8336d8..e4b78f962874 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -817,7 +817,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length &&
(!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length < PAGE_SIZE)) {
+ req->src->offset + req->src->length <= PAGE_SIZE)) {
scatterwalk_start(&assoc_sg_walk, req->src);
assoc = scatterwalk_map(&assoc_sg_walk);
} else {
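
The one-character change above turns a strict "<" into "<=": data that ends exactly at the page boundary (offset + length == PAGE_SIZE) still lies entirely within a single page and can be mapped in place rather than being copied through the GFP_ATOMIC bounce buffer. The following is a minimal standalone sketch of that boundary arithmetic, assuming the usual 4 KiB x86 page; fits_old/fits_new are illustrative helpers, not kernel functions.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u  /* assumed 4 KiB page, as on x86 */

/* old check: rejects data that ends exactly at the page boundary */
static bool fits_old(unsigned int offset, unsigned int length)
{
	return offset + length < PAGE_SIZE;
}

/* fixed check: accepts anything that stays within one page */
static bool fits_new(unsigned int offset, unsigned int length)
{
	return offset + length <= PAGE_SIZE;
}

int main(void)
{
	/* data occupying bytes 4000..4095 of a page: no boundary crossed */
	unsigned int offset = 4000, length = 96;

	printf("old '<'  check: %s\n",
	       fits_old(offset, length) ? "map in place" : "fall back to GFP_ATOMIC allocation");
	printf("new '<=' check: %s\n",
	       fits_new(offset, length) ? "map in place" : "fall back to GFP_ATOMIC allocation");
	return 0;
}

With offset 4000 and length 96 the data ends at byte 4095, entirely within one page, yet the old "<" check needlessly forced the allocation path whose failure dm-crypt cannot tolerate; the "<=" form keeps such requests on the allocation-free path.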