crypto: x86/aes-xts - simplify loop in xts_crypt_slowpath()
Since the total length processed by the loop in xts_crypt_slowpath() is
a multiple of AES_BLOCK_SIZE (the ciphertext-stealing tail, if any, is
split off before the walk begins), just round the length down to a
multiple of AES_BLOCK_SIZE even on the last step.  This doesn't change
behavior, as the last step will process a multiple of AES_BLOCK_SIZE
regardless.
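
For reference, AES_BLOCK_SIZE is 16, a power of two, so the masking
used here is the standard bit trick for round_down() and for the
remainder.  A minimal standalone check of the two identities the patch
relies on (this program is illustrative only, not part of the patch):

	#include <assert.h>

	#define AES_BLOCK_SIZE 16	/* power of two, as in the kernel */

	int main(void)
	{
		unsigned int n;

		for (n = 0; n < 4096; n++) {
			/* round n down to a whole number of AES blocks */
			unsigned int full = n & ~(AES_BLOCK_SIZE - 1);
			/* bytes past the last full block */
			unsigned int rem = n & (AES_BLOCK_SIZE - 1);

			assert(full == n / AES_BLOCK_SIZE * AES_BLOCK_SIZE);
			assert(rem == n % AES_BLOCK_SIZE);
			assert(full + rem == n);
		}
		return 0;
	}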
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 110b328..02a4c0c 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -935,16 +935,13 @@ xts_crypt_slowpath(struct skcipher_request *req, xts_crypt_func crypt_func)
 	err = skcipher_walk_virt(&walk, req, false);
 
 	while (walk.nbytes) {
-		unsigned int nbytes = walk.nbytes;
-
-		if (nbytes < walk.total)
-			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
-
 		kernel_fpu_begin();
-		(*crypt_func)(&ctx->crypt_ctx, walk.src.virt.addr,
-			      walk.dst.virt.addr, nbytes, req->iv);
+		(*crypt_func)(&ctx->crypt_ctx,
+			      walk.src.virt.addr, walk.dst.virt.addr,
+			      walk.nbytes & ~(AES_BLOCK_SIZE - 1), req->iv);
 		kernel_fpu_end();
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		err = skcipher_walk_done(&walk,
+					 walk.nbytes & (AES_BLOCK_SIZE - 1));
 	}
 
 	if (err || !tail)
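
As a sanity check on the equivalence claimed above, here is a minimal
model of the old vs. new leftover computation passed to
skcipher_walk_done().  It assumes, as the commit message states, that
the final walk step sees a block-aligned length; old_leftover() is a
hypothetical stand-in for the removed logic, not kernel code:

	#include <assert.h>

	#define AES_BLOCK_SIZE 16

	/*
	 * Model of the removed logic: intermediate steps
	 * (nbytes < total) were rounded down; the final step
	 * passed nbytes through unchanged.
	 */
	static unsigned int old_leftover(unsigned int nbytes,
					 unsigned int total)
	{
		unsigned int n = nbytes;

		if (n < total)
			n &= ~(AES_BLOCK_SIZE - 1);	/* round_down() */
		return nbytes - n;
	}

	int main(void)
	{
		unsigned int total, nbytes;

		/* totals are block-aligned, as in xts_crypt_slowpath() */
		for (total = AES_BLOCK_SIZE; total <= 1024;
		     total += AES_BLOCK_SIZE)
			for (nbytes = 1; nbytes <= total; nbytes++)
				assert(old_leftover(nbytes, total) ==
				       (nbytes & (AES_BLOCK_SIZE - 1)));
		return 0;
	}

Both expressions agree on every step, which is why dropping the
round_down() branch doesn't change behavior.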