| From foo@baz Thu Nov 9 09:48:01 CET 2017 |
| From: Li Zhong <zhong@linux.vnet.ibm.com> |
| Date: Fri, 20 Jan 2017 16:35:33 +0800 |
| Subject: crypto: vmx - disable preemption to enable vsx in aes_ctr.c |
| |
| From: Li Zhong <zhong@linux.vnet.ibm.com> |
| |
| |
| [ Upstream commit 7dede913fc2ab9c0d3bff3a49e26fa9e858b0c13 ] |
| |
| Some preemptible check warnings were reported from enable_kernel_vsx(). This |
| patch disables preemption in aes_ctr.c before enabling vsx, making it |
| consistent with the other files in the same directory. |
| |
| Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com> |
| Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> |
| Signed-off-by: Sasha Levin <alexander.levin@verizon.com> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| drivers/crypto/vmx/aes_ctr.c | 6 ++++++ |
| 1 file changed, 6 insertions(+) |
| |
| --- a/drivers/crypto/vmx/aes_ctr.c |
| +++ b/drivers/crypto/vmx/aes_ctr.c |
| @@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct cryp |
| int ret; |
| struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
| |
| + preempt_disable(); |
| pagefault_disable(); |
| enable_kernel_vsx(); |
| ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
| disable_kernel_vsx(); |
| pagefault_enable(); |
| + preempt_enable(); |
| |
| ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); |
| return ret; |
| @@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_a |
| u8 *dst = walk->dst.virt.addr; |
| unsigned int nbytes = walk->nbytes; |
| |
| + preempt_disable(); |
| pagefault_disable(); |
| enable_kernel_vsx(); |
| aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); |
| disable_kernel_vsx(); |
| pagefault_enable(); |
| + preempt_enable(); |
| |
| crypto_xor(keystream, src, nbytes); |
| memcpy(dst, keystream, nbytes); |
| @@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkci |
| blkcipher_walk_init(&walk, dst, src, nbytes); |
| ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); |
| while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
| + preempt_disable(); |
| pagefault_disable(); |
| enable_kernel_vsx(); |
| aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, |
| @@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkci |
| walk.iv); |
| disable_kernel_vsx(); |
| pagefault_enable(); |
| + preempt_enable(); |
| |
| /* We need to update IV mostly for last bytes/round */ |
| inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE; |