| From 765872248304a69cce4aa0198da2a26cba781c14 Mon Sep 17 00:00:00 2001 |
| From: Peter Zijlstra <peterz@infradead.org> |
| Date: Mon, 14 Nov 2011 18:19:27 +0100 |
| Subject: [PATCH 227/274] x86: crypto: Reduce preempt disabled regions |
| |
| Restrict the preempt disabled regions to the actual floating point |
| operations and enable preemption for the administrative actions. |
| |
| This is necessary on RT to avoid calling kfree and other operations |
| with preemption disabled. |
| |
| Reported-and-tested-by: Carsten Emde <cbe@osadl.org> |
| Signed-off-by: Peter Zijlstra <peterz@infradead.org> |
| Cc: stable-rt@vger.kernel.org |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++----------- |
| 1 file changed, 13 insertions(+), 11 deletions(-) |
| |
| diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c |
| index c799352..0efa5144 100644 |
| --- a/arch/x86/crypto/aesni-intel_glue.c |
| +++ b/arch/x86/crypto/aesni-intel_glue.c |
| @@ -290,14 +290,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc, |
| err = blkcipher_walk_virt(desc, &walk); |
| desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
| |
| - kernel_fpu_begin(); |
| while ((nbytes = walk.nbytes)) { |
| + kernel_fpu_begin(); |
| aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| - nbytes & AES_BLOCK_MASK); |
| + nbytes & AES_BLOCK_MASK); |
| + kernel_fpu_end(); |
| nbytes &= AES_BLOCK_SIZE - 1; |
| err = blkcipher_walk_done(desc, &walk, nbytes); |
| } |
| - kernel_fpu_end(); |
| |
| return err; |
| } |
| @@ -314,14 +314,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc, |
| err = blkcipher_walk_virt(desc, &walk); |
| desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
| |
| - kernel_fpu_begin(); |
| while ((nbytes = walk.nbytes)) { |
| + kernel_fpu_begin(); |
| aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| nbytes & AES_BLOCK_MASK); |
| + kernel_fpu_end(); |
| nbytes &= AES_BLOCK_SIZE - 1; |
| err = blkcipher_walk_done(desc, &walk, nbytes); |
| } |
| - kernel_fpu_end(); |
| |
| return err; |
| } |
| @@ -360,14 +360,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc, |
| err = blkcipher_walk_virt(desc, &walk); |
| desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
| |
| - kernel_fpu_begin(); |
| while ((nbytes = walk.nbytes)) { |
| + kernel_fpu_begin(); |
| aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| nbytes & AES_BLOCK_MASK, walk.iv); |
| + kernel_fpu_end(); |
| nbytes &= AES_BLOCK_SIZE - 1; |
| err = blkcipher_walk_done(desc, &walk, nbytes); |
| } |
| - kernel_fpu_end(); |
| |
| return err; |
| } |
| @@ -384,14 +384,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc, |
| err = blkcipher_walk_virt(desc, &walk); |
| desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
| |
| - kernel_fpu_begin(); |
| while ((nbytes = walk.nbytes)) { |
| + kernel_fpu_begin(); |
| aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| nbytes & AES_BLOCK_MASK, walk.iv); |
| + kernel_fpu_end(); |
| nbytes &= AES_BLOCK_SIZE - 1; |
| err = blkcipher_walk_done(desc, &walk, nbytes); |
| } |
| - kernel_fpu_end(); |
| |
| return err; |
| } |
| @@ -446,18 +446,20 @@ static int ctr_crypt(struct blkcipher_desc *desc, |
| err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); |
| desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
| |
| - kernel_fpu_begin(); |
| while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
| + kernel_fpu_begin(); |
| aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| nbytes & AES_BLOCK_MASK, walk.iv); |
| + kernel_fpu_end(); |
| nbytes &= AES_BLOCK_SIZE - 1; |
| err = blkcipher_walk_done(desc, &walk, nbytes); |
| } |
| if (walk.nbytes) { |
| + kernel_fpu_begin(); |
| ctr_crypt_final(ctx, &walk); |
| + kernel_fpu_end(); |
| err = blkcipher_walk_done(desc, &walk, 0); |
| } |
| - kernel_fpu_end(); |
| |
| return err; |
| } |
| -- |
| 1.7.10.4 |
| |