Subject: x86: crypto: Reduce preempt disabled regions
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Nov 2011 18:19:27 +0100

Restrict the preempt-disabled regions to the actual floating point
operations and enable preemption for the administrative actions.

This is necessary on RT to avoid calling kfree() and other operations
with preemption disabled.
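
For illustration, each loop ends up with the shape below (a condensed
sketch reusing the identifiers from the ecb_encrypt() hunk that
follows, not a literal copy of the driver code):

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();	/* preemption off only around the SIMD work */
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();	/* preemptible again */
		nbytes &= AES_BLOCK_SIZE - 1;
		/* may kfree() internally, hence must run preemptible on RT */
		err = skcipher_walk_done(&walk, nbytes);
	}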

Reported-and-tested-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/crypto/aesni-intel_glue.c |   22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -374,14 +374,14 @@ static int ecb_encrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -396,14 +396,14 @@ static int ecb_decrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -418,14 +418,14 @@ static int cbc_encrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -440,14 +440,14 @@ static int cbc_decrypt(struct skcipher_r
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -497,18 +497,20 @@ static int ctr_crypt(struct skcipher_req
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		kernel_fpu_begin();
 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
 	if (walk.nbytes) {
+		kernel_fpu_begin();
 		ctr_crypt_final(ctx, &walk);
+		kernel_fpu_end();
 		err = skcipher_walk_done(&walk, 0);
 	}
-	kernel_fpu_end();
 
	return err;
 }