From 0bf3291da54be5f9817331408d3b6f14440a36cf Mon Sep 17 00:00:00 2001
From: Gilad Ben-Yossef <gilad@benyossef.com>
Date: Wed, 29 Jan 2020 16:37:54 +0200
Subject: [PATCH] crypto: ccree - protect against empty or NULL scatterlists

commit ce0fc6db38decf0d2919bfe783de6d6b76e421a9 upstream.

Deal gracefully with a NULL or empty scatterlist which can happen
if both cryptlen and assoclen are zero and we're doing in-place
AEAD encryption.

This fixes a crash when this causes us to try to map a NULL page,
at least with some platforms / DMA mapping configs.

Cc: stable@vger.kernel.org # v4.19+
Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

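---

The empty-scatterlist case described in the commit message arises from an
in-place AEAD request whose cryptlen and assoclen are both zero, so the
source/destination list carries no payload and no associated data and only
the authentication tag is produced. The sketch below is not part of this
patch (everything after the "---" above is dropped by git am); it only
illustrates one way such a request can be built with the kernel AEAD API.
The "gcm(aes)" algorithm name, the all-zero key, the buffer sizes and the
demo module/function names are assumptions made purely for illustration.

/*
 * Minimal sketch (not part of this patch): submit an in-place AEAD
 * encryption with cryptlen == 0 and assoclen == 0, the corner case the
 * patch hardens cc_map_sg() and the AEAD skip loops against.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>

static int __init empty_aead_demo_init(void)
{
	u8 key[16] = { 0 };		/* arbitrary AES-128 key */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	unsigned int ivsize, tagsize;
	u8 *buf;			/* IV followed by room for the tag */
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	ivsize = crypto_aead_ivsize(tfm);
	tagsize = crypto_aead_authsize(tfm);

	/* No plaintext and no AAD: the buffer only holds the IV and tag. */
	buf = kzalloc(ivsize + tagsize, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	/* Single-entry in-place list covering only the auth tag area. */
	sg_init_one(&sg, buf + ivsize, tagsize);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	aead_request_set_crypt(req, &sg, &sg, 0, buf);	/* cryptlen == 0 */
	aead_request_set_ad(req, 0);			/* assoclen == 0 */

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	pr_info("zero-length in-place AEAD encrypt returned %d\n", err);

	aead_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

static void __exit empty_aead_demo_exit(void)
{
}

module_init(empty_aead_demo_init);
module_exit(empty_aead_demo_exit);
MODULE_DESCRIPTION("Zero-length in-place AEAD request demo");
MODULE_LICENSE("GPL");

Under the old code such a request could hand dma_map_sg() an empty or NULL
list and crash on some platforms / DMA mapping configurations; after this
patch cc_map_sg() always sizes the list through cc_get_sgl_nents() (which
now zeroes *lbytes up front), and the "check where the data starts" loops
in cc_aead_chain_data() stop once the mapped-entry count is exhausted.
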
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index c81ad33f9115..29eb96499966 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -87,6 +87,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
 {
 	unsigned int nents = 0;
 
+	*lbytes = 0;
+
 	while (nbytes && sg_list) {
 		nents++;
 		/* get the number of bytes in the last entry */
@@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
 				nbytes : sg_list->length;
 		sg_list = sg_next(sg_list);
 	}
+
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
 }
@@ -311,37 +314,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	if (sg_is_last(sg)) {
-		/* One entry only case -set to DLLI */
-		if (dma_map_sg(dev, sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_sg() single buffer failed\n");
-			return -ENOMEM;
-		}
-		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-			sg->offset, sg->length);
-		*lbytes = nbytes;
-		*nents = 1;
-		*mapped_nents = 1;
-	} else { /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
-		if (*nents > max_sg_nents) {
-			*nents = 0;
-			dev_err(dev, "Too many fragments. current %d max %d\n",
-				*nents, max_sg_nents);
-			return -ENOMEM;
-		}
-		/* In case of mmu the number of mapped nents might
-		 * be changed from the original sgl nents
-		 */
-		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-		if (*mapped_nents == 0) {
-			*nents = 0;
-			dev_err(dev, "dma_map_sg() sg buffer failed\n");
-			return -ENOMEM;
-		}
+	int ret = 0;
+
+	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+	if (*nents > max_sg_nents) {
+		*nents = 0;
+		dev_err(dev, "Too many fragments. current %d max %d\n",
+			*nents, max_sg_nents);
+		return -ENOMEM;
 	}
 
+	ret = dma_map_sg(dev, sg, *nents, direction);
+	if (dma_mapping_error(dev, ret)) {
+		*nents = 0;
+		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+		return -ENOMEM;
+	}
+
+	*mapped_nents = ret;
+
 	return 0;
 }
 
@@ -576,11 +567,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
 
-	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
+		     DMA_BIDIRECTIONAL);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
 			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
@@ -902,7 +894,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
+	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 		src_mapped_nents--;
 		offset -= areq_ctx->src_sgl->length;
 		sgl = sg_next(areq_ctx->src_sgl);
@@ -929,7 +921,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 		size_for_map += crypto_aead_ivsize(tfm);
 
 	rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-		       &areq_ctx->dst.nents,
+		       &areq_ctx->dst.mapped_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 		       &dst_mapped_nents);
 	if (rc)
@@ -942,7 +934,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	offset = size_to_skip;
 
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
+	while (dst_mapped_nents && sg_index <= size_to_skip) {
 		dst_mapped_nents--;
 		offset -= areq_ctx->dst_sgl->length;
 		sgl = sg_next(areq_ctx->dst_sgl);
@@ -1144,7 +1136,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 	if (is_gcm4543)
 		size_to_map += crypto_aead_ivsize(tfm);
 	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-		       &areq_ctx->src.nents,
+		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
 		       &dummy, &mapped_nents);
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index a726016bdbc1..846b262a9029 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -25,6 +25,7 @@ enum cc_sg_cpy_direct {
 
 struct cc_mlli {
 	cc_sram_addr_t sram_addr;
+	unsigned int mapped_nents;
 	unsigned int nents; //sg nents
 	unsigned int mlli_nents; //mlli nents might be different than the above
 };
-- 
2.7.4
