| // SPDX-License-Identifier: GPL-2.0 | 
 | /* | 
 * Generic block device helper functions (discard, write same, zero-out).
 |  */ | 
 | #include <linux/kernel.h> | 
 | #include <linux/module.h> | 
 | #include <linux/bio.h> | 
 | #include <linux/blkdev.h> | 
 | #include <linux/scatterlist.h> | 
 |  | 
 | #include "blk.h" | 
 |  | 
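/*
 * Allocate the next bio in a chain.  If @bio is non-NULL it is chained to
 * the newly allocated bio and submitted, so completion of the whole chain
 * is reported through the final, still-unsubmitted bio returned to the
 * caller.
 */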
 | static struct bio *next_bio(struct bio *bio, unsigned int nr_pages, | 
 | 		gfp_t gfp) | 
 | { | 
 | 	struct bio *new = bio_alloc(gfp, nr_pages); | 
 |  | 
 | 	if (bio) { | 
 | 		bio_chain(bio, new); | 
 | 		submit_bio(bio); | 
 | 	} | 
 |  | 
 | 	return new; | 
 | } | 
 |  | 
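/**
 * __blkdev_issue_discard - queue discard bios for a sector range
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Build and chain discard bios for the given range.  All bios except
 *    the last are submitted as they are chained; on success *@biop holds
 *    the final, unsubmitted bio, which the caller must submit.
 */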
 | int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 
 | 		sector_t nr_sects, gfp_t gfp_mask, int flags, | 
 | 		struct bio **biop) | 
 | { | 
 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 | 	struct bio *bio = *biop; | 
 | 	unsigned int granularity; | 
 | 	unsigned int op; | 
 | 	int alignment; | 
 | 	sector_t bs_mask; | 
 |  | 
 | 	if (!q) | 
 | 		return -ENXIO; | 
 |  | 
 | 	if (bdev_read_only(bdev)) | 
 | 		return -EPERM; | 
 |  | 
 | 	if (flags & BLKDEV_DISCARD_SECURE) { | 
 | 		if (!blk_queue_secure_erase(q)) | 
 | 			return -EOPNOTSUPP; | 
 | 		op = REQ_OP_SECURE_ERASE; | 
 | 	} else { | 
 | 		if (!blk_queue_discard(q)) | 
 | 			return -EOPNOTSUPP; | 
 | 		op = REQ_OP_DISCARD; | 
 | 	} | 
 |  | 
 | 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; | 
 | 	if ((sector | nr_sects) & bs_mask) | 
 | 		return -EINVAL; | 
 |  | 
	/* Zero-sector (unknown) and one-sector granularities are the same. */
 | 	granularity = max(q->limits.discard_granularity >> 9, 1U); | 
 | 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity; | 
 |  | 
 | 	while (nr_sects) { | 
 | 		unsigned int req_sects; | 
 | 		sector_t end_sect, tmp; | 
 |  | 
 | 		/* | 
		 * Issue in chunks of the user-defined max discard setting,
		 * ensuring that bi_size doesn't overflow.
 | 		 */ | 
 | 		req_sects = min_t(sector_t, nr_sects, | 
 | 					q->limits.max_discard_sectors); | 
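		/*
		 * max_discard_sectors may be zero if the device lost its
		 * discard capability after the blk_queue_discard() check
		 * above; bail out rather than looping forever.
		 */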
 | 		if (!req_sects) | 
 | 			goto fail; | 
 | 		if (req_sects > UINT_MAX >> 9) | 
 | 			req_sects = UINT_MAX >> 9; | 
 |  | 
 | 		/* | 
 | 		 * If splitting a request, and the next starting sector would be | 
 | 		 * misaligned, stop the discard at the previous aligned sector. | 
 | 		 */ | 
 | 		end_sect = sector + req_sects; | 
 | 		tmp = end_sect; | 
 | 		if (req_sects < nr_sects && | 
 | 		    sector_div(tmp, granularity) != alignment) { | 
 | 			end_sect = end_sect - alignment; | 
 | 			sector_div(end_sect, granularity); | 
 | 			end_sect = end_sect * granularity + alignment; | 
 | 			req_sects = end_sect - sector; | 
 | 		} | 
 |  | 
 | 		bio = next_bio(bio, 0, gfp_mask); | 
 | 		bio->bi_iter.bi_sector = sector; | 
 | 		bio_set_dev(bio, bdev); | 
 | 		bio_set_op_attrs(bio, op, 0); | 
 |  | 
 | 		bio->bi_iter.bi_size = req_sects << 9; | 
 | 		nr_sects -= req_sects; | 
 | 		sector = end_sect; | 
 |  | 
 | 		/* | 
 | 		 * We can loop for a long time in here, if someone does | 
 | 		 * full device discards (like mkfs). Be nice and allow | 
 | 		 * us to schedule out to avoid softlocking if preempt | 
 | 		 * is disabled. | 
 | 		 */ | 
 | 		cond_resched(); | 
 | 	} | 
 |  | 
 | 	*biop = bio; | 
 | 	return 0; | 
 |  | 
 | fail: | 
 | 	if (bio) { | 
 | 		submit_bio_wait(bio); | 
 | 		bio_put(bio); | 
 | 	} | 
 | 	*biop = NULL; | 
 | 	return -EOPNOTSUPP; | 
 | } | 
 | EXPORT_SYMBOL(__blkdev_issue_discard); | 
 |  | 
 | /** | 
 |  * blkdev_issue_discard - queue a discard | 
 |  * @bdev:	blockdev to issue discard for | 
 |  * @sector:	start sector | 
 |  * @nr_sects:	number of sectors to discard | 
 |  * @gfp_mask:	memory allocation flags (for bio_alloc) | 
 |  * @flags:	BLKDEV_DISCARD_* flags to control behaviour | 
 |  * | 
 |  * Description: | 
 |  *    Issue a discard request for the sectors in question. | 
 |  */ | 
 | int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 
 | 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) | 
 | { | 
 | 	struct bio *bio = NULL; | 
 | 	struct blk_plug plug; | 
 | 	int ret; | 
 |  | 
 | 	blk_start_plug(&plug); | 
 | 	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags, | 
 | 			&bio); | 
 | 	if (!ret && bio) { | 
 | 		ret = submit_bio_wait(bio); | 
 | 		if (ret == -EOPNOTSUPP) | 
 | 			ret = 0; | 
 | 		bio_put(bio); | 
 | 	} | 
 | 	blk_finish_plug(&plug); | 
 |  | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL(blkdev_issue_discard); | 
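
/*
 * Example (illustrative only): discard the first megabyte of a device the
 * caller already holds a reference to, assuming the device supports
 * discard:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 */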
 |  | 
 | /** | 
 * __blkdev_issue_write_same - generate a number of bios with the same page
 |  * @bdev:	target blockdev | 
 |  * @sector:	start sector | 
 |  * @nr_sects:	number of sectors to write | 
 |  * @gfp_mask:	memory allocation flags (for bio_alloc) | 
 |  * @page:	page containing data to write | 
 |  * @biop:	pointer to anchor bio | 
 |  * | 
 |  * Description: | 
 *  Generate and issue a number of REQ_OP_WRITE_SAME bios, each carrying
 *  the same page.
 |  */ | 
 | static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | 
 | 		sector_t nr_sects, gfp_t gfp_mask, struct page *page, | 
 | 		struct bio **biop) | 
 | { | 
 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 | 	unsigned int max_write_same_sectors; | 
 | 	struct bio *bio = *biop; | 
 | 	sector_t bs_mask; | 
 |  | 
 | 	if (!q) | 
 | 		return -ENXIO; | 
 |  | 
 | 	if (bdev_read_only(bdev)) | 
 | 		return -EPERM; | 
 |  | 
 | 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; | 
 | 	if ((sector | nr_sects) & bs_mask) | 
 | 		return -EINVAL; | 
 |  | 
 | 	if (!bdev_write_same(bdev)) | 
 | 		return -EOPNOTSUPP; | 
 |  | 
 | 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */ | 
 | 	max_write_same_sectors = UINT_MAX >> 9; | 
 |  | 
 | 	while (nr_sects) { | 
 | 		bio = next_bio(bio, 1, gfp_mask); | 
 | 		bio->bi_iter.bi_sector = sector; | 
 | 		bio_set_dev(bio, bdev); | 
 | 		bio->bi_vcnt = 1; | 
 | 		bio->bi_io_vec->bv_page = page; | 
 | 		bio->bi_io_vec->bv_offset = 0; | 
 | 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); | 
 | 		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0); | 
 |  | 
 | 		if (nr_sects > max_write_same_sectors) { | 
 | 			bio->bi_iter.bi_size = max_write_same_sectors << 9; | 
 | 			nr_sects -= max_write_same_sectors; | 
 | 			sector += max_write_same_sectors; | 
 | 		} else { | 
 | 			bio->bi_iter.bi_size = nr_sects << 9; | 
 | 			nr_sects = 0; | 
 | 		} | 
 | 		cond_resched(); | 
 | 	} | 
 |  | 
 | 	*biop = bio; | 
 | 	return 0; | 
 | } | 
 |  | 
 | /** | 
 |  * blkdev_issue_write_same - queue a write same operation | 
 |  * @bdev:	target blockdev | 
 |  * @sector:	start sector | 
 |  * @nr_sects:	number of sectors to write | 
 |  * @gfp_mask:	memory allocation flags (for bio_alloc) | 
 |  * @page:	page containing data | 
 |  * | 
 |  * Description: | 
 |  *    Issue a write same request for the sectors in question. | 
 |  */ | 
 | int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | 
 | 				sector_t nr_sects, gfp_t gfp_mask, | 
 | 				struct page *page) | 
 | { | 
 | 	struct bio *bio = NULL; | 
 | 	struct blk_plug plug; | 
 | 	int ret; | 
 |  | 
 | 	blk_start_plug(&plug); | 
 | 	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page, | 
 | 			&bio); | 
 | 	if (ret == 0 && bio) { | 
 | 		ret = submit_bio_wait(bio); | 
 | 		bio_put(bio); | 
 | 	} | 
 | 	blk_finish_plug(&plug); | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL(blkdev_issue_write_same); | 
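
/*
 * Example (illustrative only): replicate one logical block held in
 * @pattern (a page the caller allocated and filled) across the first 2048
 * sectors of a device that advertises WRITE SAME:
 *
 *	int err = blkdev_issue_write_same(bdev, 0, 2048, GFP_KERNEL,
 *					  pattern);
 */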
 |  | 
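/*
 * Build REQ_OP_WRITE_ZEROES bios for the range and chain them onto *@biop,
 * submitting all but the last.  Returns -EOPNOTSUPP when the device does
 * not advertise a write-zeroes limit.
 */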
 | static int __blkdev_issue_write_zeroes(struct block_device *bdev, | 
 | 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask, | 
 | 		struct bio **biop, unsigned flags) | 
 | { | 
 | 	struct bio *bio = *biop; | 
 | 	unsigned int max_write_zeroes_sectors; | 
 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 |  | 
 | 	if (!q) | 
 | 		return -ENXIO; | 
 |  | 
 | 	if (bdev_read_only(bdev)) | 
 | 		return -EPERM; | 
 |  | 
	/*
	 * bdev_write_zeroes_sectors() is expected to be capped so that
	 * max_write_zeroes_sectors << 9 cannot overflow bi_size.
	 */
 | 	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev); | 
 |  | 
 | 	if (max_write_zeroes_sectors == 0) | 
 | 		return -EOPNOTSUPP; | 
 |  | 
 | 	while (nr_sects) { | 
 | 		bio = next_bio(bio, 0, gfp_mask); | 
 | 		bio->bi_iter.bi_sector = sector; | 
 | 		bio_set_dev(bio, bdev); | 
 | 		bio->bi_opf = REQ_OP_WRITE_ZEROES; | 
 | 		if (flags & BLKDEV_ZERO_NOUNMAP) | 
 | 			bio->bi_opf |= REQ_NOUNMAP; | 
 |  | 
 | 		if (nr_sects > max_write_zeroes_sectors) { | 
 | 			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9; | 
 | 			nr_sects -= max_write_zeroes_sectors; | 
 | 			sector += max_write_zeroes_sectors; | 
 | 		} else { | 
 | 			bio->bi_iter.bi_size = nr_sects << 9; | 
 | 			nr_sects = 0; | 
 | 		} | 
 | 		cond_resched(); | 
 | 	} | 
 |  | 
 | 	*biop = bio; | 
 | 	return 0; | 
 | } | 
 |  | 
 | /* | 
 |  * Convert a number of 512B sectors to a number of pages. | 
 |  * The result is limited to a number of pages that can fit into a BIO. | 
 * Also make sure that the result is always at least 1 (page) for cases
 * where nr_sects is smaller than the number of sectors in a page.
 |  */ | 
 | static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) | 
 | { | 
 | 	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512); | 
 |  | 
 | 	return min(pages, (sector_t)BIO_MAX_PAGES); | 
 | } | 
 |  | 
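/*
 * Fallback path: zero the range with ordinary writes of ZERO_PAGE(0),
 * chaining the resulting bios onto *@biop and submitting all but the last.
 */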
 | static int __blkdev_issue_zero_pages(struct block_device *bdev, | 
 | 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask, | 
 | 		struct bio **biop) | 
 | { | 
 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 | 	struct bio *bio = *biop; | 
 | 	int bi_size = 0; | 
 | 	unsigned int sz; | 
 |  | 
 | 	if (!q) | 
 | 		return -ENXIO; | 
 |  | 
 | 	if (bdev_read_only(bdev)) | 
 | 		return -EPERM; | 
 |  | 
 | 	while (nr_sects != 0) { | 
 | 		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), | 
 | 			       gfp_mask); | 
 | 		bio->bi_iter.bi_sector = sector; | 
 | 		bio_set_dev(bio, bdev); | 
 | 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | 
 |  | 
 | 		while (nr_sects != 0) { | 
 | 			sz = min((sector_t) PAGE_SIZE, nr_sects << 9); | 
 | 			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0); | 
 | 			nr_sects -= bi_size >> 9; | 
 | 			sector += bi_size >> 9; | 
 | 			if (bi_size < sz) | 
 | 				break; | 
 | 		} | 
 | 		cond_resched(); | 
 | 	} | 
 |  | 
 | 	*biop = bio; | 
 | 	return 0; | 
 | } | 
 |  | 
 | /** | 
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue bios against
 |  * @sector:	start sector | 
 |  * @nr_sects:	number of sectors to write | 
 |  * @gfp_mask:	memory allocation flags (for bio_alloc) | 
 |  * @biop:	pointer to anchor bio | 
 |  * @flags:	controls detailed behavior | 
 |  * | 
 |  * Description: | 
 |  *  Zero-fill a block range, either using hardware offload or by explicitly | 
 |  *  writing zeroes to the device. | 
 |  * | 
 |  *  If a device is using logical block provisioning, the underlying space will | 
 |  *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP. | 
 |  * | 
 |  *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return | 
 |  *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided. | 
 |  */ | 
 | int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 
 | 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, | 
 | 		unsigned flags) | 
 | { | 
 | 	int ret; | 
 | 	sector_t bs_mask; | 
 |  | 
 | 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; | 
 | 	if ((sector | nr_sects) & bs_mask) | 
 | 		return -EINVAL; | 
 |  | 
 | 	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, | 
 | 			biop, flags); | 
 | 	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK)) | 
 | 		return ret; | 
 |  | 
 | 	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, | 
 | 					 biop); | 
 | } | 
 | EXPORT_SYMBOL(__blkdev_issue_zeroout); | 
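
/*
 * Example (illustrative only): the two-step pattern used by callers that
 * want to batch the zeroing bios themselves, mirroring what
 * blkdev_issue_zeroout() does below:
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				     &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */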
 |  | 
 | /** | 
 |  * blkdev_issue_zeroout - zero-fill a block range | 
 |  * @bdev:	blockdev to write | 
 |  * @sector:	start sector | 
 |  * @nr_sects:	number of sectors to write | 
 |  * @gfp_mask:	memory allocation flags (for bio_alloc) | 
 |  * @flags:	controls detailed behavior | 
 |  * | 
 |  * Description: | 
 |  *  Zero-fill a block range, either using hardware offload or by explicitly | 
 |  *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the | 
 |  *  valid values for %flags. | 
 |  */ | 
 | int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 
 | 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags) | 
 | { | 
 | 	int ret = 0; | 
 | 	sector_t bs_mask; | 
 | 	struct bio *bio; | 
 | 	struct blk_plug plug; | 
 | 	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev); | 
 |  | 
 | 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; | 
 | 	if ((sector | nr_sects) & bs_mask) | 
 | 		return -EINVAL; | 
 |  | 
 | retry: | 
 | 	bio = NULL; | 
 | 	blk_start_plug(&plug); | 
 | 	if (try_write_zeroes) { | 
 | 		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, | 
 | 						  gfp_mask, &bio, flags); | 
 | 	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { | 
 | 		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects, | 
 | 						gfp_mask, &bio); | 
 | 	} else { | 
 | 		/* No zeroing offload support */ | 
 | 		ret = -EOPNOTSUPP; | 
 | 	} | 
 | 	if (ret == 0 && bio) { | 
 | 		ret = submit_bio_wait(bio); | 
 | 		bio_put(bio); | 
 | 	} | 
 | 	blk_finish_plug(&plug); | 
 | 	if (ret && try_write_zeroes) { | 
 | 		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { | 
 | 			try_write_zeroes = false; | 
 | 			goto retry; | 
 | 		} | 
 | 		if (!bdev_write_zeroes_sectors(bdev)) { | 
 | 			/* | 
 | 			 * Zeroing offload support was indicated, but the | 
 | 			 * device reported ILLEGAL REQUEST (for some devices | 
 | 			 * there is no non-destructive way to verify whether | 
 | 			 * WRITE ZEROES is actually supported). | 
 | 			 */ | 
 | 			ret = -EOPNOTSUPP; | 
 | 		} | 
 | 	} | 
 |  | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL(blkdev_issue_zeroout); |
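
/*
 * Example (illustrative only): zero the first megabyte of a device,
 * permitting the explicit-write fallback if no zeroing offload is
 * available:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, 0);
 */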