	struct dm_dev *metadata_dev;
	struct dm_dev *origin_dev;
-	uint32_t block_size;
	dm_block_t nr_blocks;
-	unsigned sectors_per_block_shift;
+	uint32_t sectors_per_block;
+	int sectors_per_block_shift;
	struct era_metadata *md;
	struct workqueue_struct *wq;
/*----------------------------------------------------------------
 * Remapping.
 *---------------------------------------------------------------*/
+static bool block_size_is_power_of_two(struct era *era)
+{
+	return era->sectors_per_block_shift >= 0;
+}
+
static dm_block_t get_block(struct era *era, struct bio *bio)
{
-	return bio->bi_iter.bi_sector >> era->sectors_per_block_shift;
+	sector_t block_nr = bio->bi_iter.bi_sector;
+
+	if (!block_size_is_power_of_two(era))
+		(void) sector_div(block_nr, era->sectors_per_block);
+	else
+		block_nr >>= era->sectors_per_block_shift;
+
+	return block_nr;
}
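/*
 * Not part of the patch: a minimal userspace sketch of the remapping above,
 * using the hypothetical helper name block_of().  sector_div() divides a
 * sector_t in place and returns the remainder; the shift is kept only as a
 * fast path for power-of-two block sizes, so both branches compute the same
 * block number.
 */
#include <stdint.h>

static uint64_t block_of(uint64_t sector, uint32_t sectors_per_block,
			 int sectors_per_block_shift)
{
	if (sectors_per_block_shift < 0)
		return sector / sectors_per_block;	/* non power of two */

	return sector >> sectors_per_block_shift;	/* power of two */
}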
static void remap_to_origin(struct era *era, struct bio *bio)
static dm_block_t calc_nr_blocks(struct era *era)
{
-	return dm_sector_div_up(era->ti->len, era->block_size);
+	return dm_sector_div_up(era->ti->len, era->sectors_per_block);
}
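/*
 * Not part of the patch: dm_sector_div_up() is a round-up division, so a
 * target whose length is not an exact multiple of the block size still gets
 * a final, partial block tracked in the metadata.  Equivalent userspace
 * arithmetic:
 */
#include <stdint.h>

static uint64_t blocks_for_length(uint64_t len_sectors, uint32_t sectors_per_block)
{
	return (len_sectors + sectors_per_block - 1) / sectors_per_block;
}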
/*
		return -EINVAL;
	}
-	r = sscanf(argv[2], "%u%c", &era->block_size, &dummy);
+	r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
	if (r != 1) {
		ti->error = "Error parsing block size";
		era_destroy(era);
		return -EINVAL;
	}
-	era->sectors_per_block_shift = __ffs(era->block_size);
-	r = dm_set_target_max_io_len(ti, era->block_size);
+	r = dm_set_target_max_io_len(ti, era->sectors_per_block);
	if (r) {
		ti->error = "could not set max io len";
		era_destroy(era);
		return -EINVAL;
	}
-	md = metadata_open(era->metadata_dev->bdev, era->block_size, true);
+	if (era->sectors_per_block & (era->sectors_per_block - 1))
+		era->sectors_per_block_shift = -1;
+	else
+		era->sectors_per_block_shift = __ffs(era->sectors_per_block);
+
+	md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
	if (IS_ERR(md)) {
		ti->error = "Error reading metadata";
		era_destroy(era);
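/*
 * Not part of the patch: sketch of the power-of-two test used in the
 * constructor, assuming sectors_per_block has already been checked to be
 * non-zero.  x & (x - 1) clears the lowest set bit, so the result is zero
 * exactly when a single bit is set; __builtin_ctz() stands in here for the
 * kernel's __ffs() to recover the shift.
 */
#include <stdint.h>

static int block_shift_or_minus_one(uint32_t sectors_per_block)
{
	if (sectors_per_block & (sectors_per_block - 1))
		return -1;	/* not a power of two: get_block() falls back to sector_div() */

	return __builtin_ctz(sectors_per_block);
}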
		format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, era->origin_dev->bdev->bd_dev);
-		DMEMIT("%s %u", buf, era->block_size);
+		DMEMIT("%s %u", buf, era->sectors_per_block);
		break;
	}
	/*
	 * If the system-determined stacked limits are compatible with the
-	 * cache's blocksize (io_opt is a factor) do not override them.
+	 * era device's blocksize (io_opt is a factor) do not override them.
	 */
-	if (io_opt_sectors < era->block_size ||
-	    do_div(io_opt_sectors, era->block_size)) {
+	if (io_opt_sectors < era->sectors_per_block ||
+	    do_div(io_opt_sectors, era->sectors_per_block)) {
		blk_limits_io_min(limits, 0);
-		blk_limits_io_opt(limits, era->block_size << SECTOR_SHIFT);
+		blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
	}
}
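/*
 * Not part of the patch: sketch of the io_hints decision above.  do_div()
 * returns the remainder, so the stacked io_opt is kept only when it is
 * already a whole multiple of the era block size; otherwise the target
 * advertises its own block size, converted from 512-byte sectors to bytes
 * (SECTOR_SHIFT == 9).
 */
#include <stdint.h>

static int override_stacked_limits(uint64_t io_opt_sectors, uint32_t sectors_per_block)
{
	return io_opt_sectors < sectors_per_block ||
	       io_opt_sectors % sectors_per_block != 0;
}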