struct dm_deferred_entry *all_io_entry;
struct dm_thin_new_mapping *overwrite_mapping;
struct rb_node rb_node;
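+ /*
+ * Blocks to allocate on behalf of this bio: the extent length for
+ * the first block-aligned bio of an extent, 0 for the extent's
+ * later bios, 1 by default.
+ */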
+ dm_block_t blocks_this_allocation;
};
@@ ... @@ static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
return pool->sectors_per_block_shift >= 0;
}
-static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
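+/*
+ * Returns the virtual block containing bio's start sector; if
+ * @block_offset is non-NULL, also returns bio's sector offset into
+ * that block (an offset of 0 means the bio is block aligned).
+ */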
+static dm_block_t __get_bio_block(struct thin_c *tc, struct bio *bio,
+ sector_t *block_offset)
{
struct pool *pool = tc->pool;
sector_t block_nr = bio->bi_iter.bi_sector;
+ sector_t offset;
- if (block_size_is_power_of_two(pool))
+ if (block_size_is_power_of_two(pool)) {
+ if (block_offset)
+ *block_offset = block_nr & (pool->sectors_per_block - 1);
block_nr >>= pool->sectors_per_block_shift;
- else
- (void) sector_div(block_nr, pool->sectors_per_block);
+ } else {
+ offset = sector_div(block_nr, pool->sectors_per_block);
+ if (block_offset)
+ *block_offset = offset;
+ }
return block_nr;
}
+static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
+{
+ return __get_bio_block(tc, bio, NULL);
+}
+
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
struct pool *pool = tc->pool;
rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
}
+#if 1
+
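+/*
+ * @bios holds a run of contiguous bios.  Mark the first block-aligned
+ * bio to allocate blocks for the whole extent, mark the bios after it
+ * to allocate none, then move the run onto tc->deferred_bio_list.
+ */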
+static void __record_extent(struct thin_c *tc, struct bio_list *bios)
+{
+ struct dm_thin_endio_hook *pbd;
+ struct bio *bio, *first_bio = NULL, *last_bio;
+ sector_t virt_block_offset;
+
+ /*
+ * Only allocate blocks for this extent on behalf of
+ * the first block-aligned bio.
+ */
+ bio_list_for_each(bio, bios) {
+ /*
+ * Find the first block-aligned bio in the extent; if
+ * none exists, don't process these bios as an extent!
+ */
+ if (!first_bio) {
+ (void) __get_bio_block(tc, bio, &virt_block_offset);
+ if (!virt_block_offset)
+ first_bio = bio;
+ continue;
+ }
+ pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+ pbd->blocks_this_allocation = 0;
+ }
+
+ if (first_bio) {
+ last_bio = bios->tail;
+
+ /*
+ * When this extent's first bio is allocated it will also
+ * prealloc the blocks needed for the rest of the extent.
+ */
+ pbd = dm_per_bio_data(first_bio, sizeof(struct dm_thin_endio_hook));
+ pbd->blocks_this_allocation =
+ get_bio_block(tc, last_bio) - get_bio_block(tc, first_bio) + 1;
+ }
+
+ /* merge all bios onto deferred_bio_list */
+ bio_list_merge(&tc->deferred_bio_list, bios);
+}
+
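+/*
+ * Drain the sorted rb_tree, batching runs of contiguous bios into
+ * extents via __record_extent() and merging isolated bios straight
+ * onto tc->deferred_bio_list.
+ */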
+static void __extract_sorted_bios(struct thin_c *tc)
+{
+ struct rb_node *node;
+ struct dm_thin_endio_hook *pbd;
+ struct bio *bio, *prev_bio;
+ struct bio_list bios;
+ bool extent_found;
+
+ bio_list_init(&bios);
+ extent_found = false;
+ prev_bio = NULL;
+ /* re-fetch rb_first() each pass; the current node is erased below */
+ for (node = rb_first(&tc->sort_bio_list); node; node = rb_first(&tc->sort_bio_list)) {
+ pbd = thin_pbd(node);
+ bio = thin_bio(pbd);
+
+ /* Transfer each bio out of the sorted rb_tree */
+ rb_erase(&pbd->rb_node, &tc->sort_bio_list);
+
+ /* Identify logical extents within the bio list */
+ if (!prev_bio || (bio_end_sector(prev_bio) == bio->bi_iter.bi_sector)) {
+ /*
+ * This may be the first bio of an extent we haven't seen
+ * the rest of yet, so only set extent_found once a second
+ * contiguous bio confirms it.
+ */
+ bio_list_add(&bios, bio);
+ if (prev_bio)
+ extent_found = true;
+ prev_bio = bio;
+ continue;
+ }
+
+ /* prev_bio and bio are not contiguous, but was an extent identified? */
+ if (extent_found)
+ __record_extent(tc, &bios);
+ else
+ bio_list_merge(&tc->deferred_bio_list, &bios);
+
+ bio_list_init(&bios);
+ bio_list_add(&bios, bio);
+ extent_found = false;
+ prev_bio = bio;
+ }
+
+ /*
+ * Flush the final run, which may be a single bio or a
+ * trailing contiguous extent.
+ */
+ if (!bio_list_empty(&bios)) {
+ if (extent_found)
+ __record_extent(tc, &bios);
+ else
+ bio_list_merge(&tc->deferred_bio_list, &bios);
+ }
+
+ WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
+}
+
+#else
+
static void __extract_sorted_bios(struct thin_c *tc)
{
struct rb_node *node;
pbd = thin_pbd(node);
bio = thin_bio(pbd);
- bio_list_add(&tc->deferred_bio_list, bio);
+ /* Transfer each bio out of the sorted rb_tree */
rb_erase(&pbd->rb_node, &tc->sort_bio_list);
+ bio_list_add(&tc->deferred_bio_list, bio);
}
WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
}
+#endif
+
static void __sort_thin_deferred_bios(struct thin_c *tc)
{
struct bio *bio;
h->shared_read_entry = NULL;
h->all_io_entry = NULL;
h->overwrite_mapping = NULL;
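+ /* by default allocate at most one block on behalf of this bio */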
+ h->blocks_this_allocation = 1;
}
/*