struct bio_list deferred_bio_list;
struct bio_list retry_on_resume_list;
struct rb_root sort_bio_list; /* sorted list of deferred bios */
+ struct mutex deferred_bio_mutex; /* serializes processing of deferred_bio_list */
};
/*----------------------------------------------------------------*/
return r;
}
*result = data_block;
+ DMERR("allocated data_block for virt_block=%llu blocks_this_allocation=%llu",
+ bio_hints->bio_virt_block, bio_hints->pbd->blocks_this_allocation);
/*
* Now pre-allocate the rest of the blocks needed for this extent.
r = commit_prepared_block(tc, virt_block, data_block);
if (r)
return r;
+
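+ /* Only log preallocation for the first few virtual blocks (1-10). */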
+ if (virt_block >= 1 && virt_block <= 10)
+ DMERR("preallocated data_block for virt_block=%llu", virt_block);
}
return 0;
{
int r;
dm_block_t free_blocks;
+ struct dm_thin_lookup_result lookup_result;
struct pool *pool = tc->pool;
if (get_pool_mode(pool) != PM_WRITE)
if (r)
return r;
- if (!bio_hints || bio_hints->pbd->blocks_this_allocation == 1) {
+ if (bio_hints && !bio_hints->pbd->blocks_this_allocation) {
+ /*
+ * Look up the block that was preallocated for this bio.
+ * FIXME: a preallocated block should always be available here.
+ */
+ r = dm_thin_find_block(tc->td, bio_hints->bio_virt_block, 1, &lookup_result);
+ if (!r)
+ *result = lookup_result.block;
+ else if (r == -ENODATA)
+ DMERR("couldn't find preallocated data_block for virt_block=%llu",
+ bio_hints->bio_virt_block);
+ return r;
+ }
+
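+ /* No extent hint, or a single-block extent: allocate one data block directly. */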
+ if (!bio_hints || bio_hints->pbd->blocks_this_allocation == 1) {
r = dm_pool_alloc_data_block(pool->pmd, result);
if (r) {
metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
struct dm_cell_key key;
struct dm_thin_lookup_result lookup_result;
+ struct dm_thin_endio_hook *pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
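+ /* pbd->blocks_this_allocation > 1 marks this bio as the head of a multi-block extent. */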
+
/*
* If cell is already occupied, then the block is already
* being provisioned so we have nothing further to do here.
*/
build_virtual_key(tc->td, block, &key);
- if (bio_detain(pool, &key, bio, &cell))
+ if (bio_detain(pool, &key, bio, &cell)) {
+ if (pbd->blocks_this_allocation > 1)
+ DMERR("extent head bio for virt_block=%llu was already detained?", block);
+ else
+ DMERR("bio_sector=%llu for virt_block=%llu was detained",
+ bio->bi_iter.bi_sector, block);
return;
+ }
+
+ if (pbd->blocks_this_allocation > 1)
+ DMERR("processing extent head bio_sector=%llu for virt_block=%llu",
+ bio->bi_iter.bi_sector, block);
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
+ /* FIXME: why are the same bios on the deferred_list? */
+ if (pbd->blocks_this_allocation > 1)
+ DMERR("extent head bio for virt_block=%llu already allocated", block);
+
if (lookup_result.shared) {
process_shared_bio(tc, bio, block, &lookup_result);
cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
break;
case -ENODATA:
+ //if (pbd->blocks_this_allocation > 1)
+ // DMERR("-ENODATA for extent head bio for virt_block=%llu", block);
if (bio_data_dir(bio) == READ && tc->origin_dev) {
inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_to_origin_and_issue(tc, bio);
- } else
+ } else {
+ if (pbd->blocks_this_allocation > 1)
+ DMERR("allocating extent head bio_sector=%llu for virt_block=%llu",
+ bio->bi_iter.bi_sector, block);
+ else
+ DMERR("allocating bio_sector=%llu for virt_block=%llu",
+ bio->bi_iter.bi_sector, block);
provision_block(tc, bio, block, cell);
+ }
break;
default:
struct bio *bio, *first_bio = NULL, *last_bio;
sector_t virt_block_offset;
+ DMERR("raw extent first=%llu last=%llu size=%u",
+ get_bio_block(tc, bio_list_peek(bios)), get_bio_block(tc, bios->tail), bio_list_size(bios));
+
/*
* Only allocate blocks for this extent on behalf of
* the first block aligned bio.
if (first_bio) {
last_bio = bios->tail;
+ DMERR(" extent first=%llu last=%llu size=%u",
+ get_bio_block(tc, first_bio), get_bio_block(tc, last_bio), bio_list_size(bios));
+
/*
* When this extent's first bio is allocated it will also
* prealloc the blocks needed for the rest of the extent.
pbd = dm_per_bio_data(first_bio, sizeof(struct dm_thin_endio_hook));
pbd->blocks_this_allocation =
get_bio_block(tc, last_bio) - get_bio_block(tc, first_bio) + 1;
+
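+ /* Dump the individual bios of a two-block extent for debugging. */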
+ if (pbd->blocks_this_allocation == 2) {
+ bio_list_for_each(bio, bios) {
+ DMERR("extent bio_sector=%llu block=%llu",
+ bio->bi_iter.bi_sector, get_bio_block(tc, bio));
+ }
+ }
}
/* merge all bios onto deferred_bio_list */
struct bio_list bios;
struct blk_plug plug;
+ /* FIXME: can this function run concurrently with itself? */
+ mutex_lock(&tc->deferred_bio_mutex);
+
+ //DMERR("------------------------------------------------------");
+
if (tc->requeue_mode) {
requeue_bio_list(tc, &tc->deferred_bio_list);
+ mutex_unlock(&tc->deferred_bio_mutex);
return;
}
if (bio_list_empty(&tc->deferred_bio_list)) {
spin_unlock_irqrestore(&tc->lock, flags);
+ mutex_unlock(&tc->deferred_bio_mutex);
return;
}
+ /* FIXME: discards are interleaved with regular bios. */
+
/*
* FIXME: allow sorting to be enabled/disabled via ctr and/or
* message (and auto-disable if data device is non-rotational?)
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_rw & REQ_DISCARD) {
pool->process_discard(tc, bio);
- else
+ } else {
+ DMERR("bio_sector=%llu for virt_block=%llu",
+ bio->bi_iter.bi_sector, get_bio_block(tc, bio));
+
pool->process_bio(tc, bio);
+ }
}
blk_finish_plug(&plug);
+
+ DMERR("------------------------------------------------------");
+
+ mutex_unlock(&tc->deferred_bio_mutex);
}
static void process_deferred_bios(struct pool *pool)
goto out_unlock;
}
spin_lock_init(&tc->lock);
+ mutex_init(&tc->deferred_bio_mutex);
bio_list_init(&tc->deferred_bio_list);
bio_list_init(&tc->retry_on_resume_list);
tc->sort_bio_list = RB_ROOT;