mempool_free(m, m->tc->pool->mapping_pool);
}
+static int commit_prepared_block(struct thin_c *tc,
+ dm_block_t virt_block, dm_block_t data_block)
+{
+ int r;
+
+ /*
+ * Commit the prepared block into the mapping btree.
+ * Any I/O for this block arriving after this point will get
+ * remapped to it directly.
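+ *
+ * On failure, metadata_operation_failed() has already been called
+ * here, so callers only need to error their own cell or bio.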
+ */
+ r = dm_thin_insert_block(tc->td, virt_block, data_block);
+ if (r)
+ metadata_operation_failed(tc->pool, "dm_thin_insert_block", r);
+
+ return r;
+}
+
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
goto out;
}
- /*
- * Commit the prepared block into the mapping btree.
- * Any I/O for this block arriving after this point will get
- * remapped to it directly.
- */
- r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
+ r = commit_prepared_block(tc, m->virt_block, m->data_block);
if (r) {
- metadata_operation_failed(pool, "dm_thin_insert_block", r);
cell_error(pool, m->cell);
goto out;
}
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+struct thin_bio_hints {
+ dm_block_t bio_virt_block;
+ struct dm_thin_endio_hook *pbd; /* per-bio-data */
+};
+
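+/*
+ * Allocate @bio_hints->pbd->blocks_this_allocation data blocks: the first
+ * for the bio's own virt_block (returned via @result), the rest
+ * pre-allocated and committed for the virt_blocks that follow it.
+ */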
+static int __alloc_data_blocks(struct thin_c *tc, struct thin_bio_hints *bio_hints,
+ dm_block_t *result)
{
int r;
- dm_block_t free_blocks;
+ dm_block_t last_virt_block, virt_block, data_block;
struct pool *pool = tc->pool;
+ /*
+ * Allocate the first block of the extent; its virt_block is already
+ * locked in a cell.  process_prepared_mapping() will save this
+ * data_block to the mapping btree.
+ */
+ r = dm_pool_alloc_data_block(pool->pmd, &data_block);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ return r;
+ }
+ *result = data_block;
+
+ /*
+ * Now pre-allocate the rest of the blocks needed for this extent.
+ *
+ * There is no need to lock these bio-less virt_blocks in a cell:
+ * we _know_ they will be written, because the bios that cover them
+ * are next on the sorted deferred_bio_list.
+ */
+ last_virt_block = bio_hints->bio_virt_block + bio_hints->pbd->blocks_this_allocation;
+ for (virt_block = bio_hints->bio_virt_block + 1; virt_block < last_virt_block; virt_block++) {
+ r = dm_pool_alloc_data_block(pool->pmd, &data_block);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ return r;
+ }
+
+ /* Must commit preallocated data_block to thin dev's mapping btree */
+ r = commit_prepared_block(tc, virt_block, data_block);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
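+/* Fetch the pool's current free data-block count into @result. */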
+static int check_free_blocks(struct pool *pool, dm_block_t *result)
+{
+ int r;
+ dm_block_t free_blocks;
+
if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
return -EINVAL;
}
}
- r = dm_pool_alloc_data_block(pool->pmd, result);
- if (r) {
- metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ *result = free_blocks;
+ return 0;
+}
+
+/*
+ * Allocate one or more data blocks.  If @bio_hints is not NULL, the caller
+ * allows multiple blocks to be allocated (the count is governed by
+ * @bio_hints->pbd->blocks_this_allocation).
+ */
+static int alloc_data_block(struct thin_c *tc, struct thin_bio_hints *bio_hints,
+ dm_block_t *result)
+{
+ int r;
+ dm_block_t free_blocks;
+ struct pool *pool = tc->pool;
+
+ if (get_pool_mode(pool) != PM_WRITE)
+ return -EINVAL;
+
+ r = check_free_blocks(pool, &free_blocks);
+ if (r)
return r;
+
+ if (!bio_hints || bio_hints->pbd->blocks_this_allocation == 1) {
+ r = dm_pool_alloc_data_block(pool->pmd, result);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ return r;
+ }
+ return 0;
}
+ if (free_blocks >= bio_hints->pbd->blocks_this_allocation)
+ return __alloc_data_blocks(tc, bio_hints, result);
+
+ /*
+ * Not enough free blocks to cover the whole extent: fall back to
+ * allocating a single block for this bio.
+ */
+ r = dm_pool_alloc_data_block(pool->pmd, result);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ return r;
+ }
+
return 0;
}
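
An aside for review: __alloc_data_blocks() relies on pbd->blocks_this_allocation
having been computed while the deferred_bio_list was sorted. A minimal,
standalone sketch of how that count could be derived; the struct and helper
names below are illustrative assumptions, not part of this patch:

typedef unsigned long long dm_block_t;	/* stand-in for the kernel typedef */

struct pending_bio {			/* hypothetical stand-in for a deferred bio */
	dm_block_t virt_block;		/* thin-dev block the bio maps to */
	struct pending_bio *next;	/* list sorted ascending by virt_block */
};

/*
 * Count the run of contiguous virt_blocks starting at @head, capped at
 * @max_extent.  The result would seed pbd->blocks_this_allocation for
 * the bio at the head of the run.
 */
static dm_block_t count_contiguous_blocks(struct pending_bio *head,
					  dm_block_t max_extent)
{
	dm_block_t count = 1;

	while (head->next && count < max_extent &&
	       head->next->virt_block == head->virt_block + 1) {
		count++;
		head = head->next;
	}

	return count;
}
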
{
int r;
dm_block_t data_block;
+ struct thin_bio_hints bio_hints;
struct pool *pool = tc->pool;
- r = alloc_data_block(tc, &data_block);
+ bio_hints.bio_virt_block = block;
+ bio_hints.pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+ r = alloc_data_block(tc, &bio_hints, &data_block);
switch (r) {
case 0:
schedule_internal_copy(tc, block, lookup_result->block,
{
int r;
dm_block_t data_block;
+ struct thin_bio_hints bio_hints;
struct pool *pool = tc->pool;
/*
return;
}
- r = alloc_data_block(tc, &data_block);
+ bio_hints.bio_virt_block = block;
+ bio_hints.pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+ r = alloc_data_block(tc, &bio_hints, &data_block);
switch (r) {
case 0:
if (tc->origin_dev)
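
One possible cleanup, offered only as a suggestion: both call sites fill
thin_bio_hints identically, so a small helper (hypothetical, not part of
this patch) would collapse the duplication:

static void init_bio_hints(struct thin_bio_hints *bio_hints, struct bio *bio,
			   dm_block_t block)
{
	bio_hints->bio_virt_block = block;
	bio_hints->pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
}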