dm thin: lots of debugging.. FML thin-dev
author Mike Snitzer <snitzer@redhat.com>
Fri, 28 Mar 2014 06:35:06 +0000 (02:35 -0400)
committer Mike Snitzer <snitzer@redhat.com>
Fri, 28 Mar 2014 06:35:06 +0000 (02:35 -0400)
drivers/md/dm-thin.c

index 05ab9bf6b4fe8b9746a91cbc49652c140e469b21..eb808a0af06c9a24e1d64d4c504ee7f5ba91dd25 100644 (file)
@@ -232,6 +232,7 @@ struct thin_c {
        struct bio_list deferred_bio_list;
        struct bio_list retry_on_resume_list;
        struct rb_root sort_bio_list; /* sorted list of deferred bios */
+       struct mutex deferred_bio_mutex;
 };
 
 /*----------------------------------------------------------------*/
@@ -1000,6 +1001,8 @@ static int __alloc_data_blocks(struct thin_c *tc, struct thin_bio_hints *bio_hin
                return r;
        }
        *result = data_block;
+       DMERR("allocated data_block for virt_block=%llu blocks_this_allocation=%llu",
+             bio_hints->bio_virt_block, bio_hints->pbd->blocks_this_allocation);
 
        /*
         * Now pre-allocate the rest of the blocks needed for this extent.
@@ -1019,6 +1022,9 @@ static int __alloc_data_blocks(struct thin_c *tc, struct thin_bio_hints *bio_hin
                r = commit_prepared_block(tc, virt_block, data_block);
                if (r)
                        return r;
+
+               if (virt_block >= 1 && virt_block <= 10)
+                       DMERR("preallocated data_block for virt_block=%llu", virt_block);
        }
 
        return 0;
@@ -1074,6 +1080,7 @@ static int alloc_data_block(struct thin_c *tc, struct thin_bio_hints *bio_hints,
 {
        int r;
        dm_block_t free_blocks;
+       struct dm_thin_lookup_result lookup_result;
        struct pool *pool = tc->pool;
 
        if (get_pool_mode(pool) != PM_WRITE)
@@ -1083,7 +1090,19 @@ static int alloc_data_block(struct thin_c *tc, struct thin_bio_hints *bio_hints,
        if (r)
                return r;
 
-       if (!bio_hints || bio_hints->pbd->blocks_this_allocation == 1) {
+       if (!bio_hints->pbd->blocks_this_allocation) {
+               /* Lookup preallocated block */
+               // FIXME: preallocated block should _never_ not be available...
+               r = dm_thin_find_block(tc->td, bio_hints->bio_virt_block, 1, &lookup_result);
+               if (!r)
+                       *result = lookup_result.block;
+               if (r == -ENODATA)
+                       DMERR("couldn't find preallocated data_block for virt_block=%llu",
+                             bio_hints->bio_virt_block);
+               return r;
+       }
+
+       if (bio_hints->pbd->blocks_this_allocation == 1) {
                r = dm_pool_alloc_data_block(pool->pmd, result);
                if (r) {
                        metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
@@ -1370,17 +1389,33 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
        struct dm_cell_key key;
        struct dm_thin_lookup_result lookup_result;
 
+       struct dm_thin_endio_hook *pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+
        /*
         * If cell is already occupied, then the block is already
         * being provisioned so we have nothing further to do here.
         */
        build_virtual_key(tc->td, block, &key);
-       if (bio_detain(pool, &key, bio, &cell))
+       if (bio_detain(pool, &key, bio, &cell)) {
+               if (pbd->blocks_this_allocation > 1)
+                       DMERR("extent head bio for virt_block=%llu was already detained?", block);
+               else
+                       DMERR("bio_sector=%llu for virt_block=%llu was detained",
+                             bio->bi_iter.bi_sector, block);
                return;
+       }
+
+       if (pbd->blocks_this_allocation > 1)
+               DMERR("processing extent head bio_sector=%llu for virt_block=%llu",
+                     bio->bi_iter.bi_sector, block);
 
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
+               // FIXME: why are the same bios on the deferred_list?
+               if (pbd->blocks_this_allocation > 1)
+                       DMERR("extent head bio for virt_block=%llu already allocated", block);
+
                if (lookup_result.shared) {
                        process_shared_bio(tc, bio, block, &lookup_result);
                        cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
@@ -1393,13 +1428,22 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
                break;
 
        case -ENODATA:
+               //if (pbd->blocks_this_allocation > 1)
+               //      DMERR("-ENODATA for extent head bio for virt_block=%llu", block);
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
                        inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);
 
                        remap_to_origin_and_issue(tc, bio);
-               } else
+               } else {
+                       if (pbd->blocks_this_allocation > 1)
+                               DMERR("allocating extent head bio_sector=%llu for virt_block=%llu",
+                                     bio->bi_iter.bi_sector, block);
+                       else
+                               DMERR("allocating bio_sector=%llu for virt_block=%llu",
+                                     bio->bi_iter.bi_sector, block);
                        provision_block(tc, bio, block, cell);
+               }
                break;
 
        default:
@@ -1507,6 +1551,9 @@ static void __record_extent(struct thin_c *tc, struct bio_list *bios)
        struct bio *bio, *first_bio = NULL, *last_bio;
        sector_t virt_block_offset;
 
+       DMERR("raw extent first=%llu last=%llu size=%u",
+             get_bio_block(tc, bio_list_peek(bios)), get_bio_block(tc, bios->tail), bio_list_size(bios));
+
        /*
         * Only allocate blocks for this extent on behalf of
         * the first block aligned bio.
@@ -1529,6 +1576,9 @@ static void __record_extent(struct thin_c *tc, struct bio_list *bios)
        if (first_bio) {
                last_bio = bios->tail;
 
+               DMERR("    extent first=%llu last=%llu size=%u",
+                     get_bio_block(tc, first_bio), get_bio_block(tc, last_bio), bio_list_size(bios));
+
                /*
                 * When this extent's first bio is allocated it will also
                 * prealloc the blocks needed for the rest of the extent.
@@ -1536,6 +1586,12 @@ static void __record_extent(struct thin_c *tc, struct bio_list *bios)
                pbd = dm_per_bio_data(first_bio, sizeof(struct dm_thin_endio_hook));
                pbd->blocks_this_allocation =
                        get_bio_block(tc, last_bio) - get_bio_block(tc, first_bio) + 1;
+
+               if (pbd->blocks_this_allocation == 2)
+                       bio_list_for_each(bio, bios) {
+                               DMERR("extent bio_sector=%llu block=%llu",
+                                     bio->bi_iter.bi_sector, get_bio_block(tc, bio));
+                       }
        }
 
        /* merge all bios onto deferred_bio_list */
@@ -1651,8 +1707,14 @@ static void process_thin_deferred_bios(struct thin_c *tc)
        struct bio_list bios;
        struct blk_plug plug;
 
+       // FIXME: is it possible that we're running this method concurrently!?
+       mutex_lock(&tc->deferred_bio_mutex);
+
+       //DMERR("------------------------------------------------------");
+
        if (tc->requeue_mode) {
                requeue_bio_list(tc, &tc->deferred_bio_list);
+               mutex_unlock(&tc->deferred_bio_mutex);
                return;
        }
 
@@ -1662,9 +1724,12 @@ static void process_thin_deferred_bios(struct thin_c *tc)
 
        if (bio_list_empty(&tc->deferred_bio_list)) {
                spin_unlock_irqrestore(&tc->lock, flags);
+               mutex_unlock(&tc->deferred_bio_mutex);
                return;
        }
 
+       // FIXME: discards are interleaved with regular bios!.. damnit
+
        /*
         * FIXME: allow sorting to be enabled/disabled via ctr and/or
         * message (and auto-disable if data device is non-rotational?)
@@ -1693,10 +1758,18 @@ static void process_thin_deferred_bios(struct thin_c *tc)
 
                if (bio->bi_rw & REQ_DISCARD)
                        pool->process_discard(tc, bio);
-               else
+               else {
+                       DMERR("bio_sector=%llu for virt_block=%llu",
+                             bio->bi_iter.bi_sector, get_bio_block(tc, bio));
+
                        pool->process_bio(tc, bio);
+               }
        }
        blk_finish_plug(&plug);
+
+       DMERR("------------------------------------------------------");
+
+       mutex_unlock(&tc->deferred_bio_mutex);
 }
 
 static void process_deferred_bios(struct pool *pool)
@@ -3331,6 +3404,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out_unlock;
        }
        spin_lock_init(&tc->lock);
+       mutex_init(&tc->deferred_bio_mutex);
        bio_list_init(&tc->deferred_bio_list);
        bio_list_init(&tc->retry_on_resume_list);
        tc->sort_bio_list = RB_ROOT;