    		struct btrfs_leaf *const leafhdr =
    		    (struct btrfs_leaf *)sf->hdr;
    
    		if (-1 == sf->i) {
    			sf->nr = le32_to_cpu(leafhdr->header.nritems);
    
    			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    				printk(KERN_INFO
    				       "leaf %llu items %d generation %llu"
    				       " owner %llu\n",
    				       (unsigned long long)
    				       sf->block_ctx->start,
    				       sf->nr,
    				       (unsigned long long)
    				       le64_to_cpu(leafhdr->header.generation),
    				       (unsigned long long)
    				       le64_to_cpu(leafhdr->header.owner));
    		}
    
    continue_with_current_leaf_stack_frame:
    		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
    			sf->i++;
    			sf->num_copies = 0;
    		}
    
    		if (sf->i < sf->nr) {
    
    			struct btrfs_item disk_item;
    			u32 disk_item_offset =
    				(uintptr_t)(leafhdr->items + sf->i) -
    				(uintptr_t)leafhdr;
    			struct btrfs_disk_key *disk_key;
    
    			if (disk_item_offset + sizeof(struct btrfs_item) >
    			    sf->block_ctx->len) {
    leaf_item_out_of_bounds_error:
    				printk(KERN_INFO
    				       "btrfsic: leaf item out of bounds at logical %llu, dev %s\n",
    				       sf->block_ctx->start,
    				       sf->block_ctx->dev->name);
    				goto one_stack_frame_backwards;
    			}
    			btrfsic_read_from_block_data(sf->block_ctx,
    						     &disk_item,
    						     disk_item_offset,
    						     sizeof(struct btrfs_item));
    			item_offset = le32_to_cpu(disk_item.offset);
    			disk_key = &disk_item.key;
    
    			type = disk_key->type;
    
    			if (BTRFS_ROOT_ITEM_KEY == type) {
    
    				struct btrfs_root_item root_item;
    				u32 root_item_offset;
    				u64 next_bytenr;
    
    				root_item_offset = item_offset +
    					offsetof(struct btrfs_leaf, items);
    				if (root_item_offset +
    				    sizeof(struct btrfs_root_item) >
    				    sf->block_ctx->len)
    					goto leaf_item_out_of_bounds_error;
    				btrfsic_read_from_block_data(
    					sf->block_ctx, &root_item,
    					root_item_offset,
    					sizeof(struct btrfs_root_item));
    				next_bytenr = le64_to_cpu(root_item.bytenr);
    
    
    				sf->error =
    				    btrfsic_create_link_to_next_block(
    						state,
    						sf->block,
    						sf->block_ctx,
    						next_bytenr,
    						sf->limit_nesting,
    						&sf->next_block_ctx,
    						&sf->next_block,
    						force_iodone_flag,
    						&sf->num_copies,
    						&sf->mirror_num,
    						disk_key,
    						le64_to_cpu(root_item.generation));
    				if (sf->error)
    					goto one_stack_frame_backwards;
    
    				if (NULL != sf->next_block) {
    					struct btrfs_header *const next_hdr =
    					    (struct btrfs_header *)
    					    sf->next_block_ctx.datav[0];

    					next_stack =
    					    btrfsic_stack_frame_alloc();
    					if (NULL == next_stack) {
    						btrfsic_release_block_ctx(
    								&sf->
    								next_block_ctx);
    						goto one_stack_frame_backwards;
    					}
    
    					next_stack->i = -1;
    					next_stack->block = sf->next_block;
    					next_stack->block_ctx =
    					    &sf->next_block_ctx;
    					next_stack->next_block = NULL;
    					next_stack->hdr = next_hdr;
    					next_stack->limit_nesting =
    					    sf->limit_nesting - 1;
    					next_stack->prev = sf;
    					sf = next_stack;
    					goto continue_with_new_stack_frame;
    				}
    			} else if (BTRFS_EXTENT_DATA_KEY == type &&
    				   state->include_extent_data) {
    				sf->error = btrfsic_handle_extent_data(
    						state,
    						sf->block,
    						sf->block_ctx,
    						item_offset,
    						force_iodone_flag);
    				if (sf->error)
    					goto one_stack_frame_backwards;
    			}
    
    			goto continue_with_current_leaf_stack_frame;
    		}
    	} else {
    		struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;
    
    		if (-1 == sf->i) {
    			sf->nr = le32_to_cpu(nodehdr->header.nritems);
    
    			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    				printk(KERN_INFO "node %llu level %d items %d"
    				       " generation %llu owner %llu\n",
    				       (unsigned long long)
    				       sf->block_ctx->start,
    				       nodehdr->header.level, sf->nr,
    				       (unsigned long long)
    				       le64_to_cpu(nodehdr->header.generation),
    				       (unsigned long long)
    				       le64_to_cpu(nodehdr->header.owner));
    		}
    
    continue_with_current_node_stack_frame:
    		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
    			sf->i++;
    			sf->num_copies = 0;
    		}
    
    		if (sf->i < sf->nr) {
    
    			struct btrfs_key_ptr key_ptr;
    			u32 key_ptr_offset;
    			u64 next_bytenr;
    
    			key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
    					  (uintptr_t)nodehdr;
    			if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
    			    sf->block_ctx->len) {
    				printk(KERN_INFO
    				       "btrfsic: node item out of bounds at logical %llu, dev %s\n",
    				       sf->block_ctx->start,
    				       sf->block_ctx->dev->name);
    				goto one_stack_frame_backwards;
    			}
    			btrfsic_read_from_block_data(
    				sf->block_ctx, &key_ptr, key_ptr_offset,
    				sizeof(struct btrfs_key_ptr));
    			next_bytenr = le64_to_cpu(key_ptr.blockptr);
    
    
    			sf->error = btrfsic_create_link_to_next_block(
    					state,
    					sf->block,
    					sf->block_ctx,
    					next_bytenr,
    					sf->limit_nesting,
    					&sf->next_block_ctx,
    					&sf->next_block,
    					force_iodone_flag,
    					&sf->num_copies,
    					&sf->mirror_num,
    					&key_ptr.key,
    					le64_to_cpu(key_ptr.generation));
    
    			if (sf->error)
    				goto one_stack_frame_backwards;
    
    			if (NULL != sf->next_block) {
    				struct btrfs_header *const next_hdr =
    				    (struct btrfs_header *)
    				    sf->next_block_ctx.datav[0];

    				next_stack = btrfsic_stack_frame_alloc();
    				if (NULL == next_stack)
    					goto one_stack_frame_backwards;
    
    				next_stack->i = -1;
    				next_stack->block = sf->next_block;
    				next_stack->block_ctx = &sf->next_block_ctx;
    				next_stack->next_block = NULL;
    				next_stack->hdr = next_hdr;
    				next_stack->limit_nesting =
    				    sf->limit_nesting - 1;
    				next_stack->prev = sf;
    				sf = next_stack;
    				goto continue_with_new_stack_frame;
    			}
    
    			goto continue_with_current_node_stack_frame;
    		}
    	}
    
    one_stack_frame_backwards:
    	if (NULL != sf->prev) {
    		struct btrfsic_stack_frame *const prev = sf->prev;
    
    		/* the one for the initial block is freed in the caller */
    		btrfsic_release_block_ctx(sf->block_ctx);
    
    		if (sf->error) {
    			prev->error = sf->error;
    			btrfsic_stack_frame_free(sf);
    			sf = prev;
    			goto one_stack_frame_backwards;
    		}
    
    		btrfsic_stack_frame_free(sf);
    		sf = prev;
    		goto continue_with_new_stack_frame;
    	} else {
    		BUG_ON(&initial_stack_frame != sf);
    	}
    
    	return sf->error;
    }
    
    
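    /*
     * Copy len bytes, starting at the given offset within the (possibly
     * multi-page) block described by block_ctx, into the buffer dstv.
     */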
    static void btrfsic_read_from_block_data(
    	struct btrfsic_block_data_ctx *block_ctx,
    	void *dstv, u32 offset, size_t len)
    {
    	size_t cur;
    	size_t offset_in_page;
    	char *kaddr;
    	char *dst = (char *)dstv;
    	size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
    	unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
    
    	WARN_ON(offset + len > block_ctx->len);
    	offset_in_page = (start_offset + offset) &
    			 ((unsigned long)PAGE_CACHE_SIZE - 1);
    
    	while (len > 0) {
    		cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
    		BUG_ON(i >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
    			    PAGE_CACHE_SHIFT);
    		kaddr = block_ctx->datav[i];
    		memcpy(dst, kaddr + offset_in_page, cur);
    
    		dst += cur;
    		len -= cur;
    		offset_in_page = 0;
    		i++;
    	}
    }
    
    
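    /*
     * Map the referenced block at next_bytenr for the current mirror, look it
     * up in (or add it to) the block hash table, and record a link from
     * 'block' to it. If the nesting limit is not yet reached and the link was
     * newly allocated, the block contents are read and *next_blockp is set so
     * that the caller can descend into it; otherwise *next_blockp stays NULL.
     */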
    static int btrfsic_create_link_to_next_block(
    		struct btrfsic_state *state,
    		struct btrfsic_block *block,
    		struct btrfsic_block_data_ctx *block_ctx,
    		u64 next_bytenr,
    		int limit_nesting,
    		struct btrfsic_block_data_ctx *next_block_ctx,
    		struct btrfsic_block **next_blockp,
    		int force_iodone_flag,
    		int *num_copiesp, int *mirror_nump,
    		struct btrfs_disk_key *disk_key,
    		u64 parent_generation)
    {
    	struct btrfsic_block *next_block = NULL;
    	int ret;
    	struct btrfsic_block_link *l;
    	int did_alloc_block_link;
    	int block_was_created;
    
    	*next_blockp = NULL;
    	if (0 == *num_copiesp) {
    		*num_copiesp =
    		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
    				     next_bytenr, state->metablock_size);
    
    		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
    			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
    			       (unsigned long long)next_bytenr, *num_copiesp);
    		*mirror_nump = 1;
    	}
    
    	if (*mirror_nump > *num_copiesp)
    		return 0;
    
    	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    		printk(KERN_INFO
    		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
    		       *mirror_nump);
    	ret = btrfsic_map_block(state, next_bytenr, state->metablock_size,
    				next_block_ctx, *mirror_nump);
    	if (ret) {
    		printk(KERN_INFO
    		       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
    		       (unsigned long long)next_bytenr, *mirror_nump);
    		btrfsic_release_block_ctx(next_block_ctx);
    		*next_blockp = NULL;
    		return -1;
    	}
    
    	next_block = btrfsic_block_lookup_or_add(state,
    						 next_block_ctx, "referenced ",
    						 1, force_iodone_flag,
    						 !force_iodone_flag,
    						 *mirror_nump,
    						 &block_was_created);
    	if (NULL == next_block) {
    		btrfsic_release_block_ctx(next_block_ctx);
    		*next_blockp = NULL;
    		return -1;
    	}
    	if (block_was_created) {
    		l = NULL;
    		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
    	} else {
    		if (next_block->logical_bytenr != next_bytenr &&
    		    !(!next_block->is_metadata &&
    		      0 == next_block->logical_bytenr)) {
    			printk(KERN_INFO
    			       "Referenced block @%llu (%s/%llu/%d)"
    			       " found in hash table, %c,"
    			       " bytenr mismatch (!= stored %llu).\n",
    			       (unsigned long long)next_bytenr,
    			       next_block_ctx->dev->name,
    			       (unsigned long long)next_block_ctx->dev_bytenr,
    			       *mirror_nump,
    			       btrfsic_get_block_type(state, next_block),
    			       (unsigned long long)next_block->logical_bytenr);
    		} else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    			printk(KERN_INFO
    			       "Referenced block @%llu (%s/%llu/%d)"
    			       " found in hash table, %c.\n",
    			       (unsigned long long)next_bytenr,
    			       next_block_ctx->dev->name,
    			       (unsigned long long)next_block_ctx->dev_bytenr,
    			       *mirror_nump,
    			       btrfsic_get_block_type(state, next_block));
    		next_block->logical_bytenr = next_bytenr;
    
    		next_block->mirror_num = *mirror_nump;
    		l = btrfsic_block_link_hashtable_lookup(
    				next_block_ctx->dev->bdev,
    				next_block_ctx->dev_bytenr,
    				block_ctx->dev->bdev,
    				block_ctx->dev_bytenr,
    				&state->block_link_hashtable);
    	}
    
    	next_block->disk_key = *disk_key;
    	if (NULL == l) {
    		l = btrfsic_block_link_alloc();
    		if (NULL == l) {
    			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
    			btrfsic_release_block_ctx(next_block_ctx);
    			*next_blockp = NULL;
    			return -1;
    		}
    
    		did_alloc_block_link = 1;
    		l->block_ref_to = next_block;
    		l->block_ref_from = block;
    		l->ref_cnt = 1;
    		l->parent_generation = parent_generation;
    
    		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    			btrfsic_print_add_link(state, l);
    
    		list_add(&l->node_ref_to, &block->ref_to_list);
    		list_add(&l->node_ref_from, &next_block->ref_from_list);
    
    		btrfsic_block_link_hashtable_add(l,
    						 &state->block_link_hashtable);
    	} else {
    		did_alloc_block_link = 0;
    		if (0 == limit_nesting) {
    			l->ref_cnt++;
    			l->parent_generation = parent_generation;
    			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    				btrfsic_print_add_link(state, l);
    		}
    	}
    
    	if (limit_nesting > 0 && did_alloc_block_link) {
    		ret = btrfsic_read_block(state, next_block_ctx);
    
    		if (ret < (int)next_block_ctx->len) {
    
    			printk(KERN_INFO
    			       "btrfsic: read block @logical %llu failed!\n",
    			       (unsigned long long)next_bytenr);
    			btrfsic_release_block_ctx(next_block_ctx);
    			*next_blockp = NULL;
    			return -1;
    		}
    
    		*next_blockp = next_block;
    	} else {
    		*next_blockp = NULL;
    	}
    	(*mirror_nump)++;
    
    	return 0;
    }
    
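    /*
     * Process a BTRFS_EXTENT_DATA_KEY item: for each chunk of a regular,
     * non-hole extent, map every mirror of the referenced data blocks and add
     * links from the leaf block to them.
     */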
    static int btrfsic_handle_extent_data(
    		struct btrfsic_state *state,
    		struct btrfsic_block *block,
    		struct btrfsic_block_data_ctx *block_ctx,
    		u32 item_offset, int force_iodone_flag)
    {
    	int ret;
    
    	struct btrfs_file_extent_item file_extent_item;
    	u64 file_extent_item_offset;
    	u64 next_bytenr;
    	u64 num_bytes;
    	u64 generation;
    
    	struct btrfsic_block_link *l;
    
    
    	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
    				  item_offset;
    	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
    	    block_ctx->len) {
    		printk(KERN_INFO
    		       "btrfsic: file item out of bounds at logical %llu, dev %s\n",
    		       block_ctx->start, block_ctx->dev->name);
    		return -1;
    	}
    	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
    				     file_extent_item_offset,
    				     sizeof(struct btrfs_file_extent_item));
    	next_bytenr = le64_to_cpu(file_extent_item.disk_bytenr) +
    		      le64_to_cpu(file_extent_item.offset);
    	generation = le64_to_cpu(file_extent_item.generation);
    	num_bytes = le64_to_cpu(file_extent_item.num_bytes);

    	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
    		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
    		       " offset = %llu, num_bytes = %llu\n",
    		       file_extent_item.type,
    		       (unsigned long long)
    		       le64_to_cpu(file_extent_item.disk_bytenr),
    		       (unsigned long long)le64_to_cpu(file_extent_item.offset),
    		       (unsigned long long)num_bytes);
    	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
    	    ((u64)0) == le64_to_cpu(file_extent_item.disk_bytenr))
    		return 0;

    	while (num_bytes > 0) {
    		u32 chunk_len;
    		int num_copies;
    		int mirror_num;
    
    
    		if (num_bytes > state->datablock_size)
    			chunk_len = state->datablock_size;
    		else
    			chunk_len = num_bytes;
    
    		num_copies =
    		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
    				     next_bytenr, state->datablock_size);
    
    		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
    			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
    			       (unsigned long long)next_bytenr, num_copies);
    		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
    			struct btrfsic_block_data_ctx next_block_ctx;
    			struct btrfsic_block *next_block;
    			int block_was_created;
    
    			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    				printk(KERN_INFO "btrfsic_handle_extent_data("
    				       "mirror_num=%d)\n", mirror_num);
    			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
    				printk(KERN_INFO
    				       "\tdisk_bytenr = %llu, num_bytes %u\n",
    				       (unsigned long long)next_bytenr,
    				       chunk_len);
    			ret = btrfsic_map_block(state, next_bytenr,
    						chunk_len, &next_block_ctx,
    						mirror_num);
    			if (ret) {
    				printk(KERN_INFO
    				       "btrfsic: btrfsic_map_block(@%llu,"
    				       " mirror=%d) failed!\n",
    				       (unsigned long long)next_bytenr,
    				       mirror_num);
    				return -1;
    			}
    
    			next_block = btrfsic_block_lookup_or_add(
    					state,
    					&next_block_ctx,
    					"referenced ",
    					0,
    					force_iodone_flag,
    					!force_iodone_flag,
    					mirror_num,
    					&block_was_created);
    			if (NULL == next_block) {
    				printk(KERN_INFO
    				       "btrfsic: error, kmalloc failed!\n");
    				btrfsic_release_block_ctx(&next_block_ctx);
    				return -1;
    			}
    			if (!block_was_created) {
    				if (next_block->logical_bytenr != next_bytenr &&
    				    !(!next_block->is_metadata &&
    				      0 == next_block->logical_bytenr)) {
    					printk(KERN_INFO
    					       "Referenced block"
    					       " @%llu (%s/%llu/%d)"
    					       " found in hash table, D,"
    					       " bytenr mismatch"
    					       " (!= stored %llu).\n",
    					       (unsigned long long)next_bytenr,
    					       next_block_ctx.dev->name,
    					       (unsigned long long)
    					       next_block_ctx.dev_bytenr,
    					       mirror_num,
    					       (unsigned long long)
    					       next_block->logical_bytenr);
    				}
    				next_block->logical_bytenr = next_bytenr;
    				next_block->mirror_num = mirror_num;
    			}
    
    			l = btrfsic_block_link_lookup_or_add(state,
    							     &next_block_ctx,
    							     next_block, block,
    							     generation);
    			btrfsic_release_block_ctx(&next_block_ctx);
    			if (NULL == l)
    				return -1;
    		}
    
    		next_bytenr += chunk_len;
    		num_bytes -= chunk_len;
    	}
    
    	return 0;
    }
    
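    /*
     * Translate a logical bytenr to a device and physical offset for the
     * given mirror and fill in block_ctx_out; the block data itself is not
     * read here.
     */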
    static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
    			     struct btrfsic_block_data_ctx *block_ctx_out,
    			     int mirror_num)
    {
    	int ret;
    	u64 length;
    	struct btrfs_bio *multi = NULL;
    	struct btrfs_device *device;
    
    	length = len;
    	ret = btrfs_map_block(&state->root->fs_info->mapping_tree, READ,
    			      bytenr, &length, &multi, mirror_num);
    
    	device = multi->stripes[0].dev;
    	block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
    	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
    	block_ctx_out->start = bytenr;
    	block_ctx_out->len = len;
    
    	block_ctx_out->datav = NULL;
    	block_ctx_out->pagev = NULL;
    	block_ctx_out->mem_to_free = NULL;
    
    
    	if (0 == ret)
    		kfree(multi);
    	if (NULL == block_ctx_out->dev) {
    		ret = -ENXIO;
    		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
    	}
    
    	return ret;
    }
    
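    /*
     * Like btrfsic_map_block(), but for a superblock copy whose device and
     * device offset are already known.
     */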
    static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
    				  u32 len, struct block_device *bdev,
    				  struct btrfsic_block_data_ctx *block_ctx_out)
    {
    	block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
    	block_ctx_out->dev_bytenr = bytenr;
    	block_ctx_out->start = bytenr;
    	block_ctx_out->len = len;
    
    	block_ctx_out->datav = NULL;
    	block_ctx_out->pagev = NULL;
    	block_ctx_out->mem_to_free = NULL;
    
    	if (NULL != block_ctx_out->dev) {
    		return 0;
    	} else {
    		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
    		return -ENXIO;
    	}
    }
    
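    /*
     * Undo btrfsic_read_block(): unmap and free all pages attached to the
     * block context and release the backing allocation.
     */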
    static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
    {
    
    	if (block_ctx->mem_to_free) {
    		unsigned int num_pages;
    
    		BUG_ON(!block_ctx->datav);
    		BUG_ON(!block_ctx->pagev);
    		num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
    			    PAGE_CACHE_SHIFT;
    		while (num_pages > 0) {
    			num_pages--;
    			if (block_ctx->datav[num_pages]) {
    				kunmap(block_ctx->pagev[num_pages]);
    				block_ctx->datav[num_pages] = NULL;
    			}
    			if (block_ctx->pagev[num_pages]) {
    				__free_page(block_ctx->pagev[num_pages]);
    				block_ctx->pagev[num_pages] = NULL;
    			}
    		}
    
    		kfree(block_ctx->mem_to_free);
    		block_ctx->mem_to_free = NULL;
    		block_ctx->pagev = NULL;
    		block_ctx->datav = NULL;
    
    	}
    }
    
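    /*
     * Allocate pages for the block context, read the block from the device
     * with one or more bios, and kmap the pages so the data is reachable via
     * block_ctx->datav[]. Returns block_ctx->len on success, -1 on error.
     */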
    static int btrfsic_read_block(struct btrfsic_state *state,
    			      struct btrfsic_block_data_ctx *block_ctx)
    {
    
    	unsigned int num_pages;
    	unsigned int i;
    	u64 dev_bytenr;
    	int ret;
    
    	BUG_ON(block_ctx->datav);
    	BUG_ON(block_ctx->pagev);
    	BUG_ON(block_ctx->mem_to_free);
    	if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
    
    		printk(KERN_INFO
    		       "btrfsic: read_block() with unaligned bytenr %llu\n",
    		       (unsigned long long)block_ctx->dev_bytenr);
    		return -1;
    	}
    
    
    	num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
    		    PAGE_CACHE_SHIFT;
    	block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
    					  sizeof(*block_ctx->pagev)) *
    					 num_pages, GFP_NOFS);
    	if (!block_ctx->mem_to_free)
    		return -1;
    	block_ctx->datav = block_ctx->mem_to_free;
    	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
    	for (i = 0; i < num_pages; i++) {
    		block_ctx->pagev[i] = alloc_page(GFP_NOFS);
    		if (!block_ctx->pagev[i])
    			return -1;
    	}

    	dev_bytenr = block_ctx->dev_bytenr;
    	for (i = 0; i < num_pages;) {
    		struct bio *bio;
    		unsigned int j;
    		DECLARE_COMPLETION_ONSTACK(complete);
    
    		bio = bio_alloc(GFP_NOFS, num_pages - i);
    		if (!bio) {
    			printk(KERN_INFO
    			       "btrfsic: bio_alloc() for %u pages failed!\n",
    			       num_pages - i);
    			return -1;
    		}
    		bio->bi_bdev = block_ctx->dev->bdev;
    		bio->bi_sector = dev_bytenr >> 9;
    		bio->bi_end_io = btrfsic_complete_bio_end_io;
    		bio->bi_private = &complete;
    
    		for (j = i; j < num_pages; j++) {
    			ret = bio_add_page(bio, block_ctx->pagev[j],
    					   PAGE_CACHE_SIZE, 0);
    			if (PAGE_CACHE_SIZE != ret)
    				break;
    		}
    		if (j == i) {
    			printk(KERN_INFO
    			       "btrfsic: error, failed to add a single page!\n");
    			return -1;
    		}
    		submit_bio(READ, bio);
    
    		/* this will also unplug the queue */
    		wait_for_completion(&complete);
    
    		if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
    			printk(KERN_INFO
    			       "btrfsic: read error at logical %llu dev %s!\n",
    			       block_ctx->start, block_ctx->dev->name);
    			bio_put(bio);
    			return -1;
    		}
    		bio_put(bio);
    		dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
    		i = j;
    	}
    	for (i = 0; i < num_pages; i++) {
    		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
    		if (!block_ctx->datav[i]) {
    			printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
    			       block_ctx->dev->name);
    			return -1;
    		}
    	}

    	return block_ctx->len;
    }

    static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
    {
    	complete((struct completion *)bio->bi_private);
    }
    
    
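    /*
     * Dump all blocks known to the integrity checker together with their
     * ref-to and ref-from links.
     */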
    static void btrfsic_dump_database(struct btrfsic_state *state)
    {
    	struct list_head *elem_all;
    
    	BUG_ON(NULL == state);
    
    	printk(KERN_INFO "all_blocks_list:\n");
    	list_for_each(elem_all, &state->all_blocks_list) {
    		const struct btrfsic_block *const b_all =
    		    list_entry(elem_all, struct btrfsic_block,
    			       all_blocks_node);
    		struct list_head *elem_ref_to;
    		struct list_head *elem_ref_from;
    
    		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
    		       btrfsic_get_block_type(state, b_all),
    		       (unsigned long long)b_all->logical_bytenr,
    		       b_all->dev_state->name,
    		       (unsigned long long)b_all->dev_bytenr,
    		       b_all->mirror_num);
    
    		list_for_each(elem_ref_to, &b_all->ref_to_list) {
    			const struct btrfsic_block_link *const l =
    			    list_entry(elem_ref_to,
    				       struct btrfsic_block_link,
    				       node_ref_to);
    
    			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
    			       " refers %u* to"
    			       " %c @%llu (%s/%llu/%d)\n",
    			       btrfsic_get_block_type(state, b_all),
    			       (unsigned long long)b_all->logical_bytenr,
    			       b_all->dev_state->name,
    			       (unsigned long long)b_all->dev_bytenr,
    			       b_all->mirror_num,
    			       l->ref_cnt,
    			       btrfsic_get_block_type(state, l->block_ref_to),
    			       (unsigned long long)
    			       l->block_ref_to->logical_bytenr,
    			       l->block_ref_to->dev_state->name,
    			       (unsigned long long)l->block_ref_to->dev_bytenr,
    			       l->block_ref_to->mirror_num);
    		}
    
    		list_for_each(elem_ref_from, &b_all->ref_from_list) {
    			const struct btrfsic_block_link *const l =
    			    list_entry(elem_ref_from,
    				       struct btrfsic_block_link,
    				       node_ref_from);
    
    			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
    			       " is ref %u* from"
    			       " %c @%llu (%s/%llu/%d)\n",
    			       btrfsic_get_block_type(state, b_all),
    			       (unsigned long long)b_all->logical_bytenr,
    			       b_all->dev_state->name,
    			       (unsigned long long)b_all->dev_bytenr,
    			       b_all->mirror_num,
    			       l->ref_cnt,
    			       btrfsic_get_block_type(state, l->block_ref_from),
    			       (unsigned long long)
    			       l->block_ref_from->logical_bytenr,
    			       l->block_ref_from->dev_state->name,
    			       (unsigned long long)
    			       l->block_ref_from->dev_bytenr,
    			       l->block_ref_from->mirror_num);
    		}
    
    		printk(KERN_INFO "\n");
    	}
    }
    
    /*
     * Test whether the disk block contains a tree block (leaf or node)
     * (note that this test fails for the super block)
     */
    static int btrfsic_test_for_metadata(struct btrfsic_state *state,
    				     char **datav, unsigned int num_pages)
    {
    	struct btrfs_header *h;
    	u8 csum[BTRFS_CSUM_SIZE];
    	u32 crc = ~(u32)0;
    	unsigned int i;
    
    	if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
    		return 1; /* not metadata */
    	num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
    	h = (struct btrfs_header *)datav[0];

    	if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
    		return 1; /* not metadata */

    	for (i = 0; i < num_pages; i++) {
    		u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
    		size_t sublen = i ? PAGE_CACHE_SIZE :
    				    (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
    
    		crc = crc32c(crc, data, sublen);
    	}
    
    	btrfs_csum_final(crc, csum);
    	if (memcmp(csum, h->csum, state->csum_size))
    		return 1; /* not metadata */

    	return 0; /* is metadata */
    }
    
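    /*
     * Called for each block that is written to disk: classify it as
     * superblock, other metadata or data, compare it against the state
     * recorded in the hash tables, and warn when a block is overwritten that
     * is still referenced by the most recent superblock or whose previous
     * write has not yet completed.
     */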
    static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
    					  u64 dev_bytenr, char **mapped_datav,
    					  unsigned int num_pages,
    					  struct bio *bio, int *bio_is_patched,
    					  struct buffer_head *bh,
    					  int submit_bio_bh_rw)
    {
    	int is_metadata;
    	struct btrfsic_block *block;
    	struct btrfsic_block_data_ctx block_ctx;
    	int ret;
    	struct btrfsic_state *state = dev_state->state;
    	struct block_device *bdev = dev_state->bdev;
    
    	unsigned int processed_len;
    
    
    	if (NULL != bio_is_patched)
    		*bio_is_patched = 0;
    
    
    again:
    	if (num_pages == 0)
    		return;
    
    	processed_len = 0;
    	is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
    						      num_pages));
    
    
    	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
    					       &state->block_hashtable);
    	if (NULL != block) {
    		u64 bytenr = 0;
    		struct list_head *elem_ref_to;
    		struct list_head *tmp_ref_to;
    
    		if (block->is_superblock) {
    			bytenr = le64_to_cpu(((struct btrfs_super_block *)
    					      mapped_datav[0])->bytenr);
    			if (num_pages * PAGE_CACHE_SIZE <
    			    BTRFS_SUPER_INFO_SIZE) {
    				printk(KERN_INFO
    				       "btrfsic: cannot work with too short bios!\n");
    				return;
    			}
    
    			is_metadata = 1;
    
    			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
    			processed_len = BTRFS_SUPER_INFO_SIZE;
    
    			if (state->print_mask &
    			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
    				printk(KERN_INFO
    				       "[before new superblock is written]:\n");
    				btrfsic_dump_tree_sub(state, block, 0);
    			}
    		}
    		if (is_metadata) {
    			if (!block->is_superblock) {
    
    				if (num_pages * PAGE_CACHE_SIZE <
    				    state->metablock_size) {
    					printk(KERN_INFO
    					       "btrfsic: cannot work with too short bios!\n");
    					return;
    				}
    				processed_len = state->metablock_size;
    
    				bytenr = le64_to_cpu(((struct btrfs_header *)
    						      mapped_datav[0])->bytenr);
    
    				btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
    							       dev_state,
    							       dev_bytenr);
    			}
    			if (block->logical_bytenr != bytenr) {
    				printk(KERN_INFO
    				       "Written block @%llu (%s/%llu/%d)"
    				       " found in hash table, %c,"
    				       " bytenr mismatch"
    				       " (!= stored %llu).\n",
    				       (unsigned long long)bytenr,
    				       dev_state->name,
    				       (unsigned long long)dev_bytenr,
    				       block->mirror_num,
    				       btrfsic_get_block_type(state, block),
    				       (unsigned long long)
    				       block->logical_bytenr);
    				block->logical_bytenr = bytenr;
    			} else if (state->print_mask &
    				   BTRFSIC_PRINT_MASK_VERBOSE)
    				printk(KERN_INFO
    				       "Written block @%llu (%s/%llu/%d)"
    				       " found in hash table, %c.\n",
    				       (unsigned long long)bytenr,
    				       dev_state->name,
    				       (unsigned long long)dev_bytenr,
    				       block->mirror_num,
    				       btrfsic_get_block_type(state, block));
    		} else {
    
    			if (num_pages * PAGE_CACHE_SIZE <
    			    state->datablock_size) {
    				printk(KERN_INFO
    				       "btrfsic: cannot work with too short bios!\n");
    				return;
    			}
    			processed_len = state->datablock_size;
    
    			bytenr = block->logical_bytenr;
    			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    				printk(KERN_INFO
    				       "Written block @%llu (%s/%llu/%d)"
    				       " found in hash table, %c.\n",
    				       (unsigned long long)bytenr,
    				       dev_state->name,
    				       (unsigned long long)dev_bytenr,
    				       block->mirror_num,
    				       btrfsic_get_block_type(state, block));
    		}
    
    		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
    			printk(KERN_INFO
    			       "ref_to_list: %cE, ref_from_list: %cE\n",
    			       list_empty(&block->ref_to_list) ? ' ' : '!',
    			       list_empty(&block->ref_from_list) ? ' ' : '!');
    		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
    			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
    			       " @%llu (%s/%llu/%d), old(gen=%llu,"
    			       " objectid=%llu, type=%d, offset=%llu),"
    			       " new(gen=%llu),"
    			       " which is referenced by most recent superblock"
    			       " (superblockgen=%llu)!\n",
    			       btrfsic_get_block_type(state, block),
    			       (unsigned long long)bytenr,
    			       dev_state->name,
    			       (unsigned long long)dev_bytenr,
    			       block->mirror_num,
    			       (unsigned long long)block->generation,
    			       (unsigned long long)
    			       le64_to_cpu(block->disk_key.objectid),
    			       block->disk_key.type,
    			       (unsigned long long)
    			       le64_to_cpu(block->disk_key.offset),
    			       (unsigned long long)
    			       le64_to_cpu(((struct btrfs_header *)
    					    mapped_datav[0])->generation),
    			       (unsigned long long)
    			       state->max_superblock_generation);
    			btrfsic_dump_tree(state);
    		}
    
    		if (!block->is_iodone && !block->never_written) {
    			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
    			       " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
    			       " which is not yet iodone!\n",
    			       btrfsic_get_block_type(state, block),
    			       (unsigned long long)bytenr,
    			       dev_state->name,
    			       (unsigned long long)dev_bytenr,
    			       block->mirror_num,
    			       (unsigned long long)block->generation,
    			       (unsigned long long)
    			       le64_to_cpu(((struct btrfs_header *)
    					    mapped_datav[0])->generation));
    
    			/* it would not be safe to go on */
    			btrfsic_dump_tree(state);
    
    		}
    
    		/*
    		 * Clear all references of this block. Do not free
    		 * the block itself even if is not referenced anymore
    		 * because it still carries valuable information
    		 * like whether it was ever written and IO completed.
    		 */
    		list_for_each_safe(elem_ref_to, tmp_ref_to,
    				   &block->ref_to_list) {
    			struct btrfsic_block_link *const l =