extent-tree.c
		btrfs_set_extent_flags(leaf, ei, flags);
    	}
    
    	if (extent_op->update_key) {
    		struct btrfs_tree_block_info *bi;
    		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
    		bi = (struct btrfs_tree_block_info *)(ei + 1);
    		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
    	}
    }
    
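/*
 * look up the extent item for a delayed ref node and apply the pending
 * flag and/or key update described by @extent_op to it in place
 */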
    static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_delayed_ref_node *node,
    				 struct btrfs_delayed_extent_op *extent_op)
    {
    	struct btrfs_key key;
    	struct btrfs_path *path;
    	struct btrfs_extent_item *ei;
    	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
    
    	path = btrfs_alloc_path();
    	if (!path)
    		return -ENOMEM;
    
    	key.objectid = node->bytenr;
    	key.type = BTRFS_EXTENT_ITEM_KEY;
    	key.offset = node->num_bytes;
    
    	path->reada = 1;
    	path->leave_spinning = 1;
    	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
    				path, 0, 1);
    	if (ret < 0) {
    		err = ret;
    		goto out;
    	}
    	if (ret > 0) {
    		err = -EIO;
    		goto out;
    	}
    
    	leaf = path->nodes[0];
    	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    	if (item_size < sizeof(*ei)) {
    		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
    					     path, (u64)-1, 0);
    		if (ret < 0) {
    			err = ret;
    			goto out;
    		}
    		leaf = path->nodes[0];
    		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
    	}
    #endif
    	BUG_ON(item_size < sizeof(*ei));
    	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    	__run_delayed_extent_op(extent_op, leaf, ei);
    
    	btrfs_mark_buffer_dirty(leaf);
    out:
    	btrfs_free_path(path);
	return err;
}

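/*
 * process a single delayed ref for a tree block: insert the reserved
 * extent, add a backref or drop a backref, depending on the ref action
 */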
    static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
    				struct btrfs_root *root,
    				struct btrfs_delayed_ref_node *node,
    				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
    	struct btrfs_key ins;
    	u64 parent = 0;
    	u64 ref_root = 0;
    
    	ins.objectid = node->bytenr;
    	ins.offset = node->num_bytes;
    	ins.type = BTRFS_EXTENT_ITEM_KEY;
    
    	ref = btrfs_delayed_node_to_tree_ref(node);
    	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
    		parent = ref->parent;
    	else
    		ref_root = ref->root;
    
    	BUG_ON(node->ref_mod != 1);
    	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
    		BUG_ON(!extent_op || !extent_op->update_flags ||
    		       !extent_op->update_key);
    		ret = alloc_reserved_tree_block(trans, root,
    						parent, ref_root,
    						extent_op->flags_to_set,
    						&extent_op->key,
    						ref->level, &ins);
    	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
    		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
    					     node->num_bytes, parent, ref_root,
    					     ref->level, 0, 1, extent_op);
    	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
    		ret = __btrfs_free_extent(trans, root, node->bytenr,
    					  node->num_bytes, parent, ref_root,
    					  ref->level, 0, 1, extent_op);
    	} else {
    		BUG();
    	}
    
    	return ret;
    }
    
    /* helper function to actually process a single delayed ref entry */
    
    static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
    			       struct btrfs_root *root,
    			       struct btrfs_delayed_ref_node *node,
    			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
    		/*
    		 * we've hit the end of the chain and we were supposed
    		 * to insert this extent into the tree.  But, it got
    		 * deleted before we ever needed to insert it, so all
    		 * we have to do is clean up the accounting
    		 */
    
    		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		return ret;
	}

    	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
    	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
    		ret = run_delayed_tree_ref(trans, root, node, extent_op,
    					   insert_reserved);
    	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
    		 node->type == BTRFS_SHARED_DATA_REF_KEY)
    		ret = run_delayed_data_ref(trans, root, node, extent_op,
    					   insert_reserved);
    	else
    		BUG();
    	return ret;
    
    }
    
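/*
 * pick the next delayed ref to run for a head.  ADD refs are returned
 * before DROP refs so the reference count can't hit zero while updates
 * are still queued
 */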
    static noinline struct btrfs_delayed_ref_node *
    select_delayed_ref(struct btrfs_delayed_ref_head *head)
    {
    	struct rb_node *node;
    	struct btrfs_delayed_ref_node *ref;
    	int action = BTRFS_ADD_DELAYED_REF;
    again:
    	/*
    	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
    	 * this prevents ref count from going down to zero when
    	 * there still are pending delayed ref.
    	 */
    	node = rb_prev(&head->node.rb_node);
    	while (1) {
    		if (!node)
    			break;
    		ref = rb_entry(node, struct btrfs_delayed_ref_node,
    				rb_node);
    		if (ref->bytenr != head->node.bytenr)
    			break;
    
		if (ref->action == action)
			return ref;
    		node = rb_prev(node);
    	}
    	if (action == BTRFS_ADD_DELAYED_REF) {
    		action = BTRFS_DROP_DELAYED_REF;
    		goto again;
    	}
    	return NULL;
    }
    
    
    /*
     * Returns 0 on success or if called with an already aborted transaction.
     * Returns -ENOMEM or -EIO on failure and will abort the transaction.
     */
    
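/*
 * walk the heads queued on @cluster and run their delayed refs.  the
 * delayed_refs spinlock must be held on entry; it is dropped and retaken
 * around the pieces of work that can block
 */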
    static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
    				       struct btrfs_root *root,
    				       struct list_head *cluster)
    
    {
    	struct btrfs_delayed_ref_root *delayed_refs;
    	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

    	delayed_refs = &trans->transaction->delayed_refs;
    	while (1) {
    		if (!locked_ref) {
    
    			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

    			locked_ref = list_entry(cluster->next,
    				     struct btrfs_delayed_ref_head, cluster);
    
    			/* grab the lock that says we are going to process
    			 * all the refs for this head */
    			ret = btrfs_delayed_ref_lock(trans, locked_ref);
    
    			/*
    			 * we may have dropped the spin lock to get the head
    			 * mutex lock, and that might have given someone else
    			 * time to free the head.  If that's true, it has been
    			 * removed from our list and we can move on.
    			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

    		/*
    		 * We need to try and merge add/drops of the same ref since we
    		 * can run into issues with relocate dropping the implicit ref
    		 * and then it being added back again before the drop can
    		 * finish.  If we merged anything we need to re-loop so we can
    		 * get a good ref.
    		 */
    		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
    					 locked_ref);
    
    
    		/*
    		 * locked_ref is the head node, so we have to go one
    		 * node back for any delayed ref updates
    		 */
    		ref = select_delayed_ref(locked_ref);
    
		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
    
    			/*
    			 * there are still refs with lower seq numbers in the
    			 * process of being added. Don't run this ref yet.
    			 */
    			list_del_init(&locked_ref->cluster);
    
    			btrfs_delayed_ref_unlock(locked_ref);
    
    			locked_ref = NULL;
    			delayed_refs->num_heads_ready++;
    			spin_unlock(&delayed_refs->lock);
    			cond_resched();
    			spin_lock(&delayed_refs->lock);
    			continue;
    		}
    
    
    		/*
    		 * record the must insert reserved flag before we
    		 * drop the spin lock.
    		 */
    		must_insert_reserved = locked_ref->must_insert_reserved;
    		locked_ref->must_insert_reserved = 0;
    
    		extent_op = locked_ref->extent_op;
    		locked_ref->extent_op = NULL;
    
    
    		if (!ref) {
    			/* All delayed refs have been processed, Go ahead
    			 * and send the head node to run_one_delayed_ref,
    			 * so that any accounting fixes can happen
    			 */
    			ref = &locked_ref->node;
    
    
			if (extent_op && must_insert_reserved) {
				btrfs_free_delayed_extent_op(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				btrfs_free_delayed_extent_op(extent_op);

				if (ret) {
					printk(KERN_DEBUG
					       "btrfs: run_delayed_extent_op "
					       "returned %d\n", ret);
					spin_lock(&delayed_refs->lock);
					btrfs_delayed_ref_unlock(locked_ref);
					return ret;
				}

				goto next;
			}
		}

    		ref->in_tree = 0;
    		rb_erase(&ref->rb_node, &delayed_refs->root);
    		delayed_refs->num_entries--;
    
    		if (!btrfs_delayed_ref_is_head(ref)) {
    
    			/*
    			 * when we play the delayed ref, also correct the
    			 * ref_mod on head
    			 */
    			switch (ref->action) {
    			case BTRFS_ADD_DELAYED_REF:
    			case BTRFS_ADD_DELAYED_EXTENT:
    				locked_ref->node.ref_mod -= ref->ref_mod;
    				break;
    			case BTRFS_DROP_DELAYED_REF:
    				locked_ref->node.ref_mod += ref->ref_mod;
    				break;
    			default:
    				WARN_ON(1);
    			}
    		}
    
		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			btrfs_delayed_ref_unlock(locked_ref);
			btrfs_put_delayed_ref(ref);
			printk(KERN_DEBUG
			       "btrfs: run_one_delayed_ref returned %d\n", ret);
			spin_lock(&delayed_refs->lock);
			return ret;
		}

    		/*
    		 * If this node is a head, that means all the refs in this head
    		 * have been dealt with, and we will pick the next head to deal
    		 * with, so we must unlock the head and drop it from the cluster
    		 * list before we release it.
    		 */
    		if (btrfs_delayed_ref_is_head(ref)) {
    			list_del_init(&locked_ref->cluster);
    			btrfs_delayed_ref_unlock(locked_ref);
    			locked_ref = NULL;
    		}
    		btrfs_put_delayed_ref(ref);
		count++;
next:
		cond_resched();
    		spin_lock(&delayed_refs->lock);
    	}
    	return count;
    }
    
    
    #ifdef SCRAMBLE_DELAYED_REFS
    /*
     * Normally delayed refs get processed in ascending bytenr order. This
     * correlates in most cases to the order added. To expose dependencies on this
     * order, we start to process the tree in the middle instead of the beginning
     */
    static u64 find_middle(struct rb_root *root)
    {
    	struct rb_node *n = root->rb_node;
    	struct btrfs_delayed_ref_node *entry;
    	int alt = 1;
    	u64 middle;
    	u64 first = 0, last = 0;
    
    	n = rb_first(root);
    	if (n) {
    		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
    		first = entry->bytenr;
    	}
    	n = rb_last(root);
    	if (n) {
    		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
    		last = entry->bytenr;
    	}
    	n = root->rb_node;
    
    	while (n) {
    		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
    		WARN_ON(!entry->in_tree);
    
    		middle = entry->bytenr;
    
    		if (alt)
    			n = n->rb_left;
    		else
    			n = n->rb_right;
    
    		alt = 1 - alt;
    	}
    	return middle;
    }
    #endif
    
    
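/*
 * flush the qgroup reference updates queued on this transaction and drop
 * the tree mod log sequence element that was held while they accumulated
 */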
    int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
    					 struct btrfs_fs_info *fs_info)
    {
    	struct qgroup_update *qgroup_update;
    	int ret = 0;
    
    	if (list_empty(&trans->qgroup_ref_list) !=
    	    !trans->delayed_ref_elem.seq) {
    		/* list without seq or seq without list */
    		printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
    			list_empty(&trans->qgroup_ref_list) ? "" : " not",
    			trans->delayed_ref_elem.seq);
    		BUG();
    	}
    
    	if (!trans->delayed_ref_elem.seq)
    		return 0;
    
    	while (!list_empty(&trans->qgroup_ref_list)) {
    		qgroup_update = list_first_entry(&trans->qgroup_ref_list,
    						 struct qgroup_update, list);
    		list_del(&qgroup_update->list);
    		if (!ret)
    			ret = btrfs_qgroup_account_ref(
    					trans, fs_info, qgroup_update->node,
    					qgroup_update->extent_op);
    		kfree(qgroup_update);
    	}
    
    	btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
    
    	return ret;
    }
    
    
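/*
 * return 1 if the shared ref_seq counter has advanced at least @count
 * past @seq (or has wrapped back below it), 0 otherwise
 */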
    static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
    		      int count)
    {
    	int val = atomic_read(&delayed_refs->ref_seq);
    
    	if (val < seq || val >= seq + count)
    		return 1;
    	return 0;
    }
    
    
    /*
     * this starts processing the delayed reference count updates and
     * extent insertions we have queued up so far.  count can be
     * 0, which means to process everything in the tree at the start
     * of the run (but not newly added entries), or it can be some target
     * number you'd like to process.
    
     *
     * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
    int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
    			   struct btrfs_root *root, unsigned long count)
    {
    	struct rb_node *node;
    	struct btrfs_delayed_ref_root *delayed_refs;
    	struct btrfs_delayed_ref_node *ref;
    	struct list_head cluster;
	int ret;
	u64 delayed_start;
	int loops;
	int run_all = count == (unsigned long)-1;
    	int run_most = 0;
    
    	/* We'll clean this up in btrfs_cleanup_transaction */
    	if (trans->aborted)
    		return 0;
    
    
    	if (root == root->fs_info->extent_root)
    		root = root->fs_info->tree_root;
    
    
    	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
    
    
    	delayed_refs = &trans->transaction->delayed_refs;
    	INIT_LIST_HEAD(&cluster);
    
    	if (count == 0) {
    		count = delayed_refs->num_entries * 2;
    		run_most = 1;
    	}
    
    	if (!run_all && !run_most) {
    		int old;
    		int seq = atomic_read(&delayed_refs->ref_seq);
    
    progress:
    		old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
    		if (old) {
    			DEFINE_WAIT(__wait);
    			if (delayed_refs->num_entries < 16348)
    				return 0;
    
    			prepare_to_wait(&delayed_refs->wait, &__wait,
    					TASK_UNINTERRUPTIBLE);
    
    			old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
    			if (old) {
    				schedule();
    				finish_wait(&delayed_refs->wait, &__wait);
    
    				if (!refs_newer(delayed_refs, seq, 256))
    					goto progress;
    				else
    					return 0;
    			} else {
    				finish_wait(&delayed_refs->wait, &__wait);
    				goto again;
    			}
    		}
    
    	} else {
    		atomic_inc(&delayed_refs->procs_running_refs);
    	}
    
    
again:
	loops = 0;
	spin_lock(&delayed_refs->lock);
    
    #ifdef SCRAMBLE_DELAYED_REFS
    	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
    #endif
    
    
    	while (1) {
    		if (!(run_all || run_most) &&
    		    delayed_refs->num_heads_ready < 64)
    			break;
    
		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		delayed_start = delayed_refs->run_delayed_start;
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		if (ret < 0) {
			btrfs_release_ref_cluster(&cluster);
			spin_unlock(&delayed_refs->lock);
			btrfs_abort_transaction(trans, root, ret);
			atomic_dec(&delayed_refs->procs_running_refs);
			return ret;
		}

		atomic_add(ret, &delayed_refs->ref_seq);
    
    
    		count -= min_t(unsigned long, ret, count);
    
    		if (count == 0)
    			break;
    
    		if (delayed_start >= delayed_refs->run_delayed_start) {
    			if (loops == 0) {
    				/*
    				 * btrfs_find_ref_cluster looped. let's do one
    				 * more cycle. if we don't run any delayed ref
    				 * during that cycle (because we can't because
    				 * all of them are blocked), bail out.
    				 */
    				loops = 1;
    			} else {
    				/*
    				 * no runnable refs left, stop trying
    				 */
    				BUG_ON(run_all);
    				break;
    			}
    		}
		if (ret) {
			/* refs were run, let's reset staleness detection */
			loops = 0;
		}
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs)) {
    			spin_unlock(&delayed_refs->lock);
    			btrfs_create_pending_block_groups(trans, root);
    			spin_lock(&delayed_refs->lock);
    		}
    
    
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;
    
    		while (node) {
    			ref = rb_entry(node, struct btrfs_delayed_ref_node,
    				       rb_node);
    			if (btrfs_delayed_ref_is_head(ref)) {
    				struct btrfs_delayed_ref_head *head;
    
    				head = btrfs_delayed_node_to_head(ref);
    				atomic_inc(&ref->refs);
    
    				spin_unlock(&delayed_refs->lock);
    
    				/*
    				 * Mutex was contended, block until it's
    				 * released and try again
    				 */
    
    				mutex_lock(&head->mutex);
    				mutex_unlock(&head->mutex);
    
    				btrfs_put_delayed_ref(ref);
    
    				goto again;
    			}
    			node = rb_next(node);
    		}
    		spin_unlock(&delayed_refs->lock);
    		schedule_timeout(1);
		goto again;
	}
out:
	atomic_dec(&delayed_refs->procs_running_refs);
    	smp_mb();
    	if (waitqueue_active(&delayed_refs->wait))
    		wake_up(&delayed_refs->wait);
    
    
    	spin_unlock(&delayed_refs->lock);
    
	assert_qgroups_uptodate(trans);
	return 0;
}

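/*
 * queue a delayed extent op that rewrites the flags of an on-disk extent
 * item; the actual update happens when the delayed refs are run
 */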
    int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
    				struct btrfs_root *root,
    				u64 bytenr, u64 num_bytes, u64 flags,
    				int is_data)
    {
    	struct btrfs_delayed_extent_op *extent_op;
    	int ret;
    
    
    	extent_op = btrfs_alloc_delayed_extent_op();
    
    	if (!extent_op)
    		return -ENOMEM;
    
    	extent_op->flags_to_set = flags;
    	extent_op->update_flags = 1;
    	extent_op->update_key = 0;
    	extent_op->is_data = is_data ? 1 : 0;
    
    
    	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
    					  num_bytes, extent_op);
    
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);

    	return ret;
    }
    
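/*
 * check whether the delayed ref tree holds references to @bytenr other
 * than the single data ref for (root, objectid, offset).  returns 0 when
 * only the matching ref exists, 1 when other references may exist,
 * -ENOENT when no delayed ref head is queued for the extent and -EAGAIN
 * when the head mutex was contended and the caller should retry
 */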
    static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
    				      struct btrfs_root *root,
    				      struct btrfs_path *path,
    				      u64 objectid, u64 offset, u64 bytenr)
    {
    	struct btrfs_delayed_ref_head *head;
    	struct btrfs_delayed_ref_node *ref;
    	struct btrfs_delayed_data_ref *data_ref;
    	struct btrfs_delayed_ref_root *delayed_refs;
    	struct rb_node *node;
    	int ret = 0;
    
    	ret = -ENOENT;
    	delayed_refs = &trans->transaction->delayed_refs;
    	spin_lock(&delayed_refs->lock);
    	head = btrfs_find_delayed_ref_head(trans, bytenr);
    	if (!head)
    		goto out;
    
    	if (!mutex_trylock(&head->mutex)) {
    		atomic_inc(&head->node.refs);
    		spin_unlock(&delayed_refs->lock);
    
    
    		btrfs_release_path(path);
    
    		/*
    		 * Mutex was contended, block until it's released and let
    		 * caller try again
    		 */
    
    		mutex_lock(&head->mutex);
    		mutex_unlock(&head->mutex);
    		btrfs_put_delayed_ref(&head->node);
    		return -EAGAIN;
    	}
    
    	node = rb_prev(&head->node.rb_node);
    	if (!node)
    		goto out_unlock;
    
    	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
    
    	if (ref->bytenr != bytenr)
    		goto out_unlock;
    
    	ret = 1;
    	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
    		goto out_unlock;
    
    	data_ref = btrfs_delayed_node_to_data_ref(ref);
    
    	node = rb_prev(node);
	if (node) {
		int seq = ref->seq;

		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr && ref->seq == seq)
			goto out_unlock;
    	}
    
    	if (data_ref->root != root->root_key.objectid ||
    	    data_ref->objectid != objectid || data_ref->offset != offset)
    		goto out_unlock;
    
    	ret = 0;
    out_unlock:
    	mutex_unlock(&head->mutex);
    out:
    	spin_unlock(&delayed_refs->lock);
    	return ret;
    }
    
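/*
 * same check against the committed extent tree: returns 0 only when the
 * extent item carries a single inline data ref matching (root, objectid,
 * offset) and the extent is newer than the root's last snapshot
 */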
    static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
    					struct btrfs_root *root,
    					struct btrfs_path *path,
    					u64 objectid, u64 offset, u64 bytenr)
    
    {
    	struct btrfs_root *extent_root = root->fs_info->extent_root;
    
    	struct extent_buffer *leaf;
    
    	struct btrfs_extent_data_ref *ref;
    	struct btrfs_extent_inline_ref *iref;
    	struct btrfs_extent_item *ei;
    
	struct btrfs_key key;
	u32 item_size;
	int ret;

    	key.objectid = bytenr;
    
    	key.offset = (u64)-1;
    
    	key.type = BTRFS_EXTENT_ITEM_KEY;
    
    
    	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
    	if (ret < 0)
    		goto out;
    
    	BUG_ON(ret == 0); /* Corruption */
    
    
    	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;
    
    	path->slots[0]--;
    
    	leaf = path->nodes[0];
    
    	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    
	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
    	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    	if (item_size < sizeof(*ei)) {
    		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
    		goto out;
    	}
    #endif
    	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    
    	if (item_size != sizeof(*ei) +
    	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
    		goto out;
    
    	if (btrfs_extent_generation(leaf, ei) <=
    	    btrfs_root_last_snapshot(&root->root_item))
    		goto out;
    
    	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
    	if (btrfs_extent_inline_ref_type(leaf, iref) !=
    	    BTRFS_EXTENT_DATA_REF_KEY)
    		goto out;
    
    	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
    	if (btrfs_extent_refs(leaf, ei) !=
    	    btrfs_extent_data_ref_count(leaf, ref) ||
    	    btrfs_extent_data_ref_root(leaf, ref) !=
    	    root->root_key.objectid ||
    	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
    	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
    		goto out;
    
    	ret = 0;
    out:
    	return ret;
    }
    
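/*
 * decide whether anyone else may reference this data extent.  returns 0
 * when (root, objectid, offset) is provably the only owner, a positive
 * value when a cross reference may exist, and a negative errno on error
 */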
    int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
    			  struct btrfs_root *root,
    			  u64 objectid, u64 offset, u64 bytenr)
    {
    	struct btrfs_path *path;
    	int ret;
    	int ret2;
    
    	path = btrfs_alloc_path();
    	if (!path)
    		return -ENOENT;
    
    	do {
    		ret = check_committed_ref(trans, root, path, objectid,
    					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

    		ret2 = check_delayed_ref(trans, root, path, objectid,
    					 offset, bytenr);
    	} while (ret2 == -EAGAIN);
    
    	if (ret2 && ret2 != -ENOENT) {
    		ret = ret2;
		goto out;
	}

    	if (ret != -ENOENT || ret2 != -ENOENT)
    		ret = 0;
    
out:
	btrfs_free_path(path);
    
    	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
    		WARN_ON(ret > 0);
    
	return ret;
}

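/*
 * add or drop one reference for every extent the given tree block points
 * to: file extents when @buf is a leaf, child blocks otherwise.  @inc
 * selects between btrfs_inc_extent_ref and btrfs_free_extent
 */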
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc, int for_cow)
    {
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
    	u32 nritems;
    	struct btrfs_key key;
    	struct btrfs_file_extent_item *fi;
    	int i;
    	int level;
    	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, int);
    
    
    	ref_root = btrfs_header_owner(buf);
    	nritems = btrfs_header_nritems(buf);
    	level = btrfs_header_level(buf);
    
    
    	if (!root->ref_cows && level == 0)
    		return 0;
    
    	if (inc)
    		process_func = btrfs_inc_extent_ref;
    	else
    		process_func = btrfs_free_extent;
    
    	if (full_backref)
    		parent = buf->start;
    	else
    		parent = 0;
    
    	for (i = 0; i < nritems; i++) {
    
    		if (level == 0) {
    
    			btrfs_item_key_to_cpu(buf, &key, i);
    
    			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
    				continue;
    
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
    			if (btrfs_file_extent_type(buf, fi) ==
    			    BTRFS_FILE_EXTENT_INLINE)
    				continue;
    			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
    			if (bytenr == 0)
    				continue;
    
    
    			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
    			key.offset -= btrfs_file_extent_offset(buf, fi);
    			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset, for_cow);
    
    			if (ret)
    				goto fail;
    		} else {
    
    			bytenr = btrfs_node_blockptr(buf, i);
    			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0,
    					   for_cow);
    
    			if (ret)
    				goto fail;
    		}
    	}
    	return 0;
    fail:
    
    	return ret;
    }
    
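/* add a reference for everything the tree block points to */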
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
}
    
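/* drop a reference for everything the tree block points to */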
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
}

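/*
 * write the in-memory block group item for @cache back to its slot in
 * the extent tree
 */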
    static int write_one_cache_group(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_path *path,
    				 struct btrfs_block_group_cache *cache)
    {
    	int ret;
    	struct btrfs_root *extent_root = root->fs_info->extent_root;
    
    	unsigned long bi;
    	struct extent_buffer *leaf;
    
    
	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret); /* Corruption */
    
    
    	leaf = path->nodes[0];
    	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
    	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
    	btrfs_mark_buffer_dirty(leaf);
    
	btrfs_release_path(path);
fail:
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	return 0;
}

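/*
 * return the block group that follows @cache in the block group cache
 * rbtree, taking a reference on it and dropping the one held on @cache
 */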
    static struct btrfs_block_group_cache *
    next_block_group(struct btrfs_root *root,
    		 struct btrfs_block_group_cache *cache)
    {
    	struct rb_node *node;
    	spin_lock(&root->fs_info->block_group_cache_lock);
    	node = rb_next(&cache->cache_node);
    	btrfs_put_block_group(cache);
    	if (node) {
    		cache = rb_entry(node, struct btrfs_block_group_cache,
    				 cache_node);
    
    		btrfs_get_block_group(cache);
    
    	} else
    		cache = NULL;
    	spin_unlock(&root->fs_info->block_group_cache_lock);
    	return cache;
    }
    
    
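/*
 * set up the free space cache inode for @block_group so its free space
 * can be written out at commit time; block groups under 100 megs are not
 * cached at all
 */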
    static int cache_save_setup(struct btrfs_block_group_cache *block_group,
    			    struct btrfs_trans_handle *trans,
    			    struct btrfs_path *path)
    {
    	struct btrfs_root *root = block_group->fs_info->tree_root;
    	struct inode *inode = NULL;
    	u64 alloc_hint = 0;
    
    	int dcs = BTRFS_DC_ERROR;
    
    	int num_pages = 0;
    	int retries = 0;
    	int ret = 0;
    
    	/*
    	 * If this block group is smaller than 100 megs don't bother caching the
    	 * block group.
    	 */
    	if (block_group->key.offset < (100 * 1024 * 1024)) {
    		spin_lock(&block_group->lock);
    		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
    		spin_unlock(&block_group->lock);
    		return 0;
    	}
    
    again:
    	inode = lookup_free_space_inode(root, block_group, path);
    	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
    		ret = PTR_ERR(inode);