	leaf = path->nodes[0];
    	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    	btrfs_set_extent_refs(leaf, item, refs);
    	/* FIXME: get real generation */
    	btrfs_set_extent_generation(leaf, item, 0);
    	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
    		btrfs_set_extent_flags(leaf, item,
    				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
    				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
    		bi = (struct btrfs_tree_block_info *)(item + 1);
    		/* FIXME: get first key of the block */
    		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
    		btrfs_set_tree_block_level(leaf, bi, (int)owner);
    	} else {
    		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
    	}
    	btrfs_mark_buffer_dirty(leaf);
    	return 0;
    }
    #endif
    
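/*
 * Hash the (root, owner, offset) triple of a data back reference into the
 * 64-bit value used as the key offset of EXTENT_DATA_REF items.
 */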
    static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
    {
    	u32 high_crc = ~(u32)0;
    	u32 low_crc = ~(u32)0;
    	__le64 lenum;
    
	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
    }
    
    static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
    				     struct btrfs_extent_data_ref *ref)
    {
    	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
    				    btrfs_extent_data_ref_objectid(leaf, ref),
    				    btrfs_extent_data_ref_offset(leaf, ref));
    }
    
    static int match_extent_data_ref(struct extent_buffer *leaf,
    				 struct btrfs_extent_data_ref *ref,
    				 u64 root_objectid, u64 owner, u64 offset)
    {
    	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
    	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
    	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
    		return 0;
    	return 1;
    }
    
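/*
 * Find the data back reference item for the extent at @bytenr.  Shared refs
 * are keyed by @parent; keyed refs use the (root, owner, offset) hash and
 * may require scanning several items with the same hash.  Returns 0 when
 * found, -ENOENT otherwise.
 */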
    static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
    					   struct btrfs_root *root,
    					   struct btrfs_path *path,
    					   u64 bytenr, u64 parent,
    					   u64 root_objectid,
    					   u64 owner, u64 offset)
    {
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

    	key.objectid = bytenr;
    
    	if (parent) {
    		key.type = BTRFS_SHARED_DATA_REF_KEY;
    		key.offset = parent;
    	} else {
    		key.type = BTRFS_EXTENT_DATA_REF_KEY;
    		key.offset = hash_extent_data_ref(root_objectid,
    						  owner, offset);
    	}
    again:
    	recow = 0;
    	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
    	if (ret < 0) {
    		err = ret;
    		goto fail;
    	}
    
    	if (parent) {
    		if (!ret)
    			return 0;
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    		key.type = BTRFS_EXTENT_REF_V0_KEY;
    
    		btrfs_release_path(path);
    
    		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
    		if (ret < 0) {
    			err = ret;
    			goto fail;
    		}
    		if (!ret)
    			return 0;
    #endif
    		goto fail;
    
    	}
    
    	leaf = path->nodes[0];
    
    	nritems = btrfs_header_nritems(leaf);
    	while (1) {
    		if (path->slots[0] >= nritems) {
    			ret = btrfs_next_leaf(root, path);
    			if (ret < 0)
    				err = ret;
    			if (ret)
    				goto fail;
    
    			leaf = path->nodes[0];
    			nritems = btrfs_header_nritems(leaf);
    			recow = 1;
    		}
    
    		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    		if (key.objectid != bytenr ||
    		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
    			goto fail;
    
    		ref = btrfs_item_ptr(leaf, path->slots[0],
    				     struct btrfs_extent_data_ref);
    
    		if (match_extent_data_ref(leaf, ref, root_objectid,
    					  owner, offset)) {
    			if (recow) {
    
    				btrfs_release_path(path);
    
    				goto again;
    			}
    			err = 0;
    			break;
    		}
		path->slots[0]++;
	}
fail:
	return err;
}

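/*
 * Insert a new data back reference item for the extent at @bytenr, or bump
 * the count of an existing one.  For keyed EXTENT_DATA_REF items the key
 * offset is a hash, so on collisions the offset is incremented until the
 * matching item or a free slot is found.
 */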
    static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
    					   struct btrfs_root *root,
    					   struct btrfs_path *path,
    					   u64 bytenr, u64 parent,
    					   u64 root_objectid, u64 owner,
    					   u64 offset, int refs_to_add)
    
    {
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
    		key.type = BTRFS_SHARED_DATA_REF_KEY;
    		key.offset = parent;
    		size = sizeof(struct btrfs_shared_data_ref);
    	} else {
    		key.type = BTRFS_EXTENT_DATA_REF_KEY;
    		key.offset = hash_extent_data_ref(root_objectid,
    						  owner, offset);
    		size = sizeof(struct btrfs_extent_data_ref);
    	}
    
    	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
    	if (ret && ret != -EEXIST)
    		goto fail;
    
    	leaf = path->nodes[0];
    	if (parent) {
    		struct btrfs_shared_data_ref *ref;
    
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
    		if (ret == 0) {
    			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
    		} else {
    			num_refs = btrfs_shared_data_ref_count(leaf, ref);
    			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
    		struct btrfs_extent_data_ref *ref;
    		while (ret == -EEXIST) {
    			ref = btrfs_item_ptr(leaf, path->slots[0],
    					     struct btrfs_extent_data_ref);
    			if (match_extent_data_ref(leaf, ref, root_objectid,
    						  owner, offset))
    				break;
    
    			btrfs_release_path(path);
    
    			key.offset++;
    			ret = btrfs_insert_empty_item(trans, root, path, &key,
    						      size);
    			if (ret && ret != -EEXIST)
    				goto fail;
    
    			leaf = path->nodes[0];
    		}
    		ref = btrfs_item_ptr(leaf, path->slots[0],
    				     struct btrfs_extent_data_ref);
    		if (ret == 0) {
    			btrfs_set_extent_data_ref_root(leaf, ref,
    						       root_objectid);
    			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
    			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
    			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
    		} else {
    			num_refs = btrfs_extent_data_ref_count(leaf, ref);
    			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
    	btrfs_mark_buffer_dirty(leaf);
    	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

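/*
 * Drop @refs_to_drop references from the data back reference item that
 * @path points to, deleting the item once its count reaches zero.
 */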
    static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
    					   struct btrfs_root *root,
    					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;
    
    	leaf = path->nodes[0];
    
    	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    
    	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
    		ref1 = btrfs_item_ptr(leaf, path->slots[0],
    				      struct btrfs_extent_data_ref);
    		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
    	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
    		ref2 = btrfs_item_ptr(leaf, path->slots[0],
    				      struct btrfs_shared_data_ref);
    		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
    		struct btrfs_extent_ref_v0 *ref0;
    		ref0 = btrfs_item_ptr(leaf, path->slots[0],
    				      struct btrfs_extent_ref_v0);
    		num_refs = btrfs_ref_count_v0(leaf, ref0);
    #endif
    	} else {
    		BUG();
    	}
    
    
    	BUG_ON(num_refs < refs_to_drop);
    	num_refs -= refs_to_drop;
    
    	if (num_refs == 0) {
    		ret = btrfs_del_item(trans, root, path);
    	} else {
    
    		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
    			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
    		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
    			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    		else {
    			struct btrfs_extent_ref_v0 *ref0;
    			ref0 = btrfs_item_ptr(leaf, path->slots[0],
    					struct btrfs_extent_ref_v0);
    			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
    		}
    #endif
    
    		btrfs_mark_buffer_dirty(leaf);
    	}
    	return ret;
    }
    
    
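/*
 * Return the reference count stored in a data back reference, taken either
 * from the inline ref @iref or from the item that @path points to.
 */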
    static noinline u32 extent_data_ref_count(struct btrfs_root *root,
    					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
    	struct extent_buffer *leaf;
    	struct btrfs_extent_data_ref *ref1;
    	struct btrfs_shared_data_ref *ref2;
    	u32 num_refs = 0;
    
    	leaf = path->nodes[0];
    	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    	if (iref) {
    		if (btrfs_extent_inline_ref_type(leaf, iref) ==
    		    BTRFS_EXTENT_DATA_REF_KEY) {
    			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
    			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
    		} else {
    			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
    			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
    		}
    	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
    		ref1 = btrfs_item_ptr(leaf, path->slots[0],
    				      struct btrfs_extent_data_ref);
    		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
    	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
    		ref2 = btrfs_item_ptr(leaf, path->slots[0],
    				      struct btrfs_shared_data_ref);
    		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
    		struct btrfs_extent_ref_v0 *ref0;
    		ref0 = btrfs_item_ptr(leaf, path->slots[0],
    				      struct btrfs_extent_ref_v0);
    		num_refs = btrfs_ref_count_v0(leaf, ref0);
    
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

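/*
 * Look up a tree block back reference item (shared or keyed by root).
 * Returns 0 if the item exists, -ENOENT if it does not.
 */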
    static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
    					  struct btrfs_root *root,
    					  struct btrfs_path *path,
    					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
    	if (parent) {
    		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
    		key.offset = parent;
    	} else {
    		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
    	if (ret > 0)
    		ret = -ENOENT;
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    	if (ret == -ENOENT && parent) {
    
    		btrfs_release_path(path);
    
    		key.type = BTRFS_EXTENT_REF_V0_KEY;
    		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
    		if (ret > 0)
    			ret = -ENOENT;
	}
#endif
	return ret;
}

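/*
 * Insert an empty tree block back reference item: SHARED_BLOCK_REF when a
 * @parent block is given, TREE_BLOCK_REF keyed by @root_objectid otherwise.
 */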
    static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
    					  struct btrfs_root *root,
    					  struct btrfs_path *path,
    					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
    	if (parent) {
    		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
    		key.offset = parent;
    	} else {
    		key.type = BTRFS_TREE_BLOCK_REF_KEY;
    		key.offset = root_objectid;
    	}
    
	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

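/*
 * Map (parent, owner) to the matching back reference key type: shared vs.
 * keyed, and tree block vs. data.
 */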
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
    	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
    		if (parent > 0)
    			type = BTRFS_SHARED_BLOCK_REF_KEY;
    		else
    			type = BTRFS_TREE_BLOCK_REF_KEY;
    	} else {
    		if (parent > 0)
    			type = BTRFS_SHARED_DATA_REF_KEY;
    		else
    			type = BTRFS_EXTENT_DATA_REF_KEY;
    	}
	return type;
}

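/*
 * Starting at @level, walk up the path and return the key that follows the
 * current slot without releasing the path.  Returns 1 if there is no next
 * key.
 */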
    static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
    
    		if (!path->nodes[level])
    			break;
    		if (path->slots[level] + 1 >=
    		    btrfs_header_nritems(path->nodes[level]))
    			continue;
    		if (level == 0)
    			btrfs_item_key_to_cpu(path->nodes[level], key,
    					      path->slots[level] + 1);
    		else
    			btrfs_node_key_to_cpu(path->nodes[level], key,
    					      path->slots[level] + 1);
    		return 0;
    	}
    	return 1;
    }
    
    /*
     * look for inline back ref. if back ref is found, *ref_ret is set
     * to the address of inline back ref, and 0 is returned.
     *
     * if back ref isn't found, *ref_ret is set to the address where it
     * should be inserted, and -ENOENT is returned.
     *
     * if insert is true and there are too many inline back refs, the path
     * points to the extent item, and -EAGAIN is returned.
     *
     * NOTE: inline back refs are ordered in the same way that back ref
     *	 items in the tree are ordered.
     */
    static noinline_for_stack
    int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_path *path,
    				 struct btrfs_extent_inline_ref **ref_ret,
    				 u64 bytenr, u64 num_bytes,
    				 u64 parent, u64 root_objectid,
    				 u64 owner, u64 offset, int insert)
    {
    	struct btrfs_key key;
    	struct extent_buffer *leaf;
    	struct btrfs_extent_item *ei;
    	struct btrfs_extent_inline_ref *iref;
    	u64 flags;
    	u64 item_size;
    	unsigned long ptr;
    	unsigned long end;
    	int extra_size;
    	int type;
    	int want;
    	int ret;
    	int err = 0;
    
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

    	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret && !insert) {
    		err = -ENOENT;
    		goto out;
    	}
    	BUG_ON(ret); /* Corruption */
    
    
    	leaf = path->nodes[0];
    	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
    #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
    	if (item_size < sizeof(*ei)) {
    		if (!insert) {
    			err = -ENOENT;
    			goto out;
    		}
    		ret = convert_extent_item_v0(trans, root, path, owner,
    					     extra_size);
    		if (ret < 0) {
    			err = ret;
    			goto out;
    		}
    		leaf = path->nodes[0];
    		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
    	}
    #endif
    	BUG_ON(item_size < sizeof(*ei));
    
    	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    	flags = btrfs_extent_flags(leaf, ei);
    
    	ptr = (unsigned long)(ei + 1);
    	end = (unsigned long)ei + item_size;
    
    	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
    		ptr += sizeof(struct btrfs_tree_block_info);
    		BUG_ON(ptr > end);
    	} else {
    		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
    	}
    
    	err = -ENOENT;
    	while (1) {
    		if (ptr >= end) {
    			WARN_ON(ptr > end);
    			break;
    		}
    		iref = (struct btrfs_extent_inline_ref *)ptr;
    		type = btrfs_extent_inline_ref_type(leaf, iref);
    		if (want < type)
    			break;
    		if (want > type) {
    			ptr += btrfs_extent_inline_ref_size(type);
    			continue;
    		}
    
    		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
    			struct btrfs_extent_data_ref *dref;
    			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
    			if (match_extent_data_ref(leaf, dref, root_objectid,
    						  owner, offset)) {
    				err = 0;
    				break;
    			}
    			if (hash_extent_data_ref_item(leaf, dref) <
    			    hash_extent_data_ref(root_objectid, owner, offset))
    				break;
    		} else {
    			u64 ref_offset;
    			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
    			if (parent > 0) {
    				if (parent == ref_offset) {
    					err = 0;
    					break;
    				}
    				if (ref_offset < parent)
    					break;
    			} else {
    				if (root_objectid == ref_offset) {
    					err = 0;
    					break;
    				}
    				if (ref_offset < root_objectid)
    					break;
    			}
    		}
    		ptr += btrfs_extent_inline_ref_size(type);
    	}
    	if (err == -ENOENT && insert) {
    		if (item_size + extra_size >=
    		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
    			err = -EAGAIN;
    			goto out;
    		}
    		/*
    		 * To add new inline back ref, we have to make sure
    		 * there is no corresponding back ref item.
    		 * For simplicity, we just do not add new inline back
    		 * ref if there is any kind of item for this block
    		 */
    
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
    			err = -EAGAIN;
    			goto out;
    		}
    	}
    	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
    		btrfs_unlock_up_safe(path, 1);
    	}
    	return err;
    }
    
    /*
     * helper to add new inline back ref
     */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_path *path,
    				 struct btrfs_extent_inline_ref *iref,
    				 u64 parent, u64 root_objectid,
    				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
    	struct extent_buffer *leaf;
    	struct btrfs_extent_item *ei;
    	unsigned long ptr;
    	unsigned long end;
    	unsigned long item_offset;
    	u64 refs;
    	int size;
    	int type;
    
    	leaf = path->nodes[0];
    	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    	item_offset = (unsigned long)iref - (unsigned long)ei;
    
    	type = extent_ref_type(parent, owner);
    	size = btrfs_extent_inline_ref_size(type);
    
    
    	btrfs_extend_item(trans, root, path, size);
    
    
    	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    	refs = btrfs_extent_refs(leaf, ei);
    	refs += refs_to_add;
    	btrfs_set_extent_refs(leaf, ei, refs);
    	if (extent_op)
    		__run_delayed_extent_op(extent_op, leaf, ei);
    
    	ptr = (unsigned long)ei + item_offset;
    	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
    	if (ptr < end - size)
    		memmove_extent_buffer(leaf, ptr + size, ptr,
    				      end - size - ptr);
    
    	iref = (struct btrfs_extent_inline_ref *)ptr;
    	btrfs_set_extent_inline_ref_type(leaf, iref, type);
    	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
    		struct btrfs_extent_data_ref *dref;
    		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
    		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
    		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
    		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
    		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
    	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
    		struct btrfs_shared_data_ref *sref;
    		sref = (struct btrfs_shared_data_ref *)(iref + 1);
    		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
    		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
    	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
    		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
    	} else {
    		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
    	}
    	btrfs_mark_buffer_dirty(leaf);
    }
    
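/*
 * Look up a back reference for the given extent, checking the inline refs
 * in the extent item first and falling back to a separate back ref item.
 */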
    static int lookup_extent_backref(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_path *path,
    				 struct btrfs_extent_inline_ref **ref_ret,
    				 u64 bytenr, u64 num_bytes, u64 parent,
    				 u64 root_objectid, u64 owner, u64 offset)
    {
    	int ret;
    
    	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
    					   bytenr, num_bytes, parent,
    					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
    
    	*ref_ret = NULL;
    
    	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
    		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
    					    root_objectid);
    	} else {
    		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

    /*
     * helper to update/remove inline back ref
     */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_trans_handle *trans,
    				  struct btrfs_root *root,
    				  struct btrfs_path *path,
    				  struct btrfs_extent_inline_ref *iref,
    				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
    	struct extent_buffer *leaf;
    	struct btrfs_extent_item *ei;
    	struct btrfs_extent_data_ref *dref = NULL;
    	struct btrfs_shared_data_ref *sref = NULL;
    	unsigned long ptr;
    	unsigned long end;
    	u32 item_size;
    	int size;
    	int type;
    	u64 refs;
    
    	leaf = path->nodes[0];
    	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    	refs = btrfs_extent_refs(leaf, ei);
    	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
    	refs += refs_to_mod;
    	btrfs_set_extent_refs(leaf, ei, refs);
    	if (extent_op)
    		__run_delayed_extent_op(extent_op, leaf, ei);
    
    	type = btrfs_extent_inline_ref_type(leaf, iref);
    
    	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
    		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
    		refs = btrfs_extent_data_ref_count(leaf, dref);
    	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
    		sref = (struct btrfs_shared_data_ref *)(iref + 1);
    		refs = btrfs_shared_data_ref_count(leaf, sref);
    	} else {
    		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
    	refs += refs_to_mod;
    
    	if (refs > 0) {
    		if (type == BTRFS_EXTENT_DATA_REF_KEY)
    			btrfs_set_extent_data_ref_count(leaf, dref, refs);
    		else
    			btrfs_set_shared_data_ref_count(leaf, sref, refs);
    	} else {
    		size =  btrfs_extent_inline_ref_size(type);
    		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
    		ptr = (unsigned long)iref;
    		end = (unsigned long)ei + item_size;
    		if (ptr + size < end)
    			memmove_extent_buffer(leaf, ptr, ptr + size,
    					      end - ptr - size);
    		item_size -= size;
    
    		btrfs_truncate_item(trans, root, path, item_size, 1);
    
    	}
    	btrfs_mark_buffer_dirty(leaf);
    }
    
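/*
 * Add @refs_to_add references for an extent, updating an existing inline
 * back ref when one matches and otherwise inserting a new inline ref.
 * Returns -EAGAIN when the ref has to be added as a keyed item instead.
 */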
    static noinline_for_stack
    int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_path *path,
    				 u64 bytenr, u64 num_bytes, u64 parent,
    				 u64 root_objectid, u64 owner,
    				 u64 offset, int refs_to_add,
    				 struct btrfs_delayed_extent_op *extent_op)
    {
    	struct btrfs_extent_inline_ref *iref;
    	int ret;
    
    	ret = lookup_inline_extent_backref(trans, root, path, &iref,
    					   bytenr, num_bytes, parent,
    					   root_objectid, owner, offset, 1);
    	if (ret == 0) {
    		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
    
    		update_inline_extent_backref(trans, root, path, iref,
    					     refs_to_add, extent_op);
    
    	} else if (ret == -ENOENT) {
    
    		setup_inline_extent_backref(trans, root, path, iref, parent,
    					    root_objectid, owner, offset,
    					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

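/*
 * Insert a keyed (non-inline) back reference item, dispatching to the tree
 * block or data ref helper based on @owner.
 */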
    static int insert_extent_backref(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_path *path,
    				 u64 bytenr, u64 parent, u64 root_objectid,
    				 u64 owner, u64 offset, int refs_to_add)
    {
    	int ret;
    	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
    		BUG_ON(refs_to_add != 1);
    		ret = insert_tree_block_ref(trans, root, path, bytenr,
    					    parent, root_objectid);
    	} else {
    		ret = insert_extent_data_ref(trans, root, path, bytenr,
    					     parent, root_objectid,
    					     owner, offset, refs_to_add);
    	}
    	return ret;
    }
    
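/*
 * Drop @refs_to_drop references from the back ref at @path (or from the
 * inline ref @iref), removing the ref entirely once its count hits zero.
 */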
    static int remove_extent_backref(struct btrfs_trans_handle *trans,
    				 struct btrfs_root *root,
    				 struct btrfs_path *path,
    				 struct btrfs_extent_inline_ref *iref,
    				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
    	if (iref) {
    
    		update_inline_extent_backref(trans, root, path, iref,
    					     -refs_to_drop, NULL);
    
    	} else if (is_data) {
    		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
    	} else {
    		ret = btrfs_del_item(trans, root, path);
    	}
    	return ret;
    }
    
    
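/*
 * Issue a discard for @len bytes starting at byte @start on @bdev,
 * converting the byte range to 512-byte sectors.
 */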
static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
    
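/*
 * Map the logical range [bytenr, bytenr + num_bytes) to its physical
 * stripes and issue a discard on every device that supports it.  The number
 * of bytes actually discarded is returned via @actual_bytes.
 */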
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

    	/* Tell the block device(s) that the sectors can be discarded */
    
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
    
    			if (!stripe->dev->can_discard)
    				continue;
    
    
    			ret = btrfs_issue_discard(stripe->dev->bdev,
    						  stripe->physical,
    						  stripe->length);
    			if (!ret)
    				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

    			/*
    			 * Just in case we get back EOPNOTSUPP for some reason,
    			 * just ignore the return value so we don't screw up
    			 * people calling discard_extent.
    			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}

    /* Can return -ENOMEM */
    
    int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
    			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
    
    	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
    	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
    
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
    
    	}
    	return ret;
    }
    
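/*
 * Add @refs_to_add references to an existing extent item.  The inline ref
 * path is tried first; if the extent item has no room left (-EAGAIN), the
 * ref count is bumped on the extent item and a keyed back ref item is
 * inserted instead.
 */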
    static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
    				  struct btrfs_root *root,
    				  u64 bytenr, u64 num_bytes,
    				  u64 parent, u64 root_objectid,
    				  u64 owner, u64 offset, int refs_to_add,
    				  struct btrfs_delayed_extent_op *extent_op)
    {
    	struct btrfs_path *path;
    	struct extent_buffer *leaf;
    	struct btrfs_extent_item *item;
    	u64 refs;
    	int ret;
    	int err = 0;
    
    	path = btrfs_alloc_path();
    	if (!path)
    		return -ENOMEM;
    
    	path->reada = 1;
    	path->leave_spinning = 1;
    	/* this will setup the path even if it fails to insert the back ref */
    	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
    					   path, bytenr, num_bytes, parent,
    					   root_objectid, owner, offset,
    					   refs_to_add, extent_op);
    	if (ret == 0)
    		goto out;
    
    	if (ret != -EAGAIN) {
    		err = ret;
    		goto out;
    	}
    
    	leaf = path->nodes[0];
    	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
    	refs = btrfs_extent_refs(leaf, item);
    	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
    	if (extent_op)
    		__run_delayed_extent_op(extent_op, leaf, item);
    
    	btrfs_mark_buffer_dirty(leaf);
    
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;
    
    	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return err;
}

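/*
 * Process one delayed data ref node: allocate the reserved file extent for
 * a newly allocated extent, or add/drop references on an existing one.
 */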
    static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
    				struct btrfs_root *root,
    				struct btrfs_delayed_ref_node *node,
    				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
    	struct btrfs_delayed_data_ref *ref;
    	struct btrfs_key ins;
    	u64 parent = 0;
    	u64 ref_root = 0;
    	u64 flags = 0;
    
    	ins.objectid = node->bytenr;
    	ins.offset = node->num_bytes;
    	ins.type = BTRFS_EXTENT_ITEM_KEY;
    
    	ref = btrfs_delayed_node_to_data_ref(node);
    	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
    		parent = ref->parent;
    	else
    		ref_root = ref->root;
    
    	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
    		if (extent_op) {
    			BUG_ON(extent_op->update_key);
    			flags |= extent_op->flags_to_set;
    		}
    		ret = alloc_reserved_file_extent(trans, root,
    						 parent, ref_root, flags,
    						 ref->objectid, ref->offset,
    						 &ins, node->ref_mod);
    	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
    		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
    					     node->num_bytes, parent,
    					     ref_root, ref->objectid,
    					     ref->offset, node->ref_mod,
    					     extent_op);
    	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
    		ret = __btrfs_free_extent(trans, root, node->bytenr,
    					  node->num_bytes, parent,
    					  ref_root, ref->objectid,
    					  ref->offset, node->ref_mod,
    					  extent_op);
    	} else {
    		BUG();
    	}
    	return ret;
    }
    
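/*
 * Apply the modifications recorded in a delayed extent op (extra flags and,
 * when requested, an updated key) to the extent item in @leaf.
 */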
    static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
    				    struct extent_buffer *leaf,
    				    struct btrfs_extent_item *ei)
    {
    	u64 flags = btrfs_extent_flags(leaf, ei);
    	if (extent_op->update_flags) {
    		flags |= extent_op->flags_to_set;