Newer
Older
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2
/*
 * A deferred modification to the extent allocation tree.  While the extent
 * root itself is being modified we cannot recurse into it, so the change is
 * queued on fs_info->extent_ins (stored via set_state_private, see
 * __btrfs_update_extent_ref) and replayed later by
 * finish_current_insert/del_pending_extents.
 */
struct pending_extent_op {
/* PENDING_EXTENT_INSERT, PENDING_EXTENT_DELETE or PENDING_BACKREF_UPDATE */
int type;
/* first byte of the extent this op applies to */
u64 bytenr;
/* length of the extent in bytes */
u64 num_bytes;
/* new parent block for the backref */
u64 parent;
/* parent recorded before this update; checked against the live backref */
u64 orig_parent;
/* new generation for the backref */
u64 generation;
/* generation recorded before this update */
u64 orig_generation;
/* tree level; derived from owner_objectid for metadata extents */
int level;
};
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *hint,
u64 search_start, int data, int owner);
void maybe_lock_mutex(struct btrfs_root *root)
{
if (root != root->fs_info->extent_root &&
root != root->fs_info->chunk_root &&
root != root->fs_info->dev_root) {
mutex_lock(&root->fs_info->alloc_mutex);
}
}
void maybe_unlock_mutex(struct btrfs_root *root)
{
if (root != root->fs_info->extent_root &&
root != root->fs_info->chunk_root &&
root != root->fs_info->dev_root) {
mutex_unlock(&root->fs_info->alloc_mutex);
}
}
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
/*
 * Return non-zero when @cache has every flag in @bits set
 * (e.g. BTRFS_BLOCK_GROUP_METADATA vs DATA allocation type checks).
 */
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	u64 masked = cache->flags & bits;

	return masked == bits;
}
/*
* this adds the block group to the fs_info rb tree for the block group
* cache
*/
int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group_cache *block_group)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct btrfs_block_group_cache *cache;
spin_lock(&info->block_group_cache_lock);
p = &info->block_group_cache_tree.rb_node;
while (*p) {
parent = *p;
cache = rb_entry(parent, struct btrfs_block_group_cache,
cache_node);
if (block_group->key.objectid < cache->key.objectid) {
p = &(*p)->rb_left;
} else if (block_group->key.objectid > cache->key.objectid) {
p = &(*p)->rb_right;
} else {
spin_unlock(&info->block_group_cache_lock);
return -EEXIST;
}
}
rb_link_node(&block_group->cache_node, parent, p);
rb_insert_color(&block_group->cache_node,
&info->block_group_cache_tree);
spin_unlock(&info->block_group_cache_lock);
return 0;
}
/*
* This will return the block group at or after bytenr if contains is 0, else
* it will return the block group that contains the bytenr
*/
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *found = NULL;
	struct rb_node *node;

	spin_lock(&info->block_group_cache_lock);

	node = info->block_group_cache_tree.rb_node;
	while (node) {
		struct btrfs_block_group_cache *entry;
		u64 group_start;
		u64 group_end;

		entry = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		group_start = entry->key.objectid;
		group_end = group_start + entry->key.offset - 1;

		if (bytenr < group_start) {
			/*
			 * in "at or after" mode remember the lowest-starting
			 * group we have passed on the way down
			 */
			if (!contains &&
			    (!found || group_start < found->key.objectid))
				found = entry;
			node = node->rb_left;
		} else if (bytenr > group_start) {
			if (contains && bytenr <= group_end) {
				found = entry;
				break;
			}
			node = node->rb_right;
		} else {
			/* exact start-byte match satisfies both modes */
			found = entry;
			break;
		}
	}

	spin_unlock(&info->block_group_cache_lock);
	return found;
}
/*
* this is only called by cache_block_group, since we could have freed extents
* we need to check the pinned_extents for any extents that can't be used yet
* since their free space will be released as soon as the transaction commits.
*/
static int add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 pinned_start;
	u64 pinned_end;
	u64 len;
	int ret;

	/*
	 * walk the pinned extents inside [start, end) and only hand the
	 * gaps between them to the free space cache
	 */
	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &pinned_start, &pinned_end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (pinned_start == start) {
			/* range begins pinned; skip past it */
			start = pinned_end + 1;
		} else if (pinned_start > start && pinned_start < end) {
			/* free gap before the next pinned extent */
			len = pinned_start - start;
			ret = btrfs_add_free_space(block_group, start, len);
			BUG_ON(ret);
			start = pinned_end + 1;
		} else {
			/* next pinned extent is beyond our range */
			break;
		}
	}

	/* whatever remains is entirely free */
	if (start < end) {
		len = end - start;
		ret = btrfs_add_free_space(block_group, start, len);
		BUG_ON(ret);
	}

	return 0;
}
static int cache_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
u64 first_free;
if (!block_group)
return 0;
root = root->fs_info->extent_root;
if (block_group->cached)
return 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;

Chris Mason
committed
/*
* we get into deadlocks with paths held by callers of this function.
* since the alloc_mutex is protecting things right now, just
* skip the locking here
*/
path->skip_locking = 1;
first_free = max_t(u64, block_group->key.objectid,
BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
key.objectid = block_group->key.objectid;
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
if (ret == 0) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid + key.offset > first_free)
first_free = key.objectid + key.offset;
}
leaf = path->nodes[0];
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto err;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid < block_group->key.objectid)
if (key.objectid >= block_group->key.objectid +
if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
if (!found) {
last = first_free;
add_new_free_space(block_group, root->fs_info, last,
key.objectid);
last = key.objectid + key.offset;
if (!found)
last = first_free;
add_new_free_space(block_group, root->fs_info, last,
block_group->key.objectid +
block_group->key.offset);
err:
/*
* return the block group that starts at or after bytenr
*/
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
btrfs_fs_info *info,
u64 bytenr)
{
cache = block_group_cache_tree_search(info, bytenr, 0);
/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
btrfs_fs_info *info,
cache = block_group_cache_tree_search(info, bytenr, 1);
static int noinline find_free_space(struct btrfs_root *root,
struct btrfs_block_group_cache **cache_ret,
u64 *start_ret, u64 num, int data)
{
int ret;
struct btrfs_block_group_cache *cache = *cache_ret;

Chris Mason
committed
u64 last;
u64 search_start = *start_ret;
WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
last = max(search_start, cache->key.objectid);
ret = cache_block_group(root, cache);
goto out;

Chris Mason
committed
if (cache->ro || !block_group_bits(cache, data))

Chris Mason
committed
info = btrfs_find_free_space(cache, last, num);
if (info) {
*start_ret = info->offset;

Chris Mason
committed
last = cache->key.objectid + cache->key.offset;
cache = btrfs_lookup_first_block_group(root->fs_info, last);
if (!cache || cache->key.objectid >= total_fs_bytes)
*cache_ret = cache;
goto again;

Chris Mason
committed
if (factor == 10)
return num;
num *= factor;
do_div(num, 10);
return num;
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
struct list_head *head = &info->space_info;
struct list_head *cur;
struct btrfs_space_info *found;
list_for_each(cur, head) {
found = list_entry(cur, struct btrfs_space_info, list);
if (found->flags == flags)
return found;
}
return NULL;
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *hint,
u64 search_start, int data, int owner)
struct btrfs_block_group_cache *cache;
struct btrfs_block_group_cache *found_group = NULL;
struct btrfs_fs_info *info = root->fs_info;

Chris Mason
committed
int factor = 10;
if (data & BTRFS_BLOCK_GROUP_METADATA)
factor = 9;
shint = btrfs_lookup_first_block_group(info, search_start);
if (shint && block_group_bits(shint, data) && !shint->ro) {
used = btrfs_block_group_used(&shint->item);
if (used + shint->pinned <
div_factor(shint->key.offset, factor)) {
if (hint && !hint->ro && block_group_bits(hint, data)) {
used = btrfs_block_group_used(&hint->item);
if (used + hint->pinned <
div_factor(hint->key.offset, factor)) {

Chris Mason
committed
last = hint->key.objectid + hint->key.offset;
last = max(hint->key.objectid, search_start);
sinfo = __find_space_info(root->fs_info, data);
if (!sinfo)
goto found;
cache = NULL;
spin_lock(&sinfo->lock);
list_for_each(l, &sinfo->block_groups) {
struct btrfs_block_group_cache *entry;
entry = list_entry(l, struct btrfs_block_group_cache,
list);
if ((entry->key.objectid >= last) &&
(!cache || (entry->key.objectid <
cache->key.objectid)))
cache = entry;
spin_unlock(&sinfo->lock);
if (!cache)
break;
last = cache->key.objectid + cache->key.offset;
used = btrfs_block_group_used(&cache->item);
if (!cache->ro && block_group_bits(cache, data)) {
free_check = div_factor(cache->key.offset, factor);
if (used + cache->pinned < free_check) {
found_group = cache;
if (!wrapped) {
last = search_start;
wrapped = 1;
goto again;
}
if (!full_search && factor < 10) {
/*
 * Public entry point for block group selection; forwards directly to
 * __btrfs_find_block_group().
 */
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
						       struct btrfs_block_group_cache
						       *hint, u64 search_start,
						       int data, int owner)
{
	return __btrfs_find_block_group(root, hint, search_start, data, owner);
}
/*
 * simple helper to search for an existing extent at a given offset
 *
 * Returns 0 when an extent item (start, EXTENT_ITEM, len) exists in the
 * extent tree, > 0 when it does not, < 0 on error.
 */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;	/* was used without a declaration */

	path = btrfs_alloc_path();
	BUG_ON(!path);

	maybe_lock_mutex(root);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	maybe_unlock_mutex(root);

	/* the path was previously leaked on every call */
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - offset in the file corresponding to the key holding the reference
 * - number of references held by the parent node (always 1 for tree blocks)
 *
 * Btree leaf may hold multiple references to a file extent. In most cases,
 * these references are from same file and the corresponding offsets inside
 * the file are close together. So inode objectid and offset in file are
 * just hints, they provide hints about where in the btree the references
 * can be found and when we can stop searching.
 *
 * When a file extent is allocated the fields are filled in:
 * (root_key.objectid, trans->transid, inode objectid, offset in file, 1)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf. It looks similar to the create case, but trans->transid will
 * be different when the block is cow'd.
 *
 * (root_key.objectid, trans->transid, inode objectid, offset in file,
 *  number of references in the leaf)
 *
 * Because inode objectid and offset in file are just hints, they are not
 * used when backrefs are deleted. When a file extent is removed either
 * during snapshot deletion or file truncation, we find the corresponding
 * back reference and check the following fields.
 *
 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf))
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid, level, 0, 1)
 *
 * When a tree block is cow'd, new back references are added for all the
 * blocks it points to. If the tree block isn't in reference counted root,
 * the old back references are removed. These new back references are of
 * the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, 0, 1)
 *
 * When a backref is in deleting, the following fields are checked:
 *
 * if backref was for a tree root:
 *     (btrfs_header_owner(itself), btrfs_header_generation(itself))
 * else
 *     (btrfs_header_owner(parent), btrfs_header_generation(parent))
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, the key
 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
 * byte of parent extent. If a extent is tree root, the key offset is set
 * to the key objectid.
 */
/*
 * Find the backref item for @bytenr whose parent block is @parent and
 * position @path on it.
 *
 * When @del is set the tree search is done in delete mode (-1) so the
 * caller can remove the item afterwards.  The item's root/generation must
 * match @ref_root/@ref_generation; a mismatch means the extent tree is
 * inconsistent and -EIO is returned.  Returns -ENOENT when no backref
 * exists at that key.
 */
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 bytenr,
					  u64 parent, u64 ref_root,
					  u64 ref_generation, int del)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct extent_buffer *leaf;
	int ret;		/* was used without a declaration */

	/* backrefs are keyed (extent start, EXTENT_REF, parent block) */
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;

	ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != ref_generation) {
		/* the on-disk backref disagrees with the caller's view */
		ret = -EIO;
		WARN_ON(1);
		goto out;
	}
	ret = 0;
out:
	return ret;
}
/*
 * Insert a backref for @bytenr with parent @parent, or bump the refcount
 * of an existing backref at the same key.
 *
 * On -EEXIST the existing item's root/generation must match; the inode
 * objectid/offset fields are only hints, so when refs from different
 * owners collapse onto one item they degrade to BTRFS_MULTIPLE_OBJECTIDS.
 * The path is always released before returning.
 */
static int noinline insert_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid, u64 owner_offset)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;	/* was missing: offset was left uninitialized */

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
	if (ret == 0) {
		/* fresh item: fill in all fields with refcount 1 */
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		btrfs_set_ref_root(leaf, ref, ref_root);
		btrfs_set_ref_generation(leaf, ref, ref_generation);
		btrfs_set_ref_objectid(leaf, ref, owner_objectid);
		btrfs_set_ref_offset(leaf, ref, owner_offset);
		btrfs_set_ref_num_refs(leaf, ref, 1);
	} else if (ret == -EEXIST) {
		u64 existing_owner;

		BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		if (btrfs_ref_root(leaf, ref) != ref_root ||
		    btrfs_ref_generation(leaf, ref) != ref_generation) {
			ret = -EIO;
			WARN_ON(1);
			goto out;
		}

		num_refs = btrfs_ref_num_refs(leaf, ref);
		BUG_ON(num_refs == 0);
		btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);

		/* keep the lowest offset as the search hint */
		existing_owner = btrfs_ref_objectid(leaf, ref);
		if (existing_owner == owner_objectid &&
		    btrfs_ref_offset(leaf, ref) > owner_offset) {
			btrfs_set_ref_offset(leaf, ref, owner_offset);
		} else if (existing_owner != owner_objectid &&
			   existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
			btrfs_set_ref_objectid(leaf, ref,
					       BTRFS_MULTIPLE_OBJECTIDS);
			btrfs_set_ref_offset(leaf, ref, 0);
		}
		ret = 0;
	} else {
		goto out;
	}			/* was missing: else branch never closed */
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
/*
 * Drop one reference from the backref item @path points at.  The item is
 * deleted outright when its refcount hits zero, otherwise the decremented
 * count is written back.  The path is released either way.
 */
static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_ref *ref;
	u32 refs_left;
	int ret = 0;

	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	refs_left = btrfs_ref_num_refs(leaf, ref);
	BUG_ON(refs_left == 0);
	refs_left--;

	if (refs_left) {
		btrfs_set_ref_num_refs(leaf, ref, refs_left);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}

	btrfs_release_path(root, path);
	return ret;
}
/*
 * Move a backref for @bytenr from (orig_parent, orig_root, orig_generation)
 * to (parent, ref_root, ref_generation).
 *
 * When the caller is operating on the extent root itself we must not
 * recurse into the extent tree, so the update is queued as a
 * pending_extent_op on fs_info->extent_ins instead of being applied
 * directly.  Otherwise the old backref is looked up, removed and
 * re-inserted with the new parent/generation.
 *
 * Returns 0 on success or a negative errno.
 */
static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
u64 orig_parent, u64 parent,
u64 orig_root, u64 ref_root,
u64 orig_generation, u64 ref_generation,
u64 owner_objectid, u64 owner_offset)
{
int ret;
struct btrfs_root *extent_root = root->fs_info->extent_root;
struct btrfs_path *path;
if (root == root->fs_info->extent_root) {
struct pending_extent_op *extent_op;
u64 num_bytes;
/* for the extent root, owner_objectid encodes the tree level */
BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
num_bytes = btrfs_level_size(root, (int)owner_objectid);
if (test_range_bit(&root->fs_info->extent_ins, bytenr,
bytenr + num_bytes - 1, EXTENT_LOCKED, 0)) {
/* an op is already queued for this range; update it in place */
u64 priv;
ret = get_state_private(&root->fs_info->extent_ins,
bytenr, &priv);
BUG_ON(ret);
extent_op = (struct pending_extent_op *)
(unsigned long)priv;
/* the queued op must agree with the caller's view of the old ref */
BUG_ON(extent_op->parent != orig_parent);
BUG_ON(extent_op->generation != orig_generation);
extent_op->parent = parent;
extent_op->generation = ref_generation;
} else {
/* queue a new backref-update op keyed by the extent range */
extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
BUG_ON(!extent_op);
extent_op->type = PENDING_BACKREF_UPDATE;
extent_op->bytenr = bytenr;
extent_op->num_bytes = num_bytes;
extent_op->parent = parent;
extent_op->orig_parent = orig_parent;
extent_op->generation = ref_generation;
extent_op->orig_generation = orig_generation;
extent_op->level = (int)owner_objectid;
set_extent_bits(&root->fs_info->extent_ins,
bytenr, bytenr + num_bytes - 1,
EXTENT_LOCKED, GFP_NOFS);
set_state_private(&root->fs_info->extent_ins,
bytenr, (unsigned long)extent_op);
}
return 0;
}
/* normal case: rewrite the backref item directly in the extent tree */
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = lookup_extent_backref(trans, extent_root, path,
bytenr, orig_parent, orig_root,
orig_generation, 1);
if (ret)
goto out;
ret = remove_extent_backref(trans, extent_root, path);
if (ret)
goto out;
ret = insert_extent_backref(trans, extent_root, path, bytenr,
parent, ref_root, ref_generation,
owner_objectid, owner_offset);
BUG_ON(ret);
/* flush any ops our tree modifications queued up */
finish_current_insert(trans, extent_root);
del_pending_extents(trans, extent_root);
out:
btrfs_free_path(path);
return ret;
}
/*
 * Public wrapper for __btrfs_update_extent_ref() that handles locking.
 *
 * Here the reference keeps the same root and generation, only the parent
 * block changes, so ref_root/ref_generation are passed for both the
 * "orig" and "new" arguments of the helper.  Tree-log metadata refs
 * (ref_root == BTRFS_TREE_LOG_OBJECTID with a below-first-free owner) are
 * not tracked in the extent tree and are skipped.
 */
int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
u64 orig_parent, u64 parent,
u64 ref_root, u64 ref_generation,
u64 owner_objectid, u64 owner_offset)
{
int ret;
if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
return 0;
maybe_lock_mutex(root);
ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
parent, ref_root, ref_root,
ref_generation, ref_generation,
owner_objectid, owner_offset);
maybe_unlock_mutex(root);
return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
u64 orig_parent, u64 parent,
u64 orig_root, u64 ref_root,
u64 orig_generation, u64 ref_generation,
u64 owner_objectid, u64 owner_offset)
struct extent_buffer *l;
if (!path)
return -ENOMEM;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = (u64)-1;
ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
if (ret < 0)
return ret;
BUG_ON(ret == 0 || path->slots[0] == 0);
path->slots[0]--;
l = path->nodes[0];
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
BUG_ON(key.objectid != bytenr);
BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(l, item);
btrfs_set_extent_refs(l, item, refs + 1);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_release_path(root->fs_info->extent_root, path);
ret = insert_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, parent,
ref_root, ref_generation,
owner_objectid, owner_offset);
BUG_ON(ret);
finish_current_insert(trans, root->fs_info->extent_root);
del_pending_extents(trans, root->fs_info->extent_root);
btrfs_free_path(path);
/*
 * Public wrapper for __btrfs_inc_extent_ref() that handles locking.
 *
 * Adds one reference to the extent at @bytenr.  The zeros passed for
 * orig_parent/orig_root/orig_generation mean there is no previous backref
 * being replaced.  Tree-log metadata refs are skipped, matching
 * btrfs_update_extent_ref().
 */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 ref_root, u64 ref_generation,
			 u64 owner_objectid, u64 owner_offset)
{
	int ret;	/* was missing along with the function braces */

	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	maybe_lock_mutex(root);
	ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
				     0, ref_root, 0, ref_generation,
				     owner_objectid, owner_offset);
	maybe_unlock_mutex(root);
	return ret;	/* was missing: result was never returned */
}
/*
 * Flush the deferred extent-tree work: apply queued inserts first, then
 * queued deletes, against the extent root.  Always returns 0.
 */
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;

	finish_current_insert(trans, extent_root);
	del_pending_extents(trans, extent_root);
	return 0;
}
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
u64 num_bytes, u32 *refs)
struct extent_buffer *l;
WARN_ON(num_bytes < root->sectorsize);
key.objectid = bytenr;
key.offset = num_bytes;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
if (ret < 0)
goto out;
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
printk("failed to find block number %Lu\n", bytenr);
}
l = path->nodes[0];
item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
*refs = btrfs_extent_refs(l, item);
out:
static int get_reference_status(struct btrfs_root *root, u64 bytenr,
u64 parent_gen, u64 ref_objectid,
u64 *min_generation, u32 *ref_count)
{
struct btrfs_root *extent_root = root->fs_info->extent_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_extent_ref *ref_item;
struct btrfs_key key;
struct btrfs_key found_key;
u64 root_objectid = root->root_key.objectid;
path = btrfs_alloc_path();
mutex_lock(&root->fs_info->alloc_mutex);
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
BUG_ON(ret == 0);
if (ret < 0 || path->slots[0] == 0)
goto out;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != bytenr ||
found_key.type != BTRFS_EXTENT_ITEM_KEY) {
*ref_count = 0;
*min_generation = (u64)-1;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(extent_root, path);
if (ret == 0)
continue;
break;
}
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != bytenr)
break;
if (found_key.type != BTRFS_EXTENT_REF_KEY) {
path->slots[0]++;
continue;
}
ref_item = btrfs_item_ptr(leaf, path->slots[0],