    /*
     * Copyright (C) 2007 Oracle.  All rights reserved.
     *
     * This program is free software; you can redistribute it and/or
     * modify it under the terms of the GNU General Public
     * License v2 as published by the Free Software Foundation.
     *
     * This program is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     * General Public License for more details.
     *
     * You should have received a copy of the GNU General Public
     * License along with this program; if not, write to the
     * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
     * Boston, MA 021110-1307, USA.
     */
    
    
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/aio.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "volumes.h"
#include "compression.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
    
struct btrfs_iget_args {
	struct btrfs_key *location;
	struct btrfs_root *root;
};
    
    
    static const struct inode_operations btrfs_dir_inode_operations;
    static const struct inode_operations btrfs_symlink_inode_operations;
    static const struct inode_operations btrfs_dir_ro_inode_operations;
    static const struct inode_operations btrfs_special_inode_operations;
    static const struct inode_operations btrfs_file_inode_operations;
    
    static const struct address_space_operations btrfs_aops;
    static const struct address_space_operations btrfs_symlink_aops;
    
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

    #define S_SHIFT 12
    static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
    	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
    	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
    	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
    	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
    	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
    	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
    	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
    };
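
/*
 * Illustrative sketch (the helper name here is hypothetical): the table
 * above is indexed by the high mode bits, so a lookup amounts to
 *
 *	static inline u8 btrfs_type_from_mode(umode_t mode)
 *	{
 *		return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 *	}
 *
 * e.g. S_IFREG is 0100000 octal, and 0100000 >> S_SHIFT is 8, which maps
 * to BTRFS_FT_REG_FILE.
 */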
    
    
    static int btrfs_setsize(struct inode *inode, struct iattr *attr);
    
    static int btrfs_truncate(struct inode *inode);
    
    static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
    
    static noinline int cow_file_range(struct inode *inode,
    				   struct page *locked_page,
    				   u64 start, u64 end, int *page_started,
    				   unsigned long *nr_written, int unlock);
    
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, u64 ram_bytes,
					   int type);
    
    static int btrfs_dirty_inode(struct inode *inode);
    
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode,  struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}
    
    
    /*
     * this does all the hard work for inserting an inline extent into
     * the btree.  The caller should have done a btrfs_drop_extents so that
     * no overlapping inline items exist in the btree
     */
    
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
    {
    	struct extent_buffer *leaf;
    	struct page *page = NULL;
    	char *kaddr;
    	unsigned long ptr;
    	struct btrfs_file_extent_item *ei;
    	int err = 0;
    	int ret;
    	size_t cur_size = size;
    	unsigned long offset;
    
    
	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);
    
    	if (!extent_inserted) {
    		struct btrfs_key key;
    		size_t datasize;
    
    		key.objectid = btrfs_ino(inode);
    		key.offset = start;
    		btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
    
    		datasize = btrfs_file_extent_calc_inline_size(cur_size);
    		path->leave_spinning = 1;
    		ret = btrfs_insert_empty_item(trans, root, path, &key,
    					      datasize);
    		if (ret) {
    			err = ret;
    			goto fail;
    		}
    
    	}
    	leaf = path->nodes[0];
    	ei = btrfs_item_ptr(leaf, path->slots[0],
    			    struct btrfs_file_extent_item);
    	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
    	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
    	btrfs_set_file_extent_encryption(leaf, ei, 0);
    	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
    	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
    	ptr = btrfs_file_extent_inline_start(ei);
    
    
	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
    	btrfs_mark_buffer_dirty(leaf);
    
    	/*
    	 * we're an inline extent, so nobody can
    	 * extend the file past i_size without locking
    	 * a page we already have locked.
    	 *
    	 * We must do any isize and inode updates
    	 * before we unlock the pages.  Otherwise we
    	 * could end up racing with unlink.
    	 */
    
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	return err;
    }
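
/*
 * Illustrative caller sequence for the helper above (a condensed sketch of
 * what cow_file_range_inline() below actually does): drop any overlapping
 * extents first, then insert the inline item, e.g.
 *
 *	ret = __btrfs_drop_extents(trans, root, inode, path, start,
 *				   aligned_end, NULL, 1, 1,
 *				   extent_item_size, &extent_inserted);
 *	if (!ret)
 *		ret = insert_inline_extent(trans, path, extent_inserted,
 *					   root, inode, start, inline_len,
 *					   compressed_size, compress_type,
 *					   compressed_pages);
 */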
    
    
    /*
     * conditionally insert an inline extent into the file.  This
     * does the checks required to make sure the data is small enough
     * to fit as an inline extent.
     */
    
    static noinline int cow_file_range_inline(struct btrfs_root *root,
    					  struct inode *inode, u64 start,
    					  u64 end, size_t compressed_size,
    					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_trans_handle *trans;
    
    	u64 isize = i_size_read(inode);
    	u64 actual_end = min(end + 1, isize);
    	u64 inline_len = actual_end - start;
    
    	u64 aligned_end = ALIGN(end, root->sectorsize);
    
    	u64 data_len = inline_len;
    	int ret;
    
    	struct btrfs_path *path;
    	int extent_inserted = 0;
    	u32 extent_item_size;
    
    
    	if (compressed_size)
    		data_len = compressed_size;
    
    	if (start > 0 ||
    
    	    actual_end >= PAGE_CACHE_SIZE ||
    
    	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
    	    (!compressed_size &&
    	    (actual_end & (root->sectorsize - 1)) == 0) ||
    	    end + 1 < isize ||
    	    data_len > root->fs_info->max_inline) {
    		return 1;
    	}
    
    
    	path = btrfs_alloc_path();
    	if (!path)
    		return -ENOMEM;
    
    
    	trans = btrfs_join_transaction(root);
    
    	if (IS_ERR(trans)) {
    		btrfs_free_path(path);
    
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
    
    
    	if (compressed_size && compressed_pages)
    		extent_item_size = btrfs_file_extent_calc_inline_size(
    		   compressed_size);
    	else
    		extent_item_size = btrfs_file_extent_calc_inline_size(
    		    inline_len);
    
    	ret = __btrfs_drop_extents(trans, root, inode, path,
    				   start, aligned_end, NULL,
    				   1, 1, extent_item_size, &extent_inserted);
    
    	if (ret) {
    		btrfs_abort_transaction(trans, root, ret);
    		goto out;
    	}
    
    
    	if (isize > actual_end)
    		inline_len = min_t(u64, isize, actual_end);
    
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
    
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
	btrfs_free_path(path);
	btrfs_end_transaction(trans, root);
	return ret;
}

    struct async_extent {
    	u64 start;
    	u64 ram_size;
    	u64 compressed_size;
    	struct page **pages;
    	unsigned long nr_pages;
    
    	int compress_type;
    
    	struct list_head list;
    };
    
    struct async_cow {
    	struct inode *inode;
    	struct btrfs_root *root;
    	struct page *locked_page;
    	u64 start;
    	u64 end;
    	struct list_head extents;
    	struct btrfs_work work;
    };
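
/*
 * How these two structs relate, in sketch form: compress_file_range() is
 * the producer, it packages each chunk it compresses (or decides not to
 * compress) as an async_extent and queues it on async_cow->extents via
 * add_async_extent().  submit_compressed_extents() is the consumer: it
 * pops the list in order and either writes the compressed pages or falls
 * back to cow_file_range() when ->pages is NULL.
 */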
    
    static noinline int add_async_extent(struct async_cow *cow,
    				     u64 start, u64 ram_size,
    				     u64 compressed_size,
    				     struct page **pages,
    
    				     unsigned long nr_pages,
    				     int compress_type)
    
    {
    	struct async_extent *async_extent;
    
    	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
    
    	BUG_ON(!async_extent); /* -ENOMEM */
    
    	async_extent->start = start;
    	async_extent->ram_size = ram_size;
    	async_extent->compressed_size = compressed_size;
    	async_extent->pages = pages;
    	async_extent->nr_pages = nr_pages;
    
    	async_extent->compress_type = compress_type;
    
    	list_add_tail(&async_extent->list, &cow->extents);
    	return 0;
    }
    
    
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
    static noinline int compress_file_range(struct inode *inode,
    					struct page *locked_page,
    					u64 start, u64 end,
    					struct async_cow *async_cow,
    					int *num_added)
    
    {
    	struct btrfs_root *root = BTRFS_I(inode)->root;
    
    	u64 num_bytes;
    	u64 blocksize = root->sectorsize;
    
    	u64 actual_end;
    
    	u64 isize = i_size_read(inode);
    
    	int ret = 0;
    
    	struct page **pages = NULL;
    	unsigned long nr_pages;
    	unsigned long nr_pages_ret = 0;
    	unsigned long total_compressed = 0;
    	unsigned long total_in = 0;
    	unsigned long max_compressed = 128 * 1024;
    
    	unsigned long max_uncompressed = 128 * 1024;
    
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;
    
    	/* if this is a small write inside eof, kick off a defrag */
    	if ((end - start + 1) < 16 * 1024 &&
    	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
    
    		btrfs_add_inode_defrag(NULL, inode);
    
    
    	actual_end = min_t(u64, isize, end + 1);
    
    again:
    	will_compress = 0;
    	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
    	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
    
    	/*
    	 * we don't want to send crud past the end of i_size through
    	 * compression, that's just a waste of CPU time.  So, if the
    	 * end of the file is before the start of our current
    	 * requested range of bytes, we bail out to the uncompressed
    	 * cleanup code that can deal with all of this.
    	 *
    	 * It isn't really the fastest way to fix things, but this is a
    	 * very uncommon corner.
    	 */
    	if (actual_end <= start)
    		goto cleanup_and_bail_uncompressed;
    
    
    	total_compressed = actual_end - start;
    
    	/* we want to make sure that amount of ram required to uncompress
    	 * an extent is reasonable, so we limit the total size in ram
    
    	 * of a compressed extent to 128k.  This is a crucial number
    	 * because it also controls how easily we can spread reads across
    	 * cpus for decompression.
    	 *
    	 * We also want to make sure the amount of IO required to do
    	 * a random read is reasonably small, so we limit the size of
    	 * a compressed extent to 128k.
    
    	 */
    	total_compressed = min(total_compressed, max_uncompressed);
    
    	num_bytes = ALIGN(end - start + 1, blocksize);
    
    	num_bytes = max(blocksize,  num_bytes);
    
    	total_in = 0;
    	ret = 0;
    
    	/*
    	 * we do compression for mount -o compress and when the
    	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
    
    		WARN_ON(pages);
    
    		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
    
    		if (!pages) {
    			/* just bail out to the uncompressed code */
    			goto cont;
    		}
    
    		if (BTRFS_I(inode)->force_compress)
    			compress_type = BTRFS_I(inode)->force_compress;
    
    
    		/*
    		 * we need to call clear_page_dirty_for_io on each
    		 * page in the range.  Otherwise applications with the file
    		 * mmap'd can wander in and change the page contents while
    		 * we are compressing them.
    		 *
    		 * If the compression fails for any reason, we set the pages
    		 * dirty again later on.
    		 */
    		extent_range_clear_dirty_for_io(inode, start, end);
    		redirty = 1;
    
    		ret = btrfs_compress_pages(compress_type,
    					   inode->i_mapping, start,
    					   total_compressed, pages,
    					   nr_pages, &nr_pages_ret,
    					   &total_in,
    					   &total_compressed,
    					   max_compressed);
    
    
    		if (!ret) {
    			unsigned long offset = total_compressed &
    				(PAGE_CACHE_SIZE - 1);
    			struct page *page = pages[nr_pages_ret - 1];
    			char *kaddr;
    
    			/* zero the tail end of the last page, we might be
    			 * sending it down to disk
    			 */
    			if (offset) {
    
    				kaddr = kmap_atomic(page);
    
    				memset(kaddr + offset, 0,
    				       PAGE_CACHE_SIZE - offset);
    
    				kunmap_atomic(kaddr);
    
    			}
    			will_compress = 1;
    		}
    	}
    
	if (start == 0) {
		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(root, inode, start, end,
						    0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(root, inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DEFRAG;
			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode, start, end, NULL,
						     clear_flags, PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages_out;
		}
	}
    
    	if (will_compress) {
    		/*
    		 * we aren't doing an inline extent round the compressed size
    		 * up to a block size boundary so the allocator does sane
    		 * things
    		 */
    
    		total_compressed = ALIGN(total_compressed, blocksize);
    
    
    		/*
    		 * one last check to make sure the compression is really a
    		 * win, compare the page count read with the blocks on disk
    		 */
    
    		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
    
    		if (total_compressed >= total_in) {
    			will_compress = 0;
    		} else {
    			num_bytes = total_in;
    		}
    	}
    	if (!will_compress && pages) {
    		/*
    		 * the compression code ran but failed to make things smaller,
    		 * free any pages it allocated and our page pointer array
    		 */
    		for (i = 0; i < nr_pages_ret; i++) {
    
    			WARN_ON(pages[i]->mapping);
    
    			page_cache_release(pages[i]);
    		}
    		kfree(pages);
    		pages = NULL;
    		total_compressed = 0;
    		nr_pages_ret = 0;
    
    		/* flag the file so we don't compress in the future */
    
    		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
    		    !(BTRFS_I(inode)->force_compress)) {
    
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
    		*num_added += 1;
    
    		/* the async work queues will take care of doing actual
    		 * allocation on disk for these compressed pages,
    		 * and will submit them to the elevator.
    		 */
    		add_async_extent(async_cow, start, num_bytes,
    
    				 total_compressed, pages, nr_pages_ret,
    				 compress_type);
    
    		if (start + num_bytes < end) {
    
    			start += num_bytes;
    			pages = NULL;
    			cond_resched();
    			goto again;
    		}
    	} else {
    
    cleanup_and_bail_uncompressed:
    
    		/*
    		 * No compression, but we still need to write the pages in
    		 * the file we've been given so far.  redirty the locked
    		 * page if it corresponds to our extent and set things up
    		 * for the async work queue to run cow_file_range to do
    		 * the normal delalloc dance
    		 */
    		if (page_offset(locked_page) >= start &&
    		    page_offset(locked_page) <= end) {
    			__set_page_dirty_nobuffers(locked_page);
    			/* unlocked later on in the async handlers */
    		}
    
    		if (redirty)
    			extent_range_redirty_for_io(inode, start, end);
    
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

    free_pages_out:
    	for (i = 0; i < nr_pages_ret; i++) {
    		WARN_ON(pages[i]->mapping);
    		page_cache_release(pages[i]);
    	}
    
    	kfree(pages);
    
    
    	goto out;
    }
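
/*
 * A worked example of the chunking above, assuming 4K pages and a
 * compressible 1MiB delalloc range starting at offset 0: nr_pages is
 * clamped to 128K / 4K = 32 pages and total_compressed to
 * max_uncompressed (128K), so each pass through the "again" loop
 * compresses at most 128K of input, queues it with add_async_extent(),
 * advances start by num_bytes (total_in), and loops, producing roughly
 * eight compressed async_extents for the whole range.  If compression is
 * not worthwhile, the remaining range is queued as a single uncompressed
 * async_extent instead.
 */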
    
    /*
     * phase two of compressed writeback.  This is the ordered portion
     * of the code, which only gets called in the order the work was
     * queued.  We walk all the async extents created by compress_file_range
     * and send them down to the disk.
     */
    static noinline int submit_compressed_extents(struct inode *inode,
    					      struct async_cow *async_cow)
    {
    	struct async_extent *async_extent;
    	u64 alloc_hint = 0;
    	struct btrfs_key ins;
    	struct extent_map *em;
    	struct btrfs_root *root = BTRFS_I(inode)->root;
    	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

again:
	while (!list_empty(&async_cow->extents)) {
    
    		async_extent = list_entry(async_cow->extents.next,
    					  struct async_extent, list);
    		list_del(&async_extent->list);
    
		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
    		if (!async_extent->pages) {
    			int page_started = 0;
    			unsigned long nr_written = 0;
    
			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1);
    
    
    			/* allocate blocks */
    
    			ret = cow_file_range(inode, async_cow->locked_page,
    					     async_extent->start,
    					     async_extent->start +
    					     async_extent->ram_size - 1,
    					     &page_started, &nr_written, 0);
    
    			/*
    			 * if page_started, cow_file_range inserted an
    			 * inline extent and took care of all the unlocking
    			 * and IO for us.  Otherwise, we need to submit
    			 * all those pages down to the drive.
    			 */
    
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
    
    			else if (ret)
    				unlock_page(async_cow->locked_page);
    
    			kfree(async_extent);
    			cond_resched();
    			continue;
    		}
    
		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);
    
		ret = btrfs_reserve_extent(root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
		if (ret) {
			int i;

			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);
				goto retry;
			}
			goto out_free;
		}
    
    		/*
    		 * here we're doing allocation and writeback of the
    		 * compressed pages
    		 */
    		btrfs_drop_extent_cache(inode, async_extent->start,
    					async_extent->start +
    					async_extent->ram_size - 1, 0);
    
    
		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_free_reserve;
		}
    
    		em->start = async_extent->start;
    		em->len = async_extent->ram_size;
    
    		em->orig_start = em->start;
    
    		em->mod_start = em->start;
    		em->mod_len = em->len;
    
    		em->block_start = ins.objectid;
    		em->block_len = ins.offset;
    
    		em->orig_block_len = ins.offset;
    
    		em->ram_bytes = async_extent->ram_size;
    
    		em->bdev = root->fs_info->fs_devices->latest_bdev;
    
    		em->compress_type = async_extent->compress_type;
    
    		set_bit(EXTENT_FLAG_PINNED, &em->flags);
    		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
    
    		while (1) {
    
    			write_lock(&em_tree->lock);
    
    			ret = add_extent_mapping(em_tree, em, 1);
    
    			write_unlock(&em_tree->lock);
    
    			if (ret != -EEXIST) {
    				free_extent_map(em);
    				break;
    			}
    			btrfs_drop_extent_cache(inode, async_extent->start,
    						async_extent->start +
    						async_extent->ram_size - 1, 0);
    		}
    
    
    		if (ret)
    			goto out_free_reserve;
    
    
    		ret = btrfs_add_ordered_extent_compress(inode,
    						async_extent->start,
    						ins.objectid,
    						async_extent->ram_size,
    						ins.offset,
    						BTRFS_ORDERED_COMPRESSED,
    						async_extent->compress_type);
    
    		if (ret)
    			goto out_free_reserve;
    
    
    		/*
    		 * clear dirty, set writeback and unlock the pages.
    		 */
    
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
    
out_free_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	kfree(async_extent);
	goto again;
}

    static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
    				      u64 num_bytes)
    {
    	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
    	struct extent_map *em;
    	u64 alloc_hint = 0;
    
    	read_lock(&em_tree->lock);
    	em = search_extent_mapping(em_tree, start, num_bytes);
    	if (em) {
    		/*
    		 * if block start isn't an actual block number then find the
    		 * first block in this inode and use that as a hint.  If that
    		 * block is also bogus then just don't worry about it.
    		 */
    		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
    			free_extent_map(em);
    			em = search_extent_mapping(em_tree, 0, 0);
    			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
    				alloc_hint = em->block_start;
    			if (em)
    				free_extent_map(em);
    		} else {
    			alloc_hint = em->block_start;
    			free_extent_map(em);
    		}
    	}
    	read_unlock(&em_tree->lock);
    
    	return alloc_hint;
    }
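
/*
 * The hint returned here is purely advisory: cow_file_range() below feeds
 * it to btrfs_reserve_extent() as the hint byte so that new data extents
 * land near the inode's existing extents when possible, e.g.
 *
 *	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
 *	ret = btrfs_reserve_extent(root, cur_alloc_size, root->sectorsize,
 *				   0, alloc_hint, &ins, 1);
 *
 * (a sketch of the call sequence used in cow_file_range()).
 */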
    
    
    /*
     * when extent_io.c finds a delayed allocation range in the file,
     * the call backs end up in this code.  The basic idea is to
     * allocate extents on disk for the range, and create ordered data structs
     * in ram to track those extents.
     *
     * locked_page is the page that writepage had locked already.  We use
     * it to make sure we don't do extra locks or unlocks.
     *
     * *page_started is set to one if we unlock locked_page and do everything
     * required to start IO on it.  It may be clean and already done with
     * IO when we return.
     */
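/*
 * A sketch of the caller-side contract described above (this is how
 * submit_compressed_extents() uses it for the uncompressed fallback):
 *
 *	ret = cow_file_range(inode, locked_page, start, end,
 *			     &page_started, &nr_written, 0);
 *	if (!page_started && !ret)
 *		extent_write_locked_range(io_tree, inode, start, end,
 *					  btrfs_get_extent, WB_SYNC_ALL);
 *	else if (ret)
 *		unlock_page(locked_page);
 */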
    
    static noinline int cow_file_range(struct inode *inode,
    				   struct page *locked_page,
    				   u64 start, u64 end, int *page_started,
    				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
    
    	u64 alloc_hint = 0;
    	u64 num_bytes;
    	unsigned long ram_size;
    	u64 disk_num_bytes;
    	u64 cur_alloc_size;
    	u64 blocksize = root->sectorsize;
    	struct btrfs_key ins;
    	struct extent_map *em;
    	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
    	int ret = 0;
    
    
    	if (btrfs_is_free_space_inode(inode)) {
    		WARN_ON_ONCE(1);
    		return -EINVAL;
    	}
    
    	num_bytes = ALIGN(end - start + 1, blocksize);
    
    	num_bytes = max(blocksize,  num_bytes);
    	disk_num_bytes = num_bytes;
    
    
    	/* if this is a small write inside eof, kick off defrag */
    
    	if (num_bytes < 64 * 1024 &&
    	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
    
    		btrfs_add_inode_defrag(NULL, inode);
    
    	if (start == 0) {
    		/* lets try to make an inline extent */
    
		ret = cow_file_range_inline(root, inode, start, end, 0, 0,
					    NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));
    
    	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
    
    	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
    
    
	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0)
			goto out_unlock;

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_reserve;
		}
    
    		em->start = start;
    
    		em->orig_start = em->start;
    
    		ram_size = ins.offset;
    		em->len = ins.offset;
    
    		em->mod_start = em->start;
    		em->mod_len = em->len;
    
    		em->block_start = ins.objectid;
    
    		em->block_len = ins.offset;
    
    		em->orig_block_len = ins.offset;
    
    		em->ram_bytes = ram_size;
    
    		em->bdev = root->fs_info->fs_devices->latest_bdev;
    
    		set_bit(EXTENT_FLAG_PINNED, &em->flags);
    
    		while (1) {
    
    			write_lock(&em_tree->lock);
    
    			ret = add_extent_mapping(em_tree, em, 1);
    
    			write_unlock(&em_tree->lock);
    
    			if (ret != -EEXIST) {
    				free_extent_map(em);
    				break;
    			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}
		if (ret)
    			goto out_reserve;
    
    		cur_alloc_size = ins.offset;
    
    		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
    
    					       ram_size, cur_alloc_size, 0);
    
    		if (ret)
    			goto out_reserve;
    
    		if (root->root_key.objectid ==
    		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret)
				goto out_reserve;
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

    		/* we're not doing compressed IO, don't unlock the first
    		 * page (which the caller expects to stay locked), don't
    		 * clear any dirty bits and don't set any writeback bits
    
    		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
    		op |= PAGE_SET_PRIVATE2;
    
    		extent_clear_unlock_delalloc(inode, start,
    					     start + ram_size - 1, locked_page,
    					     EXTENT_LOCKED | EXTENT_DELALLOC,
    					     op);
    
    		disk_num_bytes -= cur_alloc_size;
    
    		num_bytes -= cur_alloc_size;
    		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}

    /*
     * work queue call back to started compression on a file and pages