/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
     * Copyright (C) 2008, 2009
     * Boaz Harrosh <bharrosh@panasas.com>
     *
     * Copyrights for code taken from ext2:
     *     Copyright (C) 1992, 1993, 1994, 1995
     *     Remy Card (card@masi.ibp.fr)
     *     Laboratoire MASI - Institut Blaise Pascal
     *     Universite Pierre et Marie Curie (Paris VI)
     *     from
     *     linux/fs/minix/inode.c
     *     Copyright (C) 1991, 1992  Linus Torvalds
     *
     * This file is part of exofs.
     *
     * exofs is free software; you can redistribute it and/or modify
     * it under the terms of the GNU General Public License as published by
     * the Free Software Foundation.  Since it is based on ext2, and the only
     * valid version of GPL for the Linux kernel is version 2, the only valid
     * version of GPL for exofs is version 2.
     *
     * exofs is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     * GNU General Public License for more details.
     *
     * You should have received a copy of the GNU General Public License
     * along with exofs; if not, write to the Free Software
     * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
     */
    
    
    
#include <linux/slab.h>	/* kmalloc()/kfree() used by the page collectors */

#include "exofs.h"
    
    
    #define EXOFS_DBGMSG2(M...) do {} while (0)
    
    
    enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
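/* One kmalloc'd page holds exactly PAGE_SIZE / sizeof(struct page *) page
 * pointers, so a pages[] array of at most MAX_PAGES_KMALLOC entries fits in
 * a single page of slab memory.
 */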
    
unsigned exofs_max_io_pages(struct ore_layout *layout,
			    unsigned expected_pages)
{
	unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

	/* TODO: easily support bio chaining */
	pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
	return pages;
}
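
/*
 * A page_collect gathers a run of contiguous pages belonging to one inode.
 * readpage_strip()/writepage_strip() append pages to it, and read_exec()/
 * write_exec() turn the collection into a single ORE io_state and submit it
 * to the object store.
 */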
    
    struct page_collect {
    	struct exofs_sb_info *sbi;
    	struct inode *inode;
    	unsigned expected_pages;
    
    	struct ore_io_state *ios;
    
    	struct page **pages;
    	unsigned alloc_pages;
    
    	unsigned nr_pages;
    	unsigned long length;
    	loff_t pg_first; /* keep 64bit also in 32-arches */
    
	bool read_4_write; /* This means two things: that the read is
			    * synchronous, and that the pages should not be
			    * unlocked on completion.
			    */
    
    	struct page *that_locked_page;
    
    };
    
    static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
    
    		       struct inode *inode)
    
    {
    	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
    
    	pcol->sbi = sbi;
    	pcol->inode = inode;
    	pcol->expected_pages = expected_pages;
    
    
    	pcol->ios = NULL;
    
    	pcol->pages = NULL;
    	pcol->alloc_pages = 0;
    
    	pcol->nr_pages = 0;
    	pcol->length = 0;
    	pcol->pg_first = -1;
    
    	pcol->read_4_write = false;
    
    	pcol->that_locked_page = NULL;
    
    }
    
    static void _pcol_reset(struct page_collect *pcol)
    {
    	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);
    
    
    	pcol->pages = NULL;
    	pcol->alloc_pages = 0;
    
    	pcol->nr_pages = 0;
    	pcol->length = 0;
    	pcol->pg_first = -1;
    
    	pcol->ios = NULL;
    
    	pcol->that_locked_page = NULL;
    
    
    	/* this is probably the end of the loop but in writes
    	 * it might not end here. don't be left with nothing
    	 */
    	if (!pcol->expected_pages)
    
    		pcol->expected_pages = MAX_PAGES_KMALLOC;
    
    }
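
/* Allocate the pages[] array for up to expected_pages entries. On kmalloc
 * failure the requested count is halved and retried, so memory pressure
 * degrades us to smaller I/Os instead of an outright failure.
 */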
    
    static int pcol_try_alloc(struct page_collect *pcol)
{
	unsigned pages;

	/* TODO: easily support bio chaining */
	pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
    
    	for (; pages; pages >>= 1) {
    
    		pcol->pages = kmalloc(pages * sizeof(struct page *),
    				      GFP_KERNEL);
    		if (likely(pcol->pages)) {
    			pcol->alloc_pages = pages;
    
			return 0;
		}
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
    	return -ENOMEM;
    }
    
    static void pcol_free(struct page_collect *pcol)
    {
    
    	kfree(pcol->pages);
    	pcol->pages = NULL;
    
	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
    
    		pcol->ios = NULL;
    	}
    
    }
    
    static int pcol_add_page(struct page_collect *pcol, struct page *page,
    			 unsigned len)
    {
    
    	if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
    
    		return -ENOMEM;
    
    
    	pcol->pages[pcol->nr_pages++] = page;
    
    	pcol->length += len;
    	return 0;
    }
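
/* Sentinel for pages that were never part of a submitted I/O; any non-zero,
 * non-errno value works here. update_read_page() maps it back to success and
 * update_write_page() skips the writeback completion for such pages.
 */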
    
    
    enum {PAGE_WAS_NOT_IN_IO = 17};
    
    static int update_read_page(struct page *page, int ret)
    {
    
    	switch (ret) {
    	case 0:
    
    		/* Everything is OK */
    		SetPageUptodate(page);
    		if (PageError(page))
    			ClearPageError(page);
    
    		break;
    	case -EFAULT:
    
    		/* In this case we were trying to read something that wasn't on
    		 * disk yet - return a page full of zeroes.  This should be OK,
    		 * because the object should be empty (if there was a write
    		 * before this read, the read would be waiting with the page
    		 * locked */
    		clear_highpage(page);
    
    		SetPageUptodate(page);
    		if (PageError(page))
    			ClearPageError(page);
    		EXOFS_DBGMSG("recovered read error\n");
    
    		/* fall through */
    	case PAGE_WAS_NOT_IN_IO:
    		ret = 0; /* recovered error */
    		break;
    	default:
    
		SetPageError(page);
	}

	return ret;
    }
    
    static void update_write_page(struct page *page, int ret)
    {
    
    	if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
    		return; /* don't pass start don't collect $200 */
    
    
    	if (ret) {
    		mapping_set_error(page->mapping, ret);
    		SetPageError(page);
    	}
    	end_page_writeback(page);
    }
    
    /* Called at the end of reads, to optionally unlock pages and update their
     * status.
     */
    
    static int __readpages_done(struct page_collect *pcol)
    
    {
    	int i;
    	u64 good_bytes;
    	u64 length = 0;
    
    	int ret = ore_check_io(pcol->ios, NULL);
    
    	if (likely(!ret)) {
    
    		good_bytes = pcol->length;
    
    		ret = PAGE_WAS_NOT_IN_IO;
    	} else {
    
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);
    
    
    	for (i = 0; i < pcol->nr_pages; i++) {
    		struct page *page = pcol->pages[i];
    
    		struct inode *inode = page->mapping->host;
    		int page_stat;
    
    		if (inode != pcol->inode)
    			continue; /* osd might add more pages at end */
    
    		if (likely(length < good_bytes))
    			page_stat = 0;
    		else
    			page_stat = ret;
    
    
    		EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
    
    			  inode->i_ino, page->index,
    			  page_stat ? "bad_bytes" : "good_bytes");
    
    		ret = update_read_page(page, page_stat);
    
    		if (!pcol->read_4_write)
    
    			unlock_page(page);
    
    	}
    
    	pcol_free(pcol);
    
    	EXOFS_DBGMSG2("readpages_done END\n");
    
    	return ret;
    }
    
    /* callback of async reads */
    
    static void readpages_done(struct ore_io_state *ios, void *p)
    
    {
    	struct page_collect *pcol = p;
    
    
    	__readpages_done(pcol);
    
    	atomic_dec(&pcol->sbi->s_curr_pending);
    
    }
    
    static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
    {
    	int i;
    
    
    	for (i = 0; i < pcol->nr_pages; i++) {
    		struct page *page = pcol->pages[i];
    
    
    		if (rw == READ)
    			update_read_page(page, ret);
    		else
    			update_write_page(page, ret);
    
    		unlock_page(page);
    	}
    }
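
/* The ORE may trim an io_state at a stripe/group boundary, leaving some of
 * the collected pages outside this I/O. Hand the leftover pages to the next
 * page_collect so the caller can submit them in a follow-up request.
 */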
    
    
    static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
    	struct page_collect *pcol_src, struct page_collect *pcol)
    {
    	/* length was wrong or offset was not page aligned */
    	BUG_ON(pcol_src->nr_pages < ios->nr_pages);
    
    	if (pcol_src->nr_pages > ios->nr_pages) {
    		struct page **src_page;
    		unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
    		unsigned long len_less = pcol_src->length - ios->length;
    		unsigned i;
    		int ret;
    
    		/* This IO was trimmed */
    		pcol_src->nr_pages = ios->nr_pages;
    		pcol_src->length = ios->length;
    
    		/* Left over pages are passed to the next io */
    		pcol->expected_pages += pages_less;
    		pcol->nr_pages = pages_less;
    		pcol->length = len_less;
    		src_page = pcol_src->pages + pcol_src->nr_pages;
    		pcol->pg_first = (*src_page)->index;
    
    		ret = pcol_try_alloc(pcol);
    		if (unlikely(ret))
    			return ret;
    
    		for (i = 0; i < pages_less; ++i)
    			pcol->pages[i] = *src_page++;
    
    		EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
    			"pages_less=0x%x expected_pages=0x%x "
    			"next_offset=0x%llx next_len=0x%lx\n",
    			pcol_src->nr_pages, pages_less, pcol->expected_pages,
    			pcol->pg_first * PAGE_SIZE, pcol->length);
    	}
    	return 0;
    }
    
    
    static int read_exec(struct page_collect *pcol)
    
    {
    	struct exofs_i_info *oi = exofs_i(pcol->inode);
    
    	struct ore_io_state *ios;
    
    	struct page_collect *pcol_copy = NULL;
    	int ret;
    
    
    	if (!pcol->ios) {
    
    		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
    
    					     pcol->pg_first << PAGE_CACHE_SHIFT,
    					     pcol->length, &pcol->ios);
    
    		if (ret)
    			return ret;
    	}
    
    	ios = pcol->ios;
    
    	if (pcol->read_4_write) {
    
    		ore_read(pcol->ios);
    
    		return __readpages_done(pcol);
    
    	}
    
    	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
    	if (!pcol_copy) {
    		ret = -ENOMEM;
    		goto err;
    	}
    
    	*pcol_copy = *pcol;
    
    	ios->done = readpages_done;
    	ios->private = pcol_copy;
    
    
    	/* pages ownership was passed to pcol_copy */
    	_pcol_reset(pcol);
    
    	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
    	if (unlikely(ret))
    		goto err;
    
    	EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
    		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));
    
    
    	ret = ore_read(ios);
    
    	if (unlikely(ret))
    		goto err;
    
    	atomic_inc(&pcol->sbi->s_curr_pending);
    
    	return 0;
    
    err:
    
    	if (!pcol->read_4_write)
    
    		_unlock_pcol_pages(pcol, ret, READ);
    
    	kfree(pcol_copy);
    	return ret;
    }
    
/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous
 * segment and start a new collection. Eventually the caller must submit the
 * last segment if present.
 */
    static int readpage_strip(void *data, struct page *page)
    {
    	struct page_collect *pcol = data;
    	struct inode *inode = pcol->inode;
    	struct exofs_i_info *oi = exofs_i(inode);
    	loff_t i_size = i_size_read(inode);
    	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
    	size_t len;
    	int ret;
    
    	/* FIXME: Just for debugging, will be removed */
    	if (PageUptodate(page))
    		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
    			  page->index);
    
    
    	pcol->that_locked_page = page;
    
    
    	if (page->index < end_index)
    		len = PAGE_CACHE_SIZE;
    	else if (page->index == end_index)
    		len = i_size & ~PAGE_CACHE_MASK;
    	else
    		len = 0;
    
    	if (!len || !obj_created(oi)) {
    		/* this will be out of bounds, or doesn't exist yet.
    		 * Current page is cleared and the request is split
    		 */
    		clear_highpage(page);
    
    		SetPageUptodate(page);
    		if (PageError(page))
    			ClearPageError(page);
    
    
    		if (!pcol->read_4_write)
    			unlock_page(page);
    
    		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
    			     "read_4_write=%d index=0x%lx end_index=0x%lx "
    			     "splitting\n", inode->i_ino, len,
    			     pcol->read_4_write, page->index, end_index);
    
    		return read_exec(pcol);
    
    	}
    
    try_again:
    
    	if (unlikely(pcol->pg_first == -1)) {
    		pcol->pg_first = page->index;
    	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
    		   page->index)) {
    		/* Discontinuity detected, split the request */
    
    		ret = read_exec(pcol);
    
    		if (unlikely(ret))
    			goto fail;
    		goto try_again;
    	}
    
    
	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
    		if (unlikely(ret))
    			goto fail;
    	}
    
    	if (len != PAGE_CACHE_SIZE)
    		zero_user(page, len, PAGE_CACHE_SIZE - len);
    
    
    	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
    
    		     inode->i_ino, page->index, len);
    
    	ret = pcol_add_page(pcol, page, len);
    	if (ret) {
    
    		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
    
    			  "this_len=0x%zx nr_pages=%u length=0x%lx\n",
    			  page, len, pcol->nr_pages, pcol->length);
    
    		/* split the request, and start again with current page */
    
    		ret = read_exec(pcol);
    
    		if (unlikely(ret))
    			goto fail;
    
    		goto try_again;
    	}
    
    	return 0;
    
    fail:
    	/* SetPageError(page); ??? */
    	unlock_page(page);
    	return ret;
    }
    
    static int exofs_readpages(struct file *file, struct address_space *mapping,
    			   struct list_head *pages, unsigned nr_pages)
    {
    	struct page_collect pcol;
    	int ret;
    
    	_pcol_init(&pcol, nr_pages, mapping->host);
    
    	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
    	if (ret) {
    		EXOFS_ERR("read_cache_pages => %d\n", ret);
    		return ret;
    	}
    
    
    	ret = read_exec(&pcol);
    	if (unlikely(ret))
    		return ret;
    
    
	/* The second call submits any pages that were left over after the
	 * first I/O was trimmed (see _maybe_not_all_in_one_io()).
	 */
	return read_exec(&pcol);
}

    static int _readpage(struct page *page, bool read_4_write)
    
    {
    	struct page_collect pcol;
    	int ret;
    
    	_pcol_init(&pcol, 1, page->mapping->host);
    
    
    	pcol.read_4_write = read_4_write;
    
    	ret = readpage_strip(&pcol, page);
    	if (ret) {
    		EXOFS_ERR("_readpage => %d\n", ret);
    		return ret;
    	}
    
    
    	return read_exec(&pcol);
    
    }
    
    /*
     * We don't need the file
     */
    static int exofs_readpage(struct file *file, struct page *page)
    {
    	return _readpage(page, false);
    }
    
    
    /* Callback for osd_write. All writes are asynchronous */
    
    static void writepages_done(struct ore_io_state *ios, void *p)
    
    {
    	struct page_collect *pcol = p;
    	int i;
    	u64  good_bytes;
    	u64  length = 0;
    
    	int ret = ore_check_io(ios, NULL);
    
    
    	atomic_dec(&pcol->sbi->s_curr_pending);
    
    
    	if (likely(!ret)) {
    
    		good_bytes = pcol->length;
    
    		ret = PAGE_WAS_NOT_IN_IO;
    	} else {
    
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);
    
    
    	for (i = 0; i < pcol->nr_pages; i++) {
    		struct page *page = pcol->pages[i];
    
    		struct inode *inode = page->mapping->host;
    		int page_stat;
    
    		if (inode != pcol->inode)
    			continue; /* osd might add more pages to a bio */
    
    		if (likely(length < good_bytes))
    			page_stat = 0;
    		else
    			page_stat = ret;
    
    		update_write_page(page, page_stat);
    		unlock_page(page);
    
    		EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
    
    			     inode->i_ino, page->index, page_stat);
    
    
    	}
    
    	pcol_free(pcol);
    	kfree(pcol);
    
	EXOFS_DBGMSG2("writepages_done END\n");
}
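
/* read-4-write support: when a partial-stripe write needs the surrounding
 * old data, the ORE calls back here to pin the missing pages from the page
 * cache (creating them if absent) and to learn whether each one is already
 * uptodate. that_locked_page is served directly, since the caller already
 * holds it locked.
 */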
    
    static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
    {
    	struct page_collect *pcol = priv;
    	pgoff_t index = offset / PAGE_SIZE;
    
    	if (!pcol->that_locked_page ||
    	    (pcol->that_locked_page->index != index)) {
    		struct page *page = find_get_page(pcol->inode->i_mapping, index);
    
    		if (!page) {
    			page = find_or_create_page(pcol->inode->i_mapping,
    						   index, GFP_NOFS);
    			if (unlikely(!page)) {
    				EXOFS_DBGMSG("grab_cache_page Failed "
    					"index=0x%llx\n", _LLU(index));
    				return NULL;
    			}
    			unlock_page(page);
    		}
    		if (PageDirty(page) || PageWriteback(page))
    			*uptodate = true;
    		else
    			*uptodate = PageUptodate(page);
    		EXOFS_DBGMSG("index=0x%lx uptodate=%d\n", index, *uptodate);
    		return page;
    	} else {
    		EXOFS_DBGMSG("YES that_locked_page index=0x%lx\n",
    			     pcol->that_locked_page->index);
    		*uptodate = true;
    		return pcol->that_locked_page;
    	}
    }
    
    static void __r4w_put_page(void *priv, struct page *page)
    {
    	struct page_collect *pcol = priv;
    
    	if (pcol->that_locked_page != page) {
    		EXOFS_DBGMSG("index=0x%lx\n", page->index);
    		page_cache_release(page);
    		return;
    	}
    	EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
    }
    
    static const struct _ore_r4w_op _r4w_op = {
    	.get_page = &__r4w_get_page,
    	.put_page = &__r4w_put_page,
    };
    
    
    static int write_exec(struct page_collect *pcol)
    {
    	struct exofs_i_info *oi = exofs_i(pcol->inode);
    
    	struct ore_io_state *ios;
    
    	struct page_collect *pcol_copy = NULL;
    	int ret;
    
    
    	BUG_ON(pcol->ios);
    
    	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
    
    				 pcol->pg_first << PAGE_CACHE_SHIFT,
    				 pcol->length, &pcol->ios);
    	if (unlikely(ret))
    		goto err;
    
    
    	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
    	if (!pcol_copy) {
    
    		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
    
    		ret = -ENOMEM;
    		goto err;
    	}
    
    	*pcol_copy = *pcol;
    
    
    	ios->pages = pcol_copy->pages;
    
    	ios->done = writepages_done;
    
    	ios->r4w = &_r4w_op;
    
    	ios->private = pcol_copy;
    
    
    	/* pages ownership was passed to pcol_copy */
    	_pcol_reset(pcol);
    
    	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
    	if (unlikely(ret))
    		goto err;
    
    	EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
    		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));
    
    
    	ret = ore_write(ios);
    
    	if (unlikely(ret)) {
    
    		EXOFS_ERR("write_exec: ore_write() Failed\n");
    
    		goto err;
    	}
    
    	atomic_inc(&pcol->sbi->s_curr_pending);
    	return 0;
    
    err:
    	_unlock_pcol_pages(pcol, ret, WRITE);
    
    	pcol_free(pcol);
    
    	kfree(pcol_copy);
    
    	return ret;
    }
    
/* writepage_strip is called either directly from writepage() or by the VFS
 * from within write_cache_pages(), to add one more page to be written to
 * storage. It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources, it will submit
 * the previous segment and start a new collection.
 * Eventually the caller must submit the last segment if present.
 */
    static int writepage_strip(struct page *page,
    			   struct writeback_control *wbc_unused, void *data)
    {
    	struct page_collect *pcol = data;
    	struct inode *inode = pcol->inode;
    	struct exofs_i_info *oi = exofs_i(inode);
    	loff_t i_size = i_size_read(inode);
    	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
    	size_t len;
    	int ret;
    
    	BUG_ON(!PageLocked(page));
    
    	ret = wait_obj_created(oi);
    	if (unlikely(ret))
    		goto fail;
    
    	if (page->index < end_index)
    		/* in this case, the page is within the limits of the file */
    		len = PAGE_CACHE_SIZE;
    	else {
    		len = i_size & ~PAGE_CACHE_MASK;
    
    		if (page->index > end_index || !len) {
    			/* in this case, the page is outside the limits
    			 * (truncate in progress)
    			 */
    			ret = write_exec(pcol);
    			if (unlikely(ret))
    				goto fail;
    			if (PageError(page))
    				ClearPageError(page);
    			unlock_page(page);
    
    			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
    				     "outside the limits\n",
    				     inode->i_ino, page->index);
    
    			return 0;
    		}
    	}
    
    try_again:
    
    	if (unlikely(pcol->pg_first == -1)) {
    		pcol->pg_first = page->index;
    	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
    		   page->index)) {
    		/* Discontinuity detected, split the request */
    		ret = write_exec(pcol);
    		if (unlikely(ret))
    			goto fail;
    
    
    		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
    			     inode->i_ino, page->index);
    
    		goto try_again;
    	}
    
    
	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
    		if (unlikely(ret))
    			goto fail;
    	}
    
    
    	EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
    
    		     inode->i_ino, page->index, len);
    
    	ret = pcol_add_page(pcol, page, len);
    	if (unlikely(ret)) {
    
    		EXOFS_DBGMSG2("Failed pcol_add_page "
    
    			     "nr_pages=%u total_length=0x%lx\n",
    			     pcol->nr_pages, pcol->length);
    
    		/* split the request, next loop will start again */
    		ret = write_exec(pcol);
    		if (unlikely(ret)) {
    
			EXOFS_DBGMSG("write_exec failed => %d\n", ret);
    
    			goto fail;
    		}
    
    		goto try_again;
    	}
    
    	BUG_ON(PageWriteback(page));
    	set_page_writeback(page);
    
    	return 0;
    
    fail:
    
    	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
    		     inode->i_ino, page->index, ret);
    
    	set_bit(AS_EIO, &page->mapping->flags);
    	unlock_page(page);
    	return ret;
    }
    
    static int exofs_writepages(struct address_space *mapping,
    		       struct writeback_control *wbc)
    {
    	struct page_collect pcol;
    	long start, end, expected_pages;
    	int ret;
    
    	start = wbc->range_start >> PAGE_CACHE_SHIFT;
    	end = (wbc->range_end == LLONG_MAX) ?
    			start + mapping->nrpages :
    			wbc->range_end >> PAGE_CACHE_SHIFT;
    
    	if (start || end)
    
    		expected_pages = end - start + 1;
    
    	else
    		expected_pages = mapping->nrpages;
    
    
    	if (expected_pages < 32L)
    		expected_pages = 32L;
    
    
    	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
    
    		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
    
    		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
    
    		     mapping->nrpages, start, end, expected_pages);
    
    
    	_pcol_init(&pcol, expected_pages, mapping->host);
    
    	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
    
    	if (unlikely(ret)) {
    
    		EXOFS_ERR("write_cache_pages => %d\n", ret);
    		return ret;
    	}
    
    
    	ret = write_exec(&pcol);
    	if (unlikely(ret))
    		return ret;
    
	if (wbc->sync_mode == WB_SYNC_ALL) {
		return write_exec(&pcol); /* pump the last remainder */
	} else if (pcol.nr_pages) {
		/* not SYNC, let the remainder join the next writeout */
    		unsigned i;
    
    		for (i = 0; i < pcol.nr_pages; i++) {
    			struct page *page = pcol.pages[i];
    
    			end_page_writeback(page);
    			set_page_dirty(page);
    			unlock_page(page);
    		}
    	}
	return 0;
}
    
    static int exofs_writepage(struct page *page, struct writeback_control *wbc)
    {
    	struct page_collect pcol;
    	int ret;
    
    	_pcol_init(&pcol, 1, page->mapping->host);
    
    	ret = writepage_strip(page, NULL, &pcol);
    	if (ret) {
    		EXOFS_ERR("exofs_writepage => %d\n", ret);
    		return ret;
    	}
    
    	return write_exec(&pcol);
    }
    
    /* i_mutex held using inode->i_size directly */
    static void _write_failed(struct inode *inode, loff_t to)
    {
    	if (to > inode->i_size)
    		truncate_pagecache(inode, to, inode->i_size);
    }
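
/* A partial-page write over a range that is not yet uptodate must read the
 * old contents in first (read-modify-write). Pages that lie entirely beyond
 * i_size are simply zeroed instead of read.
 */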
    
    
    int exofs_write_begin(struct file *file, struct address_space *mapping,
    		loff_t pos, unsigned len, unsigned flags,
    		struct page **pagep, void **fsdata)
    {
    	int ret = 0;
    	struct page *page;
    
    	page = *pagep;
    	if (page == NULL) {
    		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
    					 fsdata);
    		if (ret) {
    
    			EXOFS_DBGMSG("simple_write_begin failed\n");
    
    			goto out;
    
    		}
    
    		page = *pagep;
    	}
    
    	 /* read modify write */
    	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
    
    		loff_t i_size = i_size_read(mapping->host);
    		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
    		size_t rlen;
    
    		if (page->index < end_index)
    			rlen = PAGE_CACHE_SIZE;
    		else if (page->index == end_index)
    			rlen = i_size & ~PAGE_CACHE_MASK;
    		else
    			rlen = 0;
    
    		if (!rlen) {
    			clear_highpage(page);
    			SetPageUptodate(page);
    			goto out;
    		}
    
    
    		ret = _readpage(page, true);
    		if (ret) {
    			/*SetPageError was done by _readpage. Is it ok?*/
    			unlock_page(page);
    
			EXOFS_DBGMSG("__readpage failed\n");
		}
	}

    out:
    	if (unlikely(ret))
    		_write_failed(mapping->host, pos + len);
    
    
    	return ret;
    }
    
    static int exofs_write_begin_export(struct file *file,
    		struct address_space *mapping,
    		loff_t pos, unsigned len, unsigned flags,
    		struct page **pagep, void **fsdata)
    {
    	*pagep = NULL;
    
    	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
    					fsdata);
    }
    
    
    static int exofs_write_end(struct file *file, struct address_space *mapping,
    			loff_t pos, unsigned len, unsigned copied,
    			struct page *page, void *fsdata)
    {
    	struct inode *inode = mapping->host;
    	/* According to comment in simple_write_end i_mutex is held */
    	loff_t i_size = inode->i_size;
    	int ret;
    
	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
    
    	if (unlikely(ret))
    		_write_failed(inode, pos + len);
    
    	/* TODO: once simple_write_end marks inode dirty remove */
    
    	if (i_size != inode->i_size)
    		mark_inode_dirty(inode);
    	return ret;
    }
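
/* exofs attaches no private data to its pages, so the VM is not expected to
 * release or invalidate them behind our back; WARN if it ever does.
 */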
    
    
    static int exofs_releasepage(struct page *page, gfp_t gfp)
    {
    	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	WARN_ON(1);
	return 0;
    }
    
    static void exofs_invalidatepage(struct page *page, unsigned long offset)
    {
    
	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
	WARN_ON(1);
}

    const struct address_space_operations exofs_aops = {
    	.readpage	= exofs_readpage,
    	.readpages	= exofs_readpages,
    
    	.writepage	= NULL,
    
    	.writepages	= exofs_writepages,
    	.write_begin	= exofs_write_begin_export,
    
    	.write_end	= exofs_write_end,
    
    	.releasepage	= exofs_releasepage,
    	.set_page_dirty	= __set_page_dirty_nobuffers,
    	.invalidatepage = exofs_invalidatepage,
    
    	/* Not implemented Yet */
    	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
    	.direct_IO	= NULL, /* TODO: Should be trivial to do */
    
	/* For these, NULL has a special meaning or the default is not exported */
    	.get_xip_mem	= NULL,
    	.migratepage	= NULL,
    	.launder_page	= NULL,
    	.is_partially_uptodate = NULL,
	.error_remove_page = NULL,
};

    /******************************************************************************
     * INODE OPERATIONS
     *****************************************************************************/
    
/*
 * Test whether an inode is a fast symlink. Fast symlinks keep the link
 * target directly in the inode's i_data area rather than in the object,
 * which is why a non-zero first i_data word identifies them.
 */
    static inline int exofs_inode_is_fast_symlink(struct inode *inode)
    {
    	struct exofs_i_info *oi = exofs_i(inode);
    
    	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
    }
    
    
    static int _do_truncate(struct inode *inode, loff_t newsize)
    
    {
    	struct exofs_i_info *oi = exofs_i(inode);
    
    	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
    
    	int ret;
    
    	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
    
    
    	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
    
    	if (likely(!ret))
    		truncate_setsize(inode, newsize);
    
	EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
		     inode->i_ino, newsize, ret);
	return ret;
}