/*
 * Excerpt of fs/ocfs2/alloc.c (OCFS2 extent/truncate allocation code).
 * Captured from a repository web view; page navigation chrome removed.
 */
  • 			   struct buffer_head *fe_bh,
    			   struct ocfs2_truncate_context **tc)
    {
    
    	unsigned int new_i_clusters;
    	struct ocfs2_dinode *fe;
    	struct ocfs2_extent_block *eb;
    	struct buffer_head *last_eb_bh = NULL;
    
    	mlog_entry_void();
    
    	*tc = NULL;
    
    	new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
    						  i_size_read(inode));
    	fe = (struct ocfs2_dinode *) fe_bh->b_data;
    
    	mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
    
    	     "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
    	     (unsigned long long)le64_to_cpu(fe->i_size));
    
    	*tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
    
    	if (!(*tc)) {
    		status = -ENOMEM;
    		mlog_errno(status);
    		goto bail;
    	}
    
    	ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
    
    
    	if (fe->id2.i_list.l_tree_depth) {
    		status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
    					  &last_eb_bh, OCFS2_BH_CACHED, inode);
    		if (status < 0) {
    			mlog_errno(status);
    			goto bail;
    		}
    		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
    		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
    			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
    
    			brelse(last_eb_bh);
    			status = -EIO;
    			goto bail;
    		}
    	}
    
    	(*tc)->tc_last_eb_bh = last_eb_bh;
    
    	status = 0;
    bail:
    	if (status < 0) {
    		if (*tc)
    			ocfs2_free_truncate_context(*tc);
    		*tc = NULL;
    	}
    	mlog_exit_void();
    	return status;
    }
    
    
    /*
     * ocfs2_truncate_inline() - zero a byte range of an inline-data inode.
     *
     * 'start' is inclusive, 'end' is not.  'end' is clamped to the current
     * i_size before use.  When @trunc is non-zero, the inode size (both
     * in-core via i_size_write() and on-disk in the dinode) is additionally
     * shrunk to @start.
     *
     * The zeroing of the in-inode data area and the inode field updates are
     * done under a single journal transaction.
     *
     * Returns 0 on success, -EROFS after reporting an inline-data flag
     * mismatch via ocfs2_error(), or a negative error from the journal
     * start/access paths.
     */
    int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
    			  unsigned int start, unsigned int end, int trunc)
    {
    	int ret;
    	unsigned int numbytes;
    	handle_t *handle;
    	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
    	struct ocfs2_inline_data *idata = &di->id2.i_data;
    
    	/* Never zero past the end of file. */
    	if (end > i_size_read(inode))
    		end = i_size_read(inode);
    
    	BUG_ON(start >= end);
    
    	/*
    	 * Sanity check: the inline-data flag must agree between the
    	 * in-core inode, the on-disk dinode, and the superblock feature
    	 * bits.  Any disagreement indicates corruption, so report it
    	 * and refuse to modify the inode.
    	 */
    	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
    	    !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
    	    !ocfs2_supports_inline_data(osb)) {
    		ocfs2_error(inode->i_sb,
    			    "Inline data flags for inode %llu don't agree! "
    			    "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
    			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
    			    le16_to_cpu(di->i_dyn_features),
    			    OCFS2_I(inode)->ip_dyn_features,
    			    osb->s_feature_incompat);
    		ret = -EROFS;
    		goto out;
    	}
    
    	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
    	if (IS_ERR(handle)) {
    		ret = PTR_ERR(handle);
    		mlog_errno(ret);
    		goto out;
    	}
    
    	/* Take journal write access on the inode block before touching it. */
    	ret = ocfs2_journal_access(handle, inode, di_bh,
    				   OCFS2_JOURNAL_ACCESS_WRITE);
    	if (ret) {
    		mlog_errno(ret);
    		goto out_commit;
    	}
    
    	/* Zero [start, end) of the inline data stored inside the dinode. */
    	numbytes = end - start;
    	memset(idata->id_data + start, 0, numbytes);
    
    	/*
    	 * No need to worry about the data page here - it's been
    	 * truncated already and inline data doesn't need it for
    	 * pushing zero's to disk, so we'll let readpage pick it up
    	 * later.
    	 */
    	if (trunc) {
    		i_size_write(inode, start);
    		di->i_size = cpu_to_le64(start);
    	}
    
    	/* Refresh block count and timestamps, in-core and on-disk. */
    	inode->i_blocks = ocfs2_inode_sector_count(inode);
    	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
    
    	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
    	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
    
    	ocfs2_journal_dirty(handle, di_bh);
    
    out_commit:
    	ocfs2_commit_trans(osb, handle);
    
    out:
    	return ret;
    }
    
    
    /*
     * Free a truncate context.
     *
     * The caller is responsible for completing deallocation (draining
     * tc->tc_dealloc) before freeing the context; a non-empty dealloc
     * context here means suballocator bits are being leaked, so log it
     * loudly rather than dropping it silently.
     */
    static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
    {
    	if (tc->tc_dealloc.c_first_suballocator != NULL)
    		mlog(ML_NOTICE,
    		     "Truncate completion has non-empty dealloc context\n");
    
    	/* brelse() is a no-op on NULL, so no need to guard the call. */
    	brelse(tc->tc_last_eb_bh);
    
    	kfree(tc);
    }