static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
    				struct dm_bio_prison_cell *new_ocell)
    {
    	struct dm_cache_migration *mg = prealloc_get_migration(structs);
    
    	mg->err = false;
    	mg->writeback = false;
    	mg->demote = true;
    	mg->promote = true;
    	mg->cache = cache;
    	mg->old_oblock = old_oblock;
    	mg->new_oblock = new_oblock;
    	mg->cblock = cblock;
    	mg->old_ocell = old_ocell;
    	mg->new_ocell = new_ocell;
    	mg->start_jiffies = jiffies;
    
    	inc_nr_migrations(cache);
    	quiesce_migration(mg);
    }
    
    /*----------------------------------------------------------------
     * bio processing
     *--------------------------------------------------------------*/
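/*
 * Add the bio to the deferred list and wake the worker thread to deal
 * with it.
 */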
    static void defer_bio(struct cache *cache, struct bio *bio)
    {
    	unsigned long flags;
    
    	spin_lock_irqsave(&cache->lock, flags);
    	bio_list_add(&cache->deferred_bios, bio);
    	spin_unlock_irqrestore(&cache->lock, flags);
    
    	wake_worker(cache);
    }
    
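/*
 * dm core duplicates each flush into two bios (num_flush_bios is set to 2
 * in the constructor): req_nr 0 is remapped to the origin device and
 * req_nr 1 to the cache device, so both get flushed.
 */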
    static void process_flush_bio(struct cache *cache, struct bio *bio)
    {
    	struct per_bio_data *pb = get_per_bio_data(bio);
    
    	BUG_ON(bio->bi_size);
    	if (!pb->req_nr)
    		remap_to_origin(cache, bio);
    	else
    		remap_to_cache(cache, bio, 0);
    
    	issue(cache, bio);
    }
    
    /*
 * People generally discard large parts of a device, e.g. the whole device
 * when formatting.  Splitting these large discards up into cache block
 * sized ios and then quiescing (always necessary for a discard) takes too
     * long.
     *
     * We keep it simple, and allow any size of discard to come in, and just
     * mark off blocks on the discard bitset.  No passdown occurs!
     *
     * To implement passdown we need to change the bio_prison such that a cell
     * can have a key that spans many blocks.
     */
    static void process_discard_bio(struct cache *cache, struct bio *bio)
    {
    	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
    						  cache->discard_block_size);
    	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
	dm_block_t b;

	end_block = block_div(end_block, cache->discard_block_size);

    	for (b = start_block; b < end_block; b++)
    		set_discard(cache, to_dblock(b));
    
    	bio_endio(bio, 0);
    }
    
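/*
 * Returns true if starting another migration would keep the total volume
 * of in-flight copy io (in sectors) below the user-settable
 * migration_threshold.
 */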
    static bool spare_migration_bandwidth(struct cache *cache)
    {
    	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
    		cache->sectors_per_block;
    	return current_volume < cache->migration_threshold;
    }
    
    static bool is_writethrough_io(struct cache *cache, struct bio *bio,
    			       dm_cblock_t cblock)
    {
    	return bio_data_dir(bio) == WRITE &&
    		cache->features.write_through && !is_dirty(cache, cblock);
    }
    
    static void inc_hit_counter(struct cache *cache, struct bio *bio)
    {
    	atomic_inc(bio_data_dir(bio) == READ ?
    		   &cache->stats.read_hit : &cache->stats.write_hit);
    }
    
    static void inc_miss_counter(struct cache *cache, struct bio *bio)
    {
    	atomic_inc(bio_data_dir(bio) == READ ?
    		   &cache->stats.read_miss : &cache->stats.write_miss);
    }
    
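/*
 * The main per-bio path: detain the bio in a cell, ask the policy where
 * the block lives, then remap, promote, or demote-then-promote as
 * instructed.
 */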
    static void process_bio(struct cache *cache, struct prealloc *structs,
    			struct bio *bio)
    {
    	int r;
    	bool release_cell = true;
    	dm_oblock_t block = get_bio_block(cache, bio);
    	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
    	struct policy_result lookup_result;
    	struct per_bio_data *pb = get_per_bio_data(bio);
    	bool discarded_block = is_discarded_oblock(cache, block);
    	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
    
    	/*
    	 * Check to see if that block is currently migrating.
    	 */
    	cell_prealloc = prealloc_get_cell(structs);
    	r = bio_detain(cache, block, bio, cell_prealloc,
    		       (cell_free_fn) prealloc_put_cell,
    		       structs, &new_ocell);
    	if (r > 0)
    		return;
    
    	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
    		       bio, &lookup_result);
    
    	if (r == -EWOULDBLOCK)
    		/* migration has been denied */
    		lookup_result.op = POLICY_MISS;
    
    	switch (lookup_result.op) {
    	case POLICY_HIT:
    		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

    		issue(cache, bio);
    		break;
    
    	case POLICY_MISS:
    		inc_miss_counter(cache, bio);
    		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
    
    		remap_to_origin_clear_discard(cache, bio, block);
		issue(cache, bio);
		break;
    
    	case POLICY_NEW:
    		atomic_inc(&cache->stats.promotion);
    		promote(cache, structs, block, lookup_result.cblock, new_ocell);
    		release_cell = false;
    		break;
    
    	case POLICY_REPLACE:
    		cell_prealloc = prealloc_get_cell(structs);
    		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
    			       (cell_free_fn) prealloc_put_cell,
    			       structs, &old_ocell);
    		if (r > 0) {
    			/*
    			 * We have to be careful to avoid lock inversion of
    			 * the cells.  So we back off, and wait for the
    			 * old_ocell to become free.
    			 */
    			policy_force_mapping(cache->policy, block,
    					     lookup_result.old_oblock);
    			atomic_inc(&cache->stats.cache_cell_clash);
    			break;
    		}
    		atomic_inc(&cache->stats.demotion);
    		atomic_inc(&cache->stats.promotion);
    
    		demote_then_promote(cache, structs, lookup_result.old_oblock,
    				    block, lookup_result.cblock,
    				    old_ocell, new_ocell);
    		release_cell = false;
    		break;
    
    	default:
    		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
    			    (unsigned) lookup_result.op);
    		bio_io_error(bio);
    	}
    
    	if (release_cell)
    		cell_defer(cache, new_ocell, false);
    }
    
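/*
 * Returns true if COMMIT_PERIOD jiffies have passed since the last
 * commit, or if jiffies has wrapped around last_commit_jiffies.
 */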
    static int need_commit_due_to_time(struct cache *cache)
    {
    	return jiffies < cache->last_commit_jiffies ||
    	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
    }
    
    static int commit_if_needed(struct cache *cache)
    {
    	if (dm_cache_changed_this_transaction(cache->cmd) &&
    	    (cache->commit_requested || need_commit_due_to_time(cache))) {
    		atomic_inc(&cache->stats.commit_count);
    		cache->last_commit_jiffies = jiffies;
    		cache->commit_requested = false;
    		return dm_cache_commit(cache->cmd, false);
    	}
    
    	return 0;
    }
    
    static void process_deferred_bios(struct cache *cache)
    {
    	unsigned long flags;
    	struct bio_list bios;
    	struct bio *bio;
    	struct prealloc structs;
    
    	memset(&structs, 0, sizeof(structs));
    	bio_list_init(&bios);
    
    	spin_lock_irqsave(&cache->lock, flags);
    	bio_list_merge(&bios, &cache->deferred_bios);
    	bio_list_init(&cache->deferred_bios);
    	spin_unlock_irqrestore(&cache->lock, flags);
    
    	while (!bio_list_empty(&bios)) {
    		/*
    		 * If we've got no free migration structs, and processing
    		 * this bio might require one, we pause until there are some
    		 * prepared mappings to process.
    		 */
    		if (prealloc_data_structs(cache, &structs)) {
    			spin_lock_irqsave(&cache->lock, flags);
    			bio_list_merge(&cache->deferred_bios, &bios);
    			spin_unlock_irqrestore(&cache->lock, flags);
    			break;
    		}
    
    		bio = bio_list_pop(&bios);
    
    		if (bio->bi_rw & REQ_FLUSH)
    			process_flush_bio(cache, bio);
    		else if (bio->bi_rw & REQ_DISCARD)
    			process_discard_bio(cache, bio);
    		else
    			process_bio(cache, &structs, bio);
    	}
    
    	prealloc_free_structs(cache, &structs);
    }
    
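/*
 * submit_bios is false on the metadata commit failure path, in which case
 * the deferred flush bios are errored rather than submitted.
 */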
    static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
    {
    	unsigned long flags;
    	struct bio_list bios;
    	struct bio *bio;
    
    	bio_list_init(&bios);
    
    	spin_lock_irqsave(&cache->lock, flags);
    	bio_list_merge(&bios, &cache->deferred_flush_bios);
    	bio_list_init(&cache->deferred_flush_bios);
    	spin_unlock_irqrestore(&cache->lock, flags);
    
    	while ((bio = bio_list_pop(&bios)))
    		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

    static void process_deferred_writethrough_bios(struct cache *cache)
    {
    	unsigned long flags;
    	struct bio_list bios;
    	struct bio *bio;
    
    	bio_list_init(&bios);
    
    	spin_lock_irqsave(&cache->lock, flags);
    	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
    	bio_list_init(&cache->deferred_writethrough_bios);
    	spin_unlock_irqrestore(&cache->lock, flags);
    
    	while ((bio = bio_list_pop(&bios)))
    		generic_make_request(bio);
}

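/*
 * Ask the policy for dirty blocks to write back and copy them to the
 * origin while there is spare migration bandwidth.
 */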
    static void writeback_some_dirty_blocks(struct cache *cache)
    {
    	int r = 0;
    	dm_oblock_t oblock;
    	dm_cblock_t cblock;
    	struct prealloc structs;
    	struct dm_bio_prison_cell *old_ocell;
    
    	memset(&structs, 0, sizeof(structs));
    
    	while (spare_migration_bandwidth(cache)) {
    		if (prealloc_data_structs(cache, &structs))
    			break;
    
    		r = policy_writeback_work(cache->policy, &oblock, &cblock);
    		if (r)
    			break;
    
    		r = get_cell(cache, oblock, &structs, &old_ocell);
    		if (r) {
    			policy_set_dirty(cache->policy, oblock);
    			break;
    		}
    
    		writeback(cache, &structs, oblock, cblock, old_ocell);
    	}
    
    	prealloc_free_structs(cache, &structs);
    }
    
    /*----------------------------------------------------------------
     * Main worker loop
     *--------------------------------------------------------------*/
    static void start_quiescing(struct cache *cache)
    {
    	unsigned long flags;
    
    	spin_lock_irqsave(&cache->lock, flags);
    	cache->quiescing = 1;
    	spin_unlock_irqrestore(&cache->lock, flags);
    }
    
    static void stop_quiescing(struct cache *cache)
    {
    	unsigned long flags;
    
    	spin_lock_irqsave(&cache->lock, flags);
    	cache->quiescing = 0;
    	spin_unlock_irqrestore(&cache->lock, flags);
    }
    
    static bool is_quiescing(struct cache *cache)
    {
    	int r;
    	unsigned long flags;
    
    	spin_lock_irqsave(&cache->lock, flags);
    	r = cache->quiescing;
    	spin_unlock_irqrestore(&cache->lock, flags);
    
    	return r;
    }
    
    static void wait_for_migrations(struct cache *cache)
    {
    	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
    }
    
    static void stop_worker(struct cache *cache)
    {
    	cancel_delayed_work(&cache->waker);
    	flush_workqueue(cache->wq);
    }
    
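/*
 * End any still-deferred bios with DM_ENDIO_REQUEUE so that the upper
 * layers will resubmit them later.
 */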
    static void requeue_deferred_io(struct cache *cache)
    {
    	struct bio *bio;
    	struct bio_list bios;
    
    	bio_list_init(&bios);
    	bio_list_merge(&bios, &cache->deferred_bios);
    	bio_list_init(&cache->deferred_bios);
    
    	while ((bio = bio_list_pop(&bios)))
    		bio_endio(bio, DM_ENDIO_REQUEUE);
    }
    
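/*
 * While quiescing only outstanding migrations count as work; deferred
 * bios are left alone until quiescing ends.
 */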
    static int more_work(struct cache *cache)
    {
    	if (is_quiescing(cache))
    		return !list_empty(&cache->quiesced_migrations) ||
    			!list_empty(&cache->completed_migrations) ||
    			!list_empty(&cache->need_commit_migrations);
    	else
    		return !bio_list_empty(&cache->deferred_bios) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
    			!list_empty(&cache->quiesced_migrations) ||
    			!list_empty(&cache->completed_migrations) ||
    			!list_empty(&cache->need_commit_migrations);
    }
    
    static void do_worker(struct work_struct *ws)
    {
    	struct cache *cache = container_of(ws, struct cache, worker);
    
    	do {
    		if (!is_quiescing(cache))
    			process_deferred_bios(cache);
    
    		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
    		process_migrations(cache, &cache->completed_migrations, complete_migration);
    
		writeback_some_dirty_blocks(cache);

		process_deferred_writethrough_bios(cache);

    		if (commit_if_needed(cache)) {
    			process_deferred_flush_bios(cache, false);
    
    			/*
    			 * FIXME: rollback metadata or just go into a
    			 * failure mode and error everything
    			 */
    		} else {
    			process_deferred_flush_bios(cache, true);
    			process_migrations(cache, &cache->need_commit_migrations,
    					   migration_success_post_commit);
    		}
    	} while (more_work(cache));
    }
    
    /*
     * We want to commit periodically so that not too much
     * unwritten metadata builds up.
     */
    static void do_waker(struct work_struct *ws)
    {
    	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
    	wake_worker(cache);
    	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
    }
    
    /*----------------------------------------------------------------*/
    
    static int is_congested(struct dm_dev *dev, int bdi_bits)
    {
    	struct request_queue *q = bdev_get_queue(dev->bdev);
    	return bdi_congested(&q->backing_dev_info, bdi_bits);
    }
    
    static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
    {
    	struct cache *cache = container_of(cb, struct cache, callbacks);
    
    	return is_congested(cache->origin_dev, bdi_bits) ||
    		is_congested(cache->cache_dev, bdi_bits);
    }
    
    /*----------------------------------------------------------------
     * Target methods
     *--------------------------------------------------------------*/
    
    /*
     * This function gets called on the error paths of the constructor, so we
     * have to cope with a partially initialised struct.
     */
    static void destroy(struct cache *cache)
    {
    	unsigned i;
    
    	if (cache->next_migration)
    		mempool_free(cache->next_migration, cache->migration_pool);
    
    	if (cache->migration_pool)
    		mempool_destroy(cache->migration_pool);
    
    	if (cache->all_io_ds)
    		dm_deferred_set_destroy(cache->all_io_ds);
    
    	if (cache->prison)
    		dm_bio_prison_destroy(cache->prison);
    
    	if (cache->wq)
    		destroy_workqueue(cache->wq);
    
    	if (cache->dirty_bitset)
    		free_bitset(cache->dirty_bitset);
    
    	if (cache->discard_bitset)
    		free_bitset(cache->discard_bitset);
    
    	if (cache->copier)
    		dm_kcopyd_client_destroy(cache->copier);
    
    	if (cache->cmd)
    		dm_cache_metadata_close(cache->cmd);
    
    	if (cache->metadata_dev)
    		dm_put_device(cache->ti, cache->metadata_dev);
    
    	if (cache->origin_dev)
    		dm_put_device(cache->ti, cache->origin_dev);
    
    	if (cache->cache_dev)
    		dm_put_device(cache->ti, cache->cache_dev);
    
    	if (cache->policy)
    		dm_cache_policy_destroy(cache->policy);
    
	for (i = 0; i < cache->nr_ctr_args; i++)
    		kfree(cache->ctr_args[i]);
    	kfree(cache->ctr_args);
    
    	kfree(cache);
    }
    
    static void cache_dtr(struct dm_target *ti)
    {
    	struct cache *cache = ti->private;
    
    	destroy(cache);
    }
    
    static sector_t get_dev_size(struct dm_dev *dev)
    {
    	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
    }
    
    /*----------------------------------------------------------------*/
    
    /*
     * Construct a cache device mapping.
     *
     * cache <metadata dev> <cache dev> <origin dev> <block size>
     *       <#feature args> [<feature arg>]*
     *       <policy> <#policy args> [<policy arg>]*
     *
     * metadata dev    : fast device holding the persistent metadata
     * cache dev	   : fast device holding cached data blocks
     * origin dev	   : slow device holding original data blocks
     * block size	   : cache unit size in sectors
     *
     * #feature args   : number of feature arguments passed
     * feature args    : writethrough.  (The default is writeback.)
     *
     * policy	   : the replacement policy to use
     * #policy args    : an even number of policy arguments corresponding
     *		     to key/value pairs passed to the policy
     * policy args	   : key/value pairs passed to the policy
     *		     E.g. 'sequential_threshold 1024'
     *		     See cache-policies.txt for details.
     *
     * Optional feature arguments are:
     *   writethrough  : write through caching that prohibits cache block
     *		     content from being different from origin block content.
     *		     Without this argument, the default behaviour is to write
     *		     back cache block contents later for performance reasons,
     *		     so they may differ from the corresponding origin blocks.
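 *
 * An illustrative table line (all device names and sizes hypothetical),
 * using 512 sector (256KB) blocks, no feature arguments, the 'mq' policy
 * and a single key/value pair:
 *
 *   0 41943040 cache /dev/fast-meta /dev/fast-cache /dev/slow-origin 512
 *       0 mq 2 sequential_threshold 1024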
     */
    struct cache_args {
    	struct dm_target *ti;
    
    	struct dm_dev *metadata_dev;
    
    	struct dm_dev *cache_dev;
    	sector_t cache_sectors;
    
    	struct dm_dev *origin_dev;
    	sector_t origin_sectors;
    
    	uint32_t block_size;
    
    	const char *policy_name;
    	int policy_argc;
    	const char **policy_argv;
    
    	struct cache_features features;
    };
    
    static void destroy_cache_args(struct cache_args *ca)
    {
    	if (ca->metadata_dev)
    		dm_put_device(ca->ti, ca->metadata_dev);
    
    	if (ca->cache_dev)
    		dm_put_device(ca->ti, ca->cache_dev);
    
    	if (ca->origin_dev)
    		dm_put_device(ca->ti, ca->origin_dev);
    
    	kfree(ca);
    }
    
    static bool at_least_one_arg(struct dm_arg_set *as, char **error)
    {
    	if (!as->argc) {
    		*error = "Insufficient args";
    		return false;
    	}
    
    	return true;
    }
    
    static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
    			      char **error)
    {
    	int r;
    	sector_t metadata_dev_size;
    	char b[BDEVNAME_SIZE];
    
    	if (!at_least_one_arg(as, error))
    		return -EINVAL;
    
    	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
    			  &ca->metadata_dev);
    	if (r) {
    		*error = "Error opening metadata device";
    		return r;
    	}
    
    	metadata_dev_size = get_dev_size(ca->metadata_dev);
    	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS);
    
    	return 0;
    }
    
    static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
    			   char **error)
    {
    	int r;
    
    	if (!at_least_one_arg(as, error))
    		return -EINVAL;
    
    	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
    			  &ca->cache_dev);
    	if (r) {
    		*error = "Error opening cache device";
    		return r;
    	}
    	ca->cache_sectors = get_dev_size(ca->cache_dev);
    
    	return 0;
    }
    
    static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
    			    char **error)
    {
    	int r;
    
    	if (!at_least_one_arg(as, error))
    		return -EINVAL;
    
    	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
    			  &ca->origin_dev);
    	if (r) {
    		*error = "Error opening origin device";
    		return r;
    	}
    
    	ca->origin_sectors = get_dev_size(ca->origin_dev);
    	if (ca->ti->len > ca->origin_sectors) {
    		*error = "Device size larger than cached device";
    		return -EINVAL;
    	}
    
    	return 0;
    }
    
    static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
    			    char **error)
    {
    	unsigned long tmp;
    
    	if (!at_least_one_arg(as, error))
    		return -EINVAL;
    
    	if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
    	    tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
    	    tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
    		*error = "Invalid data block size";
    		return -EINVAL;
    	}
    
    	if (tmp > ca->cache_sectors) {
    		*error = "Data block size is larger than the cache device";
    		return -EINVAL;
    	}
    
    	ca->block_size = tmp;
    
    	return 0;
    }
    
    static void init_features(struct cache_features *cf)
    {
    	cf->mode = CM_WRITE;
    	cf->write_through = false;
    }
    
    static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
    			  char **error)
    {
    	static struct dm_arg _args[] = {
    		{0, 1, "Invalid number of cache feature arguments"},
    	};
    
    	int r;
    	unsigned argc;
    	const char *arg;
    	struct cache_features *cf = &ca->features;
    
    	init_features(cf);
    
    	r = dm_read_arg_group(_args, as, &argc, error);
    	if (r)
    		return -EINVAL;
    
    	while (argc--) {
    		arg = dm_shift_arg(as);
    
    		if (!strcasecmp(arg, "writeback"))
    			cf->write_through = false;
    
    		else if (!strcasecmp(arg, "writethrough"))
    			cf->write_through = true;
    
    		else {
    			*error = "Unrecognised cache feature requested";
    			return -EINVAL;
    		}
    	}
    
    	return 0;
    }
    
    static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
    			char **error)
    {
    	static struct dm_arg _args[] = {
    		{0, 1024, "Invalid number of policy arguments"},
    	};
    
    	int r;
    
    	if (!at_least_one_arg(as, error))
    		return -EINVAL;
    
    	ca->policy_name = dm_shift_arg(as);
    
    	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
    	if (r)
    		return -EINVAL;
    
    	ca->policy_argv = (const char **)as->argv;
    	dm_consume_args(as, ca->policy_argc);
    
    	return 0;
    }
    
    static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
    			    char **error)
    {
    	int r;
    	struct dm_arg_set as;
    
    	as.argc = argc;
    	as.argv = argv;
    
    	r = parse_metadata_dev(ca, &as, error);
    	if (r)
    		return r;
    
    	r = parse_cache_dev(ca, &as, error);
    	if (r)
    		return r;
    
    	r = parse_origin_dev(ca, &as, error);
    	if (r)
    		return r;
    
    	r = parse_block_size(ca, &as, error);
    	if (r)
    		return r;
    
    	r = parse_features(ca, &as, error);
    	if (r)
    		return r;
    
    	r = parse_policy(ca, &as, error);
    	if (r)
    		return r;
    
    	return 0;
    }
    
    /*----------------------------------------------------------------*/
    
    static struct kmem_cache *migration_cache;
    
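/*
 * Policy config arrives as <key> <value> pairs on the target line; hand
 * each pair to the policy in turn.
 */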
    static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
    {
    	int r = 0;
    
    	if (argc & 1) {
    		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
    		return -EINVAL;
    	}
    
    	while (argc) {
    		r = policy_set_config_value(p, argv[0], argv[1]);
    		if (r) {
    			DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
    			       argv[0], argv[1]);
    			return r;
    		}
    
    		argc -= 2;
    		argv += 2;
    	}
    
    	return r;
    }
    
    static int create_cache_policy(struct cache *cache, struct cache_args *ca,
    			       char **error)
    {
    	int r;
    
    	cache->policy =	dm_cache_policy_create(ca->policy_name,
    					       cache->cache_size,
    					       cache->origin_sectors,
    					       cache->sectors_per_block);
    	if (!cache->policy) {
    		*error = "Error creating cache's policy";
    		return -ENOMEM;
    	}
    
    	r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
    
	if (r) {
		*error = "Error setting cache policy's config values";
		dm_cache_policy_destroy(cache->policy);
	}

    	return r;
    }
    
    /*
 * We want the discard block size to be a power of two, at least as large
 * as the cache block size, and to have no more than 2^14 discard blocks
     * across the origin.
     */
    #define MAX_DISCARD_BLOCKS (1 << 14)
    
    static bool too_many_discard_blocks(sector_t discard_block_size,
    				    sector_t origin_size)
    {
    	(void) sector_div(origin_size, discard_block_size);
    
    	return origin_size > MAX_DISCARD_BLOCKS;
    }
    
    static sector_t calculate_discard_block_size(sector_t cache_block_size,
    					     sector_t origin_size)
    {
    	sector_t discard_block_size;
    
    	discard_block_size = roundup_pow_of_two(cache_block_size);
    
    	if (origin_size)
    		while (too_many_discard_blocks(discard_block_size, origin_size))
    			discard_block_size *= 2;
    
    	return discard_block_size;
    }
    
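/*
 * 2048 sectors is 1MB with 512 byte sectors, so by default allow roughly
 * 100MB of copy io to be in flight at any one time.
 */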
    #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
    
    static int cache_create(struct cache_args *ca, struct cache **result)
    {
    	int r = 0;
    	char **error = &ca->ti->error;
    	struct cache *cache;
    	struct dm_target *ti = ca->ti;
    	dm_block_t origin_blocks;
    	struct dm_cache_metadata *cmd;
    	bool may_format = ca->features.mode == CM_WRITE;
    
    	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
    	if (!cache)
    		return -ENOMEM;
    
    	cache->ti = ca->ti;
    	ti->private = cache;
    	ti->per_bio_data_size = sizeof(struct per_bio_data);
    	ti->num_flush_bios = 2;
    	ti->flush_supported = true;
    
    	ti->num_discard_bios = 1;
    	ti->discards_supported = true;
    	ti->discard_zeroes_data_unsupported = true;
    
    	memcpy(&cache->features, &ca->features, sizeof(cache->features));
    
    	cache->callbacks.congested_fn = cache_is_congested;
    	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
    
    	cache->metadata_dev = ca->metadata_dev;
    	cache->origin_dev = ca->origin_dev;
    	cache->cache_dev = ca->cache_dev;
    
    	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
    
    	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
    	cache->origin_blocks = to_oblock(origin_blocks);
    
    	cache->sectors_per_block = ca->block_size;
    	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
    		r = -EINVAL;
    		goto bad;
    	}
    
    	if (ca->block_size & (ca->block_size - 1)) {
    		dm_block_t cache_size = ca->cache_sectors;
    
    		cache->sectors_per_block_shift = -1;
    
		cache_size = block_div(cache_size, ca->block_size);
    		cache->cache_size = to_cblock(cache_size);
    	} else {
    		cache->sectors_per_block_shift = __ffs(ca->block_size);
    		cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
    	}
    
    	r = create_cache_policy(cache, ca, error);
    	if (r)
    		goto bad;
    	cache->policy_nr_args = ca->policy_argc;
    
    	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
    				     ca->block_size, may_format,
    				     dm_cache_policy_get_hint_size(cache->policy));
    	if (IS_ERR(cmd)) {
    		*error = "Error creating metadata object";
    		r = PTR_ERR(cmd);
    		goto bad;
    	}
    	cache->cmd = cmd;
    
    	spin_lock_init(&cache->lock);
    	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
    	INIT_LIST_HEAD(&cache->quiesced_migrations);
    	INIT_LIST_HEAD(&cache->completed_migrations);
    	INIT_LIST_HEAD(&cache->need_commit_migrations);
    	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
    	atomic_set(&cache->nr_migrations, 0);
    	init_waitqueue_head(&cache->migration_wait);
    
    	cache->nr_dirty = 0;
    	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
    	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		r = -ENOMEM;
		goto bad;
    	}
    	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
    
    	cache->discard_block_size =
    		calculate_discard_block_size(cache->sectors_per_block,
    					     cache->origin_sectors);
    	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
    	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
    	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		r = -ENOMEM;
		goto bad;
    	}
    	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
    
    	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
    	if (IS_ERR(cache->copier)) {
    		*error = "could not create kcopyd client";
    		r = PTR_ERR(cache->copier);
    		goto bad;
    	}
    
    	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
    	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		r = -ENOMEM;
		goto bad;
    	}
    	INIT_WORK(&cache->worker, do_worker);
    	INIT_DELAYED_WORK(&cache->waker, do_waker);
    	cache->last_commit_jiffies = jiffies;
    
    	cache->prison = dm_bio_prison_create(PRISON_CELLS);
    	if (!cache->prison) {
		*error = "could not create bio prison";
		r = -ENOMEM;
		goto bad;
    	}
    
    	cache->all_io_ds = dm_deferred_set_create();
    	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		r = -ENOMEM;
		goto bad;
    	}
    
    	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
    							 migration_cache);
    	if (!cache->migration_pool) {