dm-cache-target.c

	req.err = 0;
    	init_waitqueue_head(&req.result_wait);
    
    	spin_lock(&cache->invalidation_lock);
    	list_add(&req.list, &cache->invalidation_requests);
    	spin_unlock(&cache->invalidation_lock);
    	wake_worker(cache);
    
	/* Block until the worker flags the request complete, then return its result. */
	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
    }
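
/*
 * The handler below parses each "<begin>" or "<begin>-<end>" argument,
 * validates it via validate_cblock_range(), and hands the range to the
 * worker with request_invalidation(), blocking until each range has
 * been processed.
 */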
    
    static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
    					      const char **cblock_ranges)
    {
    	int r = 0;
    	unsigned i;
    	struct cblock_range range;
    
    	if (!passthrough_mode(&cache->features)) {
    		DMERR("cache has to be in passthrough mode for invalidation");
    		return -EPERM;
    	}
    
    	for (i = 0; i < count; i++) {
    		r = parse_cblock_range(cache, cblock_ranges[i], &range);
    		if (r)
    			break;
    
    		r = validate_cblock_range(cache, &range);
    		if (r)
    			break;
    
		/*
		 * Pass the begin and end cache blocks to the worker and wake it.
		 */
    		r = request_invalidation(cache, &range);
    		if (r)
    			break;
    	}
    
    	return r;
    }
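
/*
 * For illustration only: a minimal sketch of how the "<begin>" and
 * "<begin>-<end>" forms could be parsed. The real parse_cblock_range()
 * is not shown in this excerpt; the helper name below is hypothetical
 * and the block is compiled out.
 */
#if 0
static int parse_cblock_range_sketch(const char *str, uint64_t *b, uint64_t *e)
{
	char dummy;

	/* Form "<begin>-<end>": exactly two fields, no trailing garbage. */
	if (sscanf(str, "%llu-%llu%c", b, e, &dummy) == 2)
		return 0;

	/* Form "<begin>": treat a single block as the range [b, b + 1). */
	if (sscanf(str, "%llu%c", b, &dummy) == 1) {
		*e = *b + 1;
		return 0;
	}

	return -EINVAL;
}
#endif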
    
    /*
     * Supports
     *	"<key> <value>"
     * and
 *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
     * The key migration_threshold is supported by the cache target core.
     */
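
/*
 * Example invocations from userspace (the device name and values are
 * illustrative):
 *
 *   dmsetup message my-cache 0 migration_threshold 204800
 *   dmsetup message my-cache 0 invalidate_cblocks 2345 3400-4500
 */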
    static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
    {
	struct cache *cache = ti->private;

    	if (!argc)
    		return -EINVAL;
    
	if (!strcmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}
    
    static int cache_iterate_devices(struct dm_target *ti,
    				 iterate_devices_callout_fn fn, void *data)
    {
    	int r = 0;
    	struct cache *cache = ti->private;
    
    	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
    	if (!r)
    		r = fn(ti, cache->origin_dev, 0, ti->len, data);
    
    	return r;
    }
    
    /*
     * We assume I/O is going to the origin (which is the volume
     * more likely to have restrictions e.g. by being striped).
     * (Looking up the exact location of the data would be expensive
     * and could always be out of date by the time the bio is submitted.)
     */
    static int cache_bvec_merge(struct dm_target *ti,
    			    struct bvec_merge_data *bvm,
    			    struct bio_vec *biovec, int max_size)
    {
    	struct cache *cache = ti->private;
    	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
    
    	if (!q->merge_bvec_fn)
    		return max_size;
    
    	bvm->bi_bdev = cache->origin_dev->bdev;
    	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
    }
    
    static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
    {
    	/*
    	 * FIXME: these limits may be incompatible with the cache device
    	 */
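	/*
	 * Units: discard_block_size and max_discard_sectors are expressed in
	 * 512-byte sectors; discard_granularity is in bytes, hence the
	 * SECTOR_SHIFT conversion below.
	 */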
    	limits->max_discard_sectors = cache->discard_block_size * 1024;
    	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
    }
    
    static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
    {
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
    
	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's block size (i.e. the block size divides io_opt evenly),
	 * do not override them.
	 */
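	/*
	 * Note: do_div() divides io_opt_sectors in place and returns the
	 * remainder, so a non-zero return means io_opt is not an exact
	 * multiple of the cache block size.
	 */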
    	if (io_opt_sectors < cache->sectors_per_block ||
    	    do_div(io_opt_sectors, cache->sectors_per_block)) {
    		blk_limits_io_min(limits, 0);
    		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
    	}
    
    	set_discard_limits(cache, limits);
    }
    
    /*----------------------------------------------------------------*/
    
static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
    	.ctr = cache_ctr,
    	.dtr = cache_dtr,
    	.map = cache_map,
    	.end_io = cache_end_io,
    	.postsuspend = cache_postsuspend,
    	.preresume = cache_preresume,
    	.resume = cache_resume,
    	.status = cache_status,
    	.message = cache_message,
    	.iterate_devices = cache_iterate_devices,
    	.merge = cache_bvec_merge,
    	.io_hints = cache_io_hints,
    };
    
    static int __init dm_cache_init(void)
    {
    	int r;
    
    	r = dm_register_target(&cache_target);
    	if (r) {
    		DMERR("cache target registration failed: %d", r);
    		return r;
    	}
    
	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		/* Undo the target registration so the module load fails cleanly. */
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}
    
    	return 0;
    }
    
static void __exit dm_cache_exit(void)
{
	/* Teardown mirrors init: unregister the target, then destroy the slab. */
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}
    
    module_init(dm_cache_init);
    module_exit(dm_cache_exit);
    
    MODULE_DESCRIPTION(DM_NAME " cache target");
    MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
    MODULE_LICENSE("GPL");