    
    	/*
    	 *	These ioctl calls:
    	 *	- require superuser power.
    	 *	- require strict serialization.
    	 *	- do not return a value
    	 */
    	case SIOCSIFFLAGS:
    	case SIOCSIFMETRIC:
    	case SIOCSIFMTU:
    	case SIOCSIFMAP:
    	case SIOCSIFHWADDR:
    	case SIOCSIFSLAVE:
    	case SIOCADDMULTI:
    	case SIOCDELMULTI:
    	case SIOCSIFHWBROADCAST:
    	case SIOCSIFTXQLEN:
    	case SIOCSMIIREG:
    	case SIOCBONDENSLAVE:
    	case SIOCBONDRELEASE:
    	case SIOCBONDSETHWADDR:
    	case SIOCBONDCHANGEACTIVE:
    	case SIOCBRADDIF:
    	case SIOCBRDELIF:
    	case SIOCSHWTSTAMP:
    		if (!capable(CAP_NET_ADMIN))
    			return -EPERM;
    		/* fall through */
    	case SIOCBONDSLAVEINFOQUERY:
    	case SIOCBONDINFOQUERY:
    		dev_load(net, ifr.ifr_name);
    		rtnl_lock();
    		ret = dev_ifsioc(net, &ifr, cmd);
    		rtnl_unlock();
    		return ret;
    
    	case SIOCGIFMEM:
    		/* Get the per device memory space. We can add this but
    		 * currently do not support it */
    	case SIOCSIFMEM:
    		/* Set the per device memory buffer space.
    		 * Not applicable in our case */
    	case SIOCSIFLINK:
    		return -EINVAL;
    
    	/*
    	 *	Unknown or private ioctl.
    	 */
    	default:
    		if (cmd == SIOCWANDEV ||
    		    (cmd >= SIOCDEVPRIVATE &&
    		     cmd <= SIOCDEVPRIVATE + 15)) {
    
			dev_load(net, ifr.ifr_name);
    			rtnl_lock();
    
    			ret = dev_ifsioc(net, &ifr, cmd);
    
    			rtnl_unlock();
    
    			if (!ret && copy_to_user(arg, &ifr,
    						 sizeof(struct ifreq)))
    				ret = -EFAULT;
    
    			return ret;
    
    		}
    		/* Take care of Wireless Extensions */
    		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
    			return wext_handle_ioctl(net, &ifr, cmd, arg);
    		return -EINVAL;
    
    	}
    }
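
/*
 * Illustrative sketch (not part of dev.c): how the privileged SIOCSIFFLAGS
 * branch above is typically reached from userspace.  An ioctl carrying a
 * struct ifreq on any socket lands in the dispatcher above; without
 * CAP_NET_ADMIN the capable() check returns -EPERM.  The interface name and
 * the helper name are only examples.
 */
#if 0	/* userspace sketch, kept out of the kernel build on purpose */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int set_iface_up(const char *name)
{
	struct ifreq ifr;
	int ret = -1;
	int sock = socket(AF_INET, SOCK_DGRAM, 0);

	if (sock < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(sock, SIOCGIFFLAGS, &ifr) == 0) {	/* read current flags */
		ifr.ifr_flags |= IFF_UP;
		ret = ioctl(sock, SIOCSIFFLAGS, &ifr);	/* needs CAP_NET_ADMIN */
	}
	close(sock);
	return ret;
}
#endif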
    
    
    /**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
     *
     *	Returns a suitable unique value for a new device interface
     *	number.  The caller must hold the rtnl semaphore or the
     *	dev_base_lock to be sure it remains unique.
     */
    
static int dev_new_index(struct net *net)
    {
    	static int ifindex;
    	for (;;) {
    		if (++ifindex <= 0)
    			ifindex = 1;
    
		if (!__dev_get_by_index(net, ifindex))
    			return ifindex;
    	}
    }
    
/* Delayed registration/unregistration */
    
    static LIST_HEAD(net_todo_list);
    
    
    
static void net_set_todo(struct net_device *dev)
    {
    	list_add_tail(&dev->todo_list, &net_todo_list);
    }
    
    
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never "
				 "was registered\n", dev->name, dev);

			list_del(&dev->unreg_list);
			continue;
		}

		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}
    
    	/* If device is running, close it first. */
    	dev_close_many(head);
    
    	list_for_each_entry(dev, head, unreg_list) {
    
    		/* And unlink it from device chain. */
    		unlist_netdevice(dev);
    
    		dev->reg_state = NETREG_UNREGISTERING;
    	}
    
    	list_for_each_entry(dev, head, unreg_list) {
    		/* Shutdown queueing discipline. */
    		dev_shutdown(dev);
    
    		/* Notify protocols, that we are about to destroy
    		   this device. They should clean all the things.
    		*/
    		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
    
    		if (!dev->rtnl_link_ops ||
    		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
    			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
    
    
    		/*
    		 *	Flush the unicast and multicast chains
    		 */
    
		dev_uc_flush(dev);
		dev_mc_flush(dev);
    
    		if (dev->netdev_ops->ndo_uninit)
    			dev->netdev_ops->ndo_uninit(dev);
    
    		/* Notifier chain MUST detach us from master device. */
    		WARN_ON(dev->master);
    
    		/* Remove entries from kobject tree */
    		netdev_unregister_kobject(dev);
    	}
    
    	/* Process any work delayed until the end of the batch */
    
    	dev = list_first_entry(head, struct net_device, unreg_list);
    
    	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
    
    	list_for_each_entry(dev, head, unreg_list)
    
    		dev_put(dev);
    }
    
    static void rollback_registered(struct net_device *dev)
    {
    	LIST_HEAD(single);
    
    	list_add(&dev->unreg_list, &single);
    	rollback_registered_many(&single);
    
	list_del(&single);
}

u32 netdev_fix_features(struct net_device *dev, u32 features)
{
    	/* Fix illegal checksum combinations */
    	if ((features & NETIF_F_HW_CSUM) &&
    	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
    
    		netdev_info(dev, "mixed HW and IP checksum settings.\n");
    
    		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
    	}
    
    	if ((features & NETIF_F_NO_CSUM) &&
    	    (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
    
    		netdev_info(dev, "mixed no checksumming and other settings.\n");
    
    		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
    	}
    
    
    	/* Fix illegal SG+CSUM combinations. */
    	if ((features & NETIF_F_SG) &&
    	    !(features & NETIF_F_ALL_CSUM)) {
    
    		netdev_info(dev,
    			    "Dropping NETIF_F_SG since no checksum feature.\n");
    
    		features &= ~NETIF_F_SG;
    	}
    
    	/* TSO requires that SG is present as well. */
    	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
    
    		netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
    
    		features &= ~NETIF_F_TSO;
    	}
    
    
    	/* Software GSO depends on SG. */
    	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
    		netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
    		features &= ~NETIF_F_GSO;
    	}
    
    
    	/* UFO needs SG and checksumming */
    
    	if (features & NETIF_F_UFO) {
    
    		/* maybe split UFO into V4 and V6? */
    		if (!((features & NETIF_F_GEN_CSUM) ||
    		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
    			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
    
    			netdev_info(dev,
    				"Dropping NETIF_F_UFO since no checksum offload features.\n");
    
    			features &= ~NETIF_F_UFO;
    		}
    
    		if (!(features & NETIF_F_SG)) {
    
    			netdev_info(dev,
    				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
    
    			features &= ~NETIF_F_UFO;
    		}
    	}
    
    	return features;
    }
    EXPORT_SYMBOL(netdev_fix_features);
    
    
    void netdev_update_features(struct net_device *dev)
    {
    	u32 features;
    	int err = 0;
    
    	features = netdev_get_wanted_features(dev);
    
    	if (dev->netdev_ops->ndo_fix_features)
    		features = dev->netdev_ops->ndo_fix_features(dev, features);
    
    	/* driver might be less strict about feature dependencies */
    	features = netdev_fix_features(dev, features);
    
    	if (dev->features == features)
    		return;
    
    	netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
    		dev->features, features);
    
    	if (dev->netdev_ops->ndo_set_features)
    		err = dev->netdev_ops->ndo_set_features(dev, features);
    
    	if (!err)
    		dev->features = features;
    	else if (err < 0)
    		netdev_err(dev,
    			"set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
    			err, features, dev->features);
    }
    EXPORT_SYMBOL(netdev_update_features);
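
/*
 * Illustrative sketch (not part of dev.c): how a driver typically plugs into
 * the sequence above.  netdev_update_features() asks the driver to trim the
 * wanted set via ndo_fix_features, applies the generic netdev_fix_features()
 * dependencies, and finally pushes the result through ndo_set_features.  The
 * "foo" driver, struct foo_priv, foo_hw_apply_features() and
 * FOO_CSUM_MTU_LIMIT are hypothetical.
 */
#if 0
static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	/* Hypothetical constraint: the hardware cannot checksum jumbo
	 * frames, so drop checksum offload above a certain MTU; the
	 * generic code above will then also drop SG/TSO/GSO for us.
	 */
	if (dev->mtu > FOO_CSUM_MTU_LIMIT)
		features &= ~NETIF_F_ALL_CSUM;
	return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
	/* Push the agreed-upon feature set to hardware; 0 on success. */
	return foo_hw_apply_features(netdev_priv(dev), features);
}

static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	/* Constraints changed: re-run the fix/set sequence above. */
	netdev_update_features(dev);
	return 0;
}
#endif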
    
    
    /**
     *	netif_stacked_transfer_operstate -	transfer operstate
     *	@rootdev: the root or lower level device to transfer state from
     *	@dev: the device to transfer operstate to
     *
     *	Transfer operational state from root to device. This is normally
     *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
     */
    void netif_stacked_transfer_operstate(const struct net_device *rootdev,
    					struct net_device *dev)
    {
    	if (rootdev->operstate == IF_OPER_DORMANT)
    		netif_dormant_on(dev);
    	else
    		netif_dormant_off(dev);
    
    	if (netif_carrier_ok(rootdev)) {
    		if (!netif_carrier_ok(dev))
    			netif_carrier_on(dev);
    	} else {
    		if (netif_carrier_ok(dev))
    			netif_carrier_off(dev);
    	}
    }
    EXPORT_SYMBOL(netif_stacked_transfer_operstate);
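
/*
 * Illustrative sketch (not part of dev.c): a stacking driver mirroring its
 * lower device's state with netif_stacked_transfer_operstate() from a
 * netdevice notifier.  In this kernel the notifier's ptr is the lower
 * struct net_device itself; foo_upper_lookup() and the "foo_upper" driver
 * are hypothetical.
 */
#if 0
static int foo_upper_netdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;
	struct net_device *upper = foo_upper_lookup(lower);	/* hypothetical */

	if (!upper)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		/* Propagate carrier and dormant state to the upper device. */
		netif_stacked_transfer_operstate(lower, upper);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_upper_notifier = {
	.notifier_call = foo_upper_netdev_event,
};
/* registered elsewhere with register_netdevice_notifier(&foo_upper_notifier) */
#endif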
    
    
    #ifdef CONFIG_RPS
    
    static int netif_alloc_rx_queues(struct net_device *dev)
    {
    	unsigned int i, count = dev->num_rx_queues;
    
    	struct netdev_rx_queue *rx;
    
    	BUG_ON(count < 1);
    
    	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		pr_err("netdev: Unable to allocate %u rx queues.\n", count);
		return -ENOMEM;
	}
	dev->_rx = rx;
    
    	for (i = 0; i < count; i++)
    
		rx[i].dev = dev;

	return 0;
}
#endif
    
    static void netdev_init_one_queue(struct net_device *dev,
    				  struct netdev_queue *queue, void *_unused)
    {
    	/* Initialize queue lock */
    	spin_lock_init(&queue->_xmit_lock);
    	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
    	queue->xmit_lock_owner = -1;
    
    	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
    
    	queue->dev = dev;
    }
    
    
    static int netif_alloc_netdev_queues(struct net_device *dev)
    {
    	unsigned int count = dev->num_tx_queues;
    	struct netdev_queue *tx;
    
    	BUG_ON(count < 1);
    
    	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
    	if (!tx) {
    		pr_err("netdev: Unable to allocate %u tx queues.\n",
    		       count);
    		return -ENOMEM;
    	}
    	dev->_tx = tx;
    
    	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
    	spin_lock_init(&dev->tx_global_lock);
    
    
	return 0;
}

    /**
     *	register_netdevice	- register a network device
     *	@dev: device to register
     *
     *	Take a completed network device structure and add it to the kernel
     *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
     *	chain. 0 is returned on success. A negative errno code is returned
     *	on a failure to set up the device, or if the name is a duplicate.
     *
     *	Callers must hold the rtnl semaphore. You may want
     *	register_netdev() instead of this.
     *
     *	BUGS:
     *	The locking appears insufficient to guarantee two parallel registers
     *	will not get the same name.
     */
    
    int register_netdevice(struct net_device *dev)
    {
    	int ret;
    
    	struct net *net = dev_net(dev);
    
    
    	BUG_ON(dev_boot_phase);
    	ASSERT_RTNL();
    
    
    	/* When net_device's are persistent, this will be fatal. */
    	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
    
    
    
    	spin_lock_init(&dev->addr_list_lock);
    
    	netdev_set_addr_lockdep_class(dev);
    
    
    	dev->iflink = -1;
    
    	/* Init, if this function is available */
    
    	if (dev->netdev_ops->ndo_init) {
    		ret = dev->netdev_ops->ndo_init(dev);
    
    		if (ret) {
    			if (ret > 0)
				ret = -EIO;
			goto out;
    		}
    	}
    
	ret = dev_get_valid_name(dev, dev->name, 0);
	if (ret)
		goto err_uninit;
    
    
    	dev->ifindex = dev_new_index(net);
    
    	if (dev->iflink == -1)
    		dev->iflink = dev->ifindex;
    
    
    	/* Transfer changeable features to wanted_features and enable
    	 * software offloads (GSO and GRO).
    	 */
    	dev->hw_features |= NETIF_F_SOFT_FEATURES;
    
    	dev->features |= NETIF_F_SOFT_FEATURES;
    	dev->wanted_features = dev->features & dev->hw_features;
    
    
    
    	/* Avoid warning from netdev_fix_features() for GSO without SG */
    
    	if (!(dev->wanted_features & NETIF_F_SG)) {
    
    		dev->wanted_features &= ~NETIF_F_GSO;
    
    		dev->features &= ~NETIF_F_GSO;
    	}
    
    	/* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
    	 * vlan_dev_init() will do the dev->features check, so these features
	 * are enabled only if supported by underlying device.
	 */
	dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
    
    	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
    	ret = notifier_to_errno(ret);
    	if (ret)
    		goto err_uninit;
    
    
	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;
    
    
    	/*
    	 *	Default initial state at registry is that the
    	 *	device is present.
    	 */
    
    	set_bit(__LINK_STATE_PRESENT, &dev->state);
    
    	dev_init_scheduler(dev);
    	dev_hold(dev);
    
    
    	/* Notify protocols, that a new device appeared. */
    
    	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
    
    	ret = notifier_to_errno(ret);
    
    	if (ret) {
    		rollback_registered(dev);
    		dev->reg_state = NETREG_UNREGISTERED;
    	}
    
    	/*
    	 *	Prevent userspace races by waiting until the network
    	 *	device is fully setup before sending notifications.
    	 */
    
    	if (!dev->rtnl_link_ops ||
    	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
    		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
    
    
    out:
    	return ret;
    
err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
    }
    
    EXPORT_SYMBOL(register_netdevice);
    
    
    
    /**
     *	init_dummy_netdev	- init a dummy network device for NAPI
     *	@dev: device to init
     *
     *	This takes a network device structure and initialize the minimum
     *	amount of fields so it can be used to schedule NAPI polls without
     *	registering a full blown interface. This is to be used by drivers
     *	that need to tie several hardware interfaces to a single NAPI
     *	poll scheduler due to HW limitations.
     */
    int init_dummy_netdev(struct net_device *dev)
    {
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
    	 * NAPI code and this dummy netdev is supposed to be
    	 * only ever used for NAPI polls
    	 */
    	memset(dev, 0, sizeof(struct net_device));
    
    	/* make sure we BUG if trying to hit standard
    	 * register/unregister code path
    	 */
    	dev->reg_state = NETREG_DUMMY;
    
    	/* NAPI wants this */
    	INIT_LIST_HEAD(&dev->napi_list);
    
    	/* a dummy interface is started by default */
    	set_bit(__LINK_STATE_PRESENT, &dev->state);
    	set_bit(__LINK_STATE_START, &dev->state);
    
    
	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

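
/*
 * Illustrative sketch (not part of dev.c): the init_dummy_netdev() pattern.
 * A driver with one PCI function but several NAPI contexts can anchor them
 * on an embedded, never-registered dummy device.  The "foo" names and
 * foo_poll() are hypothetical.
 */
#if 0
struct foo_adapter {
	struct net_device dummy;	/* NAPI anchor only, never registered */
	struct napi_struct napi;
};

static int foo_adapter_init(struct foo_adapter *ad)
{
	init_dummy_netdev(&ad->dummy);
	netif_napi_add(&ad->dummy, &ad->napi, foo_poll, 64);
	napi_enable(&ad->napi);
	return 0;
}
#endif
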
    /**
     *	register_netdev	- register a network device
     *	@dev: device to register
     *
     *	Take a completed network device structure and add it to the kernel
     *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
     *	chain. 0 is returned on success. A negative errno code is returned
     *	on a failure to set up the device, or if the name is a duplicate.
     *
    
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
     *	and expands the device name if you passed a format string to
     *	alloc_netdev.
     */
    int register_netdev(struct net_device *dev)
    {
    	int err;
    
    	rtnl_lock();
    
    	/*
    	 * If the name is a format string the caller wants us to do a
    	 * name allocation.
    	 */
    	if (strchr(dev->name, '%')) {
    		err = dev_alloc_name(dev, dev->name);
    		if (err < 0)
    			goto out;
    	}
    
    	err = register_netdevice(dev);
    out:
    	rtnl_unlock();
    	return err;
    }
    EXPORT_SYMBOL(register_netdev);
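
/*
 * Illustrative sketch (not part of dev.c): the usual creation path seen from
 * a driver.  register_netdev() takes the rtnl lock itself, and the "%d" in
 * the name asks the core to pick a free unit number.  struct foo_priv and
 * foo_setup() are hypothetical.
 */
#if 0
static int foo_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		/* Never registered, so a plain free_netdev() is enough. */
		free_netdev(dev);
		return err;
	}
	return 0;
}
#endif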
    
    
    int netdev_refcnt_read(const struct net_device *dev)
    {
    	int i, refcnt = 0;
    
    	for_each_possible_cpu(i)
    		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
    	return refcnt;
    }
    EXPORT_SYMBOL(netdev_refcnt_read);
    
    
    /*
     * netdev_wait_allrefs - wait until all references are gone.
     *
     * This is called when unregistering network devices.
     *
     * Any protocol or device that holds a reference should register
     * for netdevice notification, and cleanup and put back the
     * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
     */
    static void netdev_wait_allrefs(struct net_device *dev)
    {
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

    	rebroadcast_time = warning_time = jiffies;
    
    	refcnt = netdev_refcnt_read(dev);
    
    	while (refcnt != 0) {
    
    		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
    
    			rtnl_lock();
    
    
    			/* Rebroadcast unregister notification */
    
    			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
    
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */
    
    			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
    				     &dev->state)) {
    				/* We must not have linkwatch events
    				 * pending on unregister. If this
    				 * happens, we simply run the queue
    				 * unscheduled, resulting in a noop
    				 * for this device.
    				 */
    				linkwatch_run_queue();
    			}
    
    
    			__rtnl_unlock();
    
    
    			rebroadcast_time = jiffies;
    		}
    
    		msleep(250);
    
    
    		refcnt = netdev_refcnt_read(dev);
    
    
    		if (time_after(jiffies, warning_time + 10 * HZ)) {
    			printk(KERN_EMERG "unregister_netdevice: "
    			       "waiting for %s to become free. Usage "
    			       "count = %d\n",
    
    			       dev->name, refcnt);
    
    			warning_time = jiffies;
    		}
    	}
    }
    
    /* The sequence is:
     *
     *	rtnl_lock();
     *	...
     *	register_netdevice(x1);
     *	register_netdevice(x2);
     *	...
     *	unregister_netdevice(y1);
     *	unregister_netdevice(y2);
     *      ...
     *	rtnl_unlock();
     *	free_netdev(y1);
     *	free_netdev(y2);
     *
    
     * We are invoked by rtnl_unlock().
    
     * This allows us to deal with problems:
    
     * 1) We can delete sysfs objects which invoke hotplug
    
     *    without deadlocking with linkwatch via keventd.
     * 2) Since we run with the RTNL semaphore not held, we can sleep
     *    safely in order to wait for the netdev refcnt to drop to zero.
    
     *
     * We must not return until all unregister events added during
     * the interval the lock was held have been completed.
    
     */
    void netdev_run_todo(void)
    {
    
	struct list_head list;
    
    	/* Snapshot list, allow later requests */
    
    	list_replace_init(&net_todo_list, &list);
    
    
    	__rtnl_unlock();
    
    	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
    
    		list_del(&dev->todo_list);
    
    
    		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
    			printk(KERN_ERR "network todo '%s' but state %d\n",
    			       dev->name, dev->reg_state);
    			dump_stack();
    			continue;
    		}
    
    
    
    		dev->reg_state = NETREG_UNREGISTERED;
    
    
    
    		on_each_cpu(flush_backlog, dev, 1);
    
		netdev_wait_allrefs(dev);
    
    
    		BUG_ON(netdev_refcnt_read(dev));
    
    		WARN_ON(rcu_dereference_raw(dev->ip_ptr));
    
    		WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
    
    		WARN_ON(dev->dn_ptr);
    
    
    
    		if (dev->destructor)
    			dev->destructor(dev);
    
    
    		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

    /* Convert net_device_stats to rtnl_link_stats64.  They have the same
     * fields in the same order, with only the type differing.
     */
    static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
    				    const struct net_device_stats *netdev_stats)
    {
    #if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
    #else
    	size_t i, n = sizeof(*stats64) / sizeof(u64);
    	const unsigned long *src = (const unsigned long *)netdev_stats;
    	u64 *dst = (u64 *)stats64;
    
    	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
    		     sizeof(*stats64) / sizeof(u64));
    	for (i = 0; i < n; i++)
    		dst[i] = src[i];
    #endif
    }
    
    
    /**
     *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
    struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;
    
    
    	if (ops->ndo_get_stats64) {
    		memset(storage, 0, sizeof(*storage));
    
    		ops->ndo_get_stats64(dev, storage);
    	} else if (ops->ndo_get_stats) {
    
    		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
    
    	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
    
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
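
/*
 * Illustrative sketch (not part of dev.c): a driver-side ndo_get_stats64
 * as consumed by dev_get_stats() above.  The core has already zeroed
 * *storage, so the driver only fills in what it tracks.  The "foo" driver
 * and its priv->hw_* counters are hypothetical.
 */
#if 0
static struct rtnl_link_stats64 *foo_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *storage)
{
	struct foo_priv *priv = netdev_priv(dev);

	storage->rx_packets = priv->hw_rx_packets;
	storage->rx_bytes   = priv->hw_rx_bytes;
	storage->tx_packets = priv->hw_tx_packets;
	storage->tx_bytes   = priv->hw_tx_bytes;
	return storage;
}
#endif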
    
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);
    
    #ifdef CONFIG_NET_CLS_ACT
    	if (queue)
    		return queue;
    	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
    	if (!queue)
    		return NULL;
    	netdev_init_one_queue(dev, queue, NULL);
    	queue->qdisc = &noop_qdisc;
    	queue->qdisc_sleeping = &noop_qdisc;
    	rcu_assign_pointer(dev->ingress_queue, queue);
    #endif
	return queue;
}

/**
 *	alloc_netdev_mqs - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@txqs:		the number of TX subqueues to allocate
 *	@rxqs:		the number of RX subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device.
     */
    
    struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
    		void (*setup)(struct net_device *),
    		unsigned int txqs, unsigned int rxqs)
    
    {
    	struct net_device *dev;
    
	size_t alloc_size;
	struct net_device *p;
    
    
    	BUG_ON(strlen(name) >= sizeof(dev->name));
    
    
    	if (txqs < 1) {
    
    		pr_err("alloc_netdev: Unable to allocate device "
    		       "with zero queues.\n");
    		return NULL;
    	}
    
    
    #ifdef CONFIG_RPS
    	if (rxqs < 1) {
    		pr_err("alloc_netdev: Unable to allocate device "
    		       "with zero RX queues.\n");
    		return NULL;
    	}
    #endif
    
    
    	alloc_size = sizeof(struct net_device);
    
    	if (sizeof_priv) {
    		/* ensure 32-byte alignment of private area */
    
    		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
    
    		alloc_size += sizeof_priv;
    	}
    	/* ensure 32-byte alignment of whole construct */
    
    	alloc_size += NETDEV_ALIGN - 1;
    
    
    
    	p = kzalloc(alloc_size, GFP_KERNEL);
    
    	if (!p) {
    
    		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
    
    		return NULL;
    	}
    
    
    	dev = PTR_ALIGN(p, NETDEV_ALIGN);
    
    	dev->padded = (char *)dev - (char *)p;
    
    	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;
    
    	dev_mc_init(dev);
    
    
    
    	dev->gso_max_size = GSO_MAX_SIZE;
    
    	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
    	dev->ethtool_ntuple_list.count = 0;
    	INIT_LIST_HEAD(&dev->napi_list);
    	INIT_LIST_HEAD(&dev->unreg_list);
    	INIT_LIST_HEAD(&dev->link_watch_list);
    	dev->priv_flags = IFF_XMIT_DST_RELEASE;
    	setup(dev);
    
    
    	dev->num_tx_queues = txqs;
    	dev->real_num_tx_queues = txqs;
    
    	if (netif_alloc_netdev_queues(dev))
    
    #ifdef CONFIG_RPS
    
    	dev->num_rx_queues = rxqs;
    	dev->real_num_rx_queues = rxqs;
    
    	if (netif_alloc_rx_queues(dev))
    
    #endif
    
    	strcpy(dev->name, name);
    
    	dev->group = INIT_NETDEV_GROUP;
    
    	return dev;
    
    free_pcpu:
    	free_percpu(dev->pcpu_refcnt);
    
    #ifdef CONFIG_RPS
    	kfree(dev->_rx);
    #endif
    
    
    free_p:
    	kfree(p);
    	return NULL;
    
    }
    
    EXPORT_SYMBOL(alloc_netdev_mqs);
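
/*
 * Illustrative sketch (not part of dev.c): allocating a multiqueue Ethernet
 * device with alloc_netdev_mqs().  ether_setup() fills in generic Ethernet
 * defaults; struct foo_priv and the queue counts are hypothetical.
 */
#if 0
static struct net_device *foo_alloc(void)
{
	/* 8 TX and 8 RX queues; "foo%d" lets the core pick the unit. */
	return alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
				ether_setup, 8, 8);
}
#endif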
    
    
    /**
     *	free_netdev - free network device
     *	@dev: device
     *
    
     *	This function does the last stage of destroying an allocated device
     * 	interface. The reference to the device object is released.
    
     *	If this is the last reference then it will be freed.
     */
    void free_netdev(struct net_device *dev)
    {
    
    	struct napi_struct *p, *n;
    
    
    	kfree(dev->_tx);
    
    #ifdef CONFIG_RPS
    	kfree(dev->_rx);
    #endif
    
    	kfree(rcu_dereference_raw(dev->ingress_queue));
    
    
    	/* Flush device addresses */
    	dev_addr_flush(dev);
    
    
    	/* Clear ethtool n-tuple list */
    	ethtool_ntuple_flush(dev);
    
    
    	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
    		netif_napi_del(p);
    
    
    	free_percpu(dev->pcpu_refcnt);
    	dev->pcpu_refcnt = NULL;
    
    
    	/*  Compatibility with error handling in drivers */
    
    	if (dev->reg_state == NETREG_UNINITIALIZED) {
    		kfree((char *)dev - dev->padded);
    		return;
    	}
    
    	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
    	dev->reg_state = NETREG_RELEASED;
    
    
    	/* will free via device release */
    	put_device(&dev->dev);
    
    }
    
    EXPORT_SYMBOL(free_netdev);
    
    /**
     *	synchronize_net -  Synchronize with packet receive processing
     *
     *	Wait for packets currently being received to be done.
     *	Does not block later packets from starting.
     */
    
void synchronize_net(void)
    {
	might_sleep();
	synchronize_rcu();
    }
    
    EXPORT_SYMBOL(synchronize_net);
    
    
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
     *
     *	Callers must hold the rtnl semaphore.  You may want
     *	unregister_netdev() instead of this.
     */
    
    
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
    
    	} else {
    		rollback_registered(dev);
    		/* Finish processing unregister after unlock */
    		net_set_todo(dev);
    	}
    
    }
    
    EXPORT_SYMBOL(unregister_netdevice_queue);
    
    
    
    /**
     *	unregister_netdevice_many - unregister many devices
     *	@head: list of devices
     */
    void unregister_netdevice_many(struct list_head *head)
    {
    	struct net_device *dev;
    
    	if (!list_empty(head)) {
    		rollback_registered_many(head);
    		list_for_each_entry(dev, head, unreg_list)
    			net_set_todo(dev);
    	}
    }
    
    EXPORT_SYMBOL(unregister_netdevice_many);
    
    /**
     *	unregister_netdev - remove device from the kernel
     *	@dev: device
     *
     *	This function shuts down a device interface and removes it
    
 *	from the kernel tables.
     *
     *	This is just a wrapper for unregister_netdevice that takes
     *	the rtnl semaphore.  In general you want to use this and not
     *	unregister_netdevice.
     */
    void unregister_netdev(struct net_device *dev)
    {
    	rtnl_lock();
    	unregister_netdevice(dev);
    	rtnl_unlock();
    }
    EXPORT_SYMBOL(unregister_netdev);
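
/*
 * Illustrative sketch (not part of dev.c): the usual teardown order.
 * unregister_netdev() takes the rtnl lock and, via the todo list processed
 * in netdev_run_todo(), waits for all references to drop before returning,
 * so free_netdev() is safe immediately afterwards.  foo_destroy() is
 * hypothetical.
 */
#if 0
static void foo_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}
#endif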
    
    
    /**
 *	dev_change_net_namespace - move device to a different network namespace
     *	@dev: device
     *	@net: network namespace
     *	@pat: If not NULL name pattern to try if the current device name
     *	      is already taken in the destination network namespace.
     *
     *	This function shuts down a device interface and moves it
     *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
     *
     *	Callers must hold the rtnl semaphore.
     */
    
    int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
    {
    	int err;
    
    	ASSERT_RTNL();