
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);
	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
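
/* Example usage (a minimal sketch, not part of the original file): setting
 * an alias from a NUL-terminated string. dev_set_alias() asserts RTNL and
 * returns the copied length on success, so a hypothetical caller might
 * normalize that to zero.
 */
static int example_set_alias(struct net_device *dev, const char *alias)
{
	int ret;

	rtnl_lock();
	ret = dev_set_alias(dev, alias, strlen(alias));
	rtnl_unlock();

	return ret < 0 ? ret : 0;
}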


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_err("Loading kernel module for a network device "
"with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s "
"instead\n", name);
	}
}
EXPORT_SYMBOL(dev_load);
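
/* Example usage (a sketch with a hypothetical caller): ioctl-style lookup
 * paths call dev_load() so a module aliased "netdev-<name>" can be loaded
 * before the interface is looked up again.
 */
static struct net_device *example_get_dev(struct net *net, const char *name)
{
	dev_load(net, name);			/* may request_module() */
	return dev_get_by_name(net, name);	/* holds a ref, NULL if absent */
}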

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);
	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);
	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
	}
	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
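
/* Example usage (illustrative only): dev_open() and dev_close() must run
 * under RTNL, since they fire notifiers and call into driver ndo hooks.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* nop (returns 0) if already IFF_UP */
	rtnl_unlock();
	return err;
}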

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}
	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	u32 flags;

	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
		flags = dev->ethtool_ops->get_flags(dev);
	else
		flags = ethtool_op_get_flags(dev);

	if (!(flags & ETH_FLAG_LRO))
		return;

	__ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);

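/* Example usage (illustrative only): forwarding setups disable LRO on a
 * port before frames received on it may be forwarded elsewhere.
 */
static void example_add_fwd_port(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_disable_lro(dev);
	/* ... link the port into the forwarding device ... */
}
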
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
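
/* Example (illustrative only): a minimal notifier block. Because
 * registration replays NETDEV_REGISTER and NETDEV_UP for devices that
 * already exist, the callback sees every device even if it loads late.
 * In this kernel the notifier's data pointer is the net_device itself.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};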

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);
	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
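
/* Example usage (a condensed sketch modelled on pseudo-drivers like veth;
 * example_get_peer() is hypothetical): an ndo_start_xmit that loops the
 * frame into a peer device's receive path instead of real hardware.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);
	unsigned int len = skb->len;

	/* dev_forward_skb() consumes the skb on both success and drop */
	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		dev->stats.tx_dropped++;
	}
	return NETDEV_TX_OK;
}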

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if TC0
 * is invalid, nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warning("Number of in use tx queues changed "
			   "invalidating tc mappings. Priority "
			   "traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warning("Number of in use tx queues "
				   "changed. Priority %i to tc "
				   "mapping %i is no longer valid "
				   "setting map to 0\n",
				   i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
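
/* Example usage (illustrative; the driver-side view of the mapping checked
 * above): declaring two traffic classes over eight tx queues, so queues
 * 0-3 serve TC0 and queues 4-7 serve TC1.
 */
static void example_declare_tcs(struct net_device *dev)
{
	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: 4 queues at offset 0 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: 4 queues at offset 4 */
	netdev_set_prio_tc_map(dev, 0, 0);	/* skb priority 0 -> TC0 */
	netdev_set_prio_tc_map(dev, 1, 1);	/* skb priority 1 -> TC1 */
}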

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
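
/* Example usage (a sketch; called before register_netdev(), where both
 * helpers always succeed, or later under RTNL): a multiqueue driver
 * resizing its active queue count. Without CONFIG_RPS the rx variant is
 * a stub that returns 0.
 */
static int example_set_queue_count(struct net_device *dev, unsigned int n)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, n);
}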

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
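
/* Example usage (illustrative only): a tx-completion handler that may be
 * reached from hardirq context, where plain dev_kfree_skb() is not safe.
 */
static void example_tx_complete(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb_any(skb);		/* picks the context-safe variant */
}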


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
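
/* Example usage (a sketch of the common PCI suspend/resume pairing; the
 * driver hooks are hypothetical): detach before powering down so the
 * stack stops queuing, attach after restoring to restart the queues and
 * the watchdog.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);
	/* ... stop DMA, save config, enter low power state ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... restore power and device state, restart DMA ... */
	netif_device_attach(dev);
	return 0;
}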

/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
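
/* Example usage (a sketch; example_hw_can_csum() is a hypothetical
 * capability test): resolving CHECKSUM_PARTIAL in software when the
 * hardware cannot offload this particular packet.
 */
static int example_fixup_csum(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !example_hw_can_csum(skb))
		return skb_checksum_help(skb);	/* 0, or an expand error */
	return 0;
}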

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
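
/* Example usage (a condensed sketch of the dev_gso_segment()/xmit pattern
 * below; example_xmit_one() is hypothetical): segment in software, free
 * the original, and transmit each segment separately.
 */
static int example_gso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, dev->features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)		/* header verification only; send as-is */
		return example_xmit_one(skb, dev);

	consume_skb(skb);	/* payload now lives in the segment list */
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		example_xmit_one(nskb, dev);
	}
	return 0;
}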

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and allows mapping all of memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *	@features: device features as applicable to this skb
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;