     *	...
     *	unregister_netdevice(y1);
     *	unregister_netdevice(y2);
     *      ...
     *	rtnl_unlock();
     *	free_netdev(y1);
     *	free_netdev(y2);
     *
     * We are invoked by rtnl_unlock() after it drops the semaphore.
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
     * 2) Since we run with the RTNL semaphore not held, we can sleep
     *    safely in order to wait for the netdev refcnt to drop to zero.
     */
    
static DEFINE_MUTEX(net_todo_run_mutex);

    void netdev_run_todo(void)
{
	struct list_head list = LIST_HEAD_INIT(list);

	/* Need to guard against multiple cpu's getting out of order. */
	mutex_lock(&net_todo_run_mutex);

    	/* Not safe to do outside the semaphore.  We must not return
    	 * until all unregister events invoked by the local processor
    	 * have been completed (either by this todo run, or one on
    	 * another cpu).
    	 */
    	if (list_empty(&net_todo_list))
    		goto out;
    
    	/* Snapshot list, allow later requests */
	spin_lock(&net_todo_list_lock);
	list_replace_init(&net_todo_list, &list);
	spin_unlock(&net_todo_list_lock);

    	while (!list_empty(&list)) {
    		struct net_device *dev
    			= list_entry(list.next, struct net_device, todo_list);
    		list_del(&dev->todo_list);
    
    
    		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
    			printk(KERN_ERR "network todo '%s' but state %d\n",
    			       dev->name, dev->reg_state);
    			dump_stack();
    			continue;
    		}
    
		dev->reg_state = NETREG_UNREGISTERED;

		/* May sleep while waiting for the refcount to drop to zero;
		 * safe because the RTNL semaphore is not held here.
		 */
		netdev_wait_allrefs(dev);

    		/* paranoia */
    		BUG_ON(atomic_read(&dev->refcnt));
    		BUG_TRAP(!dev->ip_ptr);
    		BUG_TRAP(!dev->ip6_ptr);
    		BUG_TRAP(!dev->dn_ptr);
    
    		if (dev->destructor)
    			dev->destructor(dev);
    
    
		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}

out:
	mutex_unlock(&net_todo_run_mutex);
}

static struct net_device_stats *internal_stats(struct net_device *dev)
{
	return &dev->stats;
}
    
/**
 *	alloc_netdev_mq - allocate network device
     *	@sizeof_priv:	size of private data to allocate space for
     *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@queue_count:	the number of subqueues to allocate
     *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
     */
    
    struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
    {
    	void *p;
    	struct net_device *dev;
    	int alloc_size;
    
    
    	BUG_ON(strlen(name) >= sizeof(dev->name));
    
    
    	alloc_size = sizeof(struct net_device) +
    		     sizeof(struct net_device_subqueue) * (queue_count - 1);
    	if (sizeof_priv) {
    		/* ensure 32-byte alignment of private area */
    		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
    		alloc_size += sizeof_priv;
    	}
    	/* ensure 32-byte alignment of whole construct */
    	alloc_size += NETDEV_ALIGN_CONST;
    
	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
    		return NULL;
    	}
    
    	dev = (struct net_device *)
    		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
    	dev->padded = (char *)dev - (char *)p;
    
    	if (sizeof_priv) {
    		dev->priv = ((char *)dev +
    			     ((sizeof(struct net_device) +
			       (sizeof(struct net_device_subqueue) *
				(queue_count - 1)) + NETDEV_ALIGN_CONST)
			      & ~NETDEV_ALIGN_CONST));
    	}
    
    	dev->egress_subqueue_count = queue_count;
    
    	dev->gso_max_size = GSO_MAX_SIZE;
    
	dev->get_stats = internal_stats;
    	setup(dev);
    	strcpy(dev->name, name);
    	return dev;
    }
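
/*
 * Illustrative sketch, kept under #if 0 so it is never built: how a
 * hypothetical driver might pair alloc_netdev_mq() with register_netdev()
 * and free_netdev().  my_priv, my_setup and my_probe are made-up names,
 * not kernel interfaces.
 */
#if 0
struct my_priv {
	int link_up;
};

static void my_setup(struct net_device *dev)
{
	/* driver-specific defaults would normally be set here */
	dev->mtu = 1500;
}

static int my_probe(void)
{
	struct net_device *dev;
	int err;

	/* private area sized for struct my_priv, one transmit subqueue */
	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d", my_setup, 1);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		/* free_netdev() handles a device that was never registered */
		free_netdev(dev);
		return err;
	}
	return 0;
}
#endif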
    
    /**
     *	free_netdev - free network device
     *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
     *	If this is the last reference then it will be freed.
     */
    void free_netdev(struct net_device *dev)
    {
    
	/*  Compatibility with error handling in drivers */
    	if (dev->reg_state == NETREG_UNINITIALIZED) {
    		kfree((char *)dev - dev->padded);
    		return;
    	}
    
    	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
    	dev->reg_state = NETREG_RELEASED;
    
    
    	/* will free via device release */
    	put_device(&dev->dev);
    
    }
    
/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
    
    /**
     *	unregister_netdevice - remove device from the kernel
     *	@dev: device
     *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
     *
     *	Callers must hold the rtnl semaphore.  You may want
     *	unregister_netdev() instead of this.
     */
    
    
void unregister_netdevice(struct net_device *dev)
{
	rollback_registered(dev);

    	/* Finish processing unregister after unlock */
    	net_set_todo(dev);
    }
    
    /**
     *	unregister_netdev - remove device from the kernel
     *	@dev: device
     *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
     *
     *	This is just a wrapper for unregister_netdevice that takes
     *	the rtnl semaphore.  In general you want to use this and not
     *	unregister_netdevice.
     */
    void unregister_netdev(struct net_device *dev)
    {
    	rtnl_lock();
    	unregister_netdevice(dev);
    	rtnl_unlock();
    }
    
    EXPORT_SYMBOL(unregister_netdev);
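
/*
 * Illustrative sketch, kept under #if 0: the two calling conventions for
 * unregistration.  A batch teardown holds the RTNL once, queues several
 * unregister_netdevice() calls, and frees the devices only after
 * rtnl_unlock() has run the todo list.  dev_a and dev_b are placeholders.
 */
#if 0
static void teardown_pair(struct net_device *dev_a, struct net_device *dev_b)
{
	rtnl_lock();
	unregister_netdevice(dev_a);
	unregister_netdevice(dev_b);
	rtnl_unlock();		/* runs netdev_run_todo() */

	free_netdev(dev_a);
	free_netdev(dev_b);
}

static void teardown_single(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes and drops the RTNL itself */
	free_netdev(dev);
}
#endif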
    
    
    /**
 *	dev_change_net_namespace - move device to a different network namespace
     *	@dev: device
     *	@net: network namespace
     *	@pat: If not NULL name pattern to try if the current device name
     *	      is already taken in the destination network namespace.
     *
     *	This function shuts down a device interface and moves it
     *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
     *
     *	Callers must hold the rtnl semaphore.
     */
    
    int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
    {
    	char buf[IFNAMSIZ];
    	const char *destname;
    	int err;
    
    	ASSERT_RTNL();
    
    	/* Don't allow namespace local devices to be moved. */
    	err = -EINVAL;
    	if (dev->features & NETIF_F_NETNS_LOCAL)
    		goto out;
    
	/* Ensure the device has been registered */
    	err = -EINVAL;
    	if (dev->reg_state != NETREG_REGISTERED)
    		goto out;
    
	/* Get out if there is nothing to do */
	err = 0;
	if (dev->nd_net == net)
		goto out;
    
    	/* Pick the destination device name, and ensure
    	 * we can use it in the destination network namespace.
    	 */
    	err = -EEXIST;
    	destname = dev->name;
    	if (__dev_get_by_name(net, destname)) {
    		/* We get here if we can't use the current device name */
    		if (!pat)
    			goto out;
    		if (!dev_valid_name(pat))
    			goto out;
    		if (strchr(pat, '%')) {
    			if (__dev_alloc_name(net, pat, buf) < 0)
    				goto out;
    			destname = buf;
    		} else
    			destname = pat;
    		if (__dev_get_by_name(net, destname))
    			goto out;
    	}
    
	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

    	/* And unlink it from device chain */
    	err = -ENODEV;
    	unlist_netdevice(dev);
    
    	synchronize_net();
    
    	/* Shutdown queueing discipline. */
    	dev_shutdown(dev);
    
    	/* Notify protocols, that we are about to destroy
    	   this device. They should clean all the things.
    	*/
    	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
    
    	/*
    	 *	Flush the unicast and multicast chains
    	 */
    	dev_addr_discard(dev);
    
	/* Actually switch the network namespace */
	dev->nd_net = net;

    	/* Assign the new device name */
    	if (destname != dev->name)
    		strcpy(dev->name, destname);
    
    	/* If there is an ifindex conflict assign a new one */
    	if (__dev_get_by_index(net, dev->ifindex)) {
    		int iflink = (dev->iflink == dev->ifindex);
    		dev->ifindex = dev_new_index(net);
    		if (iflink)
    			dev->iflink = dev->ifindex;
    	}
    
    
	/* Fixup kobjects */
	netdev_unregister_kobject(dev);
	err = netdev_register_kobject(dev);
	WARN_ON(err);

    	/* Add the device back in the hashes */
    	list_netdevice(dev);
    
    	/* Notify protocols, that a new device appeared. */
    	call_netdevice_notifiers(NETDEV_REGISTER, dev);
    
    	synchronize_net();
    	err = 0;
    out:
    	return err;
    }
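
/*
 * Illustrative sketch, kept under #if 0: moving a registered device into
 * another namespace.  "target_net" stands for a struct net the caller
 * already holds; how it is obtained is outside the scope of this file, and
 * move_dev_to_ns is a made-up helper name.
 */
#if 0
static int move_dev_to_ns(const char *ifname, struct net *target_net)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(&init_net, ifname);
	if (dev)
		/* fall back to eth%d if the name is taken in target_net */
		err = dev_change_net_namespace(dev, target_net, "eth%d");
	rtnl_unlock();
	return err;
}
#endif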
    
    
    static int dev_cpu_callback(struct notifier_block *nfb,
    			    unsigned long action,
    			    void *ocpu)
    {
    	struct sk_buff **list_skb;
    	struct net_device **list_net;
    	struct sk_buff *skb;
    	unsigned int cpu, oldcpu = (unsigned long)ocpu;
    	struct softnet_data *sd, *oldsd;
    
    
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
    		return NOTIFY_OK;
    
    	local_irq_disable();
    	cpu = smp_processor_id();
    	sd = &per_cpu(softnet_data, cpu);
    	oldsd = &per_cpu(softnet_data, oldcpu);
    
    	/* Find end of our completion_queue. */
    	list_skb = &sd->completion_queue;
    	while (*list_skb)
    		list_skb = &(*list_skb)->next;
    	/* Append completion queue from offline CPU. */
    	*list_skb = oldsd->completion_queue;
    	oldsd->completion_queue = NULL;
    
    	/* Find end of our output_queue. */
    	list_net = &sd->output_queue;
    	while (*list_net)
    		list_net = &(*list_net)->next_sched;
    	/* Append output queue from offline CPU. */
    	*list_net = oldsd->output_queue;
    	oldsd->output_queue = NULL;
    
    	raise_softirq_irqoff(NET_TX_SOFTIRQ);
    	local_irq_enable();
    
    	/* Process offline CPU's input_pkt_queue */
    	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
    		netif_rx(skb);
    
    	return NOTIFY_OK;
    }
    
    
#ifdef CONFIG_NET_DMA
/**
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes.  The net_dma client tries to have one DMA channel per CPU.
 */
static void net_dma_rebalance(struct net_dma *net_dma)
{
	unsigned int cpu, i, n, chan_idx;
	struct dma_chan *chan;
    
	if (cpus_empty(net_dma->channel_mask)) {
		for_each_online_cpu(cpu)
			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
		return;
    	}
    
    	i = 0;
    	cpu = first_cpu(cpu_online_map);
    
    
    	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
    		chan = net_dma->channels[chan_idx];
    
    		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
    		   + (i < (num_online_cpus() %
			cpus_weight(net_dma->channel_mask)) ? 1 : 0));

		while (n) {
			per_cpu(softnet_data, cpu).net_dma = chan;
			cpu = next_cpu(cpu, cpu_online_map);
			n--;
    		}
    		i++;
    	}
    }
    
    /**
     * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
 * @chan: DMA channel for the event
 * @state: DMA state to be handled
 */
    static enum dma_state_client
    netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
    	enum dma_state state)
    {
    	int i, found = 0, pos = -1;
    	struct net_dma *net_dma =
    		container_of(client, struct net_dma, client);
    	enum dma_state_client ack = DMA_DUP; /* default: take no action */
    
    	spin_lock(&net_dma->lock);
    	switch (state) {
    	case DMA_RESOURCE_AVAILABLE:
    
		for (i = 0; i < nr_cpu_ids; i++)
    			if (net_dma->channels[i] == chan) {
    				found = 1;
    				break;
    			} else if (net_dma->channels[i] == NULL && pos < 0)
    				pos = i;
    
    		if (!found && pos >= 0) {
    			ack = DMA_ACK;
    			net_dma->channels[pos] = chan;
    			cpu_set(pos, net_dma->channel_mask);
    			net_dma_rebalance(net_dma);
    		}
    
    		break;
    	case DMA_RESOURCE_REMOVED:
    
		for (i = 0; i < nr_cpu_ids; i++)
    			if (net_dma->channels[i] == chan) {
    				found = 1;
    				pos = i;
    				break;
    			}
    
    		if (found) {
    			ack = DMA_ACK;
    			cpu_clear(pos, net_dma->channel_mask);
    			net_dma->channels[i] = NULL;
    			net_dma_rebalance(net_dma);
		}
		break;
	default:
		break;
	}
	spin_unlock(&net_dma->lock);

	return ack;
}
    
    /**
 * netdev_dma_register - register the networking subsystem as a DMA client
     */
    static int __init netdev_dma_register(void)
    {
    
    	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
    								GFP_KERNEL);
    	if (unlikely(!net_dma.channels)) {
    		printk(KERN_NOTICE
    				"netdev_dma: no memory for net_dma.channels\n");
    		return -ENOMEM;
    	}
    
    	spin_lock_init(&net_dma.lock);
    	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
    	dma_async_client_register(&net_dma.client);
    	dma_async_client_chan_request(&net_dma.client);
    
    	return 0;
    }
    
    #else
    static int __init netdev_dma_register(void) { return -ENODEV; }
    #endif /* CONFIG_NET_DMA */
    
    /**
 *	netdev_compute_features - compute conjunction of two feature sets
     *	@all: first feature set
     *	@one: second feature set
     *
     *	Computes a new feature set after adding a device with feature set
     *	@one to the master device with current feature set @all.  Returns
     *	the new feature set.
     */
    int netdev_compute_features(unsigned long all, unsigned long one)
    {
    	/* if device needs checksumming, downgrade to hw checksumming */
    	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
    		all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
    
    	/* if device can't do all checksum, downgrade to ipv4/ipv6 */
    	if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
    		all ^= NETIF_F_HW_CSUM
    			| NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
    
    	if (one & NETIF_F_GSO)
    		one |= NETIF_F_GSO_SOFTWARE;
    	one |= NETIF_F_GSO;
    
    	/* If even one device supports robust GSO, enable it for all. */
    	if (one & NETIF_F_GSO_ROBUST)
    		all |= NETIF_F_GSO_ROBUST;
    
    	all &= one | NETIF_F_LLTX;
    
    	if (!(all & NETIF_F_ALL_CSUM))
    		all &= ~NETIF_F_SG;
    	if (!(all & NETIF_F_SG))
    		all &= ~NETIF_F_GSO_MASK;
    
    	return all;
    }
    EXPORT_SYMBOL(netdev_compute_features);
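
/*
 * Illustrative sketch, kept under #if 0: how a bonding/bridge-style master
 * might fold two slaves' feature sets into its own with
 * netdev_compute_features().  The baseline flags and the helper name are
 * examples only, not copied from the bonding or bridge code.
 */
#if 0
static void master_recompute_features(struct net_device *master,
				      struct net_device *slave1,
				      struct net_device *slave2)
{
	unsigned long all = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO |
			    NETIF_F_GSO_ROBUST | NETIF_F_LLTX;

	all = netdev_compute_features(all, slave1->features);
	all = netdev_compute_features(all, slave2->features);
	master->features = all;
}
#endif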
    
    
    static struct hlist_head *netdev_create_hash(void)
    {
    	int i;
    	struct hlist_head *hash;
    
    	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
    	if (hash != NULL)
    		for (i = 0; i < NETDEV_HASHENTRIES; i++)
    			INIT_HLIST_HEAD(&hash[i]);
    
    	return hash;
    }
    
    
    /* Initialize per network namespace state */
    
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

    	net->dev_name_head = netdev_create_hash();
    	if (net->dev_name_head == NULL)
    		goto err_name;
    
    	net->dev_index_head = netdev_create_hash();
    	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
    	kfree(net->dev_name_head);
    err_name:
	return -ENOMEM;
}

static void __net_exit netdev_exit(struct net *net)
{
    	kfree(net->dev_name_head);
    	kfree(net->dev_index_head);
    }
    
    
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
    	struct net_device *dev, *next;
    	/*
	 * Push all migratable network devices back to the
    	 * initial network namespace
    	 */
    	rtnl_lock();
    	for_each_netdev_safe(net, dev, next) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
    
    /*
     *	Initialize the DEV module. At boot time this walks the device list and
     *	unhooks any devices that fail to initialise (normally hardware not
     *	present) and leaves us with a valid list of present and active devices.
     *
     */
    
    /*
     *       This is called single threaded during boot, so no need
     *       to take the rtnl semaphore.
     */
    static int __init net_dev_init(void)
    {
    	int i, rc = -ENOMEM;
    
    	BUG_ON(!dev_boot_phase);
    
    	if (dev_proc_init())
    		goto out;
    
    
	if (netdev_kobject_init())
    		goto out;
    
    	INIT_LIST_HEAD(&ptype_all);
    
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
    		INIT_LIST_HEAD(&ptype_base[i]);
    
    
    	if (register_pernet_subsys(&netdev_net_ops))
    		goto out;
    
	if (register_pernet_device(&default_device_ops))
		goto out;

    	/*
    	 *	Initialise the packet receive queues.
    	 */
    
    
	for_each_possible_cpu(i) {
    		struct softnet_data *queue;
    
    		queue = &per_cpu(softnet_data, i);
    		skb_queue_head_init(&queue->input_pkt_queue);
    		queue->completion_queue = NULL;
    		INIT_LIST_HEAD(&queue->poll_list);
    
    
    		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;
    
    	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
    	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
    
    	hotcpu_notifier(dev_cpu_callback, 0);
    	dst_init();
    	dev_mcast_init();
    	rc = 0;
    out:
    	return rc;
    }
    
    subsys_initcall(net_dev_init);
    
    EXPORT_SYMBOL(__dev_get_by_index);
    EXPORT_SYMBOL(__dev_get_by_name);
    EXPORT_SYMBOL(__dev_remove_pack);
    
EXPORT_SYMBOL(dev_valid_name);
    EXPORT_SYMBOL(dev_add_pack);
    EXPORT_SYMBOL(dev_alloc_name);
    EXPORT_SYMBOL(dev_close);
    EXPORT_SYMBOL(dev_get_by_flags);
    EXPORT_SYMBOL(dev_get_by_index);
    EXPORT_SYMBOL(dev_get_by_name);
    EXPORT_SYMBOL(dev_open);
    EXPORT_SYMBOL(dev_queue_xmit);
    EXPORT_SYMBOL(dev_remove_pack);
    EXPORT_SYMBOL(dev_set_allmulti);
    EXPORT_SYMBOL(dev_set_promiscuity);
    EXPORT_SYMBOL(dev_change_flags);
    EXPORT_SYMBOL(dev_set_mtu);
    EXPORT_SYMBOL(dev_set_mac_address);
    EXPORT_SYMBOL(free_netdev);
    EXPORT_SYMBOL(netdev_boot_setup_check);
    EXPORT_SYMBOL(netdev_set_master);
    EXPORT_SYMBOL(netdev_state_change);
    EXPORT_SYMBOL(netif_receive_skb);
    EXPORT_SYMBOL(netif_rx);
    EXPORT_SYMBOL(register_gifconf);
    EXPORT_SYMBOL(register_netdevice);
    EXPORT_SYMBOL(register_netdevice_notifier);
    EXPORT_SYMBOL(skb_checksum_help);
    EXPORT_SYMBOL(synchronize_net);
    EXPORT_SYMBOL(unregister_netdevice);
    EXPORT_SYMBOL(unregister_netdevice_notifier);
    EXPORT_SYMBOL(net_enable_timestamp);
    EXPORT_SYMBOL(net_disable_timestamp);
    EXPORT_SYMBOL(dev_get_flags);
    
    #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
    EXPORT_SYMBOL(br_handle_frame_hook);
    EXPORT_SYMBOL(br_fdb_get_hook);
    EXPORT_SYMBOL(br_fdb_put_hook);
    #endif
    
    #ifdef CONFIG_KMOD
    EXPORT_SYMBOL(dev_load);
    #endif
    
    EXPORT_PER_CPU_SYMBOL(softnet_data);