static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

    static int napi_gro_complete(struct sk_buff *skb)
    {
    	struct packet_type *ptype;
    	__be16 type = skb->protocol;
    	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
    	int err = -ENOENT;
    
    
	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

    	rcu_read_lock();
    	list_for_each_entry_rcu(ptype, head, list) {
    		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
    			continue;
    
    		err = ptype->gro_complete(skb);
    		break;
    	}
    	rcu_read_unlock();
    
    	if (err) {
    		WARN_ON(&ptype->list == head);
    		kfree_skb(skb);
    		return NET_RX_SUCCESS;
    	}
    
    out:
    	return netif_receive_skb(skb);
    }
    
    
    static void napi_gro_flush(struct napi_struct *napi)
    
    {
    	struct sk_buff *skb, *next;
    
    	for (skb = napi->gro_list; skb; skb = next) {
    		next = skb->next;
    		skb->next = NULL;
    		napi_gro_complete(skb);
	}

	napi->gro_count = 0;
	napi->gro_list = NULL;
}

    enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
    
    {
    	struct sk_buff **pp = NULL;
    	struct packet_type *ptype;
    	__be16 type = skb->protocol;
    	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
    
	int same_flow;
	int mac_len;
	enum gro_result ret;
    
    
    	if (!(skb->dev->features & NETIF_F_GRO))
    		goto normal;
    
    
	if (skb_is_gso(skb) || skb_has_frags(skb))
		goto normal;

    	rcu_read_lock();
    	list_for_each_entry_rcu(ptype, head, list) {
    		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
    			continue;
    
    
    		skb_set_network_header(skb, skb_gro_offset(skb));
    
    		mac_len = skb->network_header - skb->mac_header;
    		skb->mac_len = mac_len;
    		NAPI_GRO_CB(skb)->same_flow = 0;
    		NAPI_GRO_CB(skb)->flush = 0;
    
    		NAPI_GRO_CB(skb)->free = 0;
    
    
    		pp = ptype->gro_receive(&napi->gro_list, skb);
    		break;
    	}
    	rcu_read_unlock();
    
    	if (&ptype->list == head)
    		goto normal;
    
    
    	same_flow = NAPI_GRO_CB(skb)->same_flow;
    
    	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
    
    	if (pp) {
    		struct sk_buff *nskb = *pp;
    
    		*pp = nskb->next;
    		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
    	NAPI_GRO_CB(skb)->count = 1;
    
    	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
    
    	skb->next = napi->gro_list;
    	napi->gro_list = skb;
    
	ret = GRO_HELD;

pull:
    	if (skb_headlen(skb) < skb_gro_offset(skb)) {
    		int grow = skb_gro_offset(skb) - skb_headlen(skb);
    
    		BUG_ON(skb->end - skb->tail < grow);
    
    		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
    
    		skb->tail += grow;
    		skb->data_len -= grow;
    
    		skb_shinfo(skb)->frags[0].page_offset += grow;
    		skb_shinfo(skb)->frags[0].size -= grow;
    
    		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
    			put_page(skb_shinfo(skb)->frags[0].page);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
    		}
    
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

    EXPORT_SYMBOL(dev_gro_receive);
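
/*
 * For reference (summarising the paths above): dev_gro_receive() reports
 *   GRO_MERGED      - skb was absorbed into a packet already held on
 *                     napi->gro_list; nothing more to do with it
 *   GRO_MERGED_FREE - merged as well, but the skb itself can be freed
 *   GRO_HELD        - skb is now parked on napi->gro_list awaiting more
 *                     segments of the same flow
 *   GRO_NORMAL      - not a GRO candidate, deliver through the normal path
 * napi_skb_finish() and napi_frags_finish() below turn these codes into the
 * final receive action.
 */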
    
    
    static gro_result_t
    __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
    
    {
    	struct sk_buff *p;
    
    
    	if (netpoll_rx_on(skb))
    		return GRO_NORMAL;
    
    
    	for (p = napi->gro_list; p; p = p->next) {
    
    		NAPI_GRO_CB(p)->same_flow =
    			(p->dev == skb->dev) &&
    			!compare_ether_header(skb_mac_header(p),
    					      skb_gro_mac_header(skb));
    
    		NAPI_GRO_CB(p)->flush = 0;
    	}
    
    	return dev_gro_receive(napi, skb);
    }
    
gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
    	switch (ret) {
    	case GRO_NORMAL:
    
    		if (netif_receive_skb(skb))
    			ret = GRO_DROP;
    		break;
    
    	case GRO_DROP:
    	case GRO_MERGED_FREE:
    
    		kfree_skb(skb);
    		break;
    
    
    	case GRO_HELD:
    	case GRO_MERGED:
    		break;
    
	}

	return ret;
}
    EXPORT_SYMBOL(napi_skb_finish);
    
    
    void skb_gro_reset_offset(struct sk_buff *skb)
    {
    	NAPI_GRO_CB(skb)->data_offset = 0;
    	NAPI_GRO_CB(skb)->frag0 = NULL;
    
    	NAPI_GRO_CB(skb)->frag0_len = 0;
    
	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
    
    		NAPI_GRO_CB(skb)->frag0 =
    			page_address(skb_shinfo(skb)->frags[0].page) +
    			skb_shinfo(skb)->frags[0].page_offset;
    
    		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
    	}
    
    }
    EXPORT_SYMBOL(skb_gro_reset_offset);
    
    
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
    	skb_gro_reset_offset(skb);
    
    
    	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
    
    }
    EXPORT_SYMBOL(napi_gro_receive);
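
/*
 * Illustrative sketch (not part of dev.c): how a typical NAPI driver feeds
 * received packets into GRO from its ->poll() callback instead of calling
 * netif_receive_skb() directly.  my_priv, my_fetch_rx_skb() and
 * my_enable_rx_irq() are hypothetical driver helpers, shown only to put
 * napi_gro_receive() and napi_complete() in context.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = my_fetch_rx_skb(priv);	/* driver RX ring */

		if (!skb)
			break;

		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget) {
		napi_complete(napi);
		my_enable_rx_irq(priv);		/* unmask RX interrupts again */
	}
	return work_done;
}
#endif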
    
    
    void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
    {
    	__skb_pull(skb, skb_headlen(skb));
    	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
    
    	napi->skb = skb;
    }
    EXPORT_SYMBOL(napi_reuse_skb);
    
    
    struct sk_buff *napi_get_frags(struct napi_struct *napi)
    
    {
    	struct sk_buff *skb = napi->skb;
    
    	if (!skb) {
    
    		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
    EXPORT_SYMBOL(napi_get_frags);
    
    gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
    	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
    		skb->protocol = eth_type_trans(skb, skb->dev);
    
    		if (ret == GRO_HELD)
    			skb_gro_pull(skb, -ETH_HLEN);
    		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

    	case GRO_DROP:
    	case GRO_MERGED_FREE:
    		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
    EXPORT_SYMBOL(napi_frags_finish);
    
    
    struct sk_buff *napi_frags_skb(struct napi_struct *napi)
    {
    	struct sk_buff *skb = napi->skb;
    	struct ethhdr *eth;
    
    	unsigned int hlen;
    	unsigned int off;
    
    
    	napi->skb = NULL;
    
    	skb_reset_mac_header(skb);
    	skb_gro_reset_offset(skb);
    
    
    	off = skb_gro_offset(skb);
    	hlen = off + sizeof(*eth);
    	eth = skb_gro_header_fast(skb, off);
    	if (skb_gro_header_hard(skb, hlen)) {
    		eth = skb_gro_header_slow(skb, hlen, off);
    		if (unlikely(!eth)) {
    			napi_reuse_skb(napi, skb);
    			skb = NULL;
    			goto out;
    		}
    
    	}
    
    	skb_gro_pull(skb, sizeof(*eth));
    
    	/*
    	 * This works because the only protocols we care about don't require
    	 * special handling.  We'll fix it up properly at the end.
    	 */
    	skb->protocol = eth->h_proto;
    
    out:
    	return skb;
    }
    EXPORT_SYMBOL(napi_frags_skb);
    
    
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

    	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
    }
    
    EXPORT_SYMBOL(napi_gro_frags);
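
/*
 * Illustrative sketch (not part of dev.c): the page-based GRO receive path.
 * A driver that DMAs packet data straight into pages asks the stack for a
 * reusable skb with napi_get_frags(), attaches the page as a fragment and
 * hands it to napi_gro_frags(), which pulls the Ethernet header itself via
 * napi_frags_skb().  my_priv and my_rx_desc are hypothetical.
 */
#if 0
static void my_rx_page(struct my_priv *priv, struct my_rx_desc *desc)
{
	struct sk_buff *skb = napi_get_frags(&priv->napi);

	if (!skb)
		return;				/* allocation failed, drop */

	skb_fill_page_desc(skb, 0, desc->page, desc->offset, desc->len);
	skb->len += desc->len;
	skb->data_len += desc->len;
	skb->truesize += desc->len;

	napi_gro_frags(&priv->napi);		/* no eth_type_trans() needed */
}
#endif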
    
    
    /*
 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
     * Note: called with local irq disabled, but exits with local irq enabled.
     */
    static void net_rps_action_and_irq_enable(struct softnet_data *sd)
    {
    #ifdef CONFIG_RPS
    	struct softnet_data *remsd = sd->rps_ipi_list;
    
    	if (remsd) {
    		sd->rps_ipi_list = NULL;
    
    		local_irq_enable();
    
    		/* Send pending IPI's to kick RPS processing on remote cpus. */
    		while (remsd) {
    			struct softnet_data *next = remsd->rps_ipi_next;
    
    			if (cpu_online(remsd->cpu))
    				__smp_call_function_single(remsd->cpu,
    							   &remsd->csd, 0);
    			remsd = next;
    		}
    	} else
    #endif
    		local_irq_enable();
    }
    
    
    static int process_backlog(struct napi_struct *napi, int quota)
    
    {
    	int work = 0;
    
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
    
    
    #ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it is better to send them now
	 * than to wait until net_rx_action() ends.
	 */
    	if (sd->rps_ipi_list) {
    		local_irq_disable();
    		net_rps_action_and_irq_enable(sd);
    	}
    #endif
    
    	local_irq_disable();
    	while (work < quota) {
    
    		struct sk_buff *skb;
    
    		unsigned int qlen;
    
    		while ((skb = __skb_dequeue(&sd->process_queue))) {
    			local_irq_enable();
    			__netif_receive_skb(skb);
    			local_irq_disable();
    
    			input_queue_head_incr(sd);
    			if (++work >= quota) {
    				local_irq_enable();
    				return work;
    			}
    
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
    
    			/*
    			 * Inline a custom version of __napi_complete().
    			 * only current cpu owns and manipulates this napi,
    			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
    			 * we can use a plain write instead of clear_bit(),
    			 * and we dont need an smp_mb() memory barrier.
    			 */
    			list_del(&napi->poll_list);
    			napi->state = 0;
    
    
			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

    
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
    
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

    void __napi_complete(struct napi_struct *n)
    {
    	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
    	BUG_ON(n->gro_list);
    
    	list_del(&n->poll_list);
    	smp_mb__before_clear_bit();
    	clear_bit(NAPI_STATE_SCHED, &n->state);
    }
    EXPORT_SYMBOL(__napi_complete);
    
    void napi_complete(struct napi_struct *n)
    {
    	unsigned long flags;
    
    	/*
	 * Don't let NAPI dequeue from the CPU poll list
	 * just in case it's running on a different CPU.
    	 */
    	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
    		return;
    
    	napi_gro_flush(n);
    	local_irq_save(flags);
    	__napi_complete(n);
    	local_irq_restore(flags);
    }
    EXPORT_SYMBOL(napi_complete);
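
/*
 * Illustrative sketch (not part of dev.c): the interrupt-side half of the
 * NAPI contract that __napi_schedule()/napi_complete() implement.  The
 * device masks its own RX interrupts and schedules the napi_struct; the
 * NET_RX_SOFTIRQ then calls ->poll() until the driver completes it again.
 * my_priv and my_disable_rx_irq() are hypothetical.
 */
#if 0
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		my_disable_rx_irq(priv);	/* stop further RX interrupts */
		__napi_schedule(&priv->napi);	/* softirq will poll us */
	}
	return IRQ_HANDLED;
}
#endif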
    
    void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
    		    int (*poll)(struct napi_struct *, int), int weight)
    {
    	INIT_LIST_HEAD(&napi->poll_list);
    
	napi->gro_count = 0;
	napi->gro_list = NULL;
    	napi->skb = NULL;
    
    	napi->poll = poll;
    	napi->weight = weight;
    	list_add(&napi->dev_list, &dev->napi_list);
    	napi->dev = dev;
    
    #ifdef CONFIG_NETPOLL
    
    	spin_lock_init(&napi->poll_lock);
    	napi->poll_owner = -1;
    #endif
    	set_bit(NAPI_STATE_SCHED, &napi->state);
    }
    EXPORT_SYMBOL(netif_napi_add);
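
/*
 * Illustrative sketch (not part of dev.c): a driver wires its NAPI context
 * up once at probe/open time.  64 is the conventional weight; my_poll() is
 * the ->poll() callback from the sketch above and my_priv is hypothetical.
 * Note that netif_napi_add() leaves NAPI_STATE_SCHED set, so the instance
 * cannot be polled until napi_enable() clears it.
 */
#if 0
static void my_setup_napi(struct net_device *netdev, struct my_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, my_poll, 64);
	napi_enable(&priv->napi);	/* typically done in ndo_open */
}
#endif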
    
    void netif_napi_del(struct napi_struct *napi)
    {
    	struct sk_buff *skb, *next;
    
    
    	list_del_init(&napi->dev_list);
    
    	napi_free_frags(napi);
    
    
    	for (skb = napi->gro_list; skb; skb = next) {
    		next = skb->next;
    		skb->next = NULL;
    		kfree_skb(skb);
    	}
    
    	napi->gro_list = NULL;
    
    }
    EXPORT_SYMBOL(netif_napi_del);
    
    
    static void net_rx_action(struct softirq_action *h)
    {
    
    	struct softnet_data *sd = &__get_cpu_var(softnet_data);
    
    	unsigned long time_limit = jiffies + 2;
    
    	int budget = netdev_budget;
    
    	local_irq_disable();
    
    
    	while (!list_empty(&sd->poll_list)) {
    
    		struct napi_struct *n;
    		int work, weight;
    
    
    
		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
    			goto softnet_break;
    
    		local_irq_enable();
    
    
    		/* Even though interrupts have been re-enabled, this
    		 * access is safe because interrupts can only add new
    		 * entries to the tail of this list, and only ->poll()
    		 * calls can remove this head entry from the list.
    		 */
    
    		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
    
		weight = n->weight;

    		/* This NAPI_STATE_SCHED test is for avoiding a race
    		 * with netpoll's poll_napi().  Only the entity which
    		 * obtains the lock and sees NAPI_STATE_SCHED set will
    		 * actually make the ->poll() call.  Therefore we avoid
    		 * accidently calling ->poll() when NAPI is not scheduled.
    		 */
    		work = 0;
    
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
		}

    		WARN_ON_ONCE(work > weight);
    
    		budget -= work;
    
    		local_irq_disable();
    
    		/* Drivers must not modify the NAPI state if they
    		 * consume the entire weight.  In such cases this code
    		 * still "owns" the NAPI instance and therefore can
    		 * move the instance around on the list at-will.
    		 */
    
    		if (unlikely(work == weight)) {
    
    			if (unlikely(napi_disable_pending(n))) {
    				local_irq_enable();
    				napi_complete(n);
    				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, &sd->poll_list);
		}
    	}
    out:
    
    	net_rps_action_and_irq_enable(sd);
    
    #ifdef CONFIG_NET_DMA
    	/*
    	 * There may not be any more sk_buffs coming right now, so push
    	 * any pending DMA copies to hardware
    	 */
    
	dma_issue_pending_all();
#endif

    	return;
    
    softnet_break:
    
    	sd->time_squeeze++;
    
    	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
    	goto out;
    }
    
    
    static gifconf_func_t *gifconf_list[NPROTO];
    
    
    /**
     *	register_gifconf	-	register a SIOCGIF handler
     *	@family: Address family
     *	@gifconf: Function handler
     *
     *	Register protocol dependent address dumping routines. The handler
     *	that is passed must not be freed or reused until it has been replaced
     *	by another handler.
     */
    
    int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
    
    {
    	if (family >= NPROTO)
    		return -EINVAL;
    	gifconf_list[family] = gifconf;
    	return 0;
    }
    
    EXPORT_SYMBOL(register_gifconf);
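
/*
 * For reference: address families hook their SIOCGIFCONF dumpers into the
 * table above.  IPv4, for instance, registers its handler roughly like the
 * sketch below (the real call lives in net/ipv4/devinet.c); my_af_init() is
 * a hypothetical wrapper shown only for illustration.
 */
#if 0
static int __init my_af_init(void)
{
	return register_gifconf(PF_INET, inet_gifconf);
}
#endif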
    
    
    
    /*
     *	Map an interface index to its name (SIOCGIFNAME)
     */
    
    /*
     *	We need this ioctl for efficient implementation of the
     *	if_indextoname() function required by the IPv6 API.  Without
     *	it, we would have to search all the interfaces to find a
     *	match.  --pb
     */
    
    
    static int dev_ifname(struct net *net, struct ifreq __user *arg)
    
    {
    	struct net_device *dev;
    	struct ifreq ifr;
    
    	/*
    	 *	Fetch the caller's info block.
    	 */
    
    	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
    		return -EFAULT;
    
    
    	rcu_read_lock();
    	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
    
    	if (!dev) {
    
    		rcu_read_unlock();
    
    		return -ENODEV;
    	}
    
    	strcpy(ifr.ifr_name, dev->name);
    
    	rcu_read_unlock();
    
    
    	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
    		return -EFAULT;
    	return 0;
    }
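
/*
 * Illustrative sketch (not part of dev.c): what the SIOCGIFNAME handler
 * above services from user space.  The caller fills in ifr_ifindex and gets
 * the name back in ifr_name; glibc's if_indextoname() is essentially this
 * ioctl.  print_ifname() is hypothetical user-space code, compiled against
 * <net/if.h>, <sys/ioctl.h>, <sys/socket.h>, <string.h>, <unistd.h> and
 * <stdio.h>.
 */
#if 0
static int print_ifname(int ifindex)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("ifindex %d is %s\n", ifindex, ifr.ifr_name);
	close(fd);
	return 0;
}
#endif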
    
    /*
     *	Perform a SIOCGIFCONF call. This structure will change
     *	size eventually, and there is nothing I can do about it.
     *	Thus we will need a 'compatibility mode'.
     */
    
    
    static int dev_ifconf(struct net *net, char __user *arg)
    
    {
    	struct ifconf ifc;
    	struct net_device *dev;
    	char __user *pos;
    	int len;
    	int total;
    	int i;
    
    	/*
    	 *	Fetch the caller's info block.
    	 */
    
    	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
    		return -EFAULT;
    
    	pos = ifc.ifc_buf;
    	len = ifc.ifc_len;
    
    	/*
    	 *	Loop over the interfaces, and write an info block for each.
    	 */
    
    	total = 0;
    
	for_each_netdev(net, dev) {
    		for (i = 0; i < NPROTO; i++) {
    			if (gifconf_list[i]) {
    				int done;
    				if (!pos)
    					done = gifconf_list[i](dev, NULL, 0);
    				else
    					done = gifconf_list[i](dev, pos + total,
    							       len - total);
    				if (done < 0)
    					return -EFAULT;
    				total += done;
    			}
    		}
    
	}
    
    	/*
    	 *	All done.  Write the updated control block back to the caller.
    	 */
    	ifc.ifc_len = total;
    
    	/*
    	 * 	Both BSD and Solaris return 0 here, so we do too.
    	 */
    	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
    }
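
/*
 * Illustrative sketch (not part of dev.c): the classic two-pass SIOCGIFCONF
 * call from user space.  Passing a NULL buffer first makes the loop above
 * only count bytes (the gifconf handlers are called with a NULL pointer),
 * so the caller can size its buffer before fetching the records.
 * dump_ifnames() is hypothetical user-space code using <stdio.h>,
 * <stdlib.h>, <sys/ioctl.h> and <net/if.h>.
 */
#if 0
static int dump_ifnames(int fd)
{
	struct ifconf ifc;
	int i, n;

	ifc.ifc_buf = NULL;		/* pass 1: only ask for the length */
	ifc.ifc_len = 0;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0)
		return -1;

	ifc.ifc_buf = malloc(ifc.ifc_len);
	if (!ifc.ifc_buf)
		return -1;
	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {	/* pass 2: fetch */
		n = ifc.ifc_len / sizeof(struct ifreq);
		for (i = 0; i < n; i++)
			printf("%s\n", ifc.ifc_req[i].ifr_name);
	}
	free(ifc.ifc_buf);
	return 0;
}
#endif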
    
    #ifdef CONFIG_PROC_FS
    /*
     *	This is invoked by the /proc filesystem handler to display a device
     *	in detail.
     */
    
    void *dev_seq_start(struct seq_file *seq, loff_t *pos)
    
    {
    
    	struct net *net = seq_file_net(seq);
    
	loff_t off;
	struct net_device *dev;

	rcu_read_lock();
    	if (!*pos)
    		return SEQ_START_TOKEN;
    
	off = 1;
	for_each_netdev_rcu(net, dev)
    		if (off++ == *pos)
    			return dev;
    
	return NULL;
    }
    
    void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
    
    	struct net_device *dev = (v == SEQ_START_TOKEN) ?
    				  first_net_device(seq_file_net(seq)) :
    				  next_net_device((struct net_device *)v);
    
    
    	++*pos;
    
    	return rcu_dereference(dev);
    
    }
    
    void dev_seq_stop(struct seq_file *seq, void *v)
    
    {
    
    	rcu_read_unlock();
    
    }
    
    static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
    {
    
    	struct rtnl_link_stats64 temp;
    	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
    
    
    
    	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
    		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
    
    		   dev->name, stats->rx_bytes, stats->rx_packets,
    		   stats->rx_errors,
    		   stats->rx_dropped + stats->rx_missed_errors,
    		   stats->rx_fifo_errors,
    		   stats->rx_length_errors + stats->rx_over_errors +
    		    stats->rx_crc_errors + stats->rx_frame_errors,
    		   stats->rx_compressed, stats->multicast,
    		   stats->tx_bytes, stats->tx_packets,
    		   stats->tx_errors, stats->tx_dropped,
    		   stats->tx_fifo_errors, stats->collisions,
    		   stats->tx_carrier_errors +
    		    stats->tx_aborted_errors +
    		    stats->tx_window_errors +
    		    stats->tx_heartbeat_errors,
    		   stats->tx_compressed);
    
    }
    
    /*
     *	Called from the PROCfs module. This now uses the new arbitrary sized
     *	/proc/net interface to create /proc/net/dev
     */
    static int dev_seq_show(struct seq_file *seq, void *v)
    {
    	if (v == SEQ_START_TOKEN)
    		seq_puts(seq, "Inter-|   Receive                            "
    			      "                    |  Transmit\n"
    			      " face |bytes    packets errs drop fifo frame "
    			      "compressed multicast|bytes    packets errs "
    			      "drop fifo colls carrier compressed\n");
    	else
    		dev_seq_printf_stats(seq, v);
    	return 0;
    }
    
    
    static struct softnet_data *softnet_get_online(loff_t *pos)
    
    {
    
    	struct softnet_data *sd = NULL;
    
    
    
    	while (*pos < nr_cpu_ids)
    
    		if (cpu_online(*pos)) {
    
    			sd = &per_cpu(softnet_data, *pos);
    
    			break;
    		} else
    			++*pos;
    
    	return sd;
    
    }
    
    static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
    {
    	return softnet_get_online(pos);
    }
    
    static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
    	++*pos;
    	return softnet_get_online(pos);
    }
    
    static void softnet_seq_stop(struct seq_file *seq, void *v)
    {
    }
    
    static int softnet_seq_show(struct seq_file *seq, void *v)
    {
    
    	struct softnet_data *sd = v;
    
    
    
	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   sd->cpu_collision, sd->received_rps);
    	return 0;
    }
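
/*
 * For reference: each line of /proc/net/softnet_stat describes one online
 * CPU and, per the format string above, carries ten hex words in this order:
 * processed, dropped, time_squeeze, one always-zero word, four more zero
 * words (the old fastroute counters), cpu_collision and received_rps.
 * There is no CPU-number column; lines simply follow the online CPUs.
 */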
    
    
    static const struct seq_operations dev_seq_ops = {
    
    	.start = dev_seq_start,
    	.next  = dev_seq_next,
    	.stop  = dev_seq_stop,
    	.show  = dev_seq_show,
    };
    
    static int dev_seq_open(struct inode *inode, struct file *file)
    {
    
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

    static const struct file_operations dev_seq_fops = {
    
    	.owner	 = THIS_MODULE,
    	.open    = dev_seq_open,
    	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

    static const struct seq_operations softnet_seq_ops = {
    
    	.start = softnet_seq_start,
    	.next  = softnet_seq_next,
    	.stop  = softnet_seq_stop,
    	.show  = softnet_seq_show,
    };
    
    static int softnet_seq_open(struct inode *inode, struct file *file)
    {
    	return seq_open(file, &softnet_seq_ops);
    }
    
    
    static const struct file_operations softnet_seq_fops = {
    
    	.owner	 = THIS_MODULE,
    	.open    = softnet_seq_open,
    	.read    = seq_read,
    	.llseek  = seq_lseek,
    	.release = seq_release,
    };
    
    
    static void *ptype_get_idx(loff_t pos)
    {
    	struct packet_type *pt = NULL;
    	loff_t i = 0;
    	int t;
    
    	list_for_each_entry_rcu(pt, &ptype_all, list) {
    		if (i == pos)
    			return pt;
    		++i;
    	}
    
    
    	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
    
    		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
    			if (i == pos)
    				return pt;
    			++i;
    		}
    	}
    	return NULL;
    }
    
    static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
    
    {
    	rcu_read_lock();
    	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
    }
    
    static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
    	struct packet_type *pt;
    	struct list_head *nxt;
    	int hash;
    
    	++*pos;
    	if (v == SEQ_START_TOKEN)
    		return ptype_get_idx(0);
    
    	pt = v;
    	nxt = pt->list.next;
    	if (pt->type == htons(ETH_P_ALL)) {
    		if (nxt != &ptype_all)
    			goto found;
    		hash = 0;
    		nxt = ptype_base[0].next;
    	} else
    
    		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
    
    
    	while (nxt == &ptype_base[hash]) {
    
    		if (++hash >= PTYPE_HASH_SIZE)
    
    			return NULL;
    		nxt = ptype_base[hash].next;
    	}
    found:
    	return list_entry(nxt, struct packet_type, list);
    }
    
    static void ptype_seq_stop(struct seq_file *seq, void *v)
    
    {
    	rcu_read_unlock();
    }
    
    static int ptype_seq_show(struct seq_file *seq, void *v)
    {
    	struct packet_type *pt = v;
    
    	if (v == SEQ_START_TOKEN)
    		seq_puts(seq, "Type Device      Function\n");
    
    	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
    
    		if (pt->type == htons(ETH_P_ALL))
    			seq_puts(seq, "ALL ");
    		else
    			seq_printf(seq, "%04x", ntohs(pt->type));
    
    
    		seq_printf(seq, " %-8s %pF\n",
    			   pt->dev ? pt->dev->name : "", pt->func);
    
    	}
    
    	return 0;
    }
    
    static const struct seq_operations ptype_seq_ops = {
    	.start = ptype_seq_start,
    	.next  = ptype_seq_next,
    	.stop  = ptype_seq_stop,
    	.show  = ptype_seq_show,
    };
    
    static int ptype_seq_open(struct inode *inode, struct file *file)
    {
    
    	return seq_open_net(inode, file, &ptype_seq_ops,
    			sizeof(struct seq_net_private));
    
    }
    
    static const struct file_operations ptype_seq_fops = {
    	.owner	 = THIS_MODULE,
    	.open    = ptype_seq_open,
    	.read    = seq_read,
    	.llseek  = seq_lseek,
    
	.release = seq_release_net,
};

    static int __net_init dev_proc_net_init(struct net *net)
    
    {
    	int rc = -ENOMEM;
    
    
	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
    		goto out;
    
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
    		goto out_dev;
    
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

    	rc = 0;
    out:
    	return rc;
    
    out_softnet:
    
    	proc_net_remove(net, "softnet_stat");
    
    out_dev:
    
	proc_net_remove(net, "dev");
    	goto out;
    }
    
    static void __net_exit dev_proc_net_exit(struct net *net)
    
    {
    	wext_proc_exit(net);
    
    	proc_net_remove(net, "ptype");
    	proc_net_remove(net, "softnet_stat");
    	proc_net_remove(net, "dev");
    }
    
    
    static struct pernet_operations __net_initdata dev_proc_ops = {
    
    	.init = dev_proc_net_init,
    	.exit = dev_proc_net_exit,
    };
    
    static int __init dev_proc_init(void)
    {
    	return register_pernet_subsys(&dev_proc_ops);
    }
    
    #else
    #define dev_proc_init() 0