    /*
     * Back-end of the driver for virtual network devices. This portion of the
     * driver exports a 'unified' network-device interface that can be accessed
     * by any operating system that implements a compatible front end. A
     * reference front-end implementation can be found in:
     *  drivers/net/xen-netfront.c
     *
     * Copyright (c) 2002-2005, K A Fraser
     *
     * This program is free software; you can redistribute it and/or
     * modify it under the terms of the GNU General Public License version 2
     * as published by the Free Software Foundation; or, when distributed
     * separately from the Linux kernel or incorporated into other
     * software packages, subject to the following license:
     *
     * Permission is hereby granted, free of charge, to any person obtaining a copy
     * of this source file (the "Software"), to deal in the Software without
     * restriction, including without limitation the rights to use, copy, modify,
     * merge, publish, distribute, sublicense, and/or sell copies of the Software,
     * and to permit persons to whom the Software is furnished to do so, subject to
     * the following conditions:
     *
     * The above copyright notice and this permission notice shall be included in
     * all copies or substantial portions of the Software.
     *
     * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
     * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     * IN THE SOFTWARE.
     */
    
    #include "common.h"
    
    #include <linux/kthread.h>
    #include <linux/if_vlan.h>
    #include <linux/udp.h>
    
    #include <net/tcp.h>
    
    
    #include <xen/xen.h>
    
    #include <xen/events.h>
    #include <xen/interface/memory.h>
    
    #include <asm/xen/hypercall.h>
    #include <asm/xen/page.h>
    
    struct pending_tx_info {
    	struct xen_netif_tx_request req;
    	struct xenvif *vif;
    };
    typedef unsigned int pending_ring_idx_t;
    
    struct netbk_rx_meta {
    	int id;
    	int size;
    	int gso_size;
    };
    
    #define MAX_PENDING_REQS 256
    
    
    /* Discriminate from any valid pending_idx value. */
    #define INVALID_PENDING_IDX 0xFFFF
    
    
    #define MAX_BUFFER_OFFSET PAGE_SIZE
    
    /* extra field used in struct page */
    union page_ext {
    	struct {
    #if BITS_PER_LONG < 64
    #define IDX_WIDTH   8
    #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
    		unsigned int group:GROUP_WIDTH;
    		unsigned int idx:IDX_WIDTH;
    #else
    		unsigned int group, idx;
    #endif
    	} e;
    	void *mapping;
    };
    
    struct xen_netbk {
    	wait_queue_head_t wq;
    	struct task_struct *task;
    
    	struct sk_buff_head rx_queue;
    	struct sk_buff_head tx_queue;
    
    	struct timer_list net_timer;
    
    	struct page *mmap_pages[MAX_PENDING_REQS];
    
    	pending_ring_idx_t pending_prod;
    	pending_ring_idx_t pending_cons;
    	struct list_head net_schedule_list;
    
    	/* Protect the net_schedule_list in netif. */
    	spinlock_t net_schedule_list_lock;
    
    	atomic_t netfront_count;
    
    	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
    	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
    
    	u16 pending_ring[MAX_PENDING_REQS];
    
    	/*
    	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
    	 * head/fragment page uses 2 copy operations because it
    	 * straddles two buffers in the frontend.
    	 */
    	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
    	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
    };
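
    /*
     * Each xen_netbk group is served by its own kernel thread (task/wq
     * above); xen_netbk_add_xenvif() attaches a new vif to whichever group
     * is currently serving the fewest frontends.
     */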
    
    static struct xen_netbk *xen_netbk;
    static int xen_netbk_group_nr;
    
    void xen_netbk_add_xenvif(struct xenvif *vif)
    {
    	int i;
    	int min_netfront_count;
    	int min_group = 0;
    	struct xen_netbk *netbk;
    
    	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
    	for (i = 0; i < xen_netbk_group_nr; i++) {
    		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
    		if (netfront_count < min_netfront_count) {
    			min_group = i;
    			min_netfront_count = netfront_count;
    		}
    	}
    
    	netbk = &xen_netbk[min_group];
    
    	vif->netbk = netbk;
    	atomic_inc(&netbk->netfront_count);
    }
    
    void xen_netbk_remove_xenvif(struct xenvif *vif)
    {
    	struct xen_netbk *netbk = vif->netbk;
    	vif->netbk = NULL;
    	atomic_dec(&netbk->netfront_count);
    }
    
    
    static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
    				  u8 status);
    
    static void make_tx_response(struct xenvif *vif,
    			     struct xen_netif_tx_request *txp,
    			     s8       st);
    static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
    					     u16      id,
    					     s8       st,
    					     u16      offset,
    					     u16      size,
    					     u16      flags);
    
    static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
    				       u16 idx)
    {
    	return page_to_pfn(netbk->mmap_pages[idx]);
    }
    
    static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
    					 u16 idx)
    {
    	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
    }
    
    /* extra field used in struct page */
    static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
    				unsigned int idx)
    {
    	unsigned int group = netbk - xen_netbk;
    	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
    
    	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
    	pg->mapping = ext.mapping;
    }
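
    /*
     * The group stored by set_page_ext() is biased by one, so a page whose
     * ->mapping was never set up by netback decodes to ext.e.group == 0 and
     * is rejected by the range check in get_page_ext() below.
     */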
    
    static int get_page_ext(struct page *pg,
    			unsigned int *pgroup, unsigned int *pidx)
    {
    	union page_ext ext = { .mapping = pg->mapping };
    	struct xen_netbk *netbk;
    	unsigned int group, idx;
    
    	group = ext.e.group - 1;
    
    	if (group < 0 || group >= xen_netbk_group_nr)
    		return 0;
    
    	netbk = &xen_netbk[group];
    
    	idx = ext.e.idx;
    
    	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
    		return 0;
    
    	if (netbk->mmap_pages[idx] != pg)
    		return 0;
    
    	*pgroup = group;
    	*pidx = idx;
    
    	return 1;
    }
    
    /*
     * This is the amount of packet we copy rather than map, so that the
     * guest can't fiddle with the contents of the headers while we do
     * packet processing on them (netfilter, routing, etc).
     */
    #define PKT_PROT_LEN    (ETH_HLEN + \
    			 VLAN_HLEN + \
    			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
    			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
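    /*
     * With the usual header sizes (14 byte Ethernet header, 4 byte VLAN tag,
     * at most 60 bytes of IPv4 header and at most 60 bytes of TCP header)
     * this comes to 138 bytes.
     */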
    
    
    static u16 frag_get_pending_idx(skb_frag_t *frag)
    {
    	return (u16)frag->page_offset;
    }
    
    static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
    {
    	frag->page_offset = pending_idx;
    }
    
    
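    /*
     * pending_prod and pending_cons are free-running counters; pending_index()
     * reduces them modulo MAX_PENDING_REQS, which must therefore be a power of
     * two for the mask below to behave as a modulo.
     */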
    static inline pending_ring_idx_t pending_index(unsigned i)
    {
    	return i & (MAX_PENDING_REQS-1);
    }
    
    static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
    {
    	return MAX_PENDING_REQS -
    		netbk->pending_prod + netbk->pending_cons;
    }
    
    static void xen_netbk_kick_thread(struct xen_netbk *netbk)
    {
    	wake_up(&netbk->wq);
    }
    
    static int max_required_rx_slots(struct xenvif *vif)
    {
    	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
    
    	if (vif->can_sg || vif->gso || vif->gso_prefix)
    		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
    
    	return max;
    }
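
    /*
     * The RX ring is treated as full when either the frontend has not posted
     * enough unconsumed requests to hold a worst-case packet, or the backend
     * has already peeked requests nearly a full ring ahead of the responses
     * it has produced.
     */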
    
    int xen_netbk_rx_ring_full(struct xenvif *vif)
    {
    	RING_IDX peek   = vif->rx_req_cons_peek;
    	RING_IDX needed = max_required_rx_slots(vif);
    
    	return ((vif->rx.sring->req_prod - peek) < needed) ||
    	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
    }
    
    int xen_netbk_must_stop_queue(struct xenvif *vif)
    {
    	if (!xen_netbk_rx_ring_full(vif))
    		return 0;
    
    	vif->rx.sring->req_event = vif->rx_req_cons_peek +
    		max_required_rx_slots(vif);
    	mb(); /* request notification /then/ check the queue */
    
    	return xen_netbk_rx_ring_full(vif);
    }
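
    /*
     * Note the ordering in xen_netbk_must_stop_queue(): req_event is set
     * before the final re-check, so a request that arrives in the meantime
     * is guaranteed to raise an event rather than leaving the queue stopped
     * indefinitely.
     */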
    
    /*
     * Returns true if we should start a new receive buffer instead of
     * adding 'size' bytes to a buffer which currently contains 'offset'
     * bytes.
     */
    static bool start_new_rx_buffer(int offset, unsigned long size, int head)
    {
    	/* simple case: we have completely filled the current buffer. */
    	if (offset == MAX_BUFFER_OFFSET)
    		return true;
    
    	/*
    	 * complex case: start a fresh buffer if the current frag
    	 * would overflow the current buffer but only if:
    	 *     (i)   this frag would fit completely in the next buffer
    	 * and (ii)  there is already some data in the current buffer
    	 * and (iii) this is not the head buffer.
    	 *
    	 * Where:
    	 * - (i) stops us splitting a frag into two copies
    	 *   unless the frag is too large for a single buffer.
    	 * - (ii) stops us from leaving a buffer pointlessly empty.
    	 * - (iii) stops us leaving the first buffer
    	 *   empty. Strictly speaking this is already covered
    	 *   by (ii) but is explicitly checked because
    	 *   netfront relies on the first buffer being
    	 *   non-empty and can crash otherwise.
    	 *
    	 * This means we will effectively linearise small frags but
    	 * will not needlessly split a large frag into multiple
    	 * copies, tending to give large frags their own buffers as
    	 * before.
    	 */
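    	/*
    	 * For example, with MAX_BUFFER_OFFSET == 4096 and 3000 bytes already
    	 * in the current buffer, a further 500 byte chunk is simply appended,
    	 * whereas a 2000 byte chunk starts a fresh buffer rather than being
    	 * split 1096/904 across two copy operations.
    	 */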
    	if ((offset + size > MAX_BUFFER_OFFSET) &&
    	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
    		return true;
    
    	return false;
    }
    
    /*
     * Figure out how many ring slots we're going to need to send @skb to
     * the guest. This function is essentially a dry run of
     * netbk_gop_frag_copy.
     */
    unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
    {
    	unsigned int count;
    	int i, copy_off;
    
    
    	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
    
    	copy_off = skb_headlen(skb) % PAGE_SIZE;
    
    	if (skb_shinfo(skb)->gso_size)
    		count++;
    
    	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
    		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
    		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
    		unsigned long bytes;

    		offset &= ~PAGE_MASK;

    		while (size > 0) {
    			BUG_ON(offset >= PAGE_SIZE);
    			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

    			bytes = PAGE_SIZE - offset;

    			if (bytes > size)
    				bytes = size;

    			if (start_new_rx_buffer(copy_off, bytes, 0)) {
    				count++;
    				copy_off = 0;
    			}

    			if (copy_off + bytes > MAX_BUFFER_OFFSET)
    				bytes = MAX_BUFFER_OFFSET - copy_off;

    			copy_off += bytes;

    			offset += bytes;
    			size -= bytes;

    			if (offset == PAGE_SIZE)
    				offset = 0;
    		}
    	}
    	return count;
    }
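
    /*
     * Book-keeping shared by netbk_gop_skb() and netbk_check_gop() while
     * xen_netbk_rx_action() builds and then completes a batch: producer and
     * consumer indices into the grant-copy and meta arrays, plus the fill
     * level (copy_off) and grant reference of the receive buffer currently
     * being filled.
     */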
    
    struct netrx_pending_operations {
    	unsigned copy_prod, copy_cons;
    	unsigned meta_prod, meta_cons;
    	struct gnttab_copy *copy;
    	struct netbk_rx_meta *meta;
    	int copy_off;
    	grant_ref_t copy_gref;
    };
    
    static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
    						struct netrx_pending_operations *npo)
    {
    	struct netbk_rx_meta *meta;
    	struct xen_netif_rx_request *req;
    
    	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    
    	meta = npo->meta + npo->meta_prod++;
    	meta->gso_size = 0;
    	meta->size = 0;
    	meta->id = req->id;
    
    	npo->copy_off = 0;
    	npo->copy_gref = req->gref;
    
    	return meta;
    }
    
    /*
     * Set up the grant operations for this fragment. If it's a flipping
     * interface, we also set up the unmap request from here.
     */
    static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
    				struct netrx_pending_operations *npo,
    				struct page *page, unsigned long size,
    				unsigned long offset, int *head)
    {
    	struct gnttab_copy *copy_gop;
    	struct netbk_rx_meta *meta;
    	/*
    	 * These variables are used iff get_page_ext returns true,
    	 * in which case they are guaranteed to be initialized.
    	 */
    	unsigned int uninitialized_var(group), uninitialized_var(idx);
    	int foreign = get_page_ext(page, &group, &idx);
    	unsigned long bytes;
    
    	/* Data must not cross a page boundary. */
    
    	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
    
    	meta = npo->meta + npo->meta_prod - 1;
    
    
    	/* Skip unused frames from start of page */
    	page += offset >> PAGE_SHIFT;
    	offset &= ~PAGE_MASK;
    
    
    	while (size > 0) {
    
    		BUG_ON(offset >= PAGE_SIZE);
    
    		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
    
    
    		bytes = PAGE_SIZE - offset;
    
    		if (bytes > size)
    			bytes = size;
    
    		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
    
    			/*
    			 * Netfront requires there to be some data in the head
    			 * buffer.
    			 */
    			BUG_ON(*head);
    
    			meta = get_next_rx_buffer(vif, npo);
    		}
    
    		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
    			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
    
    		copy_gop = npo->copy + npo->copy_prod++;
    		copy_gop->flags = GNTCOPY_dest_gref;
    		if (foreign) {
    			struct xen_netbk *netbk = &xen_netbk[group];
    			struct pending_tx_info *src_pend;
    
    			src_pend = &netbk->pending_tx_info[idx];
    
    			copy_gop->source.domid = src_pend->vif->domid;
    			copy_gop->source.u.ref = src_pend->req.gref;
    			copy_gop->flags |= GNTCOPY_source_gref;
    		} else {
    			void *vaddr = page_address(page);
    			copy_gop->source.domid = DOMID_SELF;
    			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
    		}
    		copy_gop->source.offset = offset;
    		copy_gop->dest.domid = vif->domid;
    
    		copy_gop->dest.offset = npo->copy_off;
    		copy_gop->dest.u.ref = npo->copy_gref;
    		copy_gop->len = bytes;
    
    		npo->copy_off += bytes;
    		meta->size += bytes;
    
    		offset += bytes;
    		size -= bytes;
    
    
    		/* Next frame */
    		if (offset == PAGE_SIZE && size) {
    			BUG_ON(!PageCompound(page));
    			page++;
    			offset = 0;
    		}
    
    
    		/* Leave a gap for the GSO descriptor. */
    		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
    			vif->rx.req_cons++;
    
    		*head = 0; /* There must be something in this buffer now. */
    
    	}
    }
    
    /*
     * Prepare an SKB to be transmitted to the frontend.
     *
     * This function is responsible for allocating grant operations, meta
     * structures, etc.
     *
     * It returns the number of meta structures consumed. The number of
     * ring slots used is always equal to the number of meta slots used
     * plus the number of GSO descriptors used. Currently, we use either
     * zero GSO descriptors (for non-GSO packets) or one descriptor (for
     * frontend-side LRO).
     */
    static int netbk_gop_skb(struct sk_buff *skb,
    			 struct netrx_pending_operations *npo)
    {
    	struct xenvif *vif = netdev_priv(skb->dev);
    	int nr_frags = skb_shinfo(skb)->nr_frags;
    	int i;
    	struct xen_netif_rx_request *req;
    	struct netbk_rx_meta *meta;
    	unsigned char *data;
    	int head = 1;
    	int old_meta_prod;
    
    	old_meta_prod = npo->meta_prod;
    
    	/* Set up a GSO prefix descriptor, if necessary */
    	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
    		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    		meta = npo->meta + npo->meta_prod++;
    		meta->gso_size = skb_shinfo(skb)->gso_size;
    		meta->size = 0;
    		meta->id = req->id;
    	}
    
    	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    	meta = npo->meta + npo->meta_prod++;
    
    	if (!vif->gso_prefix)
    		meta->gso_size = skb_shinfo(skb)->gso_size;
    	else
    		meta->gso_size = 0;
    
    	meta->size = 0;
    	meta->id = req->id;
    	npo->copy_off = 0;
    	npo->copy_gref = req->gref;
    
    	data = skb->data;
    	while (data < skb_tail_pointer(skb)) {
    		unsigned int offset = offset_in_page(data);
    		unsigned int len = PAGE_SIZE - offset;
    
    		if (data + len > skb_tail_pointer(skb))
    			len = skb_tail_pointer(skb) - data;
    
    		netbk_gop_frag_copy(vif, skb, npo,
    				    virt_to_page(data), len, offset, &head);
    		data += len;
    	}
    
    	for (i = 0; i < nr_frags; i++) {
    		netbk_gop_frag_copy(vif, skb, npo,
    				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
    				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
    				    skb_shinfo(skb)->frags[i].page_offset,
    				    &head);
    	}
    
    	return npo->meta_prod - old_meta_prod;
    }
    
    /*
     * This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
     * used to set up the operations on the top of
     * netrx_pending_operations, which have since been done.  Check that
     * they didn't give any errors and advance over them.
     */
    static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
    			   struct netrx_pending_operations *npo)
    {
    	struct gnttab_copy     *copy_op;
    	int status = XEN_NETIF_RSP_OKAY;
    	int i;
    
    	for (i = 0; i < nr_meta_slots; i++) {
    		copy_op = npo->copy + npo->copy_cons++;
    		if (copy_op->status != GNTST_okay) {
    			netdev_dbg(vif->dev,
    				   "Bad status %d from copy to DOM%d.\n",
    				   copy_op->status, vif->domid);
    			status = XEN_NETIF_RSP_ERROR;
    		}
    	}
    
    	return status;
    }
    
    static void netbk_add_frag_responses(struct xenvif *vif, int status,
    				     struct netbk_rx_meta *meta,
    				     int nr_meta_slots)
    {
    	int i;
    	unsigned long offset;
    
    	/* No fragments used */
    	if (nr_meta_slots <= 1)
    		return;
    
    	nr_meta_slots--;
    
    	for (i = 0; i < nr_meta_slots; i++) {
    		int flags;
    		if (i == nr_meta_slots - 1)
    			flags = 0;
    		else
    			flags = XEN_NETRXF_more_data;
    
    		offset = 0;
    		make_rx_response(vif, meta[i].id, status, offset,
    				 meta[i].size, flags);
    	}
    }
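
    /*
     * The number of meta slots each queued skb consumed is stashed in
     * skb->cb between the two passes of xen_netbk_rx_action().
     */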
    
    struct skb_cb_overlay {
    	int meta_slots_used;
    };
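
    /*
     * xen_netbk_rx_action() works in two passes: it first converts queued
     * skbs into grant-copy operations and meta slots, hands the whole batch
     * to gnttab_batch_copy(), and then walks the same skbs again to build
     * the responses and notify any frontends that requested an event.
     */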
    
    static void xen_netbk_rx_action(struct xen_netbk *netbk)
    {
    	struct xenvif *vif = NULL, *tmp;
    	s8 status;
    	u16 irq, flags;
    	struct xen_netif_rx_response *resp;
    	struct sk_buff_head rxq;
    	struct sk_buff *skb;
    	LIST_HEAD(notify);
    	int ret;
    	int nr_frags;
    	int count;
    	unsigned long offset;
    	struct skb_cb_overlay *sco;
    
    	struct netrx_pending_operations npo = {
    		.copy  = netbk->grant_copy_op,
    		.meta  = netbk->meta,
    	};
    
    	skb_queue_head_init(&rxq);
    
    	count = 0;
    
    	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
    		vif = netdev_priv(skb->dev);
    		nr_frags = skb_shinfo(skb)->nr_frags;
    
    		sco = (struct skb_cb_overlay *)skb->cb;
    		sco->meta_slots_used = netbk_gop_skb(skb, &npo);
    
    		count += nr_frags + 1;
    
    		__skb_queue_tail(&rxq, skb);
    
    		/* Filled the batch queue? */
    		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
    			break;
    	}
    
    	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
    
    	if (!npo.copy_prod)
    		return;
    
    	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
    
    	gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
    
    	while ((skb = __skb_dequeue(&rxq)) != NULL) {
    		sco = (struct skb_cb_overlay *)skb->cb;
    
    		vif = netdev_priv(skb->dev);
    
    		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
    			resp = RING_GET_RESPONSE(&vif->rx,
    						vif->rx.rsp_prod_pvt++);
    
    			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
    
    			resp->offset = netbk->meta[npo.meta_cons].gso_size;
    			resp->id = netbk->meta[npo.meta_cons].id;
    			resp->status = sco->meta_slots_used;
    
    			npo.meta_cons++;
    			sco->meta_slots_used--;
    		}
    
    
    		vif->dev->stats.tx_bytes += skb->len;
    		vif->dev->stats.tx_packets++;
    
    		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
    
    		if (sco->meta_slots_used == 1)
    			flags = 0;
    		else
    			flags = XEN_NETRXF_more_data;
    
    		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
    			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
    		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
    			/* remote but checksummed. */
    			flags |= XEN_NETRXF_data_validated;
    
    		offset = 0;
    		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
    					status, offset,
    					netbk->meta[npo.meta_cons].size,
    					flags);
    
    		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
    			struct xen_netif_extra_info *gso =
    				(struct xen_netif_extra_info *)
    				RING_GET_RESPONSE(&vif->rx,
    						  vif->rx.rsp_prod_pvt++);
    
    			resp->flags |= XEN_NETRXF_extra_info;
    
    			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
    			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    			gso->u.gso.pad = 0;
    			gso->u.gso.features = 0;
    
    			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
    			gso->flags = 0;
    		}
    
    		netbk_add_frag_responses(vif, status,
    					 netbk->meta + npo.meta_cons + 1,
    					 sco->meta_slots_used);
    
    		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
    		irq = vif->irq;
    		if (ret && list_empty(&vif->notify_list))
    			list_add_tail(&vif->notify_list, &notify);
    
    		xenvif_notify_tx_completion(vif);
    
    		xenvif_put(vif);
    		npo.meta_cons += sco->meta_slots_used;
    		dev_kfree_skb(skb);
    	}
    
    	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
    		notify_remote_via_irq(vif->irq);
    		list_del_init(&vif->notify_list);
    	}
    
    	/* More work to do? */
    	if (!skb_queue_empty(&netbk->rx_queue) &&
    			!timer_pending(&netbk->net_timer))
    		xen_netbk_kick_thread(netbk);
    }
    
    void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
    {
    	struct xen_netbk *netbk = vif->netbk;
    
    	skb_queue_tail(&netbk->rx_queue, skb);
    
    	xen_netbk_kick_thread(netbk);
    }
    
    static void xen_netbk_alarm(unsigned long data)
    {
    	struct xen_netbk *netbk = (struct xen_netbk *)data;
    	xen_netbk_kick_thread(netbk);
    }
    
    static int __on_net_schedule_list(struct xenvif *vif)
    {
    	return !list_empty(&vif->schedule_list);
    }
    
    /* Must be called with net_schedule_list_lock held */
    static void remove_from_net_schedule_list(struct xenvif *vif)
    {
    	if (likely(__on_net_schedule_list(vif))) {
    		list_del_init(&vif->schedule_list);
    		xenvif_put(vif);
    	}
    }
    
    static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
    {
    	struct xenvif *vif = NULL;
    
    	spin_lock_irq(&netbk->net_schedule_list_lock);
    	if (list_empty(&netbk->net_schedule_list))
    		goto out;
    
    	vif = list_first_entry(&netbk->net_schedule_list,
    			       struct xenvif, schedule_list);
    	if (!vif)
    		goto out;
    
    	xenvif_get(vif);
    
    	remove_from_net_schedule_list(vif);
    out:
    	spin_unlock_irq(&netbk->net_schedule_list_lock);
    	return vif;
    }
    
    void xen_netbk_schedule_xenvif(struct xenvif *vif)
    {
    	unsigned long flags;
    	struct xen_netbk *netbk = vif->netbk;
    
    	if (__on_net_schedule_list(vif))
    		goto kick;
    
    	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
    	if (!__on_net_schedule_list(vif) &&
    	    likely(xenvif_schedulable(vif))) {
    		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
    		xenvif_get(vif);
    	}
    	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
    
    kick:
    	smp_mb();
    	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
    	    !list_empty(&netbk->net_schedule_list))
    		xen_netbk_kick_thread(netbk);
    }
    
    void xen_netbk_deschedule_xenvif(struct xenvif *vif)
    {
    	struct xen_netbk *netbk = vif->netbk;
    	spin_lock_irq(&netbk->net_schedule_list_lock);
    	remove_from_net_schedule_list(vif);
    	spin_unlock_irq(&netbk->net_schedule_list_lock);
    }
    
    void xen_netbk_check_rx_xenvif(struct xenvif *vif)
    {
    	int more_to_do;
    
    	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
    
    	if (more_to_do)
    		xen_netbk_schedule_xenvif(vif);
    }
    
    static void tx_add_credit(struct xenvif *vif)
    {
    	unsigned long max_burst, max_credit;
    
    	/*
    	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
    	 * Otherwise the interface can seize up due to insufficient credit.
    	 */
    	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
    	max_burst = min(max_burst, 131072UL);
    	max_burst = max(max_burst, vif->credit_bytes);
    
    	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
    	max_credit = vif->remaining_credit + vif->credit_bytes;
    	if (max_credit < vif->remaining_credit)
    		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
    
    	vif->remaining_credit = min(max_credit, max_burst);
    }
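
    /*
     * remaining_credit is consumed by the transmit path and replenished in
     * chunks of credit_bytes; tx_credit_callback() below runs when the vif's
     * credit timer fires and performs the top-up.
     */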
    
    static void tx_credit_callback(unsigned long data)
    {
    	struct xenvif *vif = (struct xenvif *)data;
    	tx_add_credit(vif);
    	xen_netbk_check_rx_xenvif(vif);
    }
    
    static void netbk_tx_err(struct xenvif *vif,
    			 struct xen_netif_tx_request *txp, RING_IDX end)
    {
    	RING_IDX cons = vif->tx.req_cons;
    
    	do {
    		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
    		if (cons >= end)
    			break;
    		txp = RING_GET_REQUEST(&vif->tx, cons++);
    	} while (1);
    	vif->tx.req_cons = cons;
    	xen_netbk_check_rx_xenvif(vif);
    	xenvif_put(vif);
    }
    
    
    static void netbk_fatal_tx_err(struct xenvif *vif)
    {
    	netdev_err(vif->dev, "fatal error; disabling device\n");
    	xenvif_carrier_off(vif);
    	xenvif_put(vif);
    }
    
    
    static int netbk_count_requests(struct xenvif *vif,
    				struct xen_netif_tx_request *first,
    				struct xen_netif_tx_request *txp,
    				int work_to_do)
    {
    	RING_IDX cons = vif->tx.req_cons;
    	int frags = 0;
    
    	if (!(first->flags & XEN_NETTXF_more_data))
    		return 0;
    
    	do {
    		if (frags >= work_to_do) {
    
    			netdev_err(vif->dev, "Need more frags\n");
    			netbk_fatal_tx_err(vif);
    
    			return -frags;
    		}
    
    		if (unlikely(frags >= MAX_SKB_FRAGS)) {
    
    			netdev_err(vif->dev, "Too many frags\n");
    			netbk_fatal_tx_err(vif);
    
    			return -frags;
    		}
    
    		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
    		       sizeof(*txp));
    		if (txp->size > first->size) {
    
    			netdev_err(vif->dev, "Frag is bigger than frame.\n");
    			netbk_fatal_tx_err(vif);
    
    			return -frags;
    		}
    
    		first->size -= txp->size;
    		frags++;
    
    		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
    
    			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
    				 txp->offset, txp->size);
    
    			netbk_fatal_tx_err(vif);
    
    			return -frags;
    		}
    	} while ((txp++)->flags & XEN_NETTXF_more_data);
    	return frags;
    }
    
    static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
    					 struct sk_buff *skb,
    					 u16 pending_idx)
    {
    	struct page *page;
    	page = alloc_page(GFP_KERNEL|__GFP_COLD);
    	if (!page)
    		return NULL;
    	set_page_ext(page, netbk, pending_idx);
    	netbk->mmap_pages[pending_idx] = page;
    	return page;
    }
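
    /*
     * Pages allocated above are tagged with set_page_ext() and remembered in
     * mmap_pages[], which is how netbk_gop_frag_copy() later recognises them
     * as carrying foreign (guest transmit) data and sources the grant copy
     * from the original grant reference instead of local memory.
     */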
    
    static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
    						  struct xenvif *vif,
    						  struct sk_buff *skb,
    						  struct xen_netif_tx_request *txp,
    						  struct gnttab_copy *gop)
    {
    	struct skb_shared_info *shinfo = skb_shinfo(skb);
    	skb_frag_t *frags = shinfo->frags;
    
    	u16 pending_idx = *((u16 *)skb->data);
    
    	int i, start;
    
    	/* Skip first skb fragment if it is on same page as header fragment. */
    
    	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
    
    	for (i = start; i < shinfo->nr_frags; i++, txp++) {
    		struct page *page;
    		pending_ring_idx_t index;
    		struct pending_tx_info *pending_tx_info =
    			netbk->pending_tx_info;
    
    		index = pending_index(netbk->pending_cons++);
    		pending_idx = netbk->pending_ring[index];
    		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
    		if (!page)
    			return NULL;
    
    		gop->source.u.ref = txp->gref;
    		gop->source.domid = vif->domid;
    		gop->source.offset = txp->offset;
    
    		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
    		gop->dest.domid = DOMID_SELF;
    		gop->dest.offset = txp->offset;
    
    		gop->len = txp->size;
    		gop->flags = GNTCOPY_source_gref;
    
    		gop++;
    
    		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
    		xenvif_get(vif);
    		pending_tx_info[pending_idx].vif = vif;
    
    		frag_set_pending_idx(&frags[i], pending_idx);