    /*
     * Back-end of the driver for virtual network devices. This portion of the
     * driver exports a 'unified' network-device interface that can be accessed
     * by any operating system that implements a compatible front end. A
     * reference front-end implementation can be found in:
     *  drivers/net/xen-netfront.c
     *
     * Copyright (c) 2002-2005, K A Fraser
     *
     * This program is free software; you can redistribute it and/or
     * modify it under the terms of the GNU General Public License version 2
     * as published by the Free Software Foundation; or, when distributed
     * separately from the Linux kernel or incorporated into other
     * software packages, subject to the following license:
     *
     * Permission is hereby granted, free of charge, to any person obtaining a copy
     * of this source file (the "Software"), to deal in the Software without
     * restriction, including without limitation the rights to use, copy, modify,
     * merge, publish, distribute, sublicense, and/or sell copies of the Software,
     * and to permit persons to whom the Software is furnished to do so, subject to
     * the following conditions:
     *
     * The above copyright notice and this permission notice shall be included in
     * all copies or substantial portions of the Software.
     *
     * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
     * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     * IN THE SOFTWARE.
     */
    
    #include "common.h"
    
    #include <linux/kthread.h>
    #include <linux/if_vlan.h>
    #include <linux/udp.h>
    
    #include <net/tcp.h>
    
    
    #include <xen/xen.h>
    
    #include <xen/events.h>
    #include <xen/interface/memory.h>
    
    #include <asm/xen/hypercall.h>
    #include <asm/xen/page.h>
    
    
/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
    bool separate_tx_rx_irq = 1;
    module_param(separate_tx_rx_irq, bool, 0644);
    
    
/*
 * This is the maximum number of slots a skb can use. If a guest sends
 * a skb which exceeds this limit it is considered malicious.
 */
    
    #define FATAL_SKB_SLOTS_DEFAULT 20
    static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
    module_param(fatal_skb_slots, uint, 0444);
    
/*
 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum slots a valid packet can use. Now this value is defined
 * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
 * all backends.
 */
    #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
    
    
/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the
 * head of one or more merged tx requests; otherwise it is the
 * continuation of the previous tx request.
 */
static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}

    static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
    			       u8 status);
    
    
    static void make_tx_response(struct xenvif *vif,
    			     struct xen_netif_tx_request *txp,
    			     s8       st);
    
    
    static inline int tx_work_todo(struct xenvif *vif);
    static inline int rx_work_todo(struct xenvif *vif);
    
    
    static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
    					     u16      id,
    					     s8       st,
    					     u16      offset,
    					     u16      size,
    					     u16      flags);
    
    
static inline unsigned long idx_to_pfn(struct xenvif *vif,
				       u16 idx)
{
	return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
    
    /*
     * This is the amount of packet we copy rather than map, so that the
     * guest can't fiddle with the contents of the headers while we do
     * packet processing on them (netfilter, routing, etc).
     */
    #define PKT_PROT_LEN    (ETH_HLEN + \
    			 VLAN_HLEN + \
    			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
    			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
    
    
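/*
 * On the TX path the skb frags do not yet reference real pages: the
 * frag's page_offset field is borrowed to carry the pending ring index
 * of the corresponding tx request until the grant copy has completed.
 */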
    static u16 frag_get_pending_idx(skb_frag_t *frag)
    {
    	return (u16)frag->page_offset;
    }
    
    static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
    {
    	frag->page_offset = pending_idx;
    }
    
    
    static inline pending_ring_idx_t pending_index(unsigned i)
    {
    	return i & (MAX_PENDING_REQS-1);
    }
    
    
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS -
		vif->pending_prod + vif->pending_cons;
}
    
    static int max_required_rx_slots(struct xenvif *vif)
    {
    	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
    
    
    	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
    
    	if (vif->can_sg || vif->gso || vif->gso_prefix)
    		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
    
    	return max;
    }
    
    
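/*
 * Return true when the frontend has not posted enough RX requests (or
 * we have not yet freed enough response slots) to accept a worst-case
 * skb.
 */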
int xenvif_rx_ring_full(struct xenvif *vif)
{
    	RING_IDX peek   = vif->rx_req_cons_peek;
    	RING_IDX needed = max_required_rx_slots(vif);
    
    	return ((vif->rx.sring->req_prod - peek) < needed) ||
    	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
    }
    
    
int xenvif_must_stop_queue(struct xenvif *vif)
{
	if (!xenvif_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */

	return xenvif_rx_ring_full(vif);
}
    
    /*
     * Returns true if we should start a new receive buffer instead of
     * adding 'size' bytes to a buffer which currently contains 'offset'
     * bytes.
     */
    static bool start_new_rx_buffer(int offset, unsigned long size, int head)
    {
    	/* simple case: we have completely filled the current buffer. */
    	if (offset == MAX_BUFFER_OFFSET)
    		return true;
    
    	/*
    	 * complex case: start a fresh buffer if the current frag
    	 * would overflow the current buffer but only if:
    	 *     (i)   this frag would fit completely in the next buffer
    	 * and (ii)  there is already some data in the current buffer
    	 * and (iii) this is not the head buffer.
    	 *
    	 * Where:
    	 * - (i) stops us splitting a frag into two copies
    	 *   unless the frag is too large for a single buffer.
    	 * - (ii) stops us from leaving a buffer pointlessly empty.
    	 * - (iii) stops us leaving the first buffer
    	 *   empty. Strictly speaking this is already covered
    	 *   by (ii) but is explicitly checked because
    	 *   netfront relies on the first buffer being
    	 *   non-empty and can crash otherwise.
    	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies, tending to give large frags
	 * their own buffers as before.
    	 */
    	if ((offset + size > MAX_BUFFER_OFFSET) &&
    	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
    		return true;
    
    	return false;
    }
    
    
    struct xenvif_count_slot_state {
    	unsigned long copy_off;
    	bool head;
    };
    
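/*
 * Count how many RX ring slots are needed to place @size bytes starting
 * at @offset, carrying the buffer fill state across calls in @state.
 * This mirrors the slot-splitting decisions that start_new_rx_buffer()
 * makes on the real copy path.
 */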
    unsigned int xenvif_count_frag_slots(struct xenvif *vif,
    				     unsigned long offset, unsigned long size,
    				     struct xenvif_count_slot_state *state)
    {
    	unsigned count = 0;
    
    	offset &= ~PAGE_MASK;
    
    	while (size > 0) {
    		unsigned long bytes;
    
    		bytes = PAGE_SIZE - offset;
    
    		if (bytes > size)
    			bytes = size;
    
    		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
    			count++;
    			state->copy_off = 0;
    		}
    
    		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
    			bytes = MAX_BUFFER_OFFSET - state->copy_off;
    
    		state->copy_off += bytes;
    
    		offset += bytes;
    		size -= bytes;
    
    		if (offset == PAGE_SIZE)
    			offset = 0;
    
    		state->head = false;
    	}
    
    	return count;
    }
    
    
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * xenvif_gop_frag_copy.
 */
unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	struct xenvif_count_slot_state state;
	unsigned int count;
	unsigned char *data;
	unsigned int i;
    
    	state.head = true;
    	state.copy_off = 0;
    
    	/* Slot for the first (partial) page of data. */
    	count = 1;
    
    	/* Need a slot for the GSO prefix for GSO extra data? */
    
    	if (skb_shinfo(skb)->gso_size)
    		count++;
    
    
    	data = skb->data;
    	while (data < skb_tail_pointer(skb)) {
    		unsigned long offset = offset_in_page(data);
    		unsigned long size = PAGE_SIZE - offset;
    
    		if (data + size > skb_tail_pointer(skb))
    			size = skb_tail_pointer(skb) - data;
    
		count += xenvif_count_frag_slots(vif, offset, size, &state);

		data += size;
	}

    	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
    		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
    		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
    
    		count += xenvif_count_frag_slots(vif, offset, size, &state);
    
    	}
    	return count;
    }
    
    struct netrx_pending_operations {
    	unsigned copy_prod, copy_cons;
    	unsigned meta_prod, meta_cons;
    	struct gnttab_copy *copy;
    
    	struct xenvif_rx_meta *meta;
    
    	int copy_off;
    	grant_ref_t copy_gref;
    };
    
    
    static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;
    
    	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    
    	meta = npo->meta + npo->meta_prod++;
    	meta->gso_size = 0;
    	meta->size = 0;
    	meta->id = req->id;
    
    	npo->copy_off = 0;
    	npo->copy_gref = req->gref;
    
    	return meta;
    }
    
    /*
     * Set up the grant operations for this fragment. If it's a flipping
     * interface, we also set up the unmap request from here.
     */
    
    static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
    				 struct netrx_pending_operations *npo,
    				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
    	struct gnttab_copy *copy_gop;
    
    	struct xenvif_rx_meta *meta;
    
    	unsigned long bytes;
    
    	/* Data must not cross a page boundary. */
    
    	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
    
    
    	meta = npo->meta + npo->meta_prod - 1;
    
    
    	/* Skip unused frames from start of page */
    	page += offset >> PAGE_SHIFT;
    	offset &= ~PAGE_MASK;
    
    
    	while (size > 0) {
    
    		BUG_ON(offset >= PAGE_SIZE);
    
    		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
    
    
    		bytes = PAGE_SIZE - offset;
    
    		if (bytes > size)
    			bytes = size;
    
    		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
    
    			/*
    			 * Netfront requires there to be some data in the head
    			 * buffer.
    			 */
    			BUG_ON(*head);
    
    			meta = get_next_rx_buffer(vif, npo);
    		}
    
    		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
    			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
    
    		copy_gop = npo->copy + npo->copy_prod++;
    		copy_gop->flags = GNTCOPY_dest_gref;
    
    		copy_gop->len = bytes;
    
    
    		copy_gop->source.domid = DOMID_SELF;
    		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
    
    		copy_gop->source.offset = offset;
    
    
    		copy_gop->dest.domid = vif->domid;
    
    		copy_gop->dest.offset = npo->copy_off;
    		copy_gop->dest.u.ref = npo->copy_gref;
    
    		npo->copy_off += bytes;
    		meta->size += bytes;
    
    		offset += bytes;
    		size -= bytes;
    
    
    		/* Next frame */
    		if (offset == PAGE_SIZE && size) {
    			BUG_ON(!PageCompound(page));
    			page++;
    			offset = 0;
    		}
    
    
    		/* Leave a gap for the GSO descriptor. */
    		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
    			vif->rx.req_cons++;
    
    		*head = 0; /* There must be something in this buffer now. */
    
    	}
    }
    
    /*
     * Prepare an SKB to be transmitted to the frontend.
     *
     * This function is responsible for allocating grant operations, meta
     * structures, etc.
     *
     * It returns the number of meta structures consumed. The number of
     * ring slots used is always equal to the number of meta slots used
     * plus the number of GSO descriptors used. Currently, we use either
     * zero GSO descriptors (for non-GSO packets) or one descriptor (for
     * frontend-side LRO).
     */
    
    static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo)
{
    	struct xenvif *vif = netdev_priv(skb->dev);
    	int nr_frags = skb_shinfo(skb)->nr_frags;
    	int i;
    	struct xen_netif_rx_request *req;
    
    	struct xenvif_rx_meta *meta;
    
    	unsigned char *data;
    	int head = 1;
    	int old_meta_prod;
    
    	old_meta_prod = npo->meta_prod;
    
    	/* Set up a GSO prefix descriptor, if necessary */
    	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
    		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    		meta = npo->meta + npo->meta_prod++;
    		meta->gso_size = skb_shinfo(skb)->gso_size;
    		meta->size = 0;
    		meta->id = req->id;
    	}
    
    	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    	meta = npo->meta + npo->meta_prod++;
    
    	if (!vif->gso_prefix)
    		meta->gso_size = skb_shinfo(skb)->gso_size;
    	else
    		meta->gso_size = 0;
    
    	meta->size = 0;
    	meta->id = req->id;
    	npo->copy_off = 0;
    	npo->copy_gref = req->gref;
    
    	data = skb->data;
    	while (data < skb_tail_pointer(skb)) {
    		unsigned int offset = offset_in_page(data);
    		unsigned int len = PAGE_SIZE - offset;
    
    		if (data + len > skb_tail_pointer(skb))
    			len = skb_tail_pointer(skb) - data;
    
    
    		xenvif_gop_frag_copy(vif, skb, npo,
    				     virt_to_page(data), len, offset, &head);
    
    		data += len;
    	}
    
    	for (i = 0; i < nr_frags; i++) {
    
    		xenvif_gop_frag_copy(vif, skb, npo,
    				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
    				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
    				     skb_shinfo(skb)->frags[i].page_offset,
    				     &head);
    
    	}
    
    	return npo->meta_prod - old_meta_prod;
    }
    
/*
 * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done.  Check that
 * they didn't give any errors and advance over them.
 */
    
    static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
    	struct gnttab_copy     *copy_op;
    	int status = XEN_NETIF_RSP_OKAY;
    	int i;
    
    	for (i = 0; i < nr_meta_slots; i++) {
    		copy_op = npo->copy + npo->copy_cons++;
    		if (copy_op->status != GNTST_okay) {
    			netdev_dbg(vif->dev,
    				   "Bad status %d from copy to DOM%d.\n",
    				   copy_op->status, vif->domid);
    			status = XEN_NETIF_RSP_ERROR;
    		}
    	}
    
    	return status;
    }
    
    
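/*
 * Generate RX responses for the fragment slots of a packet whose head
 * response has already been queued; all but the last carry the
 * XEN_NETRXF_more_data flag.
 */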
    static void xenvif_add_frag_responses(struct xenvif *vif, int status,
    				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
    	int i;
    	unsigned long offset;
    
    	/* No fragments used */
    	if (nr_meta_slots <= 1)
    		return;
    
    	nr_meta_slots--;
    
    	for (i = 0; i < nr_meta_slots; i++) {
    		int flags;
    		if (i == nr_meta_slots - 1)
    			flags = 0;
    		else
    			flags = XEN_NETRXF_more_data;
    
    		offset = 0;
    		make_rx_response(vif, meta[i].id, status, offset,
    				 meta[i].size, flags);
    	}
    }
    
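/* Overlaid on skb->cb to track how many meta slots an skb consumed. */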
    struct skb_cb_overlay {
    	int meta_slots_used;
    };
    
    
static void xenvif_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}

void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
    
    	struct xen_netif_rx_response *resp;
    	struct sk_buff_head rxq;
    	struct sk_buff *skb;
    	LIST_HEAD(notify);
    	int ret;
    	int nr_frags;
    	int count;
    	unsigned long offset;
    	struct skb_cb_overlay *sco;
    
    	int need_to_notify = 0;
    
    
	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};
    
    	skb_queue_head_init(&rxq);
    
    	count = 0;
    
    
    	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
    
    		vif = netdev_priv(skb->dev);
    		nr_frags = skb_shinfo(skb)->nr_frags;
    
    		sco = (struct skb_cb_overlay *)skb->cb;
    
    		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
    
    
    		count += nr_frags + 1;
    
    		__skb_queue_tail(&rxq, skb);
    
    		/* Filled the batch queue? */
    
    		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
    
    		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
    			break;
    	}
    
    
    	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
    
    
    	if (!npo.copy_prod)
    		return;
    
    
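	/* Issue all queued grant copy operations in a single batch. */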
    	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
    	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
    
    
    	while ((skb = __skb_dequeue(&rxq)) != NULL) {
    		sco = (struct skb_cb_overlay *)skb->cb;
    
    		vif = netdev_priv(skb->dev);
    
    
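		/* GSO prefix descriptors take a response slot of their own,
		 * carrying the gso_size in the offset field.
		 */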
    		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
    
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

    			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
    
    
    			resp->offset = vif->meta[npo.meta_cons].gso_size;
    			resp->id = vif->meta[npo.meta_cons].id;
    
    			resp->status = sco->meta_slots_used;
    
    			npo.meta_cons++;
    			sco->meta_slots_used--;
    		}
    
    
    		vif->dev->stats.tx_bytes += skb->len;
    		vif->dev->stats.tx_packets++;
    
    
    		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
    
    
    		if (sco->meta_slots_used == 1)
    			flags = 0;
    		else
    			flags = XEN_NETRXF_more_data;
    
    		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
    			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
    		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
    			/* remote but checksummed. */
    			flags |= XEN_NETRXF_data_validated;
    
    		offset = 0;
    
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);
    
    
    		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
    
    			struct xen_netif_extra_info *gso =
    				(struct xen_netif_extra_info *)
    				RING_GET_RESPONSE(&vif->rx,
    						  vif->rx.rsp_prod_pvt++);
    
    			resp->flags |= XEN_NETRXF_extra_info;
    
    
    			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
    
    			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    			gso->u.gso.pad = 0;
    			gso->u.gso.features = 0;
    
    			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
    			gso->flags = 0;
    		}
    
    
    		xenvif_add_frag_responses(vif, status,
    					  vif->meta + npo.meta_cons + 1,
    					  sco->meta_slots_used);
    
    
    		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
    
    
    		if (ret)
    			need_to_notify = 1;
    
    
    		xenvif_notify_tx_completion(vif);
    
    		npo.meta_cons += sco->meta_slots_used;
    		dev_kfree_skb(skb);
    	}
    
    
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
    
    
	/* More work to do? */
	if (!skb_queue_empty(&vif->rx_queue))
		xenvif_kick_thread(vif);
}

void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
    	skb_queue_tail(&vif->rx_queue, skb);
    
	xenvif_kick_thread(vif);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
    	int more_to_do;
    
    	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
    
	if (more_to_do)
		napi_schedule(&vif->napi);
    
    }
    
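/*
 * Replenish the vif's transmit credit used for rate limiting, allowing
 * a burst large enough for at least one full-size packet.
 */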
    static void tx_add_credit(struct xenvif *vif)
    {
    	unsigned long max_burst, max_credit;
    
    	/*
    	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
    	 * Otherwise the interface can seize up due to insufficient credit.
    	 */
    	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
    	max_burst = min(max_burst, 131072UL);
    	max_burst = max(max_burst, vif->credit_bytes);
    
    	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
    	max_credit = vif->remaining_credit + vif->credit_bytes;
    	if (max_credit < vif->remaining_credit)
    		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
    
    	vif->remaining_credit = min(max_credit, max_burst);
    }
    
    static void tx_credit_callback(unsigned long data)
    {
    	struct xenvif *vif = (struct xenvif *)data;
    	tx_add_credit(vif);
    
	xenvif_check_rx_xenvif(vif);
}

    static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
    	RING_IDX cons = vif->tx.req_cons;
    
    	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
    		txp = RING_GET_REQUEST(&vif->tx, cons++);
    	} while (1);
    	vif->tx.req_cons = cons;
    }
    
    
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
    	netdev_err(vif->dev, "fatal error; disabling device\n");
    	xenvif_carrier_off(vif);
    }
    
    
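/*
 * Walk the chain of tx requests that make up one frame, validating each
 * slot. Returns the number of extra slots consumed, or a negative error
 * if the frontend misbehaved badly enough to be disabled or the packet
 * must be dropped.
 */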
    static int xenvif_count_requests(struct xenvif *vif,
    				 struct xen_netif_tx_request *first,
    				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
    	RING_IDX cons = vif->tx.req_cons;
    
	int slots = 0;
	int drop_err = 0;
	int more_data;

    	if (!(first->flags & XEN_NETTXF_more_data))
    		return 0;
    
    	do {
    
    		struct xen_netif_tx_request dropped_tx = { 0 };
    
    
    		if (slots >= work_to_do) {
    			netdev_err(vif->dev,
    				   "Asked for %d slots but exceeds this limit\n",
    				   work_to_do);
    
			xenvif_fatal_tx_err(vif);
			return -ENODATA;
		}

    		/* This guest is really using too many slots and
    		 * considered malicious.
    		 */
    
    		if (unlikely(slots >= fatal_skb_slots)) {
    
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

    		if (drop_err)
    			txp = &dropped_tx;
    
    
		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));
    
    
    		/* If the guest submitted a frame >= 64 KiB then
    		 * first->size overflowed and following slots will
    		 * appear to be larger than the frame.
    		 *
    		 * This cannot be fatal error as there are buggy
    		 * frontends that do this.
    		 *
    		 * Consume all slots and drop the packet.
    		 */
    		if (!drop_err && txp->size > first->size) {
    			if (net_ratelimit())
    				netdev_dbg(vif->dev,
    					   "Invalid tx request, slot size %u > remaining size %u\n",
    					   txp->size, first->size);
    			drop_err = -EIO;
    
    		}
    
		first->size -= txp->size;
		slots++;

    		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
    
    			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
    
    Ian Campbell's avatar
    Ian Campbell committed
    				 txp->offset, txp->size);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_fatal_tx_err(vif);
    
    
    		more_data = txp->flags & XEN_NETTXF_more_data;
    
    		if (!drop_err)
    			txp++;
    
	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}

    static struct page *xenvif_alloc_page(struct xenvif *vif,
				      u16 pending_idx)
{
    	struct page *page;
    
    
    	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
    
    	if (!page)
    		return NULL;
    
    	vif->mmap_pages[pending_idx] = page;
    
    
    	return page;
    }
    
    
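/*
 * Build grant copy operations that coalesce the guest's tx slots into
 * locally allocated pages, filling the skb's frags with pending ring
 * indexes as it goes.
 */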
    static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
    					       struct sk_buff *skb,
    					       struct xen_netif_tx_request *txp,
					       struct gnttab_copy *gop)
{
    	struct skb_shared_info *shinfo = skb_shinfo(skb);
    	skb_frag_t *frags = shinfo->frags;
    
    	u16 pending_idx = *((u16 *)skb->data);
    
    	u16 head_idx = 0;
    	int slot, start;
    	struct page *page;
    	pending_ring_idx_t index, start_idx = 0;
    	uint16_t dst_offset;
    	unsigned int nr_slots;
    	struct pending_tx_info *first = NULL;
    
	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
    	nr_slots = shinfo->nr_frags;
    
    
    	/* Skip first skb fragment if it is on same page as header fragment. */
    
    	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
    
	/* Coalesce tx requests, at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in xenvif_count_requests().
	 */
    	for (shinfo->nr_frags = slot = start; slot < nr_slots;
    	     shinfo->nr_frags++) {
    
		struct pending_tx_info *pending_tx_info =
			vif->pending_tx_info;
    
    		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
    
		if (!page)
			goto err;

    		dst_offset = 0;
    		first = NULL;
    		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
    			gop->flags = GNTCOPY_source_gref;
    
    			gop->source.u.ref = txp->gref;
    			gop->source.domid = vif->domid;
    			gop->source.offset = txp->offset;
    
    			gop->dest.domid = DOMID_SELF;
    
    			gop->dest.offset = dst_offset;
    			gop->dest.u.gmfn = virt_to_mfn(page_address(page));
    
    			if (dst_offset + txp->size > PAGE_SIZE) {
    				/* This page can only merge a portion
    				 * of tx request. Do not increment any
    				 * pointer / counter here. The txp
    				 * will be dealt with in future
    				 * rounds, eventually hitting the
    				 * `else` branch.
    				 */
    				gop->len = PAGE_SIZE - dst_offset;
    				txp->offset += gop->len;
    				txp->size -= gop->len;
    				dst_offset += gop->len; /* quit loop */
    			} else {
    				/* This tx request can be merged in the page */
    				gop->len = txp->size;
    				dst_offset += gop->len;
    
    
    				index = pending_index(vif->pending_cons++);
    
    				pending_idx = vif->pending_ring[index];
    
    
    				memcpy(&pending_tx_info[pending_idx].req, txp,
    				       sizeof(*txp));
    
    				/* Poison these fields, corresponding
    				 * fields for head tx req will be set
    				 * to correct values after the loop.
    				 */
    
    				vif->mmap_pages[pending_idx] = (void *)(~0UL);
    
    				pending_tx_info[pending_idx].head =
    					INVALID_PENDING_RING_IDX;
    
    				if (!first) {
    					first = &pending_tx_info[pending_idx];
    					start_idx = index;
    					head_idx = pending_idx;
    				}
    
    				txp++;
    				slot++;
			}

			gop++;
		}

    		first->req.offset = 0;
    		first->req.size = dst_offset;
    		first->head = start_idx;
    
    		vif->mmap_pages[head_idx] = page;
    
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

    	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
    
    
    	return gop;
    
    err:
    	/* Unwind, freeing all pages and sending error responses. */
    
    	while (shinfo->nr_frags-- > start) {
    
		xenvif_idx_release(vif,
				frag_get_pending_idx(&frags[shinfo->nr_frags]),
				XEN_NETIF_RSP_ERROR);
	}

	/* The head too, if necessary. */
	if (start)
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}
    
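/*
 * Check the grant copy status for the header and each fragment of an
 * skb, releasing the corresponding pending slots and recording any
 * error so the packet can be dropped.
 */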
    static int xenvif_tx_check_gop(struct xenvif *vif,
    			       struct sk_buff *skb,
			       struct gnttab_copy **gopp)
{
    	struct gnttab_copy *gop = *gopp;
    
    	u16 pending_idx = *((u16 *)skb->data);
    
    	struct skb_shared_info *shinfo = skb_shinfo(skb);
    
    	struct pending_tx_info *tx_info;
    
    	int nr_frags = shinfo->nr_frags;
    	int i, err, start;
    
    	u16 peek; /* peek into next tx request */
    
    
    	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
    
    
    	/* Skip first skb fragment if it is on same page as header fragment. */
    
    	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
    
    
    	for (i = start; i < nr_frags; i++) {
    		int j, newerr;
    
    		pending_ring_idx_t head;
    
    		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
    
    		tx_info = &vif->pending_tx_info[pending_idx];
    
    		head = tx_info->head;
    
    
    		/* Check error status: if okay then remember grant handle. */
    
    		do {
    			newerr = (++gop)->status;
    			if (newerr)
    				break;
    
    			peek = vif->pending_ring[pending_index(++head)];