Skip to content
Snippets Groups Projects
netback.c 46.5 KiB
Newer Older
  • Learn to ignore specific revisions
  • Ian Campbell's avatar
    Ian Campbell committed
    	int nr_frags = shinfo->nr_frags;
    	int i, err, start;
    
    	u16 peek; /* peek into next tx request */
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    	/* Check status of header. */
    	err = gop->status;
    
    Wei Liu's avatar
    Wei Liu committed
    		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    	/* Skip first skb fragment if it is on same page as header fragment. */
    
    	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    	for (i = start; i < nr_frags; i++) {
    		int j, newerr;
    
    		pending_ring_idx_t head;
    
    		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
    
    		tx_info = &vif->pending_tx_info[pending_idx];
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    		/* Check error status: if okay then remember grant handle. */
    
    		do {
    			newerr = (++gop)->status;
    			if (newerr)
    				break;
    
    			peek = vif->pending_ring[pending_index(++head)];
    		} while (!pending_tx_is_head(vif, peek));
    
    Ian Campbell's avatar
    Ian Campbell committed
    		if (likely(!newerr)) {
    			/* Had a previous error? Invalidate this fragment. */
    			if (unlikely(err))
    
    Wei Liu's avatar
    Wei Liu committed
    				xenvif_idx_release(vif, pending_idx,
    						   XEN_NETIF_RSP_OKAY);
    
    Ian Campbell's avatar
    Ian Campbell committed
    			continue;
    		}
    
    		/* Error on this fragment: respond to client with an error. */
    
    Wei Liu's avatar
    Wei Liu committed
    		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    		/* Not the first error? Preceding frags already invalidated. */
    		if (err)
    			continue;
    
    		/* First error: invalidate header and preceding fragments. */
    		pending_idx = *((u16 *)skb->data);
    
    Wei Liu's avatar
    Wei Liu committed
    		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		for (j = start; j < i; j++) {
    
    			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_idx_release(vif, pending_idx,
    					   XEN_NETIF_RSP_OKAY);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		}
    
    		/* Remember the error: invalidate all subsequent fragments. */
    		err = newerr;
    	}
    
    	*gopp = gop + 1;
    	return err;
    }
    
    
    Wei Liu's avatar
    Wei Liu committed
    static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
    
    Ian Campbell's avatar
    Ian Campbell committed
    {
    	struct skb_shared_info *shinfo = skb_shinfo(skb);
    	int nr_frags = shinfo->nr_frags;
    	int i;
    
    	for (i = 0; i < nr_frags; i++) {
    		skb_frag_t *frag = shinfo->frags + i;
    		struct xen_netif_tx_request *txp;
    
    		struct page *page;
    		u16 pending_idx;
    
    		pending_idx = frag_get_pending_idx(frag);
    
    		txp = &vif->pending_tx_info[pending_idx].req;
    		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
    
    		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		skb->len += txp->size;
    		skb->data_len += txp->size;
    		skb->truesize += txp->size;
    
    
    Wei Liu's avatar
    Wei Liu committed
    		/* Take an extra reference to offset xenvif_idx_release */
    
    		get_page(vif->mmap_pages[pending_idx]);
    
    Wei Liu's avatar
    Wei Liu committed
    		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
    
    Wei Liu's avatar
    Wei Liu committed
    static int xenvif_get_extras(struct xenvif *vif,
    
    Ian Campbell's avatar
    Ian Campbell committed
    				struct xen_netif_extra_info *extras,
    				int work_to_do)
    {
    	struct xen_netif_extra_info extra;
    	RING_IDX cons = vif->tx.req_cons;
    
    	do {
    		if (unlikely(work_to_do-- <= 0)) {
    
    			netdev_err(vif->dev, "Missing extra info\n");
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_fatal_tx_err(vif);
    
    Ian Campbell's avatar
    Ian Campbell committed
    			return -EBADR;
    		}
    
    		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
    		       sizeof(extra));
    		if (unlikely(!extra.type ||
    			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
    			vif->tx.req_cons = ++cons;
    
    Ian Campbell's avatar
    Ian Campbell committed
    				   "Invalid extra type: %d\n", extra.type);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_fatal_tx_err(vif);
    
    Ian Campbell's avatar
    Ian Campbell committed
    			return -EINVAL;
    		}
    
    		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
    		vif->tx.req_cons = ++cons;
    	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
    
    	return work_to_do;
    }
    
    
    Wei Liu's avatar
    Wei Liu committed
    static int xenvif_set_skb_gso(struct xenvif *vif,
    			      struct sk_buff *skb,
    			      struct xen_netif_extra_info *gso)
    
    Ian Campbell's avatar
    Ian Campbell committed
    {
    	if (!gso->u.gso.size) {
    
    		netdev_err(vif->dev, "GSO size must not be zero.\n");
    
    Wei Liu's avatar
    Wei Liu committed
    		xenvif_fatal_tx_err(vif);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		return -EINVAL;
    	}
    
    
    	switch (gso->u.gso.type) {
    	case XEN_NETIF_GSO_TYPE_TCPV4:
    		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
    		break;
    	case XEN_NETIF_GSO_TYPE_TCPV6:
    		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
    		break;
    	default:
    
    		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
    
    Wei Liu's avatar
    Wei Liu committed
    		xenvif_fatal_tx_err(vif);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		return -EINVAL;
    	}
    
    	skb_shinfo(skb)->gso_size = gso->u.gso.size;
    
    	/* Header must be checked, and gso_segs computed. */
    	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
    	skb_shinfo(skb)->gso_segs = 0;
    
    	return 0;
    }
    
    
    static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
    				  unsigned int max)
    
    	if (skb_headlen(skb) >= len)
    		return 0;
    
    	/* If we need to pullup then pullup to the max, so we
    	 * won't need to do it again.
    	 */
    	if (max > skb->len)
    		max = skb->len;
    
    	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
    		return -ENOMEM;
    
    	if (skb_headlen(skb) < len)
    		return -EPROTO;
    
    	return 0;
    
    /* This value should be large enough to cover a tagged ethernet header plus
     * maximally sized IP and TCP or UDP headers.
     */
    #define MAX_IP_HDR_LEN 128
    
    
    static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
    			     int recalculate_partial_csum)
    
    	bool fragment;
    	int err;
    
    	fragment = false;
    
    	err = maybe_pull_tail(skb,
    			      sizeof(struct iphdr),
    			      MAX_IP_HDR_LEN);
    	if (err < 0)
    		goto out;
    
    	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
    		fragment = true;
    
    	switch (ip_hdr(skb)->protocol) {
    
    Ian Campbell's avatar
    Ian Campbell committed
    	case IPPROTO_TCP:
    
    		err = maybe_pull_tail(skb,
    				      off + sizeof(struct tcphdr),
    				      MAX_IP_HDR_LEN);
    		if (err < 0)
    			goto out;
    
    
    		if (!skb_partial_csum_set(skb, off,
    
    					  offsetof(struct tcphdr, check)))
    			goto out;
    
    		if (recalculate_partial_csum)
    
    			tcp_hdr(skb)->check =
    				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
    						   ip_hdr(skb)->daddr,
    						   skb->len - off,
    						   IPPROTO_TCP, 0);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		break;
    	case IPPROTO_UDP:
    
    		err = maybe_pull_tail(skb,
    				      off + sizeof(struct udphdr),
    				      MAX_IP_HDR_LEN);
    		if (err < 0)
    			goto out;
    
    
    		if (!skb_partial_csum_set(skb, off,
    
    					  offsetof(struct udphdr, check)))
    			goto out;
    
    		if (recalculate_partial_csum)
    
    			udp_hdr(skb)->check =
    				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
    						   ip_hdr(skb)->daddr,
    						   skb->len - off,
    						   IPPROTO_UDP, 0);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		break;
    	default:
    		goto out;
    	}
    
    	err = 0;
    
    out:
    	return err;
    }
    
    
    /* This value should be large enough to cover a tagged ethernet header plus
     * an IPv6 header, all options, and a maximal TCP or UDP header.
     */
    #define MAX_IPV6_HDR_LEN 256
    
    #define OPT_HDR(type, skb, off) \
    	(type *)(skb_network_header(skb) + (off))
    
    
    static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
    			       int recalculate_partial_csum)
    {
    
    	done = false;
    
    	off = sizeof(struct ipv6hdr);
    
    
    	err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
    	if (err < 0)
    		goto out;
    
    	nexthdr = ipv6_hdr(skb)->nexthdr;
    
    	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
    	while (off <= len && !done) {
    
    		switch (nexthdr) {
    		case IPPROTO_DSTOPTS:
    		case IPPROTO_HOPOPTS:
    		case IPPROTO_ROUTING: {
    
    			struct ipv6_opt_hdr *hp;
    
    			err = maybe_pull_tail(skb,
    					      off +
    					      sizeof(struct ipv6_opt_hdr),
    					      MAX_IPV6_HDR_LEN);
    			if (err < 0)
    				goto out;
    
    			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
    
    			nexthdr = hp->nexthdr;
    			off += ipv6_optlen(hp);
    			break;
    		}
    		case IPPROTO_AH: {
    
    			struct ip_auth_hdr *hp;
    
    			err = maybe_pull_tail(skb,
    					      off +
    					      sizeof(struct ip_auth_hdr),
    					      MAX_IPV6_HDR_LEN);
    			if (err < 0)
    				goto out;
    
    			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
    			nexthdr = hp->nexthdr;
    			off += ipv6_authlen(hp);
    			break;
    		}
    		case IPPROTO_FRAGMENT: {
    			struct frag_hdr *hp;
    
    			err = maybe_pull_tail(skb,
    					      off +
    					      sizeof(struct frag_hdr),
    					      MAX_IPV6_HDR_LEN);
    			if (err < 0)
    				goto out;
    
    			hp = OPT_HDR(struct frag_hdr, skb, off);
    
    			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
    				fragment = true;
    
    			off += sizeof(struct frag_hdr);
    
    	if (!done || fragment)
    
    		goto out;
    
    	switch (nexthdr) {
    	case IPPROTO_TCP:
    
    		err = maybe_pull_tail(skb,
    				      off + sizeof(struct tcphdr),
    				      MAX_IPV6_HDR_LEN);
    		if (err < 0)
    			goto out;
    
    
    		if (!skb_partial_csum_set(skb, off,
    					  offsetof(struct tcphdr, check)))
    			goto out;
    
    
    		if (recalculate_partial_csum)
    
    			tcp_hdr(skb)->check =
    				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
    						 &ipv6_hdr(skb)->daddr,
    						 skb->len - off,
    						 IPPROTO_TCP, 0);
    
    		err = maybe_pull_tail(skb,
    				      off + sizeof(struct udphdr),
    				      MAX_IPV6_HDR_LEN);
    		if (err < 0)
    			goto out;
    
    
    		if (!skb_partial_csum_set(skb, off,
    					  offsetof(struct udphdr, check)))
    			goto out;
    
    
    		if (recalculate_partial_csum)
    
    			udp_hdr(skb)->check =
    				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
    						 &ipv6_hdr(skb)->daddr,
    						 skb->len - off,
    						 IPPROTO_UDP, 0);
    
    		break;
    	default:
    		goto out;
    	}
    
    	err = 0;
    
    out:
    	return err;
    }
    
    static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
    {
    	int err = -EPROTO;
    	int recalculate_partial_csum = 0;
    
    	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
    	 * peers can fail to set NETRXF_csum_blank when sending a GSO
    	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
    	 * recalculate the partial checksum.
    	 */
    	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
    		vif->rx_gso_checksum_fixup++;
    		skb->ip_summed = CHECKSUM_PARTIAL;
    		recalculate_partial_csum = 1;
    	}
    
    	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
    	if (skb->ip_summed != CHECKSUM_PARTIAL)
    		return 0;
    
    	if (skb->protocol == htons(ETH_P_IP))
    		err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
    	else if (skb->protocol == htons(ETH_P_IPV6))
    		err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
    
    	return err;
    }
    
    
    Ian Campbell's avatar
    Ian Campbell committed
    static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
    {
    
    	u64 now = get_jiffies_64();
    	u64 next_credit = vif->credit_window_start +
    
    Ian Campbell's avatar
    Ian Campbell committed
    		msecs_to_jiffies(vif->credit_usec / 1000);
    
    	/* Timer could already be pending in rare cases. */
    	if (timer_pending(&vif->credit_timeout))
    		return true;
    
    	/* Passed the point where we can replenish credit? */
    
    	if (time_after_eq64(now, next_credit)) {
    		vif->credit_window_start = now;
    
    Ian Campbell's avatar
    Ian Campbell committed
    		tx_add_credit(vif);
    	}
    
    	/* Still too big to send right now? Set a callback. */
    	if (size > vif->remaining_credit) {
    		vif->credit_timeout.data     =
    			(unsigned long)vif;
    		vif->credit_timeout.function =
    			tx_credit_callback;
    		mod_timer(&vif->credit_timeout,
    			  next_credit);
    
    		vif->credit_window_start = next_credit;
    
    static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
    
    	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
    
    Ian Campbell's avatar
    Ian Campbell committed
    	struct sk_buff *skb;
    	int ret;
    
    
    	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
    
    		< MAX_PENDING_REQS) &&
    	       (skb_queue_len(&vif->tx_queue) < budget)) {
    
    Ian Campbell's avatar
    Ian Campbell committed
    		struct xen_netif_tx_request txreq;
    
    		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
    
    Ian Campbell's avatar
    Ian Campbell committed
    		struct page *page;
    		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
    		u16 pending_idx;
    		RING_IDX idx;
    		int work_to_do;
    		unsigned int data_len;
    		pending_ring_idx_t index;
    
    
    		if (vif->tx.sring->req_prod - vif->tx.req_cons >
    		    XEN_NETIF_TX_RING_SIZE) {
    			netdev_err(vif->dev,
    				   "Impossible number of requests. "
    				   "req_prod %d, req_cons %d, size %ld\n",
    				   vif->tx.sring->req_prod, vif->tx.req_cons,
    				   XEN_NETIF_TX_RING_SIZE);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_fatal_tx_err(vif);
    
    		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
    
    		if (!work_to_do)
    			break;
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    		idx = vif->tx.req_cons;
    		rmb(); /* Ensure that we see the request before we copy it. */
    		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
    
    		/* Credit-based scheduling. */
    		if (txreq.size > vif->remaining_credit &&
    
    		    tx_credit_exceeded(vif, txreq.size))
    			break;
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    		vif->remaining_credit -= txreq.size;
    
    		work_to_do--;
    		vif->tx.req_cons = ++idx;
    
    		memset(extras, 0, sizeof(extras));
    		if (txreq.flags & XEN_NETTXF_extra_info) {
    
    Wei Liu's avatar
    Wei Liu committed
    			work_to_do = xenvif_get_extras(vif, extras,
    						       work_to_do);
    
    Ian Campbell's avatar
    Ian Campbell committed
    			idx = vif->tx.req_cons;
    
    			if (unlikely(work_to_do < 0))
    
    Wei Liu's avatar
    Wei Liu committed
    		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		idx += ret;
    
    		if (unlikely(txreq.size < ETH_HLEN)) {
    			netdev_dbg(vif->dev,
    				   "Bad packet size: %d\n", txreq.size);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_tx_err(vif, &txreq, idx);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		}
    
    		/* No crossing a page as the payload mustn't fragment. */
    		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
    
    Ian Campbell's avatar
    Ian Campbell committed
    				   "txreq.offset: %x, size: %u, end: %lu\n",
    				   txreq.offset, txreq.size,
    				   (txreq.offset&~PAGE_MASK) + txreq.size);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_fatal_tx_err(vif);
    
    		index = pending_index(vif->pending_cons);
    		pending_idx = vif->pending_ring[index];
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    		data_len = (txreq.size > PKT_PROT_LEN &&
    
    			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
    
    Ian Campbell's avatar
    Ian Campbell committed
    			PKT_PROT_LEN : txreq.size;
    
    		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
    				GFP_ATOMIC | __GFP_NOWARN);
    		if (unlikely(skb == NULL)) {
    			netdev_dbg(vif->dev,
    				   "Can't allocate a skb in start_xmit.\n");
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_tx_err(vif, &txreq, idx);
    
    Ian Campbell's avatar
    Ian Campbell committed
    			break;
    		}
    
    		/* Packets passed to netif_rx() must have some headroom. */
    		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
    
    		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
    			struct xen_netif_extra_info *gso;
    			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
    
    
    Wei Liu's avatar
    Wei Liu committed
    			if (xenvif_set_skb_gso(vif, skb, gso)) {
    				/* Failure in xenvif_set_skb_gso is fatal. */
    
    Ian Campbell's avatar
    Ian Campbell committed
    				kfree_skb(skb);
    
    Ian Campbell's avatar
    Ian Campbell committed
    			}
    		}
    
    		/* XXX could copy straight to head */
    
    Wei Liu's avatar
    Wei Liu committed
    		page = xenvif_alloc_page(vif, pending_idx);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		if (!page) {
    			kfree_skb(skb);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_tx_err(vif, &txreq, idx);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		}
    
    		gop->source.u.ref = txreq.gref;
    		gop->source.domid = vif->domid;
    		gop->source.offset = txreq.offset;
    
    		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
    		gop->dest.domid = DOMID_SELF;
    		gop->dest.offset = txreq.offset;
    
    		gop->len = txreq.size;
    		gop->flags = GNTCOPY_source_gref;
    
    		gop++;
    
    
    		memcpy(&vif->pending_tx_info[pending_idx].req,
    
    Ian Campbell's avatar
    Ian Campbell committed
    		       &txreq, sizeof(txreq));
    
    		vif->pending_tx_info[pending_idx].head = index;
    
    Ian Campbell's avatar
    Ian Campbell committed
    		*((u16 *)skb->data) = pending_idx;
    
    		__skb_put(skb, data_len);
    
    		skb_shinfo(skb)->nr_frags = ret;
    		if (data_len < txreq.size) {
    			skb_shinfo(skb)->nr_frags++;
    
    			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
    					     pending_idx);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		} else {
    
    			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
    					     INVALID_PENDING_IDX);
    
    		vif->pending_cons++;
    
    Wei Liu's avatar
    Wei Liu committed
    		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		if (request_gop == NULL) {
    			kfree_skb(skb);
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_tx_err(vif, &txreq, idx);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		}
    		gop = request_gop;
    
    
    		__skb_queue_tail(&vif->tx_queue, skb);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		vif->tx.req_cons = idx;
    
    
    		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
    
    	return gop - vif->tx_copy_ops;
    
    static int xenvif_tx_submit(struct xenvif *vif)
    
    	struct gnttab_copy *gop = vif->tx_copy_ops;
    
    Ian Campbell's avatar
    Ian Campbell committed
    	struct sk_buff *skb;
    
    	int work_done = 0;
    
    	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
    
    Ian Campbell's avatar
    Ian Campbell committed
    		struct xen_netif_tx_request *txp;
    		u16 pending_idx;
    		unsigned data_len;
    
    		pending_idx = *((u16 *)skb->data);
    
    		txp = &vif->pending_tx_info[pending_idx].req;
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    		/* Check the remap error code. */
    
    Wei Liu's avatar
    Wei Liu committed
    		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
    
    Ian Campbell's avatar
    Ian Campbell committed
    			netdev_dbg(vif->dev, "netback grant failed.\n");
    			skb_shinfo(skb)->nr_frags = 0;
    			kfree_skb(skb);
    			continue;
    		}
    
    		data_len = skb->len;
    		memcpy(skb->data,
    
    		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
    
    Ian Campbell's avatar
    Ian Campbell committed
    		       data_len);
    		if (data_len < txp->size) {
    			/* Append the packet payload as a fragment. */
    			txp->offset += data_len;
    			txp->size -= data_len;
    		} else {
    			/* Schedule a response immediately. */
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_idx_release(vif, pending_idx,
    					   XEN_NETIF_RSP_OKAY);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		}
    
    		if (txp->flags & XEN_NETTXF_csum_blank)
    			skb->ip_summed = CHECKSUM_PARTIAL;
    		else if (txp->flags & XEN_NETTXF_data_validated)
    			skb->ip_summed = CHECKSUM_UNNECESSARY;
    
    
    Wei Liu's avatar
    Wei Liu committed
    		xenvif_fill_frags(vif, skb);
    
    		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
    
    Ian Campbell's avatar
    Ian Campbell committed
    			int target = min_t(int, skb->len, PKT_PROT_LEN);
    			__pskb_pull_tail(skb, target - skb_headlen(skb));
    		}
    
    		skb->dev      = vif->dev;
    		skb->protocol = eth_type_trans(skb, skb->dev);
    
    		skb_reset_network_header(skb);
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    		if (checksum_setup(vif, skb)) {
    			netdev_dbg(vif->dev,
    				   "Can't setup checksum in net_tx_action\n");
    			kfree_skb(skb);
    			continue;
    		}
    
    
    		skb_probe_transport_header(skb, 0);
    
    Ian Campbell's avatar
    Ian Campbell committed
    		vif->dev->stats.rx_bytes += skb->len;
    		vif->dev->stats.rx_packets++;
    
    
    		work_done++;
    
    		netif_receive_skb(skb);
    
    Ian Campbell's avatar
    Ian Campbell committed
    }
    
    /* Called after netfront has transmitted */
    
    Wei Liu's avatar
    Wei Liu committed
    int xenvif_tx_action(struct xenvif *vif, int budget)
    
    Ian Campbell's avatar
    Ian Campbell committed
    {
    	unsigned nr_gops;
    
    	if (unlikely(!tx_work_todo(vif)))
    		return 0;
    
    
    	nr_gops = xenvif_tx_build_gops(vif, budget);
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    	if (nr_gops == 0)
    
    		return 0;
    
    	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
    
    	work_done = xenvif_tx_submit(vif);
    
    	return work_done;
    
    Wei Liu's avatar
    Wei Liu committed
    static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
    			       u8 status)
    
    Ian Campbell's avatar
    Ian Campbell committed
    {
    	struct pending_tx_info *pending_tx_info;
    
    	pending_ring_idx_t head;
    	u16 peek; /* peek into next tx request */
    
    
    	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
    
    Ian Campbell's avatar
    Ian Campbell committed
    
    	/* Already complete? */
    
    	if (vif->mmap_pages[pending_idx] == NULL)
    
    	pending_tx_info = &vif->pending_tx_info[pending_idx];
    
    	head = pending_tx_info->head;
    
    	BUG_ON(!pending_tx_is_head(vif, head));
    	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
    
    	do {
    		pending_ring_idx_t index;
    		pending_ring_idx_t idx = pending_index(head);
    
    		u16 info_idx = vif->pending_ring[idx];
    
    		pending_tx_info = &vif->pending_tx_info[info_idx];
    
    		make_tx_response(vif, &pending_tx_info->req, status);
    
    		/* Setting any number other than
    		 * INVALID_PENDING_RING_IDX indicates this slot is
    		 * starting a new packet / ending a previous packet.
    		 */
    		pending_tx_info->head = 0;
    
    
    		index = pending_index(vif->pending_prod++);
    		vif->pending_ring[index] = vif->pending_ring[info_idx];
    
    		peek = vif->pending_ring[pending_index(++head)];
    
    	} while (!pending_tx_is_head(vif, peek));
    
    	put_page(vif->mmap_pages[pending_idx]);
    	vif->mmap_pages[pending_idx] = NULL;
    
    Ian Campbell's avatar
    Ian Campbell committed
    static void make_tx_response(struct xenvif *vif,
    			     struct xen_netif_tx_request *txp,
    			     s8       st)
    {
    	RING_IDX i = vif->tx.rsp_prod_pvt;
    	struct xen_netif_tx_response *resp;
    	int notify;
    
    	resp = RING_GET_RESPONSE(&vif->tx, i);
    	resp->id     = txp->id;
    	resp->status = st;
    
    	if (txp->flags & XEN_NETTXF_extra_info)
    		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
    
    	vif->tx.rsp_prod_pvt = ++i;
    	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
    	if (notify)
    
    		notify_remote_via_irq(vif->tx_irq);
    
    Ian Campbell's avatar
    Ian Campbell committed
    }
    
    static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
    					     u16      id,
    					     s8       st,
    					     u16      offset,
    					     u16      size,
    					     u16      flags)
    {
    	RING_IDX i = vif->rx.rsp_prod_pvt;
    	struct xen_netif_rx_response *resp;
    
    	resp = RING_GET_RESPONSE(&vif->rx, i);
    	resp->offset     = offset;
    	resp->flags      = flags;
    	resp->id         = id;
    	resp->status     = (s16)size;
    	if (st < 0)
    		resp->status = (s16)st;
    
    	vif->rx.rsp_prod_pvt = ++i;
    
    	return resp;
    }
    
    
    static inline int rx_work_todo(struct xenvif *vif)
    
    	return !skb_queue_empty(&vif->rx_queue);
    
    static inline int tx_work_todo(struct xenvif *vif)
    
    	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
    	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
    	     < MAX_PENDING_REQS))
    
    Wei Liu's avatar
    Wei Liu committed
    void xenvif_unmap_frontend_rings(struct xenvif *vif)
    
    	if (vif->tx.sring)
    		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
    					vif->tx.sring);
    	if (vif->rx.sring)
    		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
    					vif->rx.sring);
    
    Wei Liu's avatar
    Wei Liu committed
    int xenvif_map_frontend_rings(struct xenvif *vif,
    			      grant_ref_t tx_ring_ref,
    			      grant_ref_t rx_ring_ref)
    
    Ian Campbell's avatar
    Ian Campbell committed
    	struct xen_netif_tx_sring *txs;
    	struct xen_netif_rx_sring *rxs;
    
    	int err = -ENOMEM;
    
    
    	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
    				     tx_ring_ref, &addr);
    	if (err)
    
    Ian Campbell's avatar
    Ian Campbell committed
    		goto err;
    
    
    	txs = (struct xen_netif_tx_sring *)addr;
    
    Ian Campbell's avatar
    Ian Campbell committed
    	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
    
    
    	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
    				     rx_ring_ref, &addr);
    	if (err)
    
    Ian Campbell's avatar
    Ian Campbell committed
    		goto err;
    
    
    	rxs = (struct xen_netif_rx_sring *)addr;
    
    Ian Campbell's avatar
    Ian Campbell committed
    	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
    
    
    Ian Campbell's avatar
    Ian Campbell committed
    	return 0;
    
    err:
    
    Wei Liu's avatar
    Wei Liu committed
    	xenvif_unmap_frontend_rings(vif);
    
    Wei Liu's avatar
    Wei Liu committed
    int xenvif_kthread(void *data)
    
    {
    	struct xenvif *vif = data;
    
    	while (!kthread_should_stop()) {
    		wait_event_interruptible(vif->wq,
    					 rx_work_todo(vif) ||
    					 kthread_should_stop());
    		if (kthread_should_stop())
    			break;
    
    		if (rx_work_todo(vif))
    
    Wei Liu's avatar
    Wei Liu committed
    			xenvif_rx_action(vif);
    
    Ian Campbell's avatar
    Ian Campbell committed
    static int __init netback_init(void)
    {
    	int rc = 0;
    
    
    	if (!xen_domain())
    
    Ian Campbell's avatar
    Ian Campbell committed
    		return -ENODEV;
    
    
    	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
    
    		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
    			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
    
    		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
    
    Ian Campbell's avatar
    Ian Campbell committed
    	rc = xenvif_xenbus_init();
    	if (rc)
    		goto failed_init;
    
    	return 0;
    
    failed_init:
    	return rc;
    }
    
    module_init(netback_init);
    
    
/* Module exit: undo netback_init()'s xenvif_xenbus_init(). */
static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
    
    
    Ian Campbell's avatar
    Ian Campbell committed
    MODULE_LICENSE("Dual BSD/GPL");
    
    MODULE_ALIAS("xen-backend:vif");