/*******************************************************************************
    
      Intel 10 Gigabit PCI Express Linux driver
    
      Copyright(c) 1999 - 2010 Intel Corporation.
    
    
      This program is free software; you can redistribute it and/or modify it
      under the terms and conditions of the GNU General Public License,
      version 2, as published by the Free Software Foundation.
    
      This program is distributed in the hope it will be useful, but WITHOUT
      ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
      more details.
    
      You should have received a copy of the GNU General Public License along with
      this program; if not, write to the Free Software Foundation, Inc.,
      51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    
      The full GNU General Public License is included in this distribution in
      the file called "COPYING".
    
      Contact Information:
      e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
      Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    
    *******************************************************************************/
    
    #include <linux/types.h>
    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/netdevice.h>
    #include <linux/vmalloc.h>
    #include <linux/string.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/tcp.h>
    
    #include <linux/pkt_sched.h>
    
    #include <net/checksum.h>
    #include <net/ip6_checksum.h>
    #include <linux/ethtool.h>
    #include <linux/if_vlan.h>
    
    #include <scsi/fc/fc_fcoe.h>
    
    
    #include "ixgbe.h"
    #include "ixgbe_common.h"
    
    #include "ixgbe_dcb_82599.h"
    
    #include "ixgbe_sriov.h"
    
    
    char ixgbe_driver_name[] = "ixgbe";
    
    static const char ixgbe_driver_string[] =
    
    			      "Intel(R) 10 Gigabit PCI Express Network Driver";
    
    #define DRV_VERSION "2.0.84-k2"
    
    const char ixgbe_driver_version[] = DRV_VERSION;
    
    static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
    
    
    static const struct ixgbe_info *ixgbe_info_tbl[] = {
    
    	[board_82598] = &ixgbe_82598_info,
    
    	[board_82599] = &ixgbe_82599_info,
    
    };
    
    /* ixgbe_pci_tbl - PCI Device ID Table
     *
     * Wildcard entries (PCI_ANY_ID) should come last
     * Last entry must be all 0s
     *
     * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
     *   Class, Class Mask, private data (not used) }
     */
    
    static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
    
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
    
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
    
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
    	 board_82598 },
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
    	 board_82598 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
    	 board_82599 },
    
    	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
    	 board_82599 },
    
    
    	/* required last entry */
    	{0, }
    };
    MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
    
    
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
    static struct notifier_block dca_notifier = {
    	.notifier_call = ixgbe_notify_dca,
    	.next          = NULL,
    	.priority      = 0
    };
    #endif
    
    
    #ifdef CONFIG_PCI_IOV
    static unsigned int max_vfs;
    module_param(max_vfs, uint, 0);
    
    MODULE_PARM_DESC(max_vfs,
    		 "Maximum number of virtual functions to allocate per physical function");
    
    #endif /* CONFIG_PCI_IOV */
    
    
    MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
    MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
    MODULE_LICENSE("GPL");
    MODULE_VERSION(DRV_VERSION);
    
    #define DEFAULT_DEBUG_LEVEL_SHIFT 3
    
    
    static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
    {
    	struct ixgbe_hw *hw = &adapter->hw;
    	u32 gcr;
    	u32 gpie;
    	u32 vmdctl;
    
    #ifdef CONFIG_PCI_IOV
    	/* disable iov and allow time for transactions to clear */
    	pci_disable_sriov(adapter->pdev);
    #endif
    
    	/* turn off device IOV mode */
    	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
    	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
    	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
    	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
    	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
    	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    
    	/* set default pool back to 0 */
    	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
    	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
    	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
    
    	/* take a breather then clean up driver data */
    	msleep(100);
    
    	adapter->vfinfo = NULL;
    
    	adapter->num_vfs = 0;
    	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
    }
    
    
    struct ixgbe_reg_info {
    	u32 ofs;
    	char *name;
    };
    
    static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
    
    	/* General Registers */
    	{IXGBE_CTRL, "CTRL"},
    	{IXGBE_STATUS, "STATUS"},
    	{IXGBE_CTRL_EXT, "CTRL_EXT"},
    
    	/* Interrupt Registers */
    	{IXGBE_EICR, "EICR"},
    
    	/* RX Registers */
    	{IXGBE_SRRCTL(0), "SRRCTL"},
    	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
    	{IXGBE_RDLEN(0), "RDLEN"},
    	{IXGBE_RDH(0), "RDH"},
    	{IXGBE_RDT(0), "RDT"},
    	{IXGBE_RXDCTL(0), "RXDCTL"},
    	{IXGBE_RDBAL(0), "RDBAL"},
    	{IXGBE_RDBAH(0), "RDBAH"},
    
    	/* TX Registers */
    	{IXGBE_TDBAL(0), "TDBAL"},
    	{IXGBE_TDBAH(0), "TDBAH"},
    	{IXGBE_TDLEN(0), "TDLEN"},
    	{IXGBE_TDH(0), "TDH"},
    	{IXGBE_TDT(0), "TDT"},
    	{IXGBE_TXDCTL(0), "TXDCTL"},
    
    	/* List Terminator */
    	{}
    };
    
    
    /*
     * ixgbe_regdump - register printout routine
     */
    static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
    {
    	int i = 0, j = 0;
    	char rname[16];
    	u32 regs[64];
    
    	switch (reginfo->ofs) {
    	case IXGBE_SRRCTL(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
    		break;
    	case IXGBE_DCA_RXCTRL(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
    		break;
    	case IXGBE_RDLEN(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
    		break;
    	case IXGBE_RDH(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
    		break;
    	case IXGBE_RDT(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
    		break;
    	case IXGBE_RXDCTL(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
    		break;
    	case IXGBE_RDBAL(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
    		break;
    	case IXGBE_RDBAH(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
    		break;
    	case IXGBE_TDBAL(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
    		break;
    	case IXGBE_TDBAH(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
    		break;
    	case IXGBE_TDLEN(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
    		break;
    	case IXGBE_TDH(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
    		break;
    	case IXGBE_TDT(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
    		break;
    	case IXGBE_TXDCTL(0):
    		for (i = 0; i < 64; i++)
    			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
    		break;
    	default:
    
    		pr_info("%-15s %08x\n", reginfo->name,
    
    			IXGBE_READ_REG(hw, reginfo->ofs));
    		return;
    	}
    
    	for (i = 0; i < 8; i++) {
    		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
    
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
    
    	}
    
    }
    
    /*
     * ixgbe_dump - Print registers, tx-rings and rx-rings
     */
    static void ixgbe_dump(struct ixgbe_adapter *adapter)
    {
    	struct net_device *netdev = adapter->netdev;
    	struct ixgbe_hw *hw = &adapter->hw;
    	struct ixgbe_reg_info *reginfo;
    	int n = 0;
    	struct ixgbe_ring *tx_ring;
    	struct ixgbe_tx_buffer *tx_buffer_info;
    	union ixgbe_adv_tx_desc *tx_desc;
    	struct my_u0 { u64 a; u64 b; } *u0;
    	struct ixgbe_ring *rx_ring;
    	union ixgbe_adv_rx_desc *rx_desc;
    	struct ixgbe_rx_buffer *rx_buffer_info;
    	u32 staterr;
    	int i = 0;
    
    	if (!netif_msg_hw(adapter))
    		return;
    
    	/* Print netdevice Info */
    	if (netdev) {
    		dev_info(&adapter->pdev->dev, "Net device Info\n");
    
    		pr_info("Device Name     state            "
    
    		pr_info("%-15s %016lX %016lX %016lX\n",
    			netdev->name,
    			netdev->state,
    			netdev->trans_start,
    			netdev->last_rx);
    
    	}
    
    	/* Print Registers */
    	dev_info(&adapter->pdev->dev, "Register Dump\n");
    
    	pr_info(" Register Name   Value\n");
    
    	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
    	     reginfo->name; reginfo++) {
    		ixgbe_regdump(hw, reginfo);
    	}
    
    	/* Print TX Ring Summary */
    	if (!netdev || !netif_running(netdev))
    		goto exit;
    
    	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
    
    	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
    
    	for (n = 0; n < adapter->num_tx_queues; n++) {
    		tx_ring = adapter->tx_ring[n];
    		tx_buffer_info =
    			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
    
    		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
    
    			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
    			   (u64)tx_buffer_info->dma,
    			   tx_buffer_info->length,
    			   tx_buffer_info->next_to_watch,
    			   (u64)tx_buffer_info->time_stamp);
    	}
    
    	/* Print TX Rings */
    	if (!netif_msg_tx_done(adapter))
    		goto rx_ring_summary;
    
    	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
    
    	/* Transmit Descriptor Formats
    	 *
    	 * Advanced Transmit Descriptor
    	 *   +--------------------------------------------------------------+
    	 * 0 |         Buffer Address [63:0]                                |
    	 *   +--------------------------------------------------------------+
    	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
    	 *   +--------------------------------------------------------------+
    	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
    	 */
    
    	for (n = 0; n < adapter->num_tx_queues; n++) {
    		tx_ring = adapter->tx_ring[n];
    
    		pr_info("------------------------------------\n");
    		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
    		pr_info("------------------------------------\n");
    		pr_info("T [desc]     [address 63:0  ] "
    
    			"[PlPOIdStDDt Ln] [bi->dma       ] "
    			"leng  ntw timestamp        bi->skb\n");
    
    		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
    
    			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
    
    			tx_buffer_info = &tx_ring->tx_buffer_info[i];
    			u0 = (struct my_u0 *)tx_desc;
    
    			pr_info("T [0x%03X]    %016llX %016llX %016llX"
    
    				" %04X  %3X %016llX %p", i,
    				le64_to_cpu(u0->a),
    				le64_to_cpu(u0->b),
    				(u64)tx_buffer_info->dma,
    				tx_buffer_info->length,
    				tx_buffer_info->next_to_watch,
    				(u64)tx_buffer_info->time_stamp,
    				tx_buffer_info->skb);
    			if (i == tx_ring->next_to_use &&
    				i == tx_ring->next_to_clean)
    
    				pr_cont(" NTC/U\n");
    
    			else if (i == tx_ring->next_to_use)
    
    				pr_cont(" NTU\n");
    
    			else if (i == tx_ring->next_to_clean)
    
    				pr_cont(" NTC\n");
    
    
    			if (netif_msg_pktdata(adapter) &&
    				tx_buffer_info->dma != 0)
    				print_hex_dump(KERN_INFO, "",
    					DUMP_PREFIX_ADDRESS, 16, 1,
    					phys_to_virt(tx_buffer_info->dma),
    					tx_buffer_info->length, true);
    		}
    	}
    
    	/* Print RX Rings Summary */
    rx_ring_summary:
    	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
    
    	pr_info("Queue [NTU] [NTC]\n");
    
    	for (n = 0; n < adapter->num_rx_queues; n++) {
    		rx_ring = adapter->rx_ring[n];
    
    		pr_info("%5d %5X %5X\n",
    			n, rx_ring->next_to_use, rx_ring->next_to_clean);
    
    	}
    
    	/* Print RX Rings */
    	if (!netif_msg_rx_status(adapter))
    		goto exit;
    
    	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
    
    	/* Advanced Receive Descriptor (Read) Format
    	 *    63                                           1        0
    	 *    +-----------------------------------------------------+
    	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
    	 *    +----------------------------------------------+------+
    	 *  8 |       Header Buffer Address [63:1]           |  DD  |
    	 *    +-----------------------------------------------------+
    	 *
    	 *
    	 * Advanced Receive Descriptor (Write-Back) Format
    	 *
    	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
    	 *   +------------------------------------------------------+
    	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
    	 *   | Checksum   Ident  |   |           |    | Type | Type |
    	 *   +------------------------------------------------------+
    	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
    	 *   +------------------------------------------------------+
    	 *   63       48 47    32 31            20 19               0
    	 */
    	for (n = 0; n < adapter->num_rx_queues; n++) {
    		rx_ring = adapter->rx_ring[n];
    
    		pr_info("------------------------------------\n");
    		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
    		pr_info("------------------------------------\n");
    		pr_info("R  [desc]      [ PktBuf     A0] "
    
    			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
    			"<-- Adv Rx Read format\n");
    
    		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
    
    			"[vl er S cks ln] ---------------- [bi->skb] "
    			"<-- Adv Rx Write-Back format\n");
    
    		for (i = 0; i < rx_ring->count; i++) {
    			rx_buffer_info = &rx_ring->rx_buffer_info[i];
    
    			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
    
    			u0 = (struct my_u0 *)rx_desc;
    			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    			if (staterr & IXGBE_RXD_STAT_DD) {
    				/* Descriptor Done */
    
    				pr_info("RWB[0x%03X]     %016llX "
    
    					"%016llX ---------------- %p", i,
    					le64_to_cpu(u0->a),
    					le64_to_cpu(u0->b),
    					rx_buffer_info->skb);
    			} else {
    
    				pr_info("R  [0x%03X]     %016llX "
    
    					"%016llX %016llX %p", i,
    					le64_to_cpu(u0->a),
    					le64_to_cpu(u0->b),
    					(u64)rx_buffer_info->dma,
    					rx_buffer_info->skb);
    
    				if (netif_msg_pktdata(adapter)) {
    					print_hex_dump(KERN_INFO, "",
    					   DUMP_PREFIX_ADDRESS, 16, 1,
    					   phys_to_virt(rx_buffer_info->dma),
    					   rx_ring->rx_buf_len, true);
    
    					if (rx_ring->rx_buf_len
    						< IXGBE_RXBUFFER_2048)
    						print_hex_dump(KERN_INFO, "",
    						  DUMP_PREFIX_ADDRESS, 16, 1,
    						  phys_to_virt(
    						    rx_buffer_info->page_dma +
    						    rx_buffer_info->page_offset
    						  ),
    						  PAGE_SIZE/2, true);
    				}
    			}
    
    			if (i == rx_ring->next_to_use)
    
    				pr_cont(" NTU\n");
    
    			else if (i == rx_ring->next_to_clean)
    
    				pr_cont(" NTC\n");
    
    static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
    {
    	u32 ctrl_ext;
    
    	/* Let firmware take over control of h/w */
    	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
    
    			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
    
    }
    
    static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
    {
    	u32 ctrl_ext;
    
    	/* Let firmware know the driver has taken over */
    	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
    
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

    /*
     * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
     * @adapter: pointer to adapter struct
     * @direction: 0 for Rx, 1 for Tx, -1 for other causes
     * @queue: queue to map the corresponding interrupt to
     * @msix_vector: the vector to map to the corresponding queue
     *
     */
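/*
 * Example of the 82598 mapping below: for an Rx cause (direction 0) on
 * queue 5, index = ((0 * 64 + 5) >> 2) & 0x1F = 1 and the vector value is
 * written into byte lane (5 & 0x3) = 1 of IVAR(1), i.e. bits 15:8.
 */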
    static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
    
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
    	switch (hw->mac.type) {
    	case ixgbe_mac_82598EB:
    		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
    		if (direction == -1)
    			direction = 0;
    		index = (((direction * 64) + queue) >> 2) & 0x1F;
    		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
    		ivar &= ~(0xFF << (8 * (queue & 0x3)));
    		ivar |= (msix_vector << (8 * (queue & 0x3)));
    		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
    		break;
    	case ixgbe_mac_82599EB:
    		if (direction == -1) {
    			/* other causes */
    			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
    			index = ((queue & 1) * 8);
    			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
    			ivar &= ~(0xFF << index);
    			ivar |= (msix_vector << index);
    			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
    			break;
    		} else {
    			/* tx or rx causes */
    			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
    			index = ((16 * (queue & 1)) + (8 * direction));
    			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
    			ivar &= ~(0xFF << index);
    			ivar |= (msix_vector << index);
    			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
    			break;
    		}
    	default:
    		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
    	case ixgbe_mac_82598EB:
    
    		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
    		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
    
    		break;
    	case ixgbe_mac_82599EB:
    
    		mask = (qmask & 0xFFFFFFFF);
    		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
    		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_buffer_info)
{
    	if (tx_buffer_info->dma) {
    		if (tx_buffer_info->mapped_as_page)
    
    			dma_unmap_page(tx_ring->dev,
    
    				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
    		tx_buffer_info->dma = 0;
    	}
    
    	if (tx_buffer_info->skb) {
    		dev_kfree_skb_any(tx_buffer_info->skb);
    		tx_buffer_info->skb = NULL;
    	}
    
    	tx_buffer_info->time_stamp = 0;
    
    	/* tx_buffer_info must be completely set up in the transmit path */
    }
    
    
/**
 * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
 * @adapter: driver private struct
 * @reg_idx: reg idx of queue to query (0-127)
 *
 * Helper function to determine the traffic class index for a particular
 * register index.
 *
 * Returns : a tc index for use in range 0-7, or 0-3
 */
u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
    	int tc = -1;
    	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
    
    	/* if DCB is not enabled the queues have no TC */
    	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
    		return tc;
    
    	/* check valid range */
    	if (reg_idx >= adapter->hw.mac.max_tx_queues)
    		return tc;
    
    	switch (adapter->hw.mac.type) {
    	case ixgbe_mac_82598EB:
    		tc = reg_idx >> 2;
    		break;
    	default:
		if (dcb_i != 4 && dcb_i != 8)
			break;

    		/* if VMDq is enabled the lowest order bits determine TC */
    		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
    				      IXGBE_FLAG_VMDQ_ENABLED)) {
    			tc = reg_idx & (dcb_i - 1);
    			break;
    		}
    
    		/*
    		 * Convert the reg_idx into the correct TC. This bitmask
    		 * targets the last full 32 ring traffic class and assigns
    		 * it a value of 1. From there the rest of the rings are
    		 * based on shifting the mask further up to include the
		 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
		 * will only ever be 8 or 4 and that reg_idx will never
		 * be greater than 128. The code without the power of 2
    		 * optimizations would be:
    		 * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
    		 */
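		/*
		 * Worked example of the formula below with dcb_i = 8:
		 * reg_idx = 0 gives ((0 & 0x1F) + 0x20) * 8 = 256 shifted
		 * right by 9 - 0 = 9, i.e. TC 0; reg_idx = 96 gives
		 * (0 + 0x20) * 8 = 256 shifted right by 9 - 3 = 6, i.e. TC 4.
		 */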
    		tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
    		tc >>= 9 - (reg_idx >> 5);
    	}
    
    	return tc;
    }
    
    static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
    {
    	struct ixgbe_hw *hw = &adapter->hw;
    	struct ixgbe_hw_stats *hwstats = &adapter->stats;
    	u32 data = 0;
    	u32 xoff[8] = {0};
    	int i;
    
    	if ((hw->fc.current_mode == ixgbe_fc_full) ||
    	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
    		switch (hw->mac.type) {
    		case ixgbe_mac_82598EB:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
			break;
		default:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
    		}
    		hwstats->lxoffrxc += data;
    
    		/* refill credits (no tx hang) if we received xoff */
    		if (!data)
    			return;
    
    		for (i = 0; i < adapter->num_tx_queues; i++)
    			clear_bit(__IXGBE_HANG_CHECK_ARMED,
    				  &adapter->tx_ring[i]->state);
    		return;
    	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
    		return;
    
    	/* update stats for each tc, only valid with PFC enabled */
    	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
    		switch (hw->mac.type) {
    		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
    		hwstats->pxoffrxc[i] += xoff[i];
    	}
    
    	/* disarm tx queues that have received xoff frames */
    	for (i = 0; i < adapter->num_tx_queues; i++) {
    		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
    		u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
    
    		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
    	return ring->tx_stats.completed;
    }
    
    static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
    {
    	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
    
    	struct ixgbe_hw *hw = &adapter->hw;
    
    
    	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
    	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
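	/*
	 * Illustration of the wrap-around case handled below: with a
	 * 512-descriptor ring, head = 500 and tail = 10 means
	 * 10 + 512 - 500 = 22 descriptors are still pending.
	 */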
    
    	if (head != tail)
    		return (head < tail) ?
    			tail - head : (tail + ring->count - head);
    
    	return 0;
    }
    
    static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
    {
    	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
    	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
    	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
    	bool ret = false;
    
    
    	clear_check_for_tx_hang(tx_ring);
    
    
    	/*
    	 * Check for a hung queue, but be thorough. This verifies
    	 * that a transmit has been completed since the previous
    	 * check AND there is at least one packet pending. The
    	 * ARMED bit is set to indicate a potential hang. The
    	 * bit is cleared if a pause frame is received to remove
    	 * false hang detection due to PFC or 802.3x frames. By
    	 * requiring this to fail twice we avoid races with
    	 * pfc clearing the ARMED bit and conditions where we
    	 * run the check_tx_hang logic with a transmit completion
    	 * pending but without time to complete it yet.
    	 */
    	if ((tx_done_old == tx_done) && tx_pending) {
    		/* make sure it is true for two checks in a row */
    		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
    				       &tx_ring->state);
    	} else {
    		/* update completed stats and continue */
    		tx_ring->tx_stats.tx_done_old = tx_done;
    		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

    #define IXGBE_MAX_TXD_PWR       14
    #define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
    
    
    /* Tx Descriptors needed, worst case */
    #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
    			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
    #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
    
    	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
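/*
 * For example, TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) = (16384 >> 14) + 0 = 1
 * and, on 4K-page systems, TXD_USE_COUNT(PAGE_SIZE) = 1, so DESC_NEEDED
 * works out to roughly 1 + MAX_SKB_FRAGS + 1 descriptors in the worst case.
 */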
    
    static void ixgbe_tx_timeout(struct net_device *netdev);
    
    
    /**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
    	struct ixgbe_adapter *adapter = q_vector->adapter;
    
    	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
    	struct ixgbe_tx_buffer *tx_buffer_info;
    
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
    	eop = tx_ring->tx_buffer_info[i].next_to_watch;
    
    	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
    
    
    	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
    
    	       (count < tx_ring->work_limit)) {
    
    		bool cleaned = false;
    
    		rmb(); /* read buffer_info after eop_desc */
    
    		for ( ; !cleaned; count++) {
    
    			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
    
    			tx_buffer_info = &tx_ring->tx_buffer_info[i];
    
			cleaned = (i == eop);

			i++;
			if (i == tx_ring->count)
				i = 0;

			if (cleaned && tx_buffer_info->skb) {
				total_bytes += tx_buffer_info->bytecount;
				total_packets += tx_buffer_info->gso_segs;
			}

			ixgbe_unmap_and_free_tx_resource(tx_ring,
							 tx_buffer_info);
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

    	tx_ring->next_to_clean = i;
    
    	tx_ring->total_bytes += total_bytes;
    	tx_ring->total_packets += total_packets;
    	u64_stats_update_begin(&tx_ring->syncp);
    	tx_ring->stats.packets += total_packets;
    	tx_ring->stats.bytes += total_bytes;
    	u64_stats_update_end(&tx_ring->syncp);
    
    
    	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
    		/* schedule immediate reset if we believe we hung */
    		struct ixgbe_hw *hw = &adapter->hw;
    		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
    		e_err(drv, "Detected Tx Unit Hang\n"
    			"  Tx Queue             <%d>\n"
    			"  TDH, TDT             <%x>, <%x>\n"
    			"  next_to_use          <%x>\n"
    			"  next_to_clean        <%x>\n"
    			"tx_buffer_info[next_to_clean]\n"
    			"  time_stamp           <%lx>\n"
    			"  jiffies              <%lx>\n",
    			tx_ring->queue_index,
    			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
    			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
    			tx_ring->next_to_use, eop,
    			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
    
    		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
    
    		e_info(probe,
    		       "tx hang %d detected on queue %d, resetting adapter\n",
    			adapter->tx_timeout_count + 1, tx_ring->queue_index);
    
    
    		/* schedule immediate reset if we believe we hung */
    		ixgbe_tx_timeout(adapter->netdev);
    
    		/* the adapter is about to reset, no point in enabling stuff */
    		return true;
    	}
    
    #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
    
    	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
    
    		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
    
    		/* Make sure that anybody stopping the queue after this
    		 * sees the new next_to_clean.
    		 */
    		smp_mb();
    
    		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
    
    		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
    
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return count < tx_ring->work_limit;
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
    
    	u32 rxctrl;
    
    	u8 reg_idx = rx_ring->reg_idx;
    
    	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
    	switch (hw->mac.type) {
    	case ixgbe_mac_82598EB:
    		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
    		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
    		break;
    	case ixgbe_mac_82599EB:
    		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
    		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
    			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
    		break;
    	default:
		break;
	}

    	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
    	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
    	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
    	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
    		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
    	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
    
    }
    
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
    
    	u32 txctrl;
    
    	u8 reg_idx = tx_ring->reg_idx;
    
    	switch (hw->mac.type) {
    	case ixgbe_mac_82598EB:
    		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
    		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
    		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
    		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
    		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
    		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
    		break;
    	case ixgbe_mac_82599EB:
    		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
    		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
    		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
    			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
    		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
    		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
    		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
    		break;
    	default:
    		break;
    	}
    }
    
    static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
    {
    	struct ixgbe_adapter *adapter = q_vector->adapter;
    
	int cpu = get_cpu();
	long r_idx;
	int i;

    	if (q_vector->cpu == cpu)
    		goto out_no_update;
    
    	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
    	for (i = 0; i < q_vector->txr_count; i++) {
    		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
    		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

    	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
    	for (i = 0; i < q_vector->rxr_count; i++) {
    		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
    		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
    				      r_idx + 1);
    	}
    
    	q_vector->cpu = cpu;
    out_no_update:
    
    	put_cpu();
    }
    
    static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)