/* NOTE(review): web-scrape navigation chrome removed — not part of igb_main.c */
  • /*******************************************************************************
    
      Intel(R) Gigabit Ethernet Linux driver
    
      Copyright(c) 2007-2009 Intel Corporation.
    
    
      This program is free software; you can redistribute it and/or modify it
      under the terms and conditions of the GNU General Public License,
      version 2, as published by the Free Software Foundation.
    
      This program is distributed in the hope it will be useful, but WITHOUT
      ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
      FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
      more details.
    
      You should have received a copy of the GNU General Public License along with
      this program; if not, write to the Free Software Foundation, Inc.,
      51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    
      The full GNU General Public License is included in this distribution in
      the file called "COPYING".
    
      Contact Information:
      e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
      Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    
    *******************************************************************************/
    
    #include <linux/module.h>
    #include <linux/types.h>
    #include <linux/init.h>
    #include <linux/vmalloc.h>
    #include <linux/pagemap.h>
    #include <linux/netdevice.h>
    #include <linux/ipv6.h>
    #include <net/checksum.h>
    #include <net/ip6_checksum.h>
    #include <linux/mii.h>
    #include <linux/ethtool.h>
    #include <linux/if_vlan.h>
    #include <linux/pci.h>
    
    #include <linux/pci-aspm.h>
    
    #include <linux/delay.h>
    #include <linux/interrupt.h>
    #include <linux/if_ether.h>
    
    #include <linux/aer.h>
    
    #ifdef CONFIG_IGB_DCA
    
    #include <linux/dca.h>
    #endif
    
    #define DRV_VERSION "1.3.16-k2"
    
    char igb_driver_name[] = "igb";
    char igb_driver_version[] = DRV_VERSION;
    static const char igb_driver_string[] =
    				"Intel(R) Gigabit Ethernet Network Driver";
    
    static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
    
    
/* Per-board feature/ops info; indexed by the board_xxx value from the
 * PCI device table below. */
static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};
    
/* PCI device IDs this driver binds to; every supported device maps to
 * the single board_82575 info entry. */
static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};
    
    MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
    
    void igb_reset(struct igb_adapter *);
    static int igb_setup_all_tx_resources(struct igb_adapter *);
    static int igb_setup_all_rx_resources(struct igb_adapter *);
    static void igb_free_all_tx_resources(struct igb_adapter *);
    static void igb_free_all_rx_resources(struct igb_adapter *);
    void igb_update_stats(struct igb_adapter *);
    static int igb_probe(struct pci_dev *, const struct pci_device_id *);
    static void __devexit igb_remove(struct pci_dev *pdev);
    static int igb_sw_init(struct igb_adapter *);
    static int igb_open(struct net_device *);
    static int igb_close(struct net_device *);
    static void igb_configure_tx(struct igb_adapter *);
    static void igb_configure_rx(struct igb_adapter *);
    static void igb_setup_rctl(struct igb_adapter *);
    static void igb_clean_all_tx_rings(struct igb_adapter *);
    static void igb_clean_all_rx_rings(struct igb_adapter *);
    
    static void igb_clean_tx_ring(struct igb_ring *);
    static void igb_clean_rx_ring(struct igb_ring *);
    
    static void igb_set_multi(struct net_device *);
    static void igb_update_phy_info(unsigned long);
    static void igb_watchdog(unsigned long);
    static void igb_watchdog_task(struct work_struct *);
    static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
    				  struct igb_ring *);
    static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
    static struct net_device_stats *igb_get_stats(struct net_device *);
    static int igb_change_mtu(struct net_device *, int);
    static int igb_set_mac(struct net_device *, void *);
    static irqreturn_t igb_intr(int irq, void *);
    static irqreturn_t igb_intr_msi(int irq, void *);
    static irqreturn_t igb_msix_other(int irq, void *);
    static irqreturn_t igb_msix_rx(int irq, void *);
    static irqreturn_t igb_msix_tx(int irq, void *);
    static int igb_clean_rx_ring_msix(struct napi_struct *, int);
    
    #ifdef CONFIG_IGB_DCA
    
    static void igb_update_rx_dca(struct igb_ring *);
    static void igb_update_tx_dca(struct igb_ring *);
    static void igb_setup_dca(struct igb_adapter *);
    
    #endif /* CONFIG_IGB_DCA */
    
    static bool igb_clean_tx_irq(struct igb_ring *);
    
    static int igb_poll(struct napi_struct *, int);
    
    static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
    static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
    
    static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
    static void igb_tx_timeout(struct net_device *);
    static void igb_reset_task(struct work_struct *);
    static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
    static void igb_vlan_rx_add_vid(struct net_device *, u16);
    static void igb_vlan_rx_kill_vid(struct net_device *, u16);
    static void igb_restore_vlan(struct igb_adapter *);
    
    static int igb_suspend(struct pci_dev *, pm_message_t);
    #ifdef CONFIG_PM
    static int igb_resume(struct pci_dev *);
    #endif
    static void igb_shutdown(struct pci_dev *);
    
    #ifdef CONFIG_IGB_DCA
    
    static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
/* Notifier registered with the DCA core (see igb_init_module) so the
 * driver hears about DCA provider add/remove events. */
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
    #endif
    
    
    #ifdef CONFIG_NET_POLL_CONTROLLER
    /* for netdump / net console */
    static void igb_netpoll(struct net_device *);
    #endif
    
    static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
    		     pci_channel_state_t);
    static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
    static void igb_io_resume(struct pci_dev *);
    
/* PCI AER (advanced error reporting) callbacks, wired into igb_driver. */
static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};
    
    
/* Main PCI driver structure registered in igb_init_module(). */
static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Managment Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};
    
    
    static int global_quad_port_a; /* global quad port a indication */
    
    
    MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
    MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
    MODULE_LICENSE("GPL");
    MODULE_VERSION(DRV_VERSION);
    
    #ifdef DEBUG
    /**
     * igb_get_hw_dev_name - return device name string
     * used by hardware layer to print debugging information
     **/
    char *igb_get_hw_dev_name(struct e1000_hw *hw)
    {
    	struct igb_adapter *adapter = hw->back;
    	return adapter->netdev->name;
    }
    #endif
    
    /**
     * igb_init_module - Driver Registration Routine
     *
     * igb_init_module is the first routine called when the driver is
     * loaded. All it does is register with the PCI subsystem.
     **/
    static int __init igb_init_module(void)
    {
    	int ret;
    	printk(KERN_INFO "%s - version %s\n",
    	       igb_driver_string, igb_driver_version);
    
    	printk(KERN_INFO "%s\n", igb_copyright);
    
    
    #ifdef CONFIG_IGB_DCA
    
    Jeb Cramer's avatar
    Jeb Cramer committed
    	dca_register_notify(&dca_notifier);
    #endif
    
    
    	ret = pci_register_driver(&igb_driver);
    
    	return ret;
    }
    
    module_init(igb_init_module);
    
    /**
     * igb_exit_module - Driver Exit Cleanup Routine
     *
     * igb_exit_module is called just before the driver is removed
     * from memory.
     **/
    static void __exit igb_exit_module(void)
    {
    
    #ifdef CONFIG_IGB_DCA
    
    Jeb Cramer's avatar
    Jeb Cramer committed
    	dca_unregister_notify(&dca_notifier);
    #endif
    
    	pci_unregister_driver(&igb_driver);
    }
    
    module_exit(igb_exit_module);
    
    
    #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
    /**
     * igb_cache_ring_register - Descriptor ring to register mapping
     * @adapter: board private structure to initialize
     *
     * Once we know the feature-set enabled for the device, we'll cache
     * the register offset the descriptor ring is assigned to.
     **/
    static void igb_cache_ring_register(struct igb_adapter *adapter)
    {
    	int i;
    
    	switch (adapter->hw.mac.type) {
    	case e1000_82576:
    		/* The queues are allocated for virtualization such that VF 0
    		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
    		 * In order to avoid collision we start at the first free queue
    		 * and continue consuming queues in the same sequence
    		 */
    		for (i = 0; i < adapter->num_rx_queues; i++)
    			adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
    		for (i = 0; i < adapter->num_tx_queues; i++)
    			adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
    		break;
    	case e1000_82575:
    	default:
    		for (i = 0; i < adapter->num_rx_queues; i++)
    			adapter->rx_ring[i].reg_idx = i;
    		for (i = 0; i < adapter->num_tx_queues; i++)
    			adapter->tx_ring[i].reg_idx = i;
    		break;
    	}
    }
    
    
    /**
     * igb_alloc_queues - Allocate memory for all rings
     * @adapter: board private structure to initialize
     *
     * We allocate one ring per queue at run-time since we don't know the
     * number of queues at compile-time.
     **/
    static int igb_alloc_queues(struct igb_adapter *adapter)
    {
    	int i;
    
    	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
    				   sizeof(struct igb_ring), GFP_KERNEL);
    	if (!adapter->tx_ring)
    		return -ENOMEM;
    
    	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
    				   sizeof(struct igb_ring), GFP_KERNEL);
    	if (!adapter->rx_ring) {
    		kfree(adapter->tx_ring);
    		return -ENOMEM;
    	}
    
    
    	adapter->rx_ring->buddy = adapter->tx_ring;
    
    
    	for (i = 0; i < adapter->num_tx_queues; i++) {
    		struct igb_ring *ring = &(adapter->tx_ring[i]);
    
    		ring->count = adapter->tx_ring_count;
    
    		ring->adapter = adapter;
    		ring->queue_index = i;
    	}
    
    	for (i = 0; i < adapter->num_rx_queues; i++) {
    		struct igb_ring *ring = &(adapter->rx_ring[i]);
    
    		ring->count = adapter->rx_ring_count;
    
    		ring->adapter = adapter;
    
    		ring->queue_index = i;
    
    		ring->itr_register = E1000_ITR;
    
    
    		/* set a default napi handler for each rx_ring */
    
    		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
    
    
    	igb_cache_ring_register(adapter);
    
    static void igb_free_queues(struct igb_adapter *adapter)
    {
    	int i;
    
    	for (i = 0; i < adapter->num_rx_queues; i++)
    		netif_napi_del(&adapter->rx_ring[i].napi);
    
    	kfree(adapter->tx_ring);
    	kfree(adapter->rx_ring);
    }
    
    
    #define IGB_N0_QUEUE -1
    static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
    			      int tx_queue, int msix_vector)
    {
    	u32 msixbm = 0;
    	struct e1000_hw *hw = &adapter->hw;
    
    	u32 ivar, index;
    
    	switch (hw->mac.type) {
    	case e1000_82575:
    
    		/* The 82575 assigns vectors using a bitmask, which matches the
    		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
    		   or more queues to a vector, we write the appropriate bits
    		   into the MSIXBM register for that vector. */
    		if (rx_queue > IGB_N0_QUEUE) {
    			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
    			adapter->rx_ring[rx_queue].eims_value = msixbm;
    		}
    		if (tx_queue > IGB_N0_QUEUE) {
    			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
    			adapter->tx_ring[tx_queue].eims_value =
    				  E1000_EICR_TX_QUEUE0 << tx_queue;
    		}
    		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
    
    		break;
    	case e1000_82576:
    
    		/* 82576 uses a table-based method for assigning vectors.
    
    		   Each queue has a single entry in the table to which we write
    		   a vector number along with a "valid" bit.  Sadly, the layout
    		   of the table is somewhat counterintuitive. */
    		if (rx_queue > IGB_N0_QUEUE) {
    
    			ivar = array_rd32(E1000_IVAR0, index);
    
    				/* vector goes into third byte of register */
    				ivar = ivar & 0xFF00FFFF;
    				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
    
    			} else {
    				/* vector goes into low byte of register */
    				ivar = ivar & 0xFFFFFF00;
    				ivar |= msix_vector | E1000_IVAR_VALID;
    
    			}
    			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
    			array_wr32(E1000_IVAR0, index, ivar);
    		}
    		if (tx_queue > IGB_N0_QUEUE) {
    
    			ivar = array_rd32(E1000_IVAR0, index);
    
    				/* vector goes into high byte of register */
    				ivar = ivar & 0x00FFFFFF;
    				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
    
    			} else {
    				/* vector goes into second byte of register */
    				ivar = ivar & 0xFFFF00FF;
    				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
    
    			}
    			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
    			array_wr32(E1000_IVAR0, index, ivar);
    		}
    		break;
    	default:
    		BUG();
    		break;
    	}
    
    }
    
    /**
     * igb_configure_msix - Configure MSI-X hardware
     *
     * igb_configure_msix sets up the hardware to properly
     * generate MSI-X interrupts.
     **/
    static void igb_configure_msix(struct igb_adapter *adapter)
    {
    	u32 tmp;
    	int i, vector = 0;
    	struct e1000_hw *hw = &adapter->hw;
    
    	adapter->eims_enable_mask = 0;
    
    	if (hw->mac.type == e1000_82576)
    		/* Turn on MSI-X capability first, or our settings
    		 * won't stick.  And it will take days to debug. */
    		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
    
    				   E1000_GPIE_PBA | E1000_GPIE_EIAME |
    
     				   E1000_GPIE_NSICR);
    
    
    	for (i = 0; i < adapter->num_tx_queues; i++) {
    		struct igb_ring *tx_ring = &adapter->tx_ring[i];
    		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
    		adapter->eims_enable_mask |= tx_ring->eims_value;
    		if (tx_ring->itr_val)
    
    			writel(tx_ring->itr_val,
    
    			       hw->hw_addr + tx_ring->itr_register);
    		else
    			writel(1, hw->hw_addr + tx_ring->itr_register);
    	}
    
    	for (i = 0; i < adapter->num_rx_queues; i++) {
    		struct igb_ring *rx_ring = &adapter->rx_ring[i];
    
    		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
    		adapter->eims_enable_mask |= rx_ring->eims_value;
    		if (rx_ring->itr_val)
    
    			writel(rx_ring->itr_val,
    
    			       hw->hw_addr + rx_ring->itr_register);
    		else
    			writel(1, hw->hw_addr + rx_ring->itr_register);
    	}
    
    
    	/* set vector for other causes, i.e. link changes */
    
    	switch (hw->mac.type) {
    	case e1000_82575:
    
    		array_wr32(E1000_MSIXBM(0), vector++,
    				      E1000_EIMS_OTHER);
    
    		tmp = rd32(E1000_CTRL_EXT);
    		/* enable MSI-X PBA support*/
    		tmp |= E1000_CTRL_EXT_PBA_CLR;
    
    		/* Auto-Mask interrupts upon ICR read. */
    		tmp |= E1000_CTRL_EXT_EIAME;
    		tmp |= E1000_CTRL_EXT_IRCA;
    
    		wr32(E1000_CTRL_EXT, tmp);
    		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
    
    		adapter->eims_other = E1000_EIMS_OTHER;
    
    		break;
    
    	case e1000_82576:
    		tmp = (vector++ | E1000_IVAR_VALID) << 8;
    		wr32(E1000_IVAR_MISC, tmp);
    
    		adapter->eims_enable_mask = (1 << (vector)) - 1;
    		adapter->eims_other = 1 << (vector - 1);
    		break;
    	default:
    		/* do nothing, since nothing else supports MSI-X */
    		break;
    	} /* switch (hw->mac.type) */
    
    	wrfl();
    }
    
    /**
     * igb_request_msix - Initialize MSI-X interrupts
     *
     * igb_request_msix allocates MSI-X vectors and requests interrupts from the
     * kernel.
     **/
    static int igb_request_msix(struct igb_adapter *adapter)
    {
    	struct net_device *netdev = adapter->netdev;
    	int i, err = 0, vector = 0;
    
    	vector = 0;
    
    	for (i = 0; i < adapter->num_tx_queues; i++) {
    		struct igb_ring *ring = &(adapter->tx_ring[i]);
    
    		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
    
    		err = request_irq(adapter->msix_entries[vector].vector,
    				  &igb_msix_tx, 0, ring->name,
    				  &(adapter->tx_ring[i]));
    		if (err)
    			goto out;
    		ring->itr_register = E1000_EITR(0) + (vector << 2);
    
    		ring->itr_val = 976; /* ~4000 ints/sec */
    
    		vector++;
    	}
    	for (i = 0; i < adapter->num_rx_queues; i++) {
    		struct igb_ring *ring = &(adapter->rx_ring[i]);
    		if (strlen(netdev->name) < (IFNAMSIZ - 5))
    
    			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
    
    		else
    			memcpy(ring->name, netdev->name, IFNAMSIZ);
    		err = request_irq(adapter->msix_entries[vector].vector,
    				  &igb_msix_rx, 0, ring->name,
    				  &(adapter->rx_ring[i]));
    		if (err)
    			goto out;
    		ring->itr_register = E1000_EITR(0) + (vector << 2);
    		ring->itr_val = adapter->itr;
    
    		/* overwrite the poll routine for MSIX, we've already done
    		 * netif_napi_add */
    		ring->napi.poll = &igb_clean_rx_ring_msix;
    
    		vector++;
    	}
    
    	err = request_irq(adapter->msix_entries[vector].vector,
    			  &igb_msix_other, 0, netdev->name, netdev);
    	if (err)
    		goto out;
    
    	igb_configure_msix(adapter);
    	return 0;
    out:
    	return err;
    }
    
    static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
    {
    	if (adapter->msix_entries) {
    		pci_disable_msix(adapter->pdev);
    		kfree(adapter->msix_entries);
    		adapter->msix_entries = NULL;
    
    	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
    
    		pci_disable_msi(adapter->pdev);
    	return;
    }
    
    
    /**
     * igb_set_interrupt_capability - set MSI or MSI-X if supported
     *
     * Attempt to configure interrupts using the best available
     * capabilities of the hardware and kernel.
     **/
    static void igb_set_interrupt_capability(struct igb_adapter *adapter)
    {
    	int err;
    	int numvecs, i;
    
    
    	/* Number of supported queues. */
    	/* Having more queues than CPUs doesn't make sense. */
    	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
    	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
    
    
    	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
    	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
    					GFP_KERNEL);
    	if (!adapter->msix_entries)
    		goto msi_only;
    
    	for (i = 0; i < numvecs; i++)
    		adapter->msix_entries[i].entry = i;
    
    	err = pci_enable_msix(adapter->pdev,
    			      adapter->msix_entries,
    			      numvecs);
    	if (err == 0)
    
    
    	igb_reset_interrupt_capability(adapter);
    
    	/* If we can't do MSI-X, try MSI */
    msi_only:
    	adapter->num_rx_queues = 1;
    
    	adapter->num_tx_queues = 1;
    
    	if (!pci_enable_msi(adapter->pdev))
    
    		adapter->flags |= IGB_FLAG_HAS_MSI;
    
    	/* Notify the stack of the (possibly) reduced Tx Queue count. */
    
    	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
    
    	return;
    }
    
    /**
     * igb_request_irq - initialize interrupts
     *
     * Attempts to configure interrupts using the best available
     * capabilities of the hardware and kernel.
     **/
    static int igb_request_irq(struct igb_adapter *adapter)
    {
    	struct net_device *netdev = adapter->netdev;
    	struct e1000_hw *hw = &adapter->hw;
    	int err = 0;
    
    	if (adapter->msix_entries) {
    		err = igb_request_msix(adapter);
    
    		if (!err)
    
    			goto request_done;
    		/* fall back to MSI */
    		igb_reset_interrupt_capability(adapter);
    		if (!pci_enable_msi(adapter->pdev))
    
    			adapter->flags |= IGB_FLAG_HAS_MSI;
    
    		igb_free_all_tx_resources(adapter);
    		igb_free_all_rx_resources(adapter);
    		adapter->num_rx_queues = 1;
    		igb_alloc_queues(adapter);
    
    	} else {
    
    		switch (hw->mac.type) {
    		case e1000_82575:
    			wr32(E1000_MSIXBM(0),
    			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
    			break;
    		case e1000_82576:
    			wr32(E1000_IVAR0, E1000_IVAR_VALID);
    			break;
    		default:
    			break;
    		}
    
    	if (adapter->flags & IGB_FLAG_HAS_MSI) {
    
    		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
    				  netdev->name, netdev);
    		if (!err)
    			goto request_done;
    		/* fall back to legacy interrupts */
    		igb_reset_interrupt_capability(adapter);
    
    		adapter->flags &= ~IGB_FLAG_HAS_MSI;
    
    	}
    
    	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
    			  netdev->name, netdev);
    
    
    	if (err)
    
    		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
    			err);
    
    request_done:
    	return err;
    }
    
    static void igb_free_irq(struct igb_adapter *adapter)
    {
    	struct net_device *netdev = adapter->netdev;
    
    	if (adapter->msix_entries) {
    		int vector = 0, i;
    
    		for (i = 0; i < adapter->num_tx_queues; i++)
    			free_irq(adapter->msix_entries[vector++].vector,
    				&(adapter->tx_ring[i]));
    		for (i = 0; i < adapter->num_rx_queues; i++)
    			free_irq(adapter->msix_entries[vector++].vector,
    				&(adapter->rx_ring[i]));
    
    		free_irq(adapter->msix_entries[vector++].vector, netdev);
    		return;
    	}
    
    	free_irq(adapter->pdev->irq, netdev);
    }
    
    /**
     * igb_irq_disable - Mask off interrupt generation on the NIC
     * @adapter: board private structure
     **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* In MSI-X mode also clear the extended auto-mask registers and
	 * mask every extended cause (EIMC). */
	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}

	/* Mask the legacy causes too, flush the posted writes, and wait
	 * for any handler already running on this line to finish. */
	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}
    
    /**
     * igb_irq_enable - Enable default interrupt generation settings
     * @adapter: board private structure
     **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		/* MSI-X: auto-clear/auto-mask and unmask every queue vector
		 * (eims_enable_mask is built in igb_configure_msix()); link
		 * change and DOUTSYNC causes still arrive via IMS. */
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	} else {
		/* legacy/MSI: one mask covers everything */
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
    
    static void igb_update_mng_vlan(struct igb_adapter *adapter)
    {
    	struct net_device *netdev = adapter->netdev;
    	u16 vid = adapter->hw.mng_cookie.vlan_id;
    	u16 old_vid = adapter->mng_vlan_id;
    	if (adapter->vlgrp) {
    		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
    			if (adapter->hw.mng_cookie.status &
    				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
    				igb_vlan_rx_add_vid(netdev, vid);
    				adapter->mng_vlan_id = vid;
    			} else
    				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
    
    			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
    					(vid != old_vid) &&
    			    !vlan_group_get_device(adapter->vlgrp, old_vid))
    				igb_vlan_rx_kill_vid(netdev, old_vid);
    		} else
    			adapter->mng_vlan_id = vid;
    	}
    }
    
    /**
     * igb_release_hw_control - release control of the h/w to f/w
     * @adapter: address of board private structure
     *
     * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
     * For ASF and Pass Through versions of f/w this means that the
     * driver is no longer loaded.
     *
     **/
    static void igb_release_hw_control(struct igb_adapter *adapter)
    {
    	struct e1000_hw *hw = &adapter->hw;
    	u32 ctrl_ext;
    
    	/* Let firmware take over control of h/w */
    	ctrl_ext = rd32(E1000_CTRL_EXT);
    	wr32(E1000_CTRL_EXT,
    			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
    }
    
    
    /**
     * igb_get_hw_control - get control of the h/w from f/w
     * @adapter: address of board private structure
     *
     * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
     * For ASF and Pass Through versions of f/w this means that
     * the driver is loaded.
     *
     **/
    static void igb_get_hw_control(struct igb_adapter *adapter)
    {
    	struct e1000_hw *hw = &adapter->hw;
    	u32 ctrl_ext;
    
    	/* Let firmware know the driver has taken over */
    	ctrl_ext = rd32(E1000_CTRL_EXT);
    	wr32(E1000_CTRL_EXT,
    			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
    }
    
    /**
     * igb_configure - configure the hardware for RX and TX
     * @adapter: private board structure
     **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	/* claim the hardware from firmware, then program receive
	 * filters and the VLAN table before touching the DMA engines */
	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);

	/* tx first, then rx control and rx ring setup */
	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call IGB_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
	}

	/* remember the stack's queue length so igb_down() can restore it */
	adapter->tx_queue_len = netdev->tx_queue_len;
}
    
    
    /**
     * igb_up - Open the interface and prepare it to handle traffic
     * @adapter: board private structure
     **/
    
/* Bring the interface up after a reset; always returns 0. */
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	/* re-enable polling before interrupts can arrive */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}
    
/* Quiesce the interface: stop rx, stop the stack's tx queues, stop tx,
 * silence napi and interrupts, then reset and drain the rings. */
void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	/* DMA is stopped; now silence the software side */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* restore the queue length saved in igb_configure() */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* don't touch the device if the PCI channel is gone (AER) */
	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);

	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}
    
    void igb_reinit_locked(struct igb_adapter *adapter)
    {
    	WARN_ON(in_interrupt());
    	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
    		msleep(1);
    	igb_down(adapter);
    	igb_up(adapter);
    	clear_bit(__IGB_RESETTING, &adapter->state);
    }
    
    void igb_reset(struct igb_adapter *adapter)
    {
    	struct e1000_hw *hw = &adapter->hw;
    
    	struct e1000_mac_info *mac = &hw->mac;
    	struct e1000_fc_info *fc = &hw->fc;
    
    	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
    	u16 hwm;
    
    	/* Repartition Pba for greater than 9k mtu
    	 * To take effect CTRL.RST is required.
    	 */
    
    	switch (mac->type) {
    	case e1000_82576:
    
    		pba = E1000_PBA_64K;
    
    		break;
    	case e1000_82575:
    	default:
    		pba = E1000_PBA_34K;
    		break;
    
    	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
    	    (mac->type < e1000_82576)) {
    
    		/* adjust PBA for jumbo frames */
    		wr32(E1000_PBA, pba);
    
    		/* To maintain wire speed transmits, the Tx FIFO should be
    		 * large enough to accommodate two full transmit packets,
    		 * rounded up to the next 1KB and expressed in KB.  Likewise,
    		 * the Rx FIFO should be large enough to accommodate at least
    		 * one full receive packet and is similarly rounded up and
    		 * expressed in KB. */
    		pba = rd32(E1000_PBA);
    		/* upper 16 bits has Tx packet buffer allocation size in KB */
    		tx_space = pba >> 16;
    		/* lower 16 bits has Rx packet buffer allocation size in KB */
    		pba &= 0xffff;
    		/* the tx fifo also stores 16 bytes of information about the tx
    		 * but don't include ethernet FCS because hardware appends it */
    		min_tx_space = (adapter->max_frame_size +
    				sizeof(struct e1000_tx_desc) -
    				ETH_FCS_LEN) * 2;
    		min_tx_space = ALIGN(min_tx_space, 1024);
    		min_tx_space >>= 10;
    		/* software strips receive CRC, so leave room for it */
    		min_rx_space = adapter->max_frame_size;
    		min_rx_space = ALIGN(min_rx_space, 1024);
    		min_rx_space >>= 10;
    
    		/* If current Tx allocation is less than the min Tx FIFO size,
    		 * and the min Tx FIFO size is less than the current Rx FIFO
    		 * allocation, take space away from current Rx allocation */
    		if (tx_space < min_tx_space &&
    		    ((min_tx_space - tx_space) < pba)) {
    			pba = pba - (min_tx_space - tx_space);
    
    			/* if short on rx space, rx wins and must trump tx
    			 * adjustment */
    			if (pba < min_rx_space)
    				pba = min_rx_space;
    		}
    
    		wr32(E1000_PBA, pba);
    
    	}
    
    	/* flow control settings */
    	/* The high water mark must be low enough to fit one full frame
    	 * (or the size used for early receive) above it in the Rx FIFO.
    	 * Set it to the lower of:
    	 * - 90% of the Rx FIFO size, or
    	 * - the full Rx FIFO size minus one full frame */
    	hwm = min(((pba << 10) * 9 / 10),
    
    			((pba << 10) - 2 * adapter->max_frame_size));
    
    	if (mac->type < e1000_82576) {
    		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
    		fc->low_water = fc->high_water - 8;
    	} else {
    		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
    		fc->low_water = fc->high_water - 16;
    	}
    
    	fc->pause_time = 0xFFFF;
    	fc->send_xon = 1;
    	fc->type = fc->original_type;
    
    	/* Allow time for pending master requests to run */
    	adapter->hw.mac.ops.reset_hw(&adapter->hw);
    	wr32(E1000_WUC, 0);
    
    	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
    		dev_err(&adapter->pdev->dev, "Hardware Error\n");
    
    	igb_update_mng_vlan(adapter);
    
    	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
    	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
    
    	igb_reset_adaptive(&adapter->hw);
    
    static const struct net_device_ops igb_netdev_ops = {
    	.ndo_open 		= igb_open,
    	.ndo_stop		= igb_close,
    
    	.ndo_start_xmit		= igb_xmit_frame_adv,
    
    	.ndo_get_stats		= igb_get_stats,
    	.ndo_set_multicast_list	= igb_set_multi,
    	.ndo_set_mac_address	= igb_set_mac,
    	.ndo_change_mtu		= igb_change_mtu,
    	.ndo_do_ioctl		= igb_ioctl,
    	.ndo_tx_timeout		= igb_tx_timeout,
    	.ndo_validate_addr	= eth_validate_addr,
    	.ndo_vlan_rx_register	= igb_vlan_rx_register,
    	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
    	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
    #ifdef CONFIG_NET_POLL_CONTROLLER
    	.ndo_poll_controller	= igb_netpoll,
    #endif