/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
     *
     * This program is free software; you can redistribute  it and/or modify it
     * under  the terms of  the GNU General  Public License as published by the
     * Free Software Foundation;  either version 2 of the  License, or (at your
     * option) any later version.
     *
     *  Gianfar:  AKA Lambda Draconis, "Dragon"
     *  RA 11 31 24.2
     *  Dec +69 19 52
     *  V 3.84
     *  B-V +1.62
     *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
     *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
     *  last descriptor of the ring.
     *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
     *  are none left with data (NAPI will stop after a set number of
     *  packets to give time to other tasks, but will eventually
     *  process all the packets).  The data arrives inside a
     *  pre-allocated skb, and so after the skb is passed up to the
     *  stack, a new skb must be allocated, and the address field in
     *  the buffer descriptor must be updated to indicate this new
     *  skb.
     *
     *  When the kernel requests that a packet be transmitted, the
     *  driver starts where it left off last time, and points the
     *  descriptor at the buffer which was passed in.  The driver
     *  then informs the DMA engine that there are packets ready to
     *  be transmitted.  Once the controller is finished transmitting
     *  the packet, an interrupt may be triggered (under the same
     *  conditions as for reception, but depending on the TXF bit).
     *  The driver then cleans up the buffer.
     */
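
/*
 * Illustrative sketch (not from the original driver): the RX path described
 * above boils down to walking the buffer descriptor ring until a descriptor
 * is still empty (i.e. still owned by the hardware), using the wrap bit to
 * return to the base of the ring.  process_one_skb() and refill_bd() are
 * hypothetical stand-ins for the real work done in gfar_clean_rx_ring()
 * and gfar_new_rxbdp():
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY) && rx_work_limit--) {
 *		process_one_skb(rx_queue);	// pass the filled skb up the stack
 *		refill_bd(rx_queue, bdp);	// attach a fresh skb, mark BD empty
 *		if (bdp->status & RXBD_WRAP)	// wrap bit marks the ring's end
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 */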
    
    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/errno.h>
    
    #include <linux/unistd.h>
    
    #include <linux/slab.h>
    #include <linux/interrupt.h>
    #include <linux/init.h>
    #include <linux/delay.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>
    
    #include <linux/spinlock.h>
    #include <linux/mm.h>
    
    #include <linux/of_mdio.h>
    
    #include <linux/of_platform.h>
    
    #include <linux/ip.h>
    #include <linux/tcp.h>
    #include <linux/udp.h>
    
    #include <linux/in.h>
    
    
    #include <asm/io.h>
    #include <asm/irq.h>
    #include <asm/uaccess.h>
    #include <linux/module.h>
    #include <linux/dma-mapping.h>
    #include <linux/crc32.h>
    
    #include <linux/mii.h>
    #include <linux/phy.h>
    
    #include <linux/phy_fixed.h>
    #include <linux/of.h>
    
    
    #include "gianfar.h"
    
    #include "fsl_pq_mdio.h"
    
    
    #define TX_TIMEOUT      (1*HZ)
    #undef BRIEF_GFAR_ERRORS
    #undef VERBOSE_GFAR_ERRORS
    
    const char gfar_driver_name[] = "Gianfar Ethernet";
    
    const char gfar_driver_version[] = "1.3";
    
    
    static int gfar_enet_open(struct net_device *dev);
    static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
    
    static void gfar_reset_task(struct work_struct *work);
    
    static void gfar_timeout(struct net_device *dev);
    static int gfar_close(struct net_device *dev);
    
    struct sk_buff *gfar_new_skb(struct net_device *dev);
    
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
    
    static int gfar_set_mac_address(struct net_device *dev);
    static int gfar_change_mtu(struct net_device *dev, int new_mtu);
    
    static irqreturn_t gfar_error(int irq, void *dev_id);
    static irqreturn_t gfar_transmit(int irq, void *dev_id);
    static irqreturn_t gfar_interrupt(int irq, void *dev_id);
    
    static void adjust_link(struct net_device *dev);
    static void init_registers(struct net_device *dev);
    static int init_phy(struct net_device *dev);
    
    static int gfar_probe(struct of_device *ofdev,
    		const struct of_device_id *match);
    static int gfar_remove(struct of_device *ofdev);
    
    static void free_skb_resources(struct gfar_private *priv);
    
    static void gfar_set_multi(struct net_device *dev);
    static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
    
    static void gfar_configure_serdes(struct net_device *dev);
    
    static int gfar_poll(struct napi_struct *napi, int budget);
    
    #ifdef CONFIG_NET_POLL_CONTROLLER
    static void gfar_netpoll(struct net_device *dev);
    #endif
    
    int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
    static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
    
    static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
    			      int amount_pull);
    
    static void gfar_vlan_rx_register(struct net_device *netdev,
    		                struct vlan_group *grp);
    
    void gfar_halt(struct net_device *dev);
    
    static void gfar_halt_nodisable(struct net_device *dev);
    
    void gfar_start(struct net_device *dev);
    static void gfar_clear_exact_match(struct net_device *dev);
    static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
    
    static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
    
    
    MODULE_AUTHOR("Freescale Semiconductor, Inc");
    MODULE_DESCRIPTION("Gianfar Ethernet Driver");
    MODULE_LICENSE("GPL");
    
    
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
    
    
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

    	for (i = 0; i < priv->num_tx_queues; i++) {
    		tx_queue = priv->tx_queue[i];
    		/* Initialize some variables in our dev structure */
    		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
    		tx_queue->dirty_tx = tx_queue->tx_bd_base;
    		tx_queue->cur_tx = tx_queue->tx_bd_base;
    		tx_queue->skb_curtx = 0;
    		tx_queue->skb_dirtytx = 0;
    
    		/* Initialize Transmit Descriptor Ring */
    		txbdp = tx_queue->tx_bd_base;
    		for (j = 0; j < tx_queue->tx_ring_size; j++) {
    			txbdp->lstatus = 0;
    			txbdp->bufPtr = 0;
    			txbdp++;
    		}
    
    		/* Set the last descriptor in the ring to indicate wrap */
    		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
    		rx_queue = priv->rx_queue[i];
    		rx_queue->cur_rx = rx_queue->rx_bd_base;
    		rx_queue->skb_currx = 0;
    		rxbdp = rx_queue->rx_bd_base;
    
    		for (j = 0; j < rx_queue->rx_ring_size; j++) {
    			struct sk_buff *skb = rx_queue->rx_skbuff[j];
    
    			if (skb) {
    				gfar_init_rxbdp(rx_queue, rxbdp,
    						rxbdp->bufPtr);
    			} else {
    				skb = gfar_new_skb(ndev);
    				if (!skb) {
    					pr_err("%s: Can't allocate RX buffers\n",
    							ndev->name);
    					goto err_rxalloc_fail;
    				}
    				rx_queue->rx_skbuff[j] = skb;
    
				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
    
    static int gfar_alloc_skb_resources(struct net_device *ndev)
    {
    
	void *vaddr;
	dma_addr_t addr;
    	int i, j, k;
    
    	struct gfar_private *priv = netdev_priv(ndev);
    	struct device *dev = &priv->ofdev->dev;
    
    	struct gfar_priv_tx_q *tx_queue = NULL;
    	struct gfar_priv_rx_q *rx_queue = NULL;
    
    
    	priv->total_tx_ring_size = 0;
    	for (i = 0; i < priv->num_tx_queues; i++)
    		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
    
    	priv->total_rx_ring_size = 0;
    	for (i = 0; i < priv->num_rx_queues; i++)
    		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
    
    
	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
    
    	if (!vaddr) {
    		if (netif_msg_ifup(priv))
    			pr_err("%s: Could not allocate buffer descriptors!\n",
    			       ndev->name);
    		return -ENOMEM;
    	}
    
    
    	for (i = 0; i < priv->num_tx_queues; i++) {
    		tx_queue = priv->tx_queue[i];
    		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
    		tx_queue->tx_bd_dma_base = addr;
    		tx_queue->dev = ndev;
    		/* enet DMA only understands physical addresses */
    		addr    += sizeof(struct txbd8) *tx_queue->tx_ring_size;
    		vaddr   += sizeof(struct txbd8) *tx_queue->tx_ring_size;
    	}
    
    
    	/* Start the rx descriptor ring where the tx ring leaves off */
    
    	for (i = 0; i < priv->num_rx_queues; i++) {
    		rx_queue = priv->rx_queue[i];
    		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
    		rx_queue->rx_bd_dma_base = addr;
    		rx_queue->dev = ndev;
    		addr    += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
    		vaddr   += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
    	}
    
    	for (i = 0; i < priv->num_tx_queues; i++) {
    		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				  tx_queue->tx_ring_size, GFP_KERNEL);
    
    		if (!tx_queue->tx_skbuff) {
    			if (netif_msg_ifup(priv))
    				pr_err("%s: Could not allocate tx_skbuff\n",
    						ndev->name);
    			goto cleanup;
    		}
    
    		for (k = 0; k < tx_queue->tx_ring_size; k++)
    			tx_queue->tx_skbuff[k] = NULL;
    	}
    
    	for (i = 0; i < priv->num_rx_queues; i++) {
    		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				  rx_queue->rx_ring_size, GFP_KERNEL);
    
    		if (!rx_queue->rx_skbuff) {
    			if (netif_msg_ifup(priv))
    				pr_err("%s: Could not allocate rx_skbuff\n",
    				       ndev->name);
    			goto cleanup;
    		}
    
    		for (j = 0; j < rx_queue->rx_ring_size; j++)
    			rx_queue->rx_skbuff[j] = NULL;
    	}
    
	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

    	baddr = &regs->tbase0;
    	for(i = 0; i < priv->num_tx_queues; i++) {
    		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
    		baddr	+= 2;
    	}
    
    	baddr = &regs->rbase0;
    	for(i = 0; i < priv->num_rx_queues; i++) {
    		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
    		baddr   += 2;
    	}
    }
    
    
    static void gfar_init_mac(struct net_device *ndev)
    {
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
    
	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
    		/* Program the RIR0 reg with the required distribution */
    		gfar_write(&regs->rir0, DEFAULT_RIR0);
    	}
    
    
    	if (priv->rx_csum_enable)
    		rctrl |= RCTRL_CHECKSUMMING;
    
    	if (priv->extended_hash) {
    		rctrl |= RCTRL_EXTHASH;
    
    		gfar_clear_exact_match(ndev);
    		rctrl |= RCTRL_EMEN;
    	}
    
    	if (priv->padding) {
    		rctrl &= ~RCTRL_PAL_MASK;
    		rctrl |= RCTRL_PADDING(priv->padding);
    	}
    
    	/* keep vlan related bits if it's enabled */
    	if (priv->vlgrp) {
    		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
    		tctrl |= TCTRL_VLINS;
    	}
    
    	/* Init rctrl based on our settings */
    	gfar_write(&regs->rctrl, rctrl);
    
    	if (ndev->features & NETIF_F_IP_CSUM)
    		tctrl |= TCTRL_INIT_CSUM;
    
    
    	tctrl |= TCTRL_TXSCHED_PRIO;
    
    
    	gfar_write(&regs->tctrl, tctrl);
    
    	/* Set the extraction length and index */
    	attrs = ATTRELI_EL(priv->rx_stash_size) |
    		ATTRELI_EI(priv->rx_stash_index);
    
    	gfar_write(&regs->attreli, attrs);
    
	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
    	attrs = ATTR_INIT_SETTINGS;
    
    	if (priv->bd_stash_en)
    		attrs |= ATTR_BDSTASH;
    
    	if (priv->rx_stash_size != 0)
    		attrs |= ATTR_BUFSTASH;
    
    	gfar_write(&regs->attr, attrs);
    
    	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
    	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
    	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
    }
    
    
    static struct net_device_stats *gfar_get_stats(struct net_device *dev)
    {
    	struct gfar_private *priv = netdev_priv(dev);
    	struct netdev_queue *txq;
    	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
    	unsigned long tx_packets = 0, tx_bytes = 0;
    	int i = 0;
    
    	for (i = 0; i < priv->num_rx_queues; i++) {
    		rx_packets += priv->rx_queue[i]->stats.rx_packets;
    		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
    		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
    	}
    
    	dev->stats.rx_packets = rx_packets;
    	dev->stats.rx_bytes = rx_bytes;
    	dev->stats.rx_dropped = rx_dropped;
    
    	for (i = 0; i < priv->num_tx_queues; i++) {
    		txq = netdev_get_tx_queue(dev, i);
    		tx_bytes += txq->tx_bytes;
    		tx_packets += txq->tx_packets;
    	}
    
    	dev->stats.tx_bytes = tx_bytes;
    	dev->stats.tx_packets = tx_packets;
    
    	return &dev->stats;
    }
    
    
    static const struct net_device_ops gfar_netdev_ops = {
    	.ndo_open = gfar_enet_open,
    	.ndo_start_xmit = gfar_start_xmit,
    	.ndo_stop = gfar_close,
    	.ndo_change_mtu = gfar_change_mtu,
    	.ndo_set_multicast_list = gfar_set_multi,
    	.ndo_tx_timeout = gfar_timeout,
    	.ndo_do_ioctl = gfar_ioctl,
    
    	.ndo_get_stats = gfar_get_stats,
    
    	.ndo_vlan_rx_register = gfar_vlan_rx_register,
    
    	.ndo_set_mac_address = eth_mac_addr,
    	.ndo_validate_addr = eth_validate_addr,
    
    #ifdef CONFIG_NET_POLL_CONTROLLER
    	.ndo_poll_controller = gfar_netpoll,
    #endif
    };
    
    
    unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
    unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
    
    
    void lock_rx_qs(struct gfar_private *priv)
    {
    	int i = 0x0;
    
    	for (i = 0; i < priv->num_rx_queues; i++)
    		spin_lock(&priv->rx_queue[i]->rxlock);
    }
    
    void lock_tx_qs(struct gfar_private *priv)
    {
    	int i = 0x0;
    
    	for (i = 0; i < priv->num_tx_queues; i++)
    		spin_lock(&priv->tx_queue[i]->txlock);
    }
    
    void unlock_rx_qs(struct gfar_private *priv)
    {
    	int i = 0x0;
    
    	for (i = 0; i < priv->num_rx_queues; i++)
    		spin_unlock(&priv->rx_queue[i]->rxlock);
    }
    
    void unlock_tx_qs(struct gfar_private *priv)
    {
    	int i = 0x0;
    
    	for (i = 0; i < priv->num_tx_queues; i++)
    		spin_unlock(&priv->tx_queue[i]->txlock);
    }
    
    
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

    static void free_tx_pointers(struct gfar_private *priv)
    {
    	int i = 0;
    
    	for (i = 0; i < priv->num_tx_queues; i++)
    		kfree(priv->tx_queue[i]);
    }
    
    static void free_rx_pointers(struct gfar_private *priv)
    {
    	int i = 0;
    
    	for (i = 0; i < priv->num_rx_queues; i++)
    		kfree(priv->rx_queue[i]);
    }
    
    
    static void unmap_group_regs(struct gfar_private *priv)
    {
    	int i = 0;
    
    	for (i = 0; i < MAXGROUPS; i++)
    		if (priv->gfargrp[i].regs)
    			iounmap(priv->gfargrp[i].regs);
    }
    
    static void disable_napi(struct gfar_private *priv)
    {
    	int i = 0;
    
    	for (i = 0; i < priv->num_grps; i++)
    		napi_disable(&priv->gfargrp[i].napi);
    }
    
    static void enable_napi(struct gfar_private *priv)
    {
    	int i = 0;
    
    	for (i = 0; i < priv->num_grps; i++)
    		napi_enable(&priv->gfargrp[i].napi);
    }
    
    static int gfar_parse_group(struct device_node *np,
    		struct gfar_private *priv, const char *model)
    {
    	u32 *queue_mask;
    	u64 addr, size;
    
    	addr = of_translate_address(np,
    			of_get_address(np, 0, &size, NULL));
    	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
    
    	if (!priv->gfargrp[priv->num_grps].regs)
    		return -ENOMEM;
    
    	priv->gfargrp[priv->num_grps].interruptTransmit =
    			irq_of_parse_and_map(np, 0);
    
    	/* If we aren't the FEC we have multiple interrupts */
    	if (model && strcasecmp(model, "FEC")) {
    		priv->gfargrp[priv->num_grps].interruptReceive =
    			irq_of_parse_and_map(np, 1);
    		priv->gfargrp[priv->num_grps].interruptError =
    			irq_of_parse_and_map(np,2);
    		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
    			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
    			priv->gfargrp[priv->num_grps].interruptError < 0) {
    			return -EINVAL;
    		}
    	}
    
    	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
    	priv->gfargrp[priv->num_grps].priv = priv;
    	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
    	if(priv->mode == MQ_MG_MODE) {
    		queue_mask = (u32 *)of_get_property(np,
    					"fsl,rx-bit-map", NULL);
    		priv->gfargrp[priv->num_grps].rx_bit_map =
    			queue_mask ?  *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
    		queue_mask = (u32 *)of_get_property(np,
    					"fsl,tx-bit-map", NULL);
    		priv->gfargrp[priv->num_grps].tx_bit_map =
    			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
    	} else {
    		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
    		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
    	}
    	priv->num_grps++;
    
    	return 0;
    }
    
    
    static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
    
    {
    	const char *model;
    	const char *ctype;
    	const void *mac_addr;
    
    	int err = 0, i;
    	struct net_device *dev = NULL;
    	struct gfar_private *priv = NULL;
    	struct device_node *np = ofdev->node;
    
    	struct device_node *child = NULL;
    
    	const u32 *stash;
    	const u32 *stash_len;
    	const u32 *stash_idx;
    
    	unsigned int num_tx_qs, num_rx_qs;
    	u32 *tx_queues, *rx_queues;
    
    
    	if (!np || !of_device_is_available(np))
    		return -ENODEV;
    
    
    	/* parse the num of tx and rx queues */
    	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
    	num_tx_qs = tx_queues ? *tx_queues : 1;
    
    	if (num_tx_qs > MAX_TX_QS) {
    		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
    				num_tx_qs, MAX_TX_QS);
    		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
    		return -EINVAL;
    	}
    
    	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
    	num_rx_qs = rx_queues ? *rx_queues : 1;
    
    	if (num_rx_qs > MAX_RX_QS) {
    		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
    				num_tx_qs, MAX_TX_QS);
    		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
    		return -EINVAL;
    	}
    
    	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
    	dev = *pdev;
    	if (NULL == dev)
    		return -ENOMEM;
    
    	priv = netdev_priv(dev);
    	priv->node = ofdev->node;
    	priv->ndev = dev;
    
    	dev->num_tx_queues = num_tx_qs;
    	dev->real_num_tx_queues = num_tx_qs;
    	priv->num_tx_queues = num_tx_qs;
    	priv->num_rx_queues = num_rx_qs;
    
    	priv->num_grps = 0x0;
    
    
    	model = of_get_property(np, "model", NULL);
    
    
    	for (i = 0; i < MAXGROUPS; i++)
    		priv->gfargrp[i].regs = NULL;
    
    	/* Parse and initialize group specific information */
    	if (of_device_is_compatible(np, "fsl,etsec2")) {
    		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if(err)
			goto err_grp_init;
	}
    
    	for (i = 0; i < priv->num_tx_queues; i++)
    	       priv->tx_queue[i] = NULL;
    	for (i = 0; i < priv->num_rx_queues; i++)
    		priv->rx_queue[i] = NULL;
    
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
    		if (!priv->tx_queue[i]) {
    			err = -ENOMEM;
    			goto tx_alloc_failed;
    		}
    		priv->tx_queue[i]->tx_skbuff = NULL;
    		priv->tx_queue[i]->qindex = i;
    		priv->tx_queue[i]->dev = dev;
    		spin_lock_init(&(priv->tx_queue[i]->txlock));
    	}
    
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
					sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
    		if (!priv->rx_queue[i]) {
    			err = -ENOMEM;
    			goto rx_alloc_failed;
    		}
    		priv->rx_queue[i]->rx_skbuff = NULL;
    		priv->rx_queue[i]->qindex = i;
    		priv->rx_queue[i]->dev = dev;
    		spin_lock_init(&(priv->rx_queue[i]->rxlock));
    	}
    
    
    
	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}
    
    	stash_len = of_get_property(np, "rx-stash-len", NULL);
    
    	if (stash_len)
    		priv->rx_stash_size = *stash_len;
    
    	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
    
    	if (stash_idx)
    		priv->rx_stash_index = *stash_idx;
    
    	if (stash_len || stash_idx)
    		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
    
    
    	mac_addr = of_get_mac_address(np);
    	if (mac_addr)
    		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
    
    	if (model && !strcasecmp(model, "TSEC"))
    		priv->device_flags =
    			FSL_GIANFAR_DEV_HAS_GIGABIT |
    			FSL_GIANFAR_DEV_HAS_COALESCE |
    			FSL_GIANFAR_DEV_HAS_RMON |
    			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
    	if (model && !strcasecmp(model, "eTSEC"))
    		priv->device_flags =
    			FSL_GIANFAR_DEV_HAS_GIGABIT |
    			FSL_GIANFAR_DEV_HAS_COALESCE |
    			FSL_GIANFAR_DEV_HAS_RMON |
    			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
    
    			FSL_GIANFAR_DEV_HAS_CSUM |
    			FSL_GIANFAR_DEV_HAS_VLAN |
    			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
    			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
    
    	ctype = of_get_property(np, "phy-connection-type", NULL);
    
    	/* We only care about rgmii-id.  The rest are autodetected */
    	if (ctype && !strcmp(ctype, "rgmii-id"))
    		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
    	else
    		priv->interface = PHY_INTERFACE_MODE_MII;
    
    	if (of_get_property(np, "fsl,magic-packet", NULL))
    		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
    
    
    	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
    
    
    	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
    
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

    /* Ioctl MII Interface */
    static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
    {
    	struct gfar_private *priv = netdev_priv(dev);
    
    	if (!netif_running(dev))
    		return -EINVAL;
    
    	if (!priv->phydev)
    		return -ENODEV;
    
    	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
    }
    
    
    static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
    {
    	unsigned int new_bit_map = 0x0;
    	int mask = 0x1 << (max_qs - 1), i;
    	for (i = 0; i < max_qs; i++) {
    		if (bit_map & mask)
    			new_bit_map = new_bit_map + (1 << i);
    		mask = mask >> 0x1;
    	}
    	return new_bit_map;
    }
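
/*
 * Worked example (added for illustration): with max_qs = 4,
 * reverse_bitmap(0x8, 4) yields 0x1 and reverse_bitmap(0xC, 4) yields 0x3.
 * The hardware's queue bit maps put queue 0 at the MSB, while the software
 * maps put queue 0 at the LSB, so the map must be mirrored before use.
 */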
    
    static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
    	u32 rqfpr = FPR_FILER_MASK;
    	u32 rqfcr = 0x0;
    
    	rqfar--;
    	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
    	ftp_rqfpr[rqfar] = rqfpr;
    	ftp_rqfcr[rqfar] = rqfcr;
    	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
    
    	rqfar--;
    	rqfcr = RQFCR_CMP_NOMATCH;
    	ftp_rqfpr[rqfar] = rqfpr;
    	ftp_rqfcr[rqfar] = rqfcr;
    	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
    
    	rqfar--;
    	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
    	rqfpr = class;
    	ftp_rqfcr[rqfar] = rqfcr;
    	ftp_rqfpr[rqfar] = rqfpr;
    	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
    
    	rqfar--;
    	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
    	rqfpr = class;
    	ftp_rqfcr[rqfar] = rqfcr;
    	ftp_rqfpr[rqfar] = rqfpr;
    	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
    
    	return rqfar;
    }
    
    static void gfar_init_filer_table(struct gfar_private *priv)
    {
    	int i = 0x0;
    	u32 rqfar = MAX_FILER_IDX;
    	u32 rqfcr = 0x0;
    	u32 rqfpr = FPR_FILER_MASK;
    
    	/* Default rule */
    	rqfcr = RQFCR_CMP_MATCH;
    	ftp_rqfcr[rqfar] = rqfcr;
    	ftp_rqfpr[rqfar] = rqfpr;
    	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
    
    	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
    	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
    	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
    	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
    	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
    	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
    
	/* cur_filer_idx indicates the first non-masked rule */
    	priv->cur_filer_idx = rqfar;
    
    	/* Rest are masked rules */
    	rqfcr = RQFCR_CMP_NOMATCH;
    	for (i = 0; i < rqfar; i++) {
    		ftp_rqfcr[i] = rqfcr;
    		ftp_rqfpr[i] = rqfpr;
    		gfar_write_filer(priv, i, rqfcr, rqfpr);
    	}
    }
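
/*
 * Resulting layout (a summary added for clarity, derived from the code
 * above): entry MAX_FILER_IDX holds the default match-all rule, the
 * entries just below it hold the per-class cluster rules written by
 * cluster_entry_per_class(), and entries [0, cur_filer_idx) are masked
 * no-match placeholders; priv->cur_filer_idx points at the lowest
 * non-masked rule.
 */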
    
    
    /* Set up the ethernet device structure, private data,
     * and anything else we need before we start */
    
    static int gfar_probe(struct of_device *ofdev,
    		const struct of_device_id *match)
    
    {
    	u32 tempval;
    	struct net_device *dev = NULL;
    	struct gfar_private *priv = NULL;
    
    	struct gfar __iomem *regs = NULL;
    
    	int err = 0, i, grp_idx = 0;
    
    	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
    
    	u32 isrg = 0;
    
    	u32 __iomem *baddr;
    
    
    
    	err = gfar_of_init(ofdev, &dev);
    
    
    
    	if (err)
    		return err;
    
    
    	priv = netdev_priv(dev);
    
    	priv->ndev = dev;
    	priv->ofdev = ofdev;
    
    	priv->node = ofdev->node;
    
    	SET_NETDEV_DEV(dev, &ofdev->dev);
    
    
    
    	spin_lock_init(&priv->bflock);
    
    	INIT_WORK(&priv->reset_task, gfar_reset_task);
    
    
    
    	dev_set_drvdata(&ofdev->dev, priv);
    
    	regs = priv->gfargrp[0].regs;
    
    
	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);
    
    	/* Reset MAC layer */
    
    	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
    
    
    
    	/* We need to delay at least 3 TX clocks */
    	udelay(2);
    
    
    	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
    
    	gfar_write(&regs->maccfg1, tempval);
    
    
    	/* Initialize MACCFG2. */
    
    	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
    
    
    	/* Initialize ECNTRL */
    
    	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
    
    
    	/* Set the dev->base_addr to the gfar reg region */
    
    	dev->base_addr = (unsigned long) regs;
    
    
    
    	SET_NETDEV_DEV(dev, &ofdev->dev);
    
    
    	/* Fill in the dev structure */
    	dev->watchdog_timeo = TX_TIMEOUT;
    	dev->mtu = 1500;
    
    	dev->netdev_ops = &gfar_netdev_ops;
    
    	dev->ethtool_ops = &gfar_ethtool_ops;
    
    
	/* Register NAPI for each group */
    
    	for (i = 0; i < priv->num_grps; i++)
    		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
    
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;
    
    	priv->vlgrp = NULL;
    
    
    
    	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
    
    		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
    
    
    	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
    
    		priv->extended_hash = 1;
    		priv->hash_width = 9;
    
    
    		priv->hash_regs[0] = &regs->igaddr0;
    		priv->hash_regs[1] = &regs->igaddr1;
    		priv->hash_regs[2] = &regs->igaddr2;
    		priv->hash_regs[3] = &regs->igaddr3;
    		priv->hash_regs[4] = &regs->igaddr4;
    		priv->hash_regs[5] = &regs->igaddr5;
    		priv->hash_regs[6] = &regs->igaddr6;
    		priv->hash_regs[7] = &regs->igaddr7;
    		priv->hash_regs[8] = &regs->gaddr0;
    		priv->hash_regs[9] = &regs->gaddr1;
    		priv->hash_regs[10] = &regs->gaddr2;
    		priv->hash_regs[11] = &regs->gaddr3;
    		priv->hash_regs[12] = &regs->gaddr4;
    		priv->hash_regs[13] = &regs->gaddr5;
    		priv->hash_regs[14] = &regs->gaddr6;
    		priv->hash_regs[15] = &regs->gaddr7;
    
    
    	} else {
    		priv->extended_hash = 0;
    		priv->hash_width = 8;
    
    
    		priv->hash_regs[0] = &regs->gaddr0;
    		priv->hash_regs[1] = &regs->gaddr1;
    		priv->hash_regs[2] = &regs->gaddr2;
    		priv->hash_regs[3] = &regs->gaddr3;
    		priv->hash_regs[4] = &regs->gaddr4;
    		priv->hash_regs[5] = &regs->gaddr5;
    		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
    
    		priv->padding = DEFAULT_PADDING;
    	else
    		priv->padding = 0;
    
    	if (dev->features & NETIF_F_IP_CSUM)
    		dev->hard_header_len += GMAC_FCB_LEN;
    
    
    
    	/* Program the isrg regs only if number of grps > 1 */
    	if (priv->num_grps > 1) {
    		baddr = &regs->isrg0;
    		for (i = 0; i < priv->num_grps; i++) {
    			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
    			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
    			gfar_write(baddr, isrg);
    			baddr++;
    			isrg = 0x0;
    		}
    	}
    
    
    	/* Need to reverse the bit maps as  bit_map's MSB is q0