/*
     * Driver for Atmel AT32 and AT91 SPI Controllers
     *
     * Copyright (C) 2006 Atmel Corporation
     *
     * This program is free software; you can redistribute it and/or modify
     * it under the terms of the GNU General Public License version 2 as
     * published by the Free Software Foundation.
     */
    
    #include <linux/kernel.h>
    #include <linux/clk.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/delay.h>
    #include <linux/dma-mapping.h>
    
    #include <linux/dmaengine.h>
    
    #include <linux/err.h>
    #include <linux/interrupt.h>
    #include <linux/spi/spi.h>
    
    #include <linux/platform_data/dma-atmel.h>
    
    #include <linux/of.h>
    
    #include <linux/io.h>
    #include <linux/gpio.h>
    
    #include <linux/pinctrl/consumer.h>
    
    /* SPI register offsets */
    #define SPI_CR					0x0000
    #define SPI_MR					0x0004
    #define SPI_RDR					0x0008
    #define SPI_TDR					0x000c
    #define SPI_SR					0x0010
    #define SPI_IER					0x0014
    #define SPI_IDR					0x0018
    #define SPI_IMR					0x001c
    #define SPI_CSR0				0x0030
    #define SPI_CSR1				0x0034
    #define SPI_CSR2				0x0038
    #define SPI_CSR3				0x003c
    
    #define SPI_RPR					0x0100
    #define SPI_RCR					0x0104
    #define SPI_TPR					0x0108
    #define SPI_TCR					0x010c
    #define SPI_RNPR				0x0110
    #define SPI_RNCR				0x0114
    #define SPI_TNPR				0x0118
    #define SPI_TNCR				0x011c
    #define SPI_PTCR				0x0120
    #define SPI_PTSR				0x0124
    
    /* Bitfields in CR */
    #define SPI_SPIEN_OFFSET			0
    #define SPI_SPIEN_SIZE				1
    #define SPI_SPIDIS_OFFSET			1
    #define SPI_SPIDIS_SIZE				1
    #define SPI_SWRST_OFFSET			7
    #define SPI_SWRST_SIZE				1
    #define SPI_LASTXFER_OFFSET			24
    #define SPI_LASTXFER_SIZE			1
    
    /* Bitfields in MR */
    #define SPI_MSTR_OFFSET				0
    #define SPI_MSTR_SIZE				1
    #define SPI_PS_OFFSET				1
    #define SPI_PS_SIZE				1
    #define SPI_PCSDEC_OFFSET			2
    #define SPI_PCSDEC_SIZE				1
    #define SPI_FDIV_OFFSET				3
    #define SPI_FDIV_SIZE				1
    #define SPI_MODFDIS_OFFSET			4
    #define SPI_MODFDIS_SIZE			1
    
    #define SPI_WDRBT_OFFSET			5
    #define SPI_WDRBT_SIZE				1
    
    #define SPI_LLB_OFFSET				7
    #define SPI_LLB_SIZE				1
    #define SPI_PCS_OFFSET				16
    #define SPI_PCS_SIZE				4
    #define SPI_DLYBCS_OFFSET			24
    #define SPI_DLYBCS_SIZE				8
    
    /* Bitfields in RDR */
    #define SPI_RD_OFFSET				0
    #define SPI_RD_SIZE				16
    
    /* Bitfields in TDR */
    #define SPI_TD_OFFSET				0
    #define SPI_TD_SIZE				16
    
    /* Bitfields in SR */
    #define SPI_RDRF_OFFSET				0
    #define SPI_RDRF_SIZE				1
    #define SPI_TDRE_OFFSET				1
    #define SPI_TDRE_SIZE				1
    #define SPI_MODF_OFFSET				2
    #define SPI_MODF_SIZE				1
    #define SPI_OVRES_OFFSET			3
    #define SPI_OVRES_SIZE				1
    #define SPI_ENDRX_OFFSET			4
    #define SPI_ENDRX_SIZE				1
    #define SPI_ENDTX_OFFSET			5
    #define SPI_ENDTX_SIZE				1
    #define SPI_RXBUFF_OFFSET			6
    #define SPI_RXBUFF_SIZE				1
    #define SPI_TXBUFE_OFFSET			7
    #define SPI_TXBUFE_SIZE				1
    #define SPI_NSSR_OFFSET				8
    #define SPI_NSSR_SIZE				1
    #define SPI_TXEMPTY_OFFSET			9
    #define SPI_TXEMPTY_SIZE			1
    #define SPI_SPIENS_OFFSET			16
    #define SPI_SPIENS_SIZE				1
    
    /* Bitfields in CSR0 */
    #define SPI_CPOL_OFFSET				0
    #define SPI_CPOL_SIZE				1
    #define SPI_NCPHA_OFFSET			1
    #define SPI_NCPHA_SIZE				1
    #define SPI_CSAAT_OFFSET			3
    #define SPI_CSAAT_SIZE				1
    #define SPI_BITS_OFFSET				4
    #define SPI_BITS_SIZE				4
    #define SPI_SCBR_OFFSET				8
    #define SPI_SCBR_SIZE				8
    #define SPI_DLYBS_OFFSET			16
    #define SPI_DLYBS_SIZE				8
    #define SPI_DLYBCT_OFFSET			24
    #define SPI_DLYBCT_SIZE				8
    
    /* Bitfields in RCR */
    #define SPI_RXCTR_OFFSET			0
    #define SPI_RXCTR_SIZE				16
    
    /* Bitfields in TCR */
    #define SPI_TXCTR_OFFSET			0
    #define SPI_TXCTR_SIZE				16
    
    /* Bitfields in RNCR */
    #define SPI_RXNCR_OFFSET			0
    #define SPI_RXNCR_SIZE				16
    
    /* Bitfields in TNCR */
    #define SPI_TXNCR_OFFSET			0
    #define SPI_TXNCR_SIZE				16
    
    /* Bitfields in PTCR */
    #define SPI_RXTEN_OFFSET			0
    #define SPI_RXTEN_SIZE				1
    #define SPI_RXTDIS_OFFSET			1
    #define SPI_RXTDIS_SIZE				1
    #define SPI_TXTEN_OFFSET			8
    #define SPI_TXTEN_SIZE				1
    #define SPI_TXTDIS_OFFSET			9
    #define SPI_TXTDIS_SIZE				1
    
    /* Constants for BITS */
    #define SPI_BITS_8_BPT				0
    #define SPI_BITS_9_BPT				1
    #define SPI_BITS_10_BPT				2
    #define SPI_BITS_11_BPT				3
    #define SPI_BITS_12_BPT				4
    #define SPI_BITS_13_BPT				5
    #define SPI_BITS_14_BPT				6
    #define SPI_BITS_15_BPT				7
    #define SPI_BITS_16_BPT				8
    
    /* Bit manipulation macros */
    #define SPI_BIT(name) \
    	(1 << SPI_##name##_OFFSET)
    
    #define SPI_BF(name, value) \
    
    Grant Likely's avatar
    Grant Likely committed
    	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
    
    #define SPI_BFEXT(name, value) \
    
    Grant Likely's avatar
    Grant Likely committed
    	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
    
    #define SPI_BFINS(name, value, old) \
    	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
    	  | SPI_BF(name, value))
    
    Grant Likely's avatar
    Grant Likely committed
    
    /* Register access macros */
    
    #define spi_readl(port, reg) \
    
    Grant Likely's avatar
    Grant Likely committed
    	__raw_readl((port)->regs + SPI_##reg)
    
    #define spi_writel(port, reg, value) \
    
    Grant Likely's avatar
    Grant Likely committed
    	__raw_writel((value), (port)->regs + SPI_##reg)
    
    
    /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
     * cache operations; better heuristics consider wordsize and bitrate.
     */
    #define DMA_MIN_BYTES	16
    
    
    #define SPI_DMA_TIMEOUT		(msecs_to_jiffies(1000))
    
    
/* dmaengine state: one RX and one TX slave channel, single-entry
 * scatterlists reused for each submitted chunk, and the in-flight
 * descriptors. */
struct atmel_spi_dma {
	struct dma_chan			*chan_rx;
	struct dma_chan			*chan_tx;
	struct scatterlist		sgrx;
	struct scatterlist		sgtx;
	struct dma_async_tx_descriptor	*data_desc_rx;
	struct dma_async_tx_descriptor	*data_desc_tx;

	/* platform data matched by filter() against a channel's dma_dev */
	struct at_dma_slave	dma_slave;
};
    
    
/* Feature flags describing the controller revision (see the "Version 2"
 * comment below for what is_spi2 implies). */
struct atmel_spi_caps {
	bool	is_spi2;		/* v2 IP: LASTXFER, TXEMPTY/NSSR, CSAAT */
	bool	has_wdrbt;		/* MR has the WDRBT bit */
	bool	has_dma_support;	/* dmaengine channels usable */
};
    
    
    /*
     * The core SPI transfer engine just talks to a register bank to set up
     * DMA transfers; transfer queue progress is driven by IRQs.  The clock
     * framework provides the base clock, subdivided for each spi_device.
     */
    struct atmel_spi {
    	spinlock_t		lock;
    
    	phys_addr_t		phybase;
    
    	void __iomem		*regs;
    	int			irq;
    	struct clk		*clk;
    	struct platform_device	*pdev;
    
    	struct spi_transfer	*current_transfer;
    
    	unsigned long		current_remaining_bytes;
    
    	struct completion	xfer_completion;
    
    
    	/* scratch buffer */
    
    	void			*buffer;
    	dma_addr_t		buffer_dma;
    
    
    	bool			use_dma;
    	bool			use_pdc;
    	/* dmaengine data */
    	struct atmel_spi_dma	dma;
    
/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;	/* GPIO driving this slave's nCS */
	u32			csr;		/* cached SPI_CSRx value for this slave */
};
    
    
    #define BUFFER_SIZE		PAGE_SIZE
    #define INVALID_DMA_ADDRESS	0xffffffff
    
    
    /*
     * Version 2 of the SPI controller has
     *  - CR.LASTXFER
     *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
     *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
     *  - SPI_CSRx.CSAAT
     *  - SPI_CSRx.SBCR allows faster clocking
     */
    
    static bool atmel_spi_is_v2(struct atmel_spi *as)
    
/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exists.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */
    static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
    
    	struct atmel_spi_device *asd = spi->controller_state;
    
    	unsigned active = spi->mode & SPI_CS_HIGH;
    
    		spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
    		/* For the low SPI version, there is a issue that PDC transfer
    		 * on CS1,2,3 needs SPI_CSR0.BITS config as SPI_CSR1,2,3.BITS
    
    		 */
    		spi_writel(as, CSR0, asd->csr);
    
    			spi_writel(as, MR,
    					SPI_BF(PCS, ~(0x01 << spi->chip_select))
    					| SPI_BIT(WDRBT)
    					| SPI_BIT(MODFDIS)
    					| SPI_BIT(MSTR));
    
    			spi_writel(as, MR,
    					SPI_BF(PCS, ~(0x01 << spi->chip_select))
    					| SPI_BIT(MODFDIS)
    					| SPI_BIT(MSTR));
    
    		mr = spi_readl(as, MR);
    		gpio_set_value(asd->npcs_pin, active);
    	} else {
    		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
    		int i;
    		u32 csr;
    
    		/* Make sure clock polarity is correct */
    		for (i = 0; i < spi->master->num_chipselect; i++) {
    			csr = spi_readl(as, CSR0 + 4 * i);
    			if ((csr ^ cpol) & SPI_BIT(CPOL))
    				spi_writel(as, CSR0 + 4 * i,
    						csr ^ SPI_BIT(CPOL));
    		}
    
    		mr = spi_readl(as, MR);
    		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
    		if (spi->chip_select != 0)
    			gpio_set_value(asd->npcs_pin, active);
    		spi_writel(as, MR, mr);
    	}
    
    
    	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
    
    			asd->npcs_pin, active ? " (high)" : "",
    
    static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
    
    	struct atmel_spi_device *asd = spi->controller_state;
    
    	unsigned active = spi->mode & SPI_CS_HIGH;
    
    	u32 mr;
    
    	/* only deactivate *this* device; sometimes transfers to
    	 * another device may be active when this routine is called.
    	 */
    	mr = spi_readl(as, MR);
    	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
    		mr = SPI_BFINS(PCS, 0xf, mr);
    		spi_writel(as, MR, mr);
    	}
    
    	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
    
    			asd->npcs_pin, active ? " (low)" : "",
    
    	if (atmel_spi_is_v2(as) || spi->chip_select != 0)
    
    		gpio_set_value(asd->npcs_pin, !active);
    
/* Take the driver lock with local irqs disabled; the saved irq flags
 * live in as->flags, so lock/unlock must be strictly paired. */
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
	spin_lock_irqsave(&as->lock, as->flags);
}
    
    
/* Release the driver lock and restore the irq flags saved by
 * atmel_spi_lock(). */
static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}
    
    
    static inline bool atmel_spi_use_dma(struct atmel_spi *as,
    				struct spi_transfer *xfer)
    {
    	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
    }
    
    static int atmel_spi_dma_slave_config(struct atmel_spi *as,
    				struct dma_slave_config *slave_config,
    				u8 bits_per_word)
    {
    	int err = 0;
    
    	if (bits_per_word > 8) {
    		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
    		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
    	} else {
    		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
    		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
    	}
    
    	slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
    	slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
    	slave_config->src_maxburst = 1;
    	slave_config->dst_maxburst = 1;
    	slave_config->device_fc = false;
    
    	slave_config->direction = DMA_MEM_TO_DEV;
    	if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
    		dev_err(&as->pdev->dev,
    			"failed to configure tx dma channel\n");
    		err = -EINVAL;
    	}
    
    	slave_config->direction = DMA_DEV_TO_MEM;
    	if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
    		dev_err(&as->pdev->dev,
    			"failed to configure rx dma channel\n");
    		err = -EINVAL;
    	}
    
    	return err;
    }
    
    
    static bool filter(struct dma_chan *chan, void *pdata)
    
    	struct atmel_spi_dma *sl_pdata = pdata;
    	struct at_dma_slave *sl;
    
    	if (!sl_pdata)
    		return false;
    
    	sl = &sl_pdata->dma_slave;
    
    	if (sl->dma_dev == chan->device->dev) {
    		chan->private = sl;
    		return true;
    	} else {
    		return false;
    	}
    }
    
    static int atmel_spi_configure_dma(struct atmel_spi *as)
    {
    	struct dma_slave_config	slave_config;
    
    	struct device *dev = &as->pdev->dev;
    
    	dma_cap_mask_t mask;
    	dma_cap_zero(mask);
    	dma_cap_set(DMA_SLAVE, mask);
    
    	as->dma.chan_tx = dma_request_slave_channel_compat(mask, filter,
    							   &as->dma,
    							   dev, "tx");
    	if (!as->dma.chan_tx) {
    		dev_err(dev,
    			"DMA TX channel not available, SPI unable to use DMA\n");
    		err = -EBUSY;
    		goto error;
    
    
    	as->dma.chan_rx = dma_request_slave_channel_compat(mask, filter,
    							   &as->dma,
    							   dev, "rx");
    
    	if (!as->dma.chan_rx) {
    		dev_err(dev,
    			"DMA RX channel not available, SPI unable to use DMA\n");
    
    		err = -EBUSY;
    		goto error;
    	}
    
    	err = atmel_spi_dma_slave_config(as, &slave_config, 8);
    	if (err)
    		goto error;
    
    	dev_info(&as->pdev->dev,
    			"Using %s (tx) and %s (rx) for DMA transfers\n",
    			dma_chan_name(as->dma.chan_tx),
    			dma_chan_name(as->dma.chan_rx));
    	return 0;
    error:
    	if (as->dma.chan_rx)
    		dma_release_channel(as->dma.chan_rx);
    	if (as->dma.chan_tx)
    		dma_release_channel(as->dma.chan_tx);
    	return err;
    }
    
    static void atmel_spi_stop_dma(struct atmel_spi *as)
    {
    	if (as->dma.chan_rx)
    		as->dma.chan_rx->device->device_control(as->dma.chan_rx,
    							DMA_TERMINATE_ALL, 0);
    	if (as->dma.chan_tx)
    		as->dma.chan_tx->device->device_control(as->dma.chan_tx,
    							DMA_TERMINATE_ALL, 0);
    }
    
    static void atmel_spi_release_dma(struct atmel_spi *as)
    {
    	if (as->dma.chan_rx)
    		dma_release_channel(as->dma.chan_rx);
    	if (as->dma.chan_tx)
    		dma_release_channel(as->dma.chan_tx);
    }
    
/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
	struct spi_master	*master = data;
	struct atmel_spi	*as = spi_master_get_devdata(master);

	/* wake whoever is waiting on the current chunk */
	complete(&as->xfer_completion);
}
    
    /*
     * Next transfer using PIO.
     */
    static void atmel_spi_next_xfer_pio(struct spi_master *master,
    				struct spi_transfer *xfer)
    {
    	struct atmel_spi	*as = spi_master_get_devdata(master);
    
    	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
    
    
    	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
    
    	/* Make sure data is not remaining in RDR */
    	spi_readl(as, RDR);
    	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
    		spi_readl(as, RDR);
    		cpu_relax();
    	}
    
    
    			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
    
    			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
    	} else {
    
    		spi_writel(as, TDR, 0);
    
    
    	dev_dbg(master->dev.parent,
    
    		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
    		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
    		xfer->bits_per_word);
    
    
    	/* Enable relevant interrupts */
    	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
    }
    
    /*
     * Submit next transfer for DMA.
     */
/*
 * Submit the next chunk of the current transfer through dmaengine.
 * *plen is in/out: on entry the bytes remaining, on exit the bytes
 * actually queued (capped at BUFFER_SIZE when the scratch buffer
 * substitutes for a missing rx/tx buffer).
 *
 * Called with the driver lock held; the lock is dropped around the
 * dmaengine calls and re-taken before returning.
 *
 * Returns 0 on success, -ENODEV if a channel is missing, -ENOMEM if
 * descriptor preparation or submission fails.
 */
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
				struct spi_transfer *xfer,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct dma_chan		*rxchan = as->dma.chan_rx;
	struct dma_chan		*txchan = as->dma.chan_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	struct dma_slave_config	slave_config;
	dma_cookie_t		cookie;
	u32	len = *plen;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/* release lock for DMA operations */
	atmel_spi_unlock(as);

	/* prepare the RX dma transfer */
	sg_init_table(&as->dma.sgrx, 1);
	if (xfer->rx_buf) {
		as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
	} else {
		/* no rx buffer: receive into the scratch buffer */
		as->dma.sgrx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	/* prepare the TX dma transfer */
	sg_init_table(&as->dma.sgtx, 1);
	if (xfer->tx_buf) {
		as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
	} else {
		/* no tx buffer: send zeroes from the scratch buffer */
		as->dma.sgtx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
	}

	sg_dma_len(&as->dma.sgtx) = len;
	sg_dma_len(&as->dma.sgrx) = len;

	*plen = len;

	/* NOTE(review): always configured for 8-bit words here — confirm
	 * wider bits_per_word is handled elsewhere for the DMA path. */
	if (atmel_spi_dma_slave_config(as, &slave_config, 8))
		goto err_exit;

	/* Send both scatterlists */
	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
					&as->dma.sgrx,
					1,
					DMA_FROM_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
					NULL);
	if (!rxdesc)
		goto err_dma;

	txdesc = txchan->device->device_prep_slave_sg(txchan,
					&as->dma.sgtx,
					1,
					DMA_TO_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
					NULL);
	if (!txdesc)
		goto err_dma;

	dev_dbg(master->dev.parent,
		"  start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
		xfer->rx_buf, (unsigned long long)xfer->rx_dma);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(OVRES));

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = master;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	/* take back lock */
	atmel_spi_lock(as);
	return 0;

err_dma:
	spi_writel(as, IDR, SPI_BIT(OVRES));
	atmel_spi_stop_dma(as);
err_exit:
	atmel_spi_lock(as);
	return -ENOMEM;
}
    
    
    static void atmel_spi_next_xfer_data(struct spi_master *master,
    				struct spi_transfer *xfer,
    				dma_addr_t *tx_dma,
    				dma_addr_t *rx_dma,
    				u32 *plen)
    {
    	struct atmel_spi	*as = spi_master_get_devdata(master);
    	u32			len = *plen;
    
    	/* use scratch buffer only when rx or tx data is unspecified */
    	if (xfer->rx_buf)
    
    		*rx_dma = xfer->rx_dma + xfer->len - *plen;
    
    	else {
    		*rx_dma = as->buffer_dma;
    		if (len > BUFFER_SIZE)
    			len = BUFFER_SIZE;
    	}
    
    	if (xfer->tx_buf)
    
    		*tx_dma = xfer->tx_dma + xfer->len - *plen;
    
    	else {
    		*tx_dma = as->buffer_dma;
    		if (len > BUFFER_SIZE)
    			len = BUFFER_SIZE;
    		memset(as->buffer, 0, len);
    		dma_sync_single_for_device(&as->pdev->dev,
    				as->buffer_dma, len, DMA_TO_DEVICE);
    	}
    
    	*plen = len;
    }
    
    
    static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
    				    struct spi_device *spi,
    				    struct spi_transfer *xfer)
    {
    	u32			scbr, csr;
    	unsigned long		bus_hz;
    
    	/* v1 chips start out at half the peripheral bus speed. */
    	bus_hz = clk_get_rate(as->clk);
    	if (!atmel_spi_is_v2(as))
    		bus_hz /= 2;
    
    	/*
    	 * Calculate the lowest divider that satisfies the
    	 * constraint, assuming div32/fdiv/mbz == 0.
    	 */
    	if (xfer->speed_hz)
    		scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
    	else
    		/*
    		 * This can happend if max_speed is null.
    		 * In this case, we set the lowest possible speed
    		 */
    		scbr = 0xff;
    
    	/*
    	 * If the resulting divider doesn't fit into the
    	 * register bitfield, we can't satisfy the constraint.
    	 */
    	if (scbr >= (1 << SPI_SCBR_SIZE)) {
    		dev_err(&spi->dev,
    			"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
    			xfer->speed_hz, scbr, bus_hz/255);
    		return -EINVAL;
    	}
    	if (scbr == 0) {
    		dev_err(&spi->dev,
    			"setup: %d Hz too high, scbr %u; max %ld Hz\n",
    			xfer->speed_hz, scbr, bus_hz);
    		return -EINVAL;
    	}
    	csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
    	csr = SPI_BFINS(SCBR, scbr, csr);
    	spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
    
    	return 0;
    }
    
    
     * Submit next transfer for PDC.
    
     * lock is held, spi irq is blocked
     */
    
    static void atmel_spi_pdc_next_xfer(struct spi_master *master,
    
    					struct spi_message *msg,
    					struct spi_transfer *xfer)
    
    {
    	struct atmel_spi	*as = spi_master_get_devdata(master);
    
    	dma_addr_t		tx_dma, rx_dma;
    
    
    	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
    
    	len = as->current_remaining_bytes;
    	atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
    	as->current_remaining_bytes -= len;
    
    	spi_writel(as, RPR, rx_dma);
    	spi_writel(as, TPR, tx_dma);
    
    	if (msg->spi->bits_per_word > 8)
    		len >>= 1;
    	spi_writel(as, RCR, len);
    	spi_writel(as, TCR, len);
    
    	dev_dbg(&msg->spi->dev,
    		"  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
    		xfer, xfer->len, xfer->tx_buf,
    		(unsigned long long)xfer->tx_dma, xfer->rx_buf,
    		(unsigned long long)xfer->rx_dma);
    
    	if (as->current_remaining_bytes) {
    		len = as->current_remaining_bytes;
    
    		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
    
    		as->current_remaining_bytes -= len;
    
    		spi_writel(as, RNPR, rx_dma);
    		spi_writel(as, TNPR, tx_dma);
    
    		if (msg->spi->bits_per_word > 8)
    			len >>= 1;
    		spi_writel(as, RNCR, len);
    		spi_writel(as, TNCR, len);
    
    			"  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
    			xfer, xfer->len, xfer->tx_buf,
    			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
    			(unsigned long long)xfer->rx_dma);
    
    	}
    
    	/* REVISIT: We're waiting for ENDRX before we start the next
    
    	 * transfer because we need to handle some difficult timing
    	 * issues otherwise. If we wait for ENDTX in one transfer and
    	 * then starts waiting for ENDRX in the next, it's difficult
    	 * to tell the difference between the ENDRX interrupt we're
    	 * actually waiting for and the ENDRX interrupt of the
    	 * previous transfer.
    	 *
    	 * It should be doable, though. Just not now...
    	 */
    
    	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
    
    	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
    }
    
    
/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
    static int
    
    atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
    {
    
    David Brownell's avatar
    David Brownell committed
    	struct device	*dev = &as->pdev->dev;
    
    
    	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
    
    David Brownell's avatar
    David Brownell committed
    	if (xfer->tx_buf) {
    
    		/* tx_buf is a const void* where we need a void * for the dma
    		 * mapping */
    		void *nonconst_tx = (void *)xfer->tx_buf;
    
    
    David Brownell's avatar
    David Brownell committed
    		xfer->tx_dma = dma_map_single(dev,
    
    				DMA_TO_DEVICE);
    
    		if (dma_mapping_error(dev, xfer->tx_dma))
    
    David Brownell's avatar
    David Brownell committed
    			return -ENOMEM;
    	}
    	if (xfer->rx_buf) {
    		xfer->rx_dma = dma_map_single(dev,
    
    				xfer->rx_buf, xfer->len,
    				DMA_FROM_DEVICE);
    
    		if (dma_mapping_error(dev, xfer->rx_dma)) {
    
    David Brownell's avatar
    David Brownell committed
    			if (xfer->tx_buf)
    				dma_unmap_single(dev,
    						xfer->tx_dma, xfer->len,
    						DMA_TO_DEVICE);
    			return -ENOMEM;
    		}
    	}
    	return 0;
    
    }
    
    static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
    				     struct spi_transfer *xfer)
    {
    	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
    
    		dma_unmap_single(master->dev.parent, xfer->tx_dma,
    
    				 xfer->len, DMA_TO_DEVICE);
    	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
    
    		dma_unmap_single(master->dev.parent, xfer->rx_dma,
    
    				 xfer->len, DMA_FROM_DEVICE);
    }
    
    
/* Disable both PDC channels of this SPI block. */
static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}
    
    /* Called from IRQ
     *
     * Must update "current_remaining_bytes" to keep track of data
     * to transfer.
     */
    static void
    atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
    {
    	u8		*rxp;
    
    	unsigned long	xfer_pos = xfer->len - as->current_remaining_bytes;
    
    	if (xfer->rx_buf) {
    
    		if (xfer->bits_per_word > 8) {
    			rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
    			*rxp16 = spi_readl(as, RDR);
    		} else {
    			rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
    			*rxp = spi_readl(as, RDR);
    		}
    
    	} else {
    		spi_readl(as, RDR);
    	}
    
    	if (xfer->bits_per_word > 8) {
    		as->current_remaining_bytes -= 2;
    		if (as->current_remaining_bytes < 0)
    			as->current_remaining_bytes = 0;
    	} else {
    		as->current_remaining_bytes--;
    	}
    
    }
    
/* Interrupt
 *
 * No need for locking in this Interrupt handler: done_status is the
 * only information modified.
 */
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	struct spi_transfer	*xfer;
	int			ret = IRQ_NONE;

	/* handle only sources that are both raised and unmasked */
	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, SPI_BIT(OVRES));
		dev_warn(master->dev.parent, "overrun\n");

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_len will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaning transfers in
		 * the message.
		 */
		as->done_status = -EIO;
		smp_wmb();

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		complete(&as->xfer_completion);

	} else if (pending & SPI_BIT(RDRF)) {
		atmel_spi_lock(as);

		if (as->current_remaining_bytes) {
			ret = IRQ_HANDLED;
			xfer = as->current_transfer;
			atmel_spi_pump_pio_data(as, xfer);
			/* mask RDRF once the whole transfer is consumed */
			if (!as->current_remaining_bytes)
				spi_writel(as, IDR, pending);

			complete(&as->xfer_completion);
		}

		atmel_spi_unlock(as);
	} else {
		/* spurious/unexpected source: mask it so it can't storm */
		WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, pending);
	}

	return ret;
}
    
    static irqreturn_t
    
    atmel_spi_pdc_interrupt(int irq, void *dev_id)
    
    {
    	struct spi_master	*master = dev_id;
    	struct atmel_spi	*as = spi_master_get_devdata(master);
    	u32			status, pending, imr;
    	int			ret = IRQ_NONE;
    
    	imr = spi_readl(as, IMR);
    	status = spi_readl(as, SR);
    	pending = status & imr;
    
    	if (pending & SPI_BIT(OVRES)) {
    
    		ret = IRQ_HANDLED;
    
    
    		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
    
    				     | SPI_BIT(OVRES)));
    
    		/* Clear any overrun happening while cleaning up */
    		spi_readl(as, SR);
    
    
    	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
    
    		ret = IRQ_HANDLED;
    
    		spi_writel(as, IDR, pending);
    
    
    		complete(&as->xfer_completion);
    
    	}
    
    	return ret;
    }
    
    static int atmel_spi_setup(struct spi_device *spi)
    {
    	struct atmel_spi	*as;
    
    	struct atmel_spi_device	*asd;
    
    	unsigned int		bits = spi->bits_per_word;
    	unsigned int		npcs_pin;
    	int			ret;
    
    	as = spi_master_get_devdata(spi->master);
    
    
    	/* see notes above re chipselect */
    
    			&& spi->chip_select == 0
    			&& (spi->mode & SPI_CS_HIGH)) {
    		dev_dbg(&spi->dev, "setup: can't be active-high\n");