2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2007-07-11 18:04:50 +00:00
|
|
|
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
* Copyright (C) 2010 ST-Ericsson SA
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/moduleparam.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/device.h>
|
2014-03-17 12:56:32 +00:00
|
|
|
#include <linux/io.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/interrupt.h>
|
2011-01-30 21:06:53 +00:00
|
|
|
#include <linux/kernel.h>
|
2012-04-16 09:18:43 +00:00
|
|
|
#include <linux/slab.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/highmem.h>
|
2007-10-11 05:06:03 +00:00
|
|
|
#include <linux/log2.h>
|
2013-01-07 14:35:06 +00:00
|
|
|
#include <linux/mmc/pm.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/mmc/host.h>
|
2010-10-19 11:43:58 +00:00
|
|
|
#include <linux/mmc/card.h>
|
2014-03-17 12:56:19 +00:00
|
|
|
#include <linux/mmc/slot-gpio.h>
|
2006-01-07 13:52:45 +00:00
|
|
|
#include <linux/amba/bus.h>
|
2006-01-07 16:15:52 +00:00
|
|
|
#include <linux/clk.h>
|
2007-10-24 07:01:09 +00:00
|
|
|
#include <linux/scatterlist.h>
|
2009-07-09 14:16:07 +00:00
|
|
|
#include <linux/gpio.h>
|
2012-04-12 15:51:13 +00:00
|
|
|
#include <linux/of_gpio.h>
|
2009-09-22 13:41:40 +00:00
|
|
|
#include <linux/regulator/consumer.h>
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
#include <linux/dmaengine.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/amba/mmci.h>
|
2011-08-14 08:17:05 +00:00
|
|
|
#include <linux/pm_runtime.h>
|
2012-02-01 10:42:19 +00:00
|
|
|
#include <linux/types.h>
|
2012-10-29 13:39:30 +00:00
|
|
|
#include <linux/pinctrl/consumer.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-07-01 11:02:59 +00:00
|
|
|
#include <asm/div64.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/io.h>
|
2005-10-28 13:05:16 +00:00
|
|
|
#include <asm/sizes.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#include "mmci.h"
|
2014-07-29 02:50:30 +00:00
|
|
|
#include "mmci_qcom_dml.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#define DRIVER_NAME "mmci-pl18x"

/*
 * Upper limit for the card clock frequency in Hz.
 * NOTE(review): 515633 looks like the historical PL180 default; presumably
 * overridable as a module parameter elsewhere in this file -- confirm.
 */
static unsigned int fmax = 515633;
|
|
|
|
|
2010-07-21 11:54:40 +00:00
|
|
|
/**
|
|
|
|
* struct variant_data - MMCI variant-specific quirks
|
|
|
|
* @clkreg: default value for MCICLOCK register
|
2010-07-21 11:55:18 +00:00
|
|
|
* @clkreg_enable: enable value for MMCICLOCK register
|
2014-06-02 09:09:23 +00:00
|
|
|
* @clkreg_8bit_bus_enable: enable value for 8 bit bus
|
2014-06-02 09:09:30 +00:00
|
|
|
* @clkreg_neg_edge_enable: enable value for inverted data/cmd output
|
2010-07-21 11:55:59 +00:00
|
|
|
* @datalength_bits: number of bits in the MMCIDATALENGTH register
|
2010-08-09 11:57:30 +00:00
|
|
|
* @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
|
|
|
|
* is asserted (likewise for RX)
|
|
|
|
* @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
|
|
|
|
* is asserted (likewise for RX)
|
2014-06-02 09:09:39 +00:00
|
|
|
* @data_cmd_enable: enable value for data commands.
|
2014-08-22 04:55:16 +00:00
|
|
|
* @st_sdio: enable ST specific SDIO logic
|
2010-12-06 08:24:14 +00:00
|
|
|
* @st_clkdiv: true if using a ST-specific clock divider algorithm
|
2014-06-02 09:09:15 +00:00
|
|
|
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
|
2011-03-25 07:51:52 +00:00
|
|
|
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
|
2014-06-02 09:09:06 +00:00
|
|
|
* @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
|
|
|
|
* register
|
2014-08-22 04:54:55 +00:00
|
|
|
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
|
2011-12-13 15:54:55 +00:00
|
|
|
* @pwrreg_powerup: power up value for MMCIPOWER register
|
2014-06-02 09:09:47 +00:00
|
|
|
* @f_max: maximum clk frequency supported by the controller.
|
2011-12-13 15:57:07 +00:00
|
|
|
* @signal_direction: input/out direction of bus signals can be indicated
|
2013-01-09 16:19:54 +00:00
|
|
|
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
|
2013-05-15 19:53:22 +00:00
|
|
|
* @busy_detect: true if busy detection on dat0 is supported
|
2013-09-04 08:05:17 +00:00
|
|
|
* @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply
|
2014-06-02 09:09:55 +00:00
|
|
|
* @explicit_mclk_control: enable explicit mclk control in driver.
|
2014-06-02 09:10:04 +00:00
|
|
|
* @qcom_fifo: enables qcom specific fifo pio read logic.
|
2014-07-29 02:50:30 +00:00
|
|
|
* @qcom_dml: enables qcom specific dma glue for dma transfers.
|
2014-06-13 11:21:38 +00:00
|
|
|
* @reversed_irq_handling: handle data irq before cmd irq.
|
2010-07-21 11:54:40 +00:00
|
|
|
*/
|
|
|
|
struct variant_data {
|
|
|
|
unsigned int clkreg;
|
2010-07-21 11:55:18 +00:00
|
|
|
unsigned int clkreg_enable;
|
2014-06-02 09:09:23 +00:00
|
|
|
unsigned int clkreg_8bit_bus_enable;
|
2014-06-02 09:09:30 +00:00
|
|
|
unsigned int clkreg_neg_edge_enable;
|
2010-07-21 11:55:59 +00:00
|
|
|
unsigned int datalength_bits;
|
2010-08-09 11:57:30 +00:00
|
|
|
unsigned int fifosize;
|
|
|
|
unsigned int fifohalfsize;
|
2014-06-02 09:09:39 +00:00
|
|
|
unsigned int data_cmd_enable;
|
2014-06-02 09:09:15 +00:00
|
|
|
unsigned int datactrl_mask_ddrmode;
|
2014-08-22 04:54:55 +00:00
|
|
|
unsigned int datactrl_mask_sdio;
|
2014-08-22 04:55:16 +00:00
|
|
|
bool st_sdio;
|
2010-12-06 08:24:14 +00:00
|
|
|
bool st_clkdiv;
|
2011-03-25 07:51:52 +00:00
|
|
|
bool blksz_datactrl16;
|
2014-06-02 09:09:06 +00:00
|
|
|
bool blksz_datactrl4;
|
2011-12-13 15:54:55 +00:00
|
|
|
u32 pwrreg_powerup;
|
2014-06-02 09:09:47 +00:00
|
|
|
u32 f_max;
|
2011-12-13 15:57:07 +00:00
|
|
|
bool signal_direction;
|
2013-01-09 16:19:54 +00:00
|
|
|
bool pwrreg_clkgate;
|
2013-05-15 19:53:22 +00:00
|
|
|
bool busy_detect;
|
2013-09-04 08:05:17 +00:00
|
|
|
bool pwrreg_nopower;
|
2014-06-02 09:09:55 +00:00
|
|
|
bool explicit_mclk_control;
|
2014-06-02 09:10:04 +00:00
|
|
|
bool qcom_fifo;
|
2014-07-29 02:50:30 +00:00
|
|
|
bool qcom_dml;
|
2014-06-13 11:21:38 +00:00
|
|
|
bool reversed_irq_handling;
|
2010-07-21 11:54:40 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct variant_data variant_arm = {
|
2010-08-09 11:57:30 +00:00
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
2010-07-21 11:55:59 +00:00
|
|
|
.datalength_bits = 16,
|
2011-12-13 15:54:55 +00:00
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
2014-06-02 09:09:47 +00:00
|
|
|
.f_max = 100000000,
|
2014-06-13 11:21:38 +00:00
|
|
|
.reversed_irq_handling = true,
|
2010-07-21 11:54:40 +00:00
|
|
|
};
|
|
|
|
|
2011-03-11 17:18:07 +00:00
|
|
|
static struct variant_data variant_arm_extended_fifo = {
|
|
|
|
.fifosize = 128 * 4,
|
|
|
|
.fifohalfsize = 64 * 4,
|
|
|
|
.datalength_bits = 16,
|
2011-12-13 15:54:55 +00:00
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
2014-06-02 09:09:47 +00:00
|
|
|
.f_max = 100000000,
|
2011-03-11 17:18:07 +00:00
|
|
|
};
|
|
|
|
|
2013-01-24 13:12:45 +00:00
|
|
|
static struct variant_data variant_arm_extended_fifo_hwfc = {
|
|
|
|
.fifosize = 128 * 4,
|
|
|
|
.fifohalfsize = 64 * 4,
|
|
|
|
.clkreg_enable = MCI_ARM_HWFCEN,
|
|
|
|
.datalength_bits = 16,
|
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
2014-06-02 09:09:47 +00:00
|
|
|
.f_max = 100000000,
|
2013-01-24 13:12:45 +00:00
|
|
|
};
|
|
|
|
|
2010-07-21 11:54:40 +00:00
|
|
|
static struct variant_data variant_u300 = {
|
2010-08-09 11:57:30 +00:00
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
2011-03-04 13:54:16 +00:00
|
|
|
.clkreg_enable = MCI_ST_U300_HWFCEN,
|
2014-06-02 09:09:23 +00:00
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
2010-07-21 11:55:59 +00:00
|
|
|
.datalength_bits = 16,
|
2014-08-22 04:54:55 +00:00
|
|
|
.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
|
2014-08-22 04:55:16 +00:00
|
|
|
.st_sdio = true,
|
2011-12-13 15:54:55 +00:00
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 09:09:47 +00:00
|
|
|
.f_max = 100000000,
|
2011-12-13 15:57:07 +00:00
|
|
|
.signal_direction = true,
|
2013-01-09 16:19:54 +00:00
|
|
|
.pwrreg_clkgate = true,
|
2013-09-04 08:05:17 +00:00
|
|
|
.pwrreg_nopower = true,
|
2010-07-21 11:54:40 +00:00
|
|
|
};
|
|
|
|
|
2012-04-10 16:43:59 +00:00
|
|
|
static struct variant_data variant_nomadik = {
|
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
|
|
|
.datalength_bits = 24,
|
2014-08-22 04:54:55 +00:00
|
|
|
.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
|
2014-08-22 04:55:16 +00:00
|
|
|
.st_sdio = true,
|
2012-04-10 16:43:59 +00:00
|
|
|
.st_clkdiv = true,
|
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 09:09:47 +00:00
|
|
|
.f_max = 100000000,
|
2012-04-10 16:43:59 +00:00
|
|
|
.signal_direction = true,
|
2013-01-09 16:19:54 +00:00
|
|
|
.pwrreg_clkgate = true,
|
2013-09-04 08:05:17 +00:00
|
|
|
.pwrreg_nopower = true,
|
2012-04-10 16:43:59 +00:00
|
|
|
};
|
|
|
|
|
2010-07-21 11:54:40 +00:00
|
|
|
static struct variant_data variant_ux500 = {
|
2010-08-09 11:57:30 +00:00
|
|
|
.fifosize = 30 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
2010-07-21 11:54:40 +00:00
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
2011-03-04 13:54:16 +00:00
|
|
|
.clkreg_enable = MCI_ST_UX500_HWFCEN,
|
2014-06-02 09:09:23 +00:00
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
2014-06-02 09:09:30 +00:00
|
|
|
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
|
2010-07-21 11:55:59 +00:00
|
|
|
.datalength_bits = 24,
|
2014-08-22 04:54:55 +00:00
|
|
|
.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
|
2014-08-22 04:55:16 +00:00
|
|
|
.st_sdio = true,
|
2010-12-06 08:24:14 +00:00
|
|
|
.st_clkdiv = true,
|
2011-12-13 15:54:55 +00:00
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 09:09:47 +00:00
|
|
|
.f_max = 100000000,
|
2011-12-13 15:57:07 +00:00
|
|
|
.signal_direction = true,
|
2013-01-09 16:19:54 +00:00
|
|
|
.pwrreg_clkgate = true,
|
2013-05-15 19:53:22 +00:00
|
|
|
.busy_detect = true,
|
2013-09-04 08:05:17 +00:00
|
|
|
.pwrreg_nopower = true,
|
2010-07-21 11:54:40 +00:00
|
|
|
};
|
2010-12-06 08:24:14 +00:00
|
|
|
|
2011-03-25 07:51:52 +00:00
|
|
|
static struct variant_data variant_ux500v2 = {
|
|
|
|
.fifosize = 30 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
|
|
|
.clkreg_enable = MCI_ST_UX500_HWFCEN,
|
2014-06-02 09:09:23 +00:00
|
|
|
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
|
2014-06-02 09:09:30 +00:00
|
|
|
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
|
2014-06-02 09:09:15 +00:00
|
|
|
.datactrl_mask_ddrmode = MCI_ST_DPSM_DDRMODE,
|
2011-03-25 07:51:52 +00:00
|
|
|
.datalength_bits = 24,
|
2014-08-22 04:54:55 +00:00
|
|
|
.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
|
2014-08-22 04:55:16 +00:00
|
|
|
.st_sdio = true,
|
2011-03-25 07:51:52 +00:00
|
|
|
.st_clkdiv = true,
|
|
|
|
.blksz_datactrl16 = true,
|
2011-12-13 15:54:55 +00:00
|
|
|
.pwrreg_powerup = MCI_PWR_ON,
|
2014-06-02 09:09:47 +00:00
|
|
|
.f_max = 100000000,
|
2011-12-13 15:57:07 +00:00
|
|
|
.signal_direction = true,
|
2013-01-09 16:19:54 +00:00
|
|
|
.pwrreg_clkgate = true,
|
2013-05-15 19:53:22 +00:00
|
|
|
.busy_detect = true,
|
2013-09-04 08:05:17 +00:00
|
|
|
.pwrreg_nopower = true,
|
2011-03-25 07:51:52 +00:00
|
|
|
};
|
|
|
|
|
2014-06-02 09:10:13 +00:00
|
|
|
static struct variant_data variant_qcom = {
|
|
|
|
.fifosize = 16 * 4,
|
|
|
|
.fifohalfsize = 8 * 4,
|
|
|
|
.clkreg = MCI_CLK_ENABLE,
|
|
|
|
.clkreg_enable = MCI_QCOM_CLK_FLOWENA |
|
|
|
|
MCI_QCOM_CLK_SELECT_IN_FBCLK,
|
|
|
|
.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
|
|
|
|
.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
|
|
|
|
.data_cmd_enable = MCI_QCOM_CSPM_DATCMD,
|
|
|
|
.blksz_datactrl4 = true,
|
|
|
|
.datalength_bits = 24,
|
|
|
|
.pwrreg_powerup = MCI_PWR_UP,
|
|
|
|
.f_max = 208000000,
|
|
|
|
.explicit_mclk_control = true,
|
|
|
|
.qcom_fifo = true,
|
2014-07-29 02:50:30 +00:00
|
|
|
.qcom_dml = true,
|
2014-06-02 09:10:13 +00:00
|
|
|
};
|
|
|
|
|
2013-05-15 19:53:22 +00:00
|
|
|
static int mmci_card_busy(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
unsigned long flags;
|
|
|
|
int busy = 0;
|
|
|
|
|
|
|
|
pm_runtime_get_sync(mmc_dev(mmc));
|
|
|
|
|
|
|
|
spin_lock_irqsave(&host->lock, flags);
|
|
|
|
if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
|
|
|
|
busy = 1;
|
|
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
|
|
|
|
|
|
|
pm_runtime_mark_last_busy(mmc_dev(mmc));
|
|
|
|
pm_runtime_put_autosuspend(mmc_dev(mmc));
|
|
|
|
|
|
|
|
return busy;
|
|
|
|
}
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
/*
|
|
|
|
* Validate mmc prerequisites
|
|
|
|
*/
|
|
|
|
static int mmci_validate_data(struct mmci_host *host,
|
|
|
|
struct mmc_data *data)
|
|
|
|
{
|
|
|
|
if (!data)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!is_power_of_2(data->blksz)) {
|
|
|
|
dev_err(mmc_dev(host->mmc),
|
|
|
|
"unsupported block size (%d bytes)\n", data->blksz);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-04 08:01:15 +00:00
|
|
|
static void mmci_reg_delay(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* According to the spec, at least three feedback clock cycles
|
|
|
|
* of max 52 MHz must pass between two writes to the MMCICLOCK reg.
|
|
|
|
* Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
|
|
|
|
* Worst delay time during card init is at 100 kHz => 30 us.
|
|
|
|
* Worst delay time when up and running is at 25 MHz => 120 ns.
|
|
|
|
*/
|
|
|
|
if (host->cclk < 25000000)
|
|
|
|
udelay(30);
|
|
|
|
else
|
|
|
|
ndelay(120);
|
|
|
|
}
|
|
|
|
|
2012-01-18 08:17:27 +00:00
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
|
|
|
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
|
|
|
|
{
|
|
|
|
if (host->clk_reg != clk) {
|
|
|
|
host->clk_reg = clk;
|
|
|
|
writel(clk, host->base + MMCICLOCK);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
|
|
|
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
|
|
|
|
{
|
|
|
|
if (host->pwr_reg != pwr) {
|
|
|
|
host->pwr_reg = pwr;
|
|
|
|
writel(pwr, host->base + MMCIPOWER);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-15 19:48:23 +00:00
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
|
|
|
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
|
|
|
|
{
|
2013-05-15 19:53:22 +00:00
|
|
|
/* Keep ST Micro busy mode if enabled */
|
|
|
|
datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
|
|
|
|
|
2013-05-15 19:48:23 +00:00
|
|
|
if (host->datactrl_reg != datactrl) {
|
|
|
|
host->datactrl_reg = datactrl;
|
|
|
|
writel(datactrl, host->base + MMCIDATACTRL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-14 11:56:14 +00:00
|
|
|
/*
|
|
|
|
* This must be called with host->lock held
|
|
|
|
*/
|
|
|
|
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
|
|
|
|
{
|
2010-07-21 11:54:40 +00:00
|
|
|
struct variant_data *variant = host->variant;
|
|
|
|
u32 clk = variant->clkreg;
|
2009-09-14 11:56:14 +00:00
|
|
|
|
2013-05-13 14:40:03 +00:00
|
|
|
/* Make sure cclk reflects the current calculated clock */
|
|
|
|
host->cclk = 0;
|
|
|
|
|
2009-09-14 11:56:14 +00:00
|
|
|
if (desired) {
|
2014-06-02 09:09:55 +00:00
|
|
|
if (variant->explicit_mclk_control) {
|
|
|
|
host->cclk = host->mclk;
|
|
|
|
} else if (desired >= host->mclk) {
|
2010-12-10 08:35:53 +00:00
|
|
|
clk = MCI_CLK_BYPASS;
|
2011-04-01 06:59:17 +00:00
|
|
|
if (variant->st_clkdiv)
|
|
|
|
clk |= MCI_ST_UX500_NEG_EDGE;
|
2009-09-14 11:56:14 +00:00
|
|
|
host->cclk = host->mclk;
|
2010-12-06 08:24:14 +00:00
|
|
|
} else if (variant->st_clkdiv) {
|
|
|
|
/*
|
|
|
|
* DB8500 TRM says f = mclk / (clkdiv + 2)
|
|
|
|
* => clkdiv = (mclk / f) - 2
|
|
|
|
* Round the divider up so we don't exceed the max
|
|
|
|
* frequency
|
|
|
|
*/
|
|
|
|
clk = DIV_ROUND_UP(host->mclk, desired) - 2;
|
|
|
|
if (clk >= 256)
|
|
|
|
clk = 255;
|
|
|
|
host->cclk = host->mclk / (clk + 2);
|
2009-09-14 11:56:14 +00:00
|
|
|
} else {
|
2010-12-06 08:24:14 +00:00
|
|
|
/*
|
|
|
|
* PL180 TRM says f = mclk / (2 * (clkdiv + 1))
|
|
|
|
* => clkdiv = mclk / (2 * f) - 1
|
|
|
|
*/
|
2009-09-14 11:56:14 +00:00
|
|
|
clk = host->mclk / (2 * desired) - 1;
|
|
|
|
if (clk >= 256)
|
|
|
|
clk = 255;
|
|
|
|
host->cclk = host->mclk / (2 * (clk + 1));
|
|
|
|
}
|
2010-07-21 11:55:18 +00:00
|
|
|
|
|
|
|
clk |= variant->clkreg_enable;
|
2009-09-14 11:56:14 +00:00
|
|
|
clk |= MCI_CLK_ENABLE;
|
|
|
|
/* This hasn't proven to be worthwhile */
|
|
|
|
/* clk |= MCI_CLK_PWRSAVE; */
|
|
|
|
}
|
|
|
|
|
2013-05-13 14:40:03 +00:00
|
|
|
/* Set actual clock for debug */
|
|
|
|
host->mmc->actual_clock = host->cclk;
|
|
|
|
|
2009-09-14 11:57:11 +00:00
|
|
|
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
|
2010-04-08 06:38:52 +00:00
|
|
|
clk |= MCI_4BIT_BUS;
|
|
|
|
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
|
2014-06-02 09:09:23 +00:00
|
|
|
clk |= variant->clkreg_8bit_bus_enable;
|
2009-09-14 11:57:11 +00:00
|
|
|
|
2014-03-14 12:12:13 +00:00
|
|
|
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
|
|
|
|
host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
|
2014-06-02 09:09:30 +00:00
|
|
|
clk |= variant->clkreg_neg_edge_enable;
|
2013-01-07 14:30:44 +00:00
|
|
|
|
2012-01-18 08:17:27 +00:00
|
|
|
mmci_write_clkreg(host, clk);
|
2009-09-14 11:56:14 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static void
|
|
|
|
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
|
|
|
|
{
|
|
|
|
writel(0, host->base + MMCICOMMAND);
|
|
|
|
|
2007-01-08 16:42:51 +00:00
|
|
|
BUG_ON(host->data);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
host->mrq = NULL;
|
|
|
|
host->cmd = NULL;
|
|
|
|
|
|
|
|
mmc_request_done(host->mmc, mrq);
|
2011-12-13 16:01:11 +00:00
|
|
|
|
|
|
|
pm_runtime_mark_last_busy(mmc_dev(host->mmc));
|
|
|
|
pm_runtime_put_autosuspend(mmc_dev(host->mmc));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2010-10-19 11:39:48 +00:00
|
|
|
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
|
|
|
|
{
|
|
|
|
void __iomem *base = host->base;
|
|
|
|
|
|
|
|
if (host->singleirq) {
|
|
|
|
unsigned int mask0 = readl(base + MMCIMASK0);
|
|
|
|
|
|
|
|
mask0 &= ~MCI_IRQ1MASK;
|
|
|
|
mask0 |= mask;
|
|
|
|
|
|
|
|
writel(mask0, base + MMCIMASK0);
|
|
|
|
}
|
|
|
|
|
|
|
|
writel(mask, base + MMCIMASK1);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static void mmci_stop_data(struct mmci_host *host)
|
|
|
|
{
|
2013-05-15 19:48:23 +00:00
|
|
|
mmci_write_datactrlreg(host, 0);
|
2010-10-19 11:39:48 +00:00
|
|
|
mmci_set_mask1(host, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
host->data = NULL;
|
|
|
|
}
|
|
|
|
|
2010-07-21 11:44:58 +00:00
|
|
|
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
|
|
|
unsigned int flags = SG_MITER_ATOMIC;
|
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ)
|
|
|
|
flags |= SG_MITER_TO_SG;
|
|
|
|
else
|
|
|
|
flags |= SG_MITER_FROM_SG;
|
|
|
|
|
|
|
|
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
|
|
|
|
}
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
/*
|
|
|
|
* All the DMA operation mode stuff goes inside this ifdef.
|
|
|
|
* This assumes that you have a generic DMA device interface,
|
|
|
|
* no custom DMA interfaces are supported.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_DMA_ENGINE
|
2012-11-19 18:23:06 +00:00
|
|
|
static void mmci_dma_setup(struct mmci_host *host)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
{
|
|
|
|
const char *rxname, *txname;
|
2014-07-29 02:50:30 +00:00
|
|
|
struct variant_data *variant = host->variant;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
2013-05-03 11:51:17 +00:00
|
|
|
host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
|
|
|
|
host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
2011-07-01 16:55:24 +00:00
|
|
|
/* initialize pre request cookie */
|
|
|
|
host->next_data.cookie = 1;
|
|
|
|
|
2013-05-03 11:51:17 +00:00
|
|
|
/*
|
|
|
|
* If only an RX channel is specified, the driver will
|
|
|
|
* attempt to use it bidirectionally, however if it is
|
|
|
|
* is specified but cannot be located, DMA will be disabled.
|
|
|
|
*/
|
|
|
|
if (host->dma_rx_channel && !host->dma_tx_channel)
|
|
|
|
host->dma_tx_channel = host->dma_rx_channel;
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
if (host->dma_rx_channel)
|
|
|
|
rxname = dma_chan_name(host->dma_rx_channel);
|
|
|
|
else
|
|
|
|
rxname = "none";
|
|
|
|
|
|
|
|
if (host->dma_tx_channel)
|
|
|
|
txname = dma_chan_name(host->dma_tx_channel);
|
|
|
|
else
|
|
|
|
txname = "none";
|
|
|
|
|
|
|
|
dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
|
|
|
|
rxname, txname);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Limit the maximum segment size in any SG entry according to
|
|
|
|
* the parameters of the DMA engine device.
|
|
|
|
*/
|
|
|
|
if (host->dma_tx_channel) {
|
|
|
|
struct device *dev = host->dma_tx_channel->device->dev;
|
|
|
|
unsigned int max_seg_size = dma_get_max_seg_size(dev);
|
|
|
|
|
|
|
|
if (max_seg_size < host->mmc->max_seg_size)
|
|
|
|
host->mmc->max_seg_size = max_seg_size;
|
|
|
|
}
|
|
|
|
if (host->dma_rx_channel) {
|
|
|
|
struct device *dev = host->dma_rx_channel->device->dev;
|
|
|
|
unsigned int max_seg_size = dma_get_max_seg_size(dev);
|
|
|
|
|
|
|
|
if (max_seg_size < host->mmc->max_seg_size)
|
|
|
|
host->mmc->max_seg_size = max_seg_size;
|
|
|
|
}
|
2014-07-29 02:50:30 +00:00
|
|
|
|
|
|
|
if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
|
|
|
|
if (dml_hw_init(host, host->mmc->parent->of_node))
|
|
|
|
variant->qcom_dml = false;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2012-11-19 18:26:03 +00:00
|
|
|
* This is used in or so inline it
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
* so it can be discarded.
|
|
|
|
*/
|
|
|
|
static inline void mmci_dma_release(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
if (host->dma_rx_channel)
|
|
|
|
dma_release_channel(host->dma_rx_channel);
|
2014-05-20 04:45:54 +00:00
|
|
|
if (host->dma_tx_channel)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
dma_release_channel(host->dma_tx_channel);
|
|
|
|
host->dma_rx_channel = host->dma_tx_channel = NULL;
|
|
|
|
}
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
static void mmci_dma_data_error(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
|
|
|
|
dmaengine_terminate_all(host->dma_current);
|
|
|
|
host->dma_current = NULL;
|
|
|
|
host->dma_desc_current = NULL;
|
|
|
|
host->data->host_cookie = 0;
|
|
|
|
}
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
2013-01-21 20:29:34 +00:00
|
|
|
struct dma_chan *chan;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
enum dma_data_direction dir;
|
2013-01-21 20:29:34 +00:00
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ) {
|
|
|
|
dir = DMA_FROM_DEVICE;
|
|
|
|
chan = host->dma_rx_channel;
|
|
|
|
} else {
|
|
|
|
dir = DMA_TO_DEVICE;
|
|
|
|
chan = host->dma_tx_channel;
|
|
|
|
}
|
|
|
|
|
|
|
|
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
u32 status;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Wait up to 1ms for the DMA to complete */
|
|
|
|
for (i = 0; ; i++) {
|
|
|
|
status = readl(host->base + MMCISTATUS);
|
|
|
|
if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
|
|
|
|
break;
|
|
|
|
udelay(10);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check to see whether we still have some data left in the FIFO -
|
|
|
|
* this catches DMA controllers which are unable to monitor the
|
|
|
|
* DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
|
|
|
|
* contiguous buffers. On TX, we'll get a FIFO underrun error.
|
|
|
|
*/
|
|
|
|
if (status & MCI_RXDATAAVLBLMASK) {
|
2013-01-21 20:29:34 +00:00
|
|
|
mmci_dma_data_error(host);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
if (!data->error)
|
|
|
|
data->error = -EIO;
|
|
|
|
}
|
|
|
|
|
2011-07-01 16:55:24 +00:00
|
|
|
if (!data->host_cookie)
|
2013-01-21 20:29:34 +00:00
|
|
|
mmci_dma_unmap(host, data);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use of DMA with scatter-gather is impossible.
|
|
|
|
* Give up with DMA and switch back to PIO mode.
|
|
|
|
*/
|
|
|
|
if (status & MCI_RXDATAAVLBLMASK) {
|
|
|
|
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
|
|
|
|
mmci_dma_release(host);
|
|
|
|
}
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
host->dma_current = NULL;
|
|
|
|
host->dma_desc_current = NULL;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
}
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
|
|
|
|
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
|
|
|
|
struct dma_chan **dma_chan,
|
|
|
|
struct dma_async_tx_descriptor **dma_desc)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
{
|
|
|
|
struct variant_data *variant = host->variant;
|
|
|
|
struct dma_slave_config conf = {
|
|
|
|
.src_addr = host->phybase + MMCIFIFO,
|
|
|
|
.dst_addr = host->phybase + MMCIFIFO,
|
|
|
|
.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
|
|
|
|
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
|
|
|
|
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
|
|
|
|
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
|
2012-02-01 10:42:19 +00:00
|
|
|
.device_fc = false,
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
};
|
|
|
|
struct dma_chan *chan;
|
|
|
|
struct dma_device *device;
|
|
|
|
struct dma_async_tx_descriptor *desc;
|
2011-10-14 05:15:11 +00:00
|
|
|
enum dma_data_direction buffer_dirn;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
int nr_sg;
|
2014-07-29 02:50:30 +00:00
|
|
|
unsigned long flags = DMA_CTRL_ACK;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ) {
|
2011-10-14 05:15:11 +00:00
|
|
|
conf.direction = DMA_DEV_TO_MEM;
|
|
|
|
buffer_dirn = DMA_FROM_DEVICE;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
chan = host->dma_rx_channel;
|
|
|
|
} else {
|
2011-10-14 05:15:11 +00:00
|
|
|
conf.direction = DMA_MEM_TO_DEV;
|
|
|
|
buffer_dirn = DMA_TO_DEVICE;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
chan = host->dma_tx_channel;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If there's no DMA channel, fall back to PIO */
|
|
|
|
if (!chan)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* If less than or equal to the fifo size, don't bother with DMA */
|
2011-07-01 16:55:24 +00:00
|
|
|
if (data->blksz * data->blocks <= variant->fifosize)
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
device = chan->device;
|
2011-10-14 05:15:11 +00:00
|
|
|
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
if (nr_sg == 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-07-29 02:50:30 +00:00
|
|
|
if (host->variant->qcom_dml)
|
|
|
|
flags |= DMA_PREP_INTERRUPT;
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
dmaengine_slave_config(chan, &conf);
|
2012-03-08 21:11:18 +00:00
|
|
|
desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
|
2014-07-29 02:50:30 +00:00
|
|
|
conf.direction, flags);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
if (!desc)
|
|
|
|
goto unmap_exit;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
*dma_chan = chan;
|
|
|
|
*dma_desc = desc;
|
2011-07-01 16:55:24 +00:00
|
|
|
|
|
|
|
return 0;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
2011-07-01 16:55:24 +00:00
|
|
|
unmap_exit:
|
2011-10-14 05:15:11 +00:00
|
|
|
dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
|
2011-07-01 16:55:24 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
static inline int mmci_dma_prep_data(struct mmci_host *host,
|
|
|
|
struct mmc_data *data)
|
|
|
|
{
|
|
|
|
/* Check if next job is already prepared. */
|
|
|
|
if (host->dma_current && host->dma_desc_current)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* No job were prepared thus do it now. */
|
|
|
|
return __mmci_dma_prep_data(host, data, &host->dma_current,
|
|
|
|
&host->dma_desc_current);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int mmci_dma_prep_next(struct mmci_host *host,
|
|
|
|
struct mmc_data *data)
|
|
|
|
{
|
|
|
|
struct mmci_host_next *nd = &host->next_data;
|
|
|
|
return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
|
|
|
|
}
|
|
|
|
|
2011-07-01 16:55:24 +00:00
|
|
|
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct mmc_data *data = host->data;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
ret = mmci_dma_prep_data(host, host->data);
|
2011-07-01 16:55:24 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Okay, go for it. */
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
dev_vdbg(mmc_dev(host->mmc),
|
|
|
|
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
|
|
|
|
data->sg_len, data->blksz, data->blocks, data->flags);
|
2011-07-01 16:55:24 +00:00
|
|
|
dmaengine_submit(host->dma_desc_current);
|
|
|
|
dma_async_issue_pending(host->dma_current);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
2014-07-29 02:50:30 +00:00
|
|
|
if (host->variant->qcom_dml)
|
|
|
|
dml_start_xfer(host, data);
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
datactrl |= MCI_DPSM_DMAENABLE;
|
|
|
|
|
|
|
|
/* Trigger the DMA transfer */
|
2013-05-15 19:48:23 +00:00
|
|
|
mmci_write_datactrlreg(host, datactrl);
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Let the MMCI say when the data is ended and it's time
|
|
|
|
* to fire next DMA request. When that happens, MMCI will
|
|
|
|
* call mmci_data_end()
|
|
|
|
*/
|
|
|
|
writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
|
|
|
|
host->base + MMCIMASK0);
|
|
|
|
return 0;
|
2011-07-01 16:55:24 +00:00
|
|
|
}
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
2011-07-01 16:55:24 +00:00
|
|
|
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
|
|
|
struct mmci_host_next *next = &host->next_data;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
|
|
|
|
WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
|
2011-07-01 16:55:24 +00:00
|
|
|
|
|
|
|
host->dma_desc_current = next->dma_desc;
|
|
|
|
host->dma_current = next->dma_chan;
|
|
|
|
next->dma_desc = NULL;
|
|
|
|
next->dma_chan = NULL;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
}
|
2011-07-01 16:55:24 +00:00
|
|
|
|
|
|
|
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
|
|
|
|
bool is_first_req)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
struct mmc_data *data = mrq->data;
|
|
|
|
struct mmci_host_next *nd = &host->next_data;
|
|
|
|
|
|
|
|
if (!data)
|
|
|
|
return;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
BUG_ON(data->host_cookie);
|
|
|
|
|
|
|
|
if (mmci_validate_data(host, data))
|
2011-07-01 16:55:24 +00:00
|
|
|
return;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
if (!mmci_dma_prep_next(host, data))
|
|
|
|
data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
|
2011-07-01 16:55:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
|
|
|
|
int err)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
struct mmc_data *data = mrq->data;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
if (!data || !data->host_cookie)
|
2011-07-01 16:55:24 +00:00
|
|
|
return;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
mmci_dma_unmap(host, data);
|
2011-07-01 16:55:24 +00:00
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
if (err) {
|
|
|
|
struct mmci_host_next *next = &host->next_data;
|
|
|
|
struct dma_chan *chan;
|
|
|
|
if (data->flags & MMC_DATA_READ)
|
|
|
|
chan = host->dma_rx_channel;
|
|
|
|
else
|
|
|
|
chan = host->dma_tx_channel;
|
|
|
|
dmaengine_terminate_all(chan);
|
2011-07-01 16:55:24 +00:00
|
|
|
|
2014-10-08 11:25:17 +00:00
|
|
|
if (host->dma_desc_current == next->dma_desc)
|
|
|
|
host->dma_desc_current = NULL;
|
|
|
|
|
|
|
|
if (host->dma_current == next->dma_chan)
|
|
|
|
host->dma_current = NULL;
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
next->dma_desc = NULL;
|
|
|
|
next->dma_chan = NULL;
|
2014-10-08 11:25:17 +00:00
|
|
|
data->host_cookie = 0;
|
2011-07-01 16:55:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
#else
|
|
|
|
/* Blank functions if the DMA engine is not available */
|
2011-07-01 16:55:24 +00:00
|
|
|
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
|
|
|
}
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
static inline void mmci_dma_setup(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void mmci_dma_release(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2013-01-21 20:29:34 +00:00
|
|
|
static inline void mmci_dma_finalize(struct mmci_host *host,
|
|
|
|
struct mmc_data *data)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
static inline void mmci_dma_data_error(struct mmci_host *host)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
|
|
|
|
{
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
2011-07-01 16:55:24 +00:00
|
|
|
|
|
|
|
#define mmci_pre_request NULL
|
|
|
|
#define mmci_post_request NULL
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
|
|
|
|
{
|
2010-08-09 11:57:30 +00:00
|
|
|
struct variant_data *variant = host->variant;
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned int datactrl, timeout, irqmask;
|
2005-07-01 11:02:59 +00:00
|
|
|
unsigned long long clks;
|
2005-04-16 22:20:36 +00:00
|
|
|
void __iomem *base;
|
2006-08-27 12:51:28 +00:00
|
|
|
int blksz_bits;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-02-19 00:09:10 +00:00
|
|
|
dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
|
|
|
|
data->blksz, data->blocks, data->flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
host->data = data;
|
2010-07-21 11:49:49 +00:00
|
|
|
host->size = data->blksz * data->blocks;
|
2011-01-27 10:56:52 +00:00
|
|
|
data->bytes_xfered = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-07-01 11:02:59 +00:00
|
|
|
clks = (unsigned long long)data->timeout_ns * host->cclk;
|
2014-06-02 09:08:39 +00:00
|
|
|
do_div(clks, NSEC_PER_SEC);
|
2005-07-01 11:02:59 +00:00
|
|
|
|
|
|
|
timeout = data->timeout_clks + (unsigned int)clks;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
base = host->base;
|
|
|
|
writel(timeout, base + MMCIDATATIMER);
|
|
|
|
writel(host->size, base + MMCIDATALENGTH);
|
|
|
|
|
2006-08-27 12:51:28 +00:00
|
|
|
blksz_bits = ffs(data->blksz) - 1;
|
|
|
|
BUG_ON(1 << blksz_bits != data->blksz);
|
|
|
|
|
2011-03-25 07:51:52 +00:00
|
|
|
if (variant->blksz_datactrl16)
|
|
|
|
datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
|
2014-06-02 09:09:06 +00:00
|
|
|
else if (variant->blksz_datactrl4)
|
|
|
|
datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
|
2011-03-25 07:51:52 +00:00
|
|
|
else
|
|
|
|
datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ)
|
2005-04-16 22:20:36 +00:00
|
|
|
datactrl |= MCI_DPSM_DIRECTION;
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
|
2014-08-22 04:55:16 +00:00
|
|
|
if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
|
|
|
|
u32 clk;
|
2011-12-13 16:05:28 +00:00
|
|
|
|
2014-08-22 04:55:16 +00:00
|
|
|
datactrl |= variant->datactrl_mask_sdio;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The ST Micro variant for SDIO small write transfers
|
|
|
|
* needs to have clock H/W flow control disabled,
|
|
|
|
* otherwise the transfer will not start. The threshold
|
|
|
|
* depends on the rate of MCLK.
|
|
|
|
*/
|
|
|
|
if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
|
|
|
|
(host->size < 8 ||
|
|
|
|
(host->size <= 8 && host->mclk > 50000000)))
|
|
|
|
clk = host->clk_reg & ~variant->clkreg_enable;
|
|
|
|
else
|
|
|
|
clk = host->clk_reg | variant->clkreg_enable;
|
|
|
|
|
|
|
|
mmci_write_clkreg(host, clk);
|
|
|
|
}
|
2012-10-12 13:01:50 +00:00
|
|
|
|
2014-03-14 12:12:13 +00:00
|
|
|
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
|
|
|
|
host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
|
2014-06-02 09:09:15 +00:00
|
|
|
datactrl |= variant->datactrl_mask_ddrmode;
|
2013-01-07 14:30:44 +00:00
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
/*
|
|
|
|
* Attempt to use DMA operation mode, if this
|
|
|
|
* should fail, fall back to PIO mode
|
|
|
|
*/
|
|
|
|
if (!mmci_dma_start_data(host, datactrl))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* IRQ mode, map the SG list for CPU reading/writing */
|
|
|
|
mmci_init_sg(host, data);
|
|
|
|
|
|
|
|
if (data->flags & MMC_DATA_READ) {
|
2005-04-16 22:20:36 +00:00
|
|
|
irqmask = MCI_RXFIFOHALFFULLMASK;
|
2006-02-16 16:48:31 +00:00
|
|
|
|
|
|
|
/*
|
2011-01-27 09:50:13 +00:00
|
|
|
* If we have less than the fifo 'half-full' threshold to
|
|
|
|
* transfer, trigger a PIO interrupt as soon as any data
|
|
|
|
* is available.
|
2006-02-16 16:48:31 +00:00
|
|
|
*/
|
2011-01-27 09:50:13 +00:00
|
|
|
if (host->size < variant->fifohalfsize)
|
2006-02-16 16:48:31 +00:00
|
|
|
irqmask |= MCI_RXDATAAVLBLMASK;
|
2005-04-16 22:20:36 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* We don't actually need to include "FIFO empty" here
|
|
|
|
* since its implicit in "FIFO half empty".
|
|
|
|
*/
|
|
|
|
irqmask = MCI_TXFIFOHALFEMPTYMASK;
|
|
|
|
}
|
|
|
|
|
2013-05-15 19:48:23 +00:00
|
|
|
mmci_write_datactrlreg(host, datactrl);
|
2005-04-16 22:20:36 +00:00
|
|
|
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
|
2010-10-19 11:39:48 +00:00
|
|
|
mmci_set_mask1(host, irqmask);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
|
|
|
|
{
|
|
|
|
void __iomem *base = host->base;
|
|
|
|
|
2010-02-19 00:09:10 +00:00
|
|
|
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
|
2005-04-16 22:20:36 +00:00
|
|
|
cmd->opcode, cmd->arg, cmd->flags);
|
|
|
|
|
|
|
|
if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
|
|
|
|
writel(0, base + MMCICOMMAND);
|
2014-06-02 09:08:57 +00:00
|
|
|
mmci_reg_delay(host);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
c |= cmd->opcode | MCI_CPSM_ENABLE;
|
2006-02-02 12:23:12 +00:00
|
|
|
if (cmd->flags & MMC_RSP_PRESENT) {
|
|
|
|
if (cmd->flags & MMC_RSP_136)
|
|
|
|
c |= MCI_CPSM_LONGRSP;
|
2005-04-16 22:20:36 +00:00
|
|
|
c |= MCI_CPSM_RESPONSE;
|
|
|
|
}
|
|
|
|
if (/*interrupt*/0)
|
|
|
|
c |= MCI_CPSM_INTERRUPT;
|
|
|
|
|
2014-06-02 09:09:39 +00:00
|
|
|
if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
|
|
|
|
c |= host->variant->data_cmd_enable;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
host->cmd = cmd;
|
|
|
|
|
|
|
|
writel(cmd->arg, base + MMCIARGUMENT);
|
|
|
|
writel(c, base + MMCICOMMAND);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
|
|
|
|
unsigned int status)
|
|
|
|
{
|
2014-06-12 12:42:23 +00:00
|
|
|
/* Make sure we have data to handle */
|
|
|
|
if (!data)
|
|
|
|
return;
|
|
|
|
|
2010-10-19 12:41:24 +00:00
|
|
|
/* First check for errors */
|
2011-12-13 15:51:04 +00:00
|
|
|
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
|
|
|
|
MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
|
2011-01-24 14:22:13 +00:00
|
|
|
u32 remain, success;
|
2010-10-19 12:41:24 +00:00
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
/* Terminate the DMA transfer */
|
2013-01-21 20:29:34 +00:00
|
|
|
if (dma_inprogress(host)) {
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
mmci_dma_data_error(host);
|
2013-01-21 20:29:34 +00:00
|
|
|
mmci_dma_unmap(host, data);
|
|
|
|
}
|
2006-01-04 16:24:05 +00:00
|
|
|
|
|
|
|
/*
|
2011-02-04 09:19:46 +00:00
|
|
|
* Calculate how far we are into the transfer. Note that
|
|
|
|
* the data counter gives the number of bytes transferred
|
|
|
|
* on the MMC bus, not on the host side. On reads, this
|
|
|
|
* can be as much as a FIFO-worth of data ahead. This
|
|
|
|
* matters for FIFO overruns only.
|
2006-01-04 16:24:05 +00:00
|
|
|
*/
|
2011-01-27 16:44:34 +00:00
|
|
|
remain = readl(host->base + MMCIDATACNT);
|
2011-01-24 14:22:13 +00:00
|
|
|
success = data->blksz * data->blocks - remain;
|
|
|
|
|
2011-02-04 09:19:46 +00:00
|
|
|
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
|
|
|
|
status, success);
|
2011-01-24 14:22:13 +00:00
|
|
|
if (status & MCI_DATACRCFAIL) {
|
|
|
|
/* Last block was not successful */
|
2011-02-04 09:19:46 +00:00
|
|
|
success -= 1;
|
2007-07-22 20:18:46 +00:00
|
|
|
data->error = -EILSEQ;
|
2011-01-24 14:22:13 +00:00
|
|
|
} else if (status & MCI_DATATIMEOUT) {
|
2007-07-22 20:18:46 +00:00
|
|
|
data->error = -ETIMEDOUT;
|
2011-06-30 14:10:21 +00:00
|
|
|
} else if (status & MCI_STARTBITERR) {
|
|
|
|
data->error = -ECOMM;
|
2011-02-04 09:19:46 +00:00
|
|
|
} else if (status & MCI_TXUNDERRUN) {
|
|
|
|
data->error = -EIO;
|
|
|
|
} else if (status & MCI_RXOVERRUN) {
|
|
|
|
if (success > host->variant->fifosize)
|
|
|
|
success -= host->variant->fifosize;
|
|
|
|
else
|
|
|
|
success = 0;
|
2007-07-22 20:18:46 +00:00
|
|
|
data->error = -EIO;
|
2010-07-21 11:44:58 +00:00
|
|
|
}
|
2011-01-27 10:56:52 +00:00
|
|
|
data->bytes_xfered = round_down(success, data->blksz);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2010-10-19 12:41:24 +00:00
|
|
|
|
2011-01-24 14:22:13 +00:00
|
|
|
if (status & MCI_DATABLOCKEND)
|
|
|
|
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
|
2010-10-19 12:41:24 +00:00
|
|
|
|
2011-01-30 21:03:50 +00:00
|
|
|
if (status & MCI_DATAEND || data->error) {
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
if (dma_inprogress(host))
|
2013-01-21 20:29:34 +00:00
|
|
|
mmci_dma_finalize(host, data);
|
2005-04-16 22:20:36 +00:00
|
|
|
mmci_stop_data(host);
|
|
|
|
|
2011-01-24 14:22:13 +00:00
|
|
|
if (!data->error)
|
|
|
|
/* The error clause is handled above, success! */
|
2011-01-27 10:56:52 +00:00
|
|
|
data->bytes_xfered = data->blksz * data->blocks;
|
2010-10-19 12:41:24 +00:00
|
|
|
|
2013-05-13 14:40:56 +00:00
|
|
|
if (!data->stop || host->mrq->sbc) {
|
2005-04-16 22:20:36 +00:00
|
|
|
mmci_request_end(host, data->mrq);
|
|
|
|
} else {
|
|
|
|
mmci_start_command(host, data->stop, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Command-phase interrupt handler.
 *
 * Handles completion of @cmd based on the masked controller @status word.
 * On ST Micro variants with busy detection it also implements a small
 * state machine around host->busy_status: the first completion IRQ may
 * only arm the BUSYEND interrupt, and the request is finished later when
 * the card releases DAT0.  Called with host->lock held from mmci_irq().
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc, busy_resp;

	if (!cmd)
		return;

	/* Is this the CMD23 "set block count" pre-command of the request? */
	sbc = (cmd == host->mrq->sbc);
	/* Busy detection only matters for R1B-type responses on capable HW. */
	busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);

	/*
	 * Nothing to do unless a command completion/error bit is set now,
	 * or we previously latched one into busy_status while waiting for
	 * the busy signal to end.
	 */
	if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
		MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/* Check if we need to wait for busy completion. */
	if (host->busy_status && (status & MCI_ST_CARDBUSY))
		return;

	/* Enable busy completion if needed and supported. */
	if (!host->busy_status && busy_resp &&
	    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
	    (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
		/* Unmask the BUSYEND IRQ and remember the completion bits. */
		writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
			base + MMCIMASK0);
		host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
		return;
	}

	/* At busy completion, mask the IRQ and complete the request. */
	if (host->busy_status) {
		writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
			base + MMCIMASK0);
		host->busy_status = 0;
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		/* Successful response: latch all four response registers. */
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	/*
	 * Finish the request if there is no data phase (and this is not an
	 * sbc pre-command) or if anything failed; otherwise start the next
	 * phase: the real command after sbc, or the data phase for writes
	 * (reads were started earlier in mmci_request()).
	 */
	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
|
|
|
|
|
2014-06-02 09:10:04 +00:00
|
|
|
/*
 * Default RX FIFO byte-count callback: MMCIFIFOCNT holds the number of
 * words still expected on the card side, so the bytes already available
 * in the FIFO are the remaining total minus that, scaled to bytes.
 */
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	int outstanding_bytes = readl(host->base + MMCIFIFOCNT) << 2;

	return remain - outstanding_bytes;
}
|
|
|
|
|
|
|
|
/*
 * Qualcomm RX FIFO byte-count callback.
 *
 * On qcom SDCC4 only 8 words are used in each burst, so only 8 addresses
 * from the FIFO range should be used; derive the readable byte count from
 * the FIFO fill-level status flags instead of MMCIFIFOCNT.
 */
static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;

	if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Drain up to @remain bytes from the RX FIFO into @buffer.
 *
 * The per-variant get_rx_fifocnt() callback reports how many bytes are
 * currently readable.  Returns the number of bytes actually copied out.
 */
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;	/* bytes left in the whole transfer */

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		/* Never copy past the caller's buffer. */
		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				/* Read one full word, copy only the tail bytes. */
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				/* Read the whole words; leave the ragged tail
				 * for the next iteration. */
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
|
|
|
|
|
|
|
|
/*
 * Push up to @remain bytes from @buffer into the TX FIFO.
 *
 * @status is the MMCISTATUS snapshot taken by the caller and is refreshed
 * each iteration.  Returns the number of bytes consumed from @buffer.
 */
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		/* A fully empty FIFO can take a whole fifosize, otherwise
		 * only up to the half-full watermark. */
		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accept full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte become a 32bit write, 7 bytes will be two
		 * 32bit writes etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PIO data transfer IRQ handler.
|
|
|
|
*/
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 13:55:46 +00:00
|
|
|
/*
 * PIO data transfer IRQ handler.
 *
 * Walks the request's scatterlist via an sg mapping iterator, moving data
 * between the FIFO and memory until either the FIFO stalls or a segment
 * boundary is hit.  Local interrupts are disabled around the iterator
 * because sg_miter may kmap_atomic() highmem pages.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/* Tell the iterator how much of this segment we used. */
		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		/* Segment not finished: the FIFO stalled, wait for next IRQ. */
		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle completion of command and data transfers.
|
|
|
|
*/
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 13:55:46 +00:00
|
|
|
/*
 * Handle completion of command and data transfers.
 *
 * Loops while any unmasked status bit remains set, dispatching to the
 * command and data sub-handlers (in variant-dependent order).  Status
 * bits are acknowledged via MMCICLEAR before dispatch so new events
 * raised during handling are picked up on the next loop iteration.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		/*
		 * With a single combined IRQ line, service any pending PIO
		 * events here and strip their bits from the status word.
		 */
		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
		 * enabled) since the HW seems to be triggering the IRQ on both
		 * edges while monitoring DAT0 for busy completion.
		 */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/* Don't poll for busy completion in irq context. */
		if (host->busy_status)
			status &= ~MCI_ST_CARDBUSY;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
|
|
|
|
|
|
|
|
/*
 * mmc_host_ops .request callback: start a new MMC request.
 *
 * Validates the data segment first, then (under the host lock) kicks off
 * the data phase early for reads, and issues either the CMD23 pre-command
 * (mrq->sbc) or the main command.  Completion happens asynchronously from
 * the IRQ handlers; the matching pm_runtime put is done at request end.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	/* Only one request may be in flight at a time. */
	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	/* For reads, arm the data path before sending the command. */
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
|
|
|
|
|
|
|
|
/*
 * mmc_host_ops .set_ios callback: apply bus settings (power mode, bus
 * width/mode, clock) to the controller and its regulators.
 *
 * Regulator and clock-rate changes are done outside the host lock; only
 * the final register writes (clock, power) are done under it.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	/* Give the platform a chance to veto/adjust; failure is non-fatal. */
	if (host->plat->ios_handler &&
		host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		/* Narrower bus widths must not drive the unused data lines. */
		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	/* Variants with explicit mclk control re-rate the input clock. */
	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
|
|
|
|
|
2009-07-09 14:16:07 +00:00
|
|
|
static int mmci_get_cd(struct mmc_host *mmc)
|
|
|
|
{
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
2010-08-09 11:54:43 +00:00
|
|
|
struct mmci_platform_data *plat = host->plat;
|
2014-03-17 12:56:19 +00:00
|
|
|
unsigned int status = mmc_gpio_get_cd(mmc);
|
2009-07-09 14:16:07 +00:00
|
|
|
|
2014-03-17 12:56:19 +00:00
|
|
|
if (status == -ENOSYS) {
|
2010-08-09 11:56:40 +00:00
|
|
|
if (!plat->status)
|
|
|
|
return 1; /* Assume always present */
|
|
|
|
|
2010-08-09 11:54:43 +00:00
|
|
|
status = plat->status(mmc_dev(host->mmc));
|
2014-03-17 12:56:19 +00:00
|
|
|
}
|
2010-07-29 14:58:59 +00:00
|
|
|
return status;
|
2009-07-09 14:16:07 +00:00
|
|
|
}
|
|
|
|
|
2013-05-15 19:47:33 +00:00
|
|
|
/*
 * mmc_host_ops .start_signal_voltage_switch callback.
 *
 * Adjusts the vqmmc (I/O line) regulator to the window matching the
 * requested signalling voltage.  A missing vqmmc regulator is not an
 * error: the function silently succeeds (returns 0).  Voltages not
 * listed in the switch also fall through with ret == 0 intentionally.
 */
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		pm_runtime_get_sync(mmc_dev(mmc));

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}

	return ret;
}
|
|
|
|
|
2013-05-15 19:53:22 +00:00
|
|
|
/* Host operations registered with the MMC core for this driver. */
static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,	/* write-protect via GPIO helper */
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};
|
|
|
|
|
2014-03-21 09:13:05 +00:00
|
|
|
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
|
2012-04-16 09:18:43 +00:00
|
|
|
{
|
2014-03-21 09:13:05 +00:00
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
int ret = mmc_of_parse(mmc);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2014-03-18 09:46:37 +00:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat0", NULL))
|
2014-03-21 09:13:05 +00:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA0DIREN;
|
2014-03-18 09:46:37 +00:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat2", NULL))
|
2014-03-21 09:13:05 +00:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA2DIREN;
|
2014-03-18 09:46:37 +00:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat31", NULL))
|
2014-03-21 09:13:05 +00:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA31DIREN;
|
2014-03-18 09:46:37 +00:00
|
|
|
if (of_get_property(np, "st,sig-dir-dat74", NULL))
|
2014-03-21 09:13:05 +00:00
|
|
|
host->pwr_reg_add |= MCI_ST_DATA74DIREN;
|
2014-03-18 09:46:37 +00:00
|
|
|
if (of_get_property(np, "st,sig-dir-cmd", NULL))
|
2014-03-21 09:13:05 +00:00
|
|
|
host->pwr_reg_add |= MCI_ST_CMDDIREN;
|
2014-03-31 12:19:21 +00:00
|
|
|
if (of_get_property(np, "st,sig-pin-fbclk", NULL))
|
2014-03-21 09:13:05 +00:00
|
|
|
host->pwr_reg_add |= MCI_ST_FBCLKEN;
|
2012-04-16 09:18:43 +00:00
|
|
|
|
|
|
|
if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
|
2014-03-17 14:53:07 +00:00
|
|
|
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
|
2012-04-16 09:18:43 +00:00
|
|
|
if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
|
2014-03-17 14:53:07 +00:00
|
|
|
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
|
2012-04-16 09:18:43 +00:00
|
|
|
|
2014-03-17 14:53:07 +00:00
|
|
|
return 0;
|
2012-05-08 12:59:38 +00:00
|
|
|
}
|
2012-04-16 09:18:43 +00:00
|
|
|
|
2012-11-19 18:23:06 +00:00
|
|
|
/*
 * mmci_probe() - probe one AMBA MMCI (PL180/PL181 and derivatives) device
 * @dev: the matched AMBA device
 * @id:  matched entry of mmci_ids; id->data points at the variant_data
 *
 * Allocates and populates the mmc_host, parses DT or platform data,
 * prepares the bus clock, maps the registers, requests CD/WP GPIOs and
 * IRQs, sets up DMA and runtime PM, and finally registers the host with
 * the MMC core.  Returns 0 on success or a negative errno.
 */
static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	/* DT-only probe: supply an all-zero platform data as fallback. */
	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* Peripheral ID fields, for the info/debug prints below. */
	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	/* Qualcomm variants report the RX FIFO count differently. */
	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		/* Re-read: the clock framework may have rounded the rate. */
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 * on Qualcomm like controllers get the nearest minimum clock to 100Khz
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
				min(variant->f_max, mmc->f_max) :
				min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
				fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* DT takes precedence over platform data. */
	if (!np) {
		if (!plat->cd_invert)
			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/* Hardware busy detection, where the variant supports it. */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	/* Mask all interrupts and clear any pending status. */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT, or
	 * look up these descriptors named "cd" and "wp" right here, fail
	 * silently of these do not exist and proceed to try platform data
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_cd)) {
				ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
				if (ret)
					goto clk_disable;
			}
		}

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_wp)) {
				ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
				if (ret)
					goto clk_disable;
			}
		}
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	/* A second IRQ is optional; without it PIO shares the cmd IRQ. */
	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	/* Balanced by pm_runtime_get_sync() in mmci_remove(). */
	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}
|
|
|
|
|
2012-11-19 18:26:03 +00:00
|
|
|
static int mmci_remove(struct amba_device *dev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct mmc_host *mmc = amba_get_drvdata(dev);
|
|
|
|
|
|
|
|
if (mmc) {
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
|
2011-08-14 08:17:05 +00:00
|
|
|
/*
|
|
|
|
* Undo pm_runtime_put() in probe. We use the _sync
|
|
|
|
* version here so that we can access the primecell.
|
|
|
|
*/
|
|
|
|
pm_runtime_get_sync(&dev->dev);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
mmc_remove_host(mmc);
|
|
|
|
|
|
|
|
writel(0, host->base + MMCIMASK0);
|
|
|
|
writel(0, host->base + MMCIMASK1);
|
|
|
|
|
|
|
|
writel(0, host->base + MMCICOMMAND);
|
|
|
|
writel(0, host->base + MMCIDATACTRL);
|
|
|
|
|
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.
Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:
- rename txsize_threshold to dmasize_threshold, as this reflects the
purpose more.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up requesting of dma channels.
- don't release a single channel twice when it's shared between tx and rx.
- get rid of 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers
(eg, PL08x) are unable to be configured for scatter DMA and also listen
to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
They can do one or other but not both. As MMCI uses LBREQ/LSREQ for the
final burst/words, PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the
MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission
function - Dan says this must never fail.
- use new dmaengine helper functions rather than using the ugly function
pointers directly.
- allow DMA code to be fully optimized away using dma_inprogress() which
is defined to constant 0 if DMA engine support is disabled.
- request maximum segment size from the DMA engine struct device and
set this appropriately.
- removed checking of buffer alignment - the DMA engine should deal with
its own restrictions on buffer alignment, not the individual DMA engine
users.
- removed setting DMAREQCTL - this confuses some DMA controllers as it
causes LBREQ to be asserted for the last seven transfers, rather than
six SREQ and one LSREQ.
- removed burst setting - the DMA controller should not burst past the
transfer size required to complete the DMA operation.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-11 19:35:53 +00:00
|
|
|
mmci_dma_release(host);
|
2012-08-26 16:00:59 +00:00
|
|
|
clk_disable_unprepare(host->clk);
|
2005-04-16 22:20:36 +00:00
|
|
|
mmc_free_host(mmc);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-01-22 23:38:00 +00:00
|
|
|
#ifdef CONFIG_PM
|
2013-09-04 08:05:17 +00:00
|
|
|
/*
 * mmci_save() - quiesce the controller before its clock is gated.
 *
 * Masks all interrupts and, on variants flagged pwrreg_nopower, zeroes
 * DATACTRL/POWER/CLOCK (presumably because those registers lose state
 * when power is cut — mmci_restore() rewrites them from the cached
 * host->*_reg copies; confirm against the variant_data definition).
 * Runs under host->lock to serialize against the IRQ handlers.
 */
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	/* Let the register writes settle before the clock goes away. */
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
|
|
|
|
|
|
|
|
/*
 * mmci_restore() - counterpart of mmci_save(), run after the clock is
 * re-enabled.
 *
 * On pwrreg_nopower variants the CLOCK/DATACTRL/POWER registers are
 * rewritten from the cached host->clk_reg / host->datactrl_reg /
 * host->pwr_reg copies; the interrupt mask is then re-armed.
 * Runs under host->lock to serialize against the IRQ handlers.
 */
static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
|
|
|
|
|
2013-01-09 10:15:26 +00:00
|
|
|
static int mmci_runtime_suspend(struct device *dev)
|
|
|
|
{
|
|
|
|
struct amba_device *adev = to_amba_device(dev);
|
|
|
|
struct mmc_host *mmc = amba_get_drvdata(adev);
|
|
|
|
|
|
|
|
if (mmc) {
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
2013-09-04 08:00:37 +00:00
|
|
|
pinctrl_pm_select_sleep_state(dev);
|
2013-09-04 08:05:17 +00:00
|
|
|
mmci_save(host);
|
2013-01-09 10:15:26 +00:00
|
|
|
clk_disable_unprepare(host->clk);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mmci_runtime_resume(struct device *dev)
|
|
|
|
{
|
|
|
|
struct amba_device *adev = to_amba_device(dev);
|
|
|
|
struct mmc_host *mmc = amba_get_drvdata(adev);
|
|
|
|
|
|
|
|
if (mmc) {
|
|
|
|
struct mmci_host *host = mmc_priv(mmc);
|
|
|
|
clk_prepare_enable(host->clk);
|
2013-09-04 08:05:17 +00:00
|
|
|
mmci_restore(host);
|
2013-09-04 08:00:37 +00:00
|
|
|
pinctrl_pm_select_default_state(dev);
|
2013-01-09 10:15:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2011-12-13 15:59:34 +00:00
|
|
|
/*
 * PM ops: system sleep is routed through the runtime PM callbacks via
 * pm_runtime_force_suspend/resume; runtime PM uses the suspend/resume
 * handlers above (compiled under CONFIG_PM).
 */
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static struct amba_id mmci_ids[] = {
|
|
|
|
{
|
|
|
|
.id = 0x00041180,
|
2011-03-11 17:18:07 +00:00
|
|
|
.mask = 0xff0fffff,
|
2010-07-21 11:54:40 +00:00
|
|
|
.data = &variant_arm,
|
2005-04-16 22:20:36 +00:00
|
|
|
},
|
2011-03-11 17:18:07 +00:00
|
|
|
{
|
|
|
|
.id = 0x01041180,
|
|
|
|
.mask = 0xff0fffff,
|
|
|
|
.data = &variant_arm_extended_fifo,
|
|
|
|
},
|
2013-01-24 13:12:45 +00:00
|
|
|
{
|
|
|
|
.id = 0x02041180,
|
|
|
|
.mask = 0xff0fffff,
|
|
|
|
.data = &variant_arm_extended_fifo_hwfc,
|
|
|
|
},
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
.id = 0x00041181,
|
|
|
|
.mask = 0x000fffff,
|
2010-07-21 11:54:40 +00:00
|
|
|
.data = &variant_arm,
|
2005-04-16 22:20:36 +00:00
|
|
|
},
|
2009-01-04 14:18:54 +00:00
|
|
|
/* ST Micro variants */
|
|
|
|
{
|
|
|
|
.id = 0x00180180,
|
|
|
|
.mask = 0x00ffffff,
|
2010-07-21 11:54:40 +00:00
|
|
|
.data = &variant_u300,
|
2009-01-04 14:18:54 +00:00
|
|
|
},
|
2012-04-10 16:43:59 +00:00
|
|
|
{
|
|
|
|
.id = 0x10180180,
|
|
|
|
.mask = 0xf0ffffff,
|
|
|
|
.data = &variant_nomadik,
|
|
|
|
},
|
2009-01-04 14:18:54 +00:00
|
|
|
{
|
|
|
|
.id = 0x00280180,
|
|
|
|
.mask = 0x00ffffff,
|
2016-01-04 01:21:55 +00:00
|
|
|
.data = &variant_nomadik,
|
2010-07-21 11:54:40 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.id = 0x00480180,
|
2011-03-25 07:51:52 +00:00
|
|
|
.mask = 0xf0ffffff,
|
2010-07-21 11:54:40 +00:00
|
|
|
.data = &variant_ux500,
|
2009-01-04 14:18:54 +00:00
|
|
|
},
|
2011-03-25 07:51:52 +00:00
|
|
|
{
|
|
|
|
.id = 0x10480180,
|
|
|
|
.mask = 0xf0ffffff,
|
|
|
|
.data = &variant_ux500v2,
|
|
|
|
},
|
2014-06-02 09:10:13 +00:00
|
|
|
/* Qualcomm variants */
|
|
|
|
{
|
|
|
|
.id = 0x00051180,
|
|
|
|
.mask = 0x000fffff,
|
|
|
|
.data = &variant_qcom,
|
|
|
|
},
|
2005-04-16 22:20:36 +00:00
|
|
|
{ 0, 0 },
|
|
|
|
};
|
|
|
|
|
2011-10-05 14:15:21 +00:00
|
|
|
MODULE_DEVICE_TABLE(amba, mmci_ids);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* AMBA bus glue: binds mmci_probe()/mmci_remove(), the PM ops and the
 * peripheral ID table under the driver name. */
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};
|
|
|
|
|
2012-03-15 09:40:38 +00:00
|
|
|
module_amba_driver(mmci_driver);

/* 'fmax' is the fallback maximum bus frequency used by mmci_probe()
 * when no f_max is supplied; exposed read-only (0444) in sysfs. */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
|