// SPDX-License-Identifier: GPL-2.0+
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/property.h>

#include <linux/dma/imx-dma.h>

#define DRIVER_NAME "spi_imx"

static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");

/* define polling limits */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
		 "time in us to run a transfer in polling mode\n");

#define MXC_RPM_TIMEOUT		2000 /* 2000ms */

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
#define MXC_INT_RDR	BIT(4)   /* Receive data threshold interrupt */

/* The maximum number of bytes that an SDMA BD can transfer. */
#define MAX_SDMA_BD_BYTES	(1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST	512
/* The maximum number of bytes that IMX53_ECSPI can transfer in target mode. */
#define MX53_MAX_TRANSFER_BYTES		512

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
	int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
	int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
	void (*trigger)(struct spi_imx_data *spi_imx);
	int (*rx_available)(struct spi_imx_data *spi_imx);
	void (*reset)(struct spi_imx_data *spi_imx);
	void (*setup_wml)(struct spi_imx_data *spi_imx);
	void (*disable)(struct spi_imx_data *spi_imx);
	bool has_dmamode;
	bool has_targetmode;
	unsigned int fifo_size;
	bool dynamic_burst;
	/*
	 * ERR009165 fixed or not:
	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
	 */
	bool tx_glitch_fixed;
	enum spi_imx_devtype devtype;
};
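
/*
 * Each supported controller variant provides one spi_imx_devtype_data
 * instance (see the tables further down); the matching entry is selected
 * via the spi_imx_dt_ids[] table and drives all register-level accesses.
 */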

struct spi_imx_data {
	struct spi_controller *controller;
	struct device *dev;

	struct completion xfer_done;
	void __iomem *base;
	unsigned long base_phys;

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;
	unsigned int spi_bus_clk;

	unsigned int bits_per_word;
	unsigned int spi_drctl;

	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *spi_imx);
	void (*rx)(struct spi_imx_data *spi_imx);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;
	bool rx_only;

	/* Target mode */
	bool target_mode;
	bool target_aborted;
	unsigned int target_burst;

	/* DMA */
	bool usedma;
	u32 wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX53_ECSPI;
}

#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)	\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)	\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
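
/*
 * The instantiations above expand to spi_imx_buf_rx_u8/u16/u32() and
 * spi_imx_buf_tx_u8/u16/u32(), the PIO FIFO accessors that get hooked up
 * to spi_imx->rx and spi_imx->tx depending on the word size of a transfer.
 */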

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];
	return i;
}
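
/*
 * Example: with fin = 16 MHz and fspi = 1 MHz the loop stops at i = 6
 * (mxc_clkdivs[6] = 16, the first divider large enough), so the register
 * value 6 is returned and *fres becomes 16 MHz / 16 = 1 MHz.
 */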

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			goto out;
		div <<= 1;
	}

out:
	*fres = fin / div;
	return i;
}
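
/*
 * Example: with fin = 16 MHz and fspi = 4 MHz the very first divider
 * (div = 4) already satisfies fspi * div >= fin, so the function returns 0
 * and *fres becomes 16 MHz / 4 = 4 MHz.
 */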

static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word <= 8)
		return 1;
	else if (bits_per_word <= 16)
		return 2;
	else
		return 4;
}

static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

	if (!use_dma || controller->fallback)
		return false;

	if (!controller->dma_rx)
		return false;

	if (spi_imx->target_mode)
		return false;

	if (transfer->len < spi_imx->devtype_data->fifo_size)
		return false;

	spi_imx->dynamic_burst = 0;

	return true;
}
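
/*
 * DMA is therefore only attempted for host-mode transfers that are at least
 * one FIFO in size, when DMA channels were obtained and no PIO fallback was
 * requested; dynamic burst handling is disabled for the DMA path.
 */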

/*
 * Note the number of natively supported chip selects for MX51 is 4. Some
 * devices may have fewer actual SS pins but the register map supports 4. When
 * using gpio chip selects the cs values passed into the macros below can go
 * outside the range 0 - 3. We therefore need to limit the cs value to avoid
 * corrupting bits outside the allocated locations.
 *
 * The simplest way to do this is to just mask the cs bits to 2 bits. This
 * still allows all 4 native chip selects to work as well as gpio chip selects
 * (which can use any of the 4 chip select configurations).
 */

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs & 3) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20
#define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs & 3) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs & 3) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs & 3) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs & 3) + 12))
#define MX51_ECSPI_CONFIG_DATACTL(cs)	(1 << ((cs & 3) + 16))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs & 3) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 << 0)
#define MX51_ECSPI_INT_RREN		(1 << 3)
#define MX51_ECSPI_INT_RDREN		(1 << 4)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)

#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 << 3)

#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)

static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		unsigned int bytes_per_word;

		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			swab32s(&val);
		else if (bytes_per_word == 2)
			swahw32s(&val);
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}

static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}

static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		swab32s(&val);
	else if (bytes_per_word == 2)
		swahw32s(&val);
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
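
/*
 * The *_swap helpers above are the FIFO accessors used for dynamic bursts:
 * they move 32 bits per FIFO access and byte- or halfword-swap the value on
 * little-endian hosts so that 8- and 16-bit words still reach the wire in
 * the expected order, falling back to narrower accesses for the unaligned
 * tail of a transfer.
 */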

static void mx53_ecspi_rx_target(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->target_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->target_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}

static void mx53_ecspi_tx_target(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
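
/*
 * In target mode the i.MX53 FIFO accessors above copy only the valid part of
 * each 32-bit FIFO word: when the burst length is not a multiple of four
 * bytes the leading word is partial, and n_bytes selects how many of its
 * bytes actually carry payload.
 */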

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	fspi = min(fspi, fin);

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
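
/*
 * Example: with fin = 60 MHz and fspi = 1 MHz the post-divider ends up as
 * 2 (divide by 4) and the pre-divider as 14 (divide by 15), giving
 * *fres = (60 MHz / 15) >> 2 = 1 MHz exactly.
 */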

static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	if (enable & MXC_INT_RDR)
		val |= MX51_ECSPI_INT_RDREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg;

	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	reg |= MX51_ECSPI_CTRL_XCH;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}

static int mx51_ecspi_channel(const struct spi_device *spi)
{
	if (!spi_get_csgpiod(spi, 0))
		return spi_get_chipselect(spi, 0);
	return spi->controller->unused_native_cs;
}

static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 min_speed_hz = ~0U;
	u32 testreg, delay;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
	u32 current_cfg = cfg;
	int channel = mx51_ecspi_channel(spi);

	/* set Host or Target mode */
	if (spi_imx->target_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(channel);

	/*
	 * The ctrl register must be written first, with the EN bit set;
	 * while EN is clear the other registers must not be written to.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by Chip Select signal in Target mode
	 * is not functional on the i.MX53 SoC, so configure the SPI burst
	 * to complete when BURST_LENGTH + 1 bits have been received.
	 */
	if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(channel);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(channel);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(channel);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(channel);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(channel);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(channel);
	}

	if (spi->mode & SPI_MOSI_IDLE_LOW)
		cfg |= MX51_ECSPI_CONFIG_DATACTL(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_DATACTL(channel);

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(channel);

	if (cfg == current_cfg)
		return 0;

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK ticks just to be sure. The
	 * delay it takes for the hardware to apply the changes is only
	 * noticeable if the SCLK clock runs very slowly. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO chip select might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 *
	 * Because spi_imx->spi_bus_clk is only set in the prepare_message
	 * callback, iterate over all the transfers in spi_message, find the
	 * one with the lowest bus frequency, and use that bus frequency for
	 * the delay calculation. In case all transfers have speed_hz == 0,
	 * min_speed_hz is ~0 and the resulting delay is zero.
	 */
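	/*
	 * Example: at a minimum transfer speed of 100 kHz the two-tick wait
	 * below is 2 * 1000000 / 100000 = 20 us and is served by
	 * usleep_range(); at 1 MHz it is only 2 us and udelay() is used.
	 */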
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->speed_hz)
			continue;
		min_speed_hz = min(xfer->speed_hz, min_speed_hz);
	}

	delay = (2 * 1000000) / min_speed_hz;
	if (likely(delay < 10))	/* SCLK is faster than 200 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}

static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	bool cpha = (spi->mode & SPI_CPHA);
	bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
	int channel = mx51_ecspi_channel(spi);

	/* Flip cpha logical value iff flip_cpha */
	cpha ^= flip_cpha;

	if (cpha)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(channel);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
}

static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
		ctrl |= (spi_imx->target_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else {
		if (spi_imx->usedma) {
			ctrl |= (spi_imx->bits_per_word - 1)
				<< MX51_ECSPI_CTRL_BL_OFFSET;
		} else {
			if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
				ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
						<< MX51_ECSPI_CTRL_BL_OFFSET;
			else
				ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
						BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
						<< MX51_ECSPI_CTRL_BL_OFFSET;
		}
	}

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
	spi_imx->spi_bus_clk = clk;

	mx51_configure_cpha(spi_imx, spi);

	/*
	 * ERR009165: work in XHC mode instead of SMC as PIO on the chips
	 * before i.mx6ul.
	 */
	if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
		ctrl |= MX51_ECSPI_CTRL_SMC;
	else
		ctrl &= ~MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	return 0;
}
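
/*
 * BURST_LENGTH holds the burst size in bits minus one. Example: a 6-byte
 * PIO transfer with bits_per_word = 8 programs 6 / 1 * 8 - 1 = 47, i.e. a
 * single 48-bit burst; PIO bursts are capped at MX51_ECSPI_CTRL_MAX_BURST
 * (512) bytes.
 */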

static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	u32 tx_wml = 0;

	if (spi_imx->devtype_data->tx_glitch_fixed)
		tx_wml = spi_imx->wml;
	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
		MX51_ECSPI_DMA_TX_WML(tx_wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}

static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_HOST	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_SMC	(1 << 3)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPI_DMAREG	0x10
#define MX31_DMAREG_RH_DEN	(1<<4)
#define MX31_DMAREG_TH_DEN	(1<<1)

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

#define MX31_CSPI_TESTREG	0x1C
#define MX31_TEST_LBC	(1 << 14)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}

static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_HOST;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (!spi_get_csgpiod(spi, 0))
		reg |= (spi_get_chipselect(spi, 0)) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}

static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_HOST	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}

static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_HOST;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (!spi_get_csgpiod(spi, 0))
		reg |= spi_get_chipselect(spi, 0) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_HOST	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}

static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_HOST;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_targetmode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};

static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_targetmode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};

static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_targetmode = true,
	.tx_glitch_fixed = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};
|
|
|
|
|
2011-07-09 17:16:41 +00:00
|
|
|
static const struct of_device_id spi_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
        { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
        { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
        { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
        { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
        { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
        { .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
        { .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

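/*
 * The eCSPI burst length (BL) field holds the burst size in bits, minus one:
 * an 8-bit burst programs the value 7 and a 32-bit burst the value 31. The
 * field layout comes from the MX51_ECSPI_CTRL definitions used below.
 */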
static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
{
        u32 ctrl;

        ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
        ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
        ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
        writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}

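/*
 * Fill the TX FIFO and (in host mode) kick off the transfer. With
 * dynamic_burst the burst length is reprogrammed per chunk: the part of the
 * transfer that does not fill a maximum-size burst is sent first, then full
 * bursts follow. Illustrative example: a 100-byte transfer whose length is
 * below the controller's maximum burst is programmed as a single 800-bit
 * burst, and 'remainder' starts out at 100 bytes.
 */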
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
        unsigned int burst_len;

        /*
         * Reload the FIFO when the number of bytes remaining in the current
         * burst is 0. This only applies when bits_per_word is a multiple
         * of 8.
         */
        if (!spi_imx->remainder) {
                if (spi_imx->dynamic_burst) {

                        /* We need to deal with unaligned data first */
                        burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

                        if (!burst_len)
                                burst_len = MX51_ECSPI_CTRL_MAX_BURST;

                        spi_imx_set_burst_len(spi_imx, burst_len * 8);

                        spi_imx->remainder = burst_len;
                } else {
                        spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
                }
        }

        while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
                if (!spi_imx->count)
                        break;
                if (spi_imx->dynamic_burst &&
                    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
                        break;
                spi_imx->tx(spi_imx);
                spi_imx->txfifo++;
        }

        if (!spi_imx->target_mode)
                spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
        struct spi_imx_data *spi_imx = dev_id;

        while (spi_imx->txfifo &&
               spi_imx->devtype_data->rx_available(spi_imx)) {
                spi_imx->rx(spi_imx);
                spi_imx->txfifo--;
        }

        if (spi_imx->count) {
                spi_imx_push(spi_imx);
                return IRQ_HANDLED;
        }

        if (spi_imx->txfifo) {
                /* No data left to push, but still waiting for rx data,
                 * enable receive data available interrupt.
                 */
                spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_RR);
                return IRQ_HANDLED;
        }

        spi_imx->devtype_data->intctrl(spi_imx, 0);
        complete(&spi_imx->xfer_done);

        return IRQ_HANDLED;
}

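/*
 * Program both DMA channels so that one DMA bus access moves exactly one SPI
 * word: the slave bus width follows bits_per_word (e.g. 16 bits per word maps
 * to a 2-byte access) and the maximum burst is the FIFO watermark level in
 * words. The numbers here are illustrative, not hardware limits.
 */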
static int spi_imx_dma_configure(struct spi_controller *controller)
{
        int ret;
        enum dma_slave_buswidth buswidth;
        struct dma_slave_config rx = {}, tx = {};
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

        switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
        case 4:
                buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        case 2:
                buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        case 1:
                buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        default:
                return -EINVAL;
        }

        tx.direction = DMA_MEM_TO_DEV;
        tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
        tx.dst_addr_width = buswidth;
        tx.dst_maxburst = spi_imx->wml;
        ret = dmaengine_slave_config(controller->dma_tx, &tx);
        if (ret) {
                dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
                return ret;
        }

        rx.direction = DMA_DEV_TO_MEM;
        rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
        rx.src_addr_width = buswidth;
        rx.src_maxburst = spi_imx->wml;
        ret = dmaengine_slave_config(controller->dma_rx, &rx);
        if (ret) {
                dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
                return ret;
        }

        return 0;
}

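/*
 * Per-transfer setup: pick the bus clock and word size, select the tx()/rx()
 * FIFO accessors, and decide between DMA and PIO. When dynamic_burst can be
 * used (host mode, 8/16/32 bits per word, no SPI_CS_WORD), the swap accessors
 * are chosen below instead of the plain u8/u16/u32 ones.
 */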
static int spi_imx_setupxfer(struct spi_device *spi,
                             struct spi_transfer *t)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);

        if (!t)
                return 0;

        if (!t->speed_hz) {
                if (!spi->max_speed_hz) {
                        dev_err(&spi->dev, "no speed_hz provided!\n");
                        return -EINVAL;
                }
                dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
                spi_imx->spi_bus_clk = spi->max_speed_hz;
        } else
                spi_imx->spi_bus_clk = t->speed_hz;

        spi_imx->bits_per_word = t->bits_per_word;
        spi_imx->count = t->len;

        /*
         * Initialize the functions for transfer. To transfer non byte-aligned
         * words, we have to use multiple word-size bursts; we can't use
         * dynamic_burst in that case.
         */
        if (spi_imx->devtype_data->dynamic_burst && !spi_imx->target_mode &&
            !(spi->mode & SPI_CS_WORD) &&
            (spi_imx->bits_per_word == 8 ||
             spi_imx->bits_per_word == 16 ||
             spi_imx->bits_per_word == 32)) {

                spi_imx->rx = spi_imx_buf_rx_swap;
                spi_imx->tx = spi_imx_buf_tx_swap;
                spi_imx->dynamic_burst = 1;

        } else {
                if (spi_imx->bits_per_word <= 8) {
                        spi_imx->rx = spi_imx_buf_rx_u8;
                        spi_imx->tx = spi_imx_buf_tx_u8;
                } else if (spi_imx->bits_per_word <= 16) {
                        spi_imx->rx = spi_imx_buf_rx_u16;
                        spi_imx->tx = spi_imx_buf_tx_u16;
                } else {
                        spi_imx->rx = spi_imx_buf_rx_u32;
                        spi_imx->tx = spi_imx_buf_tx_u32;
                }
                spi_imx->dynamic_burst = 0;
        }

        if (spi_imx_can_dma(spi_imx->controller, spi, t))
                spi_imx->usedma = true;
        else
                spi_imx->usedma = false;

        spi_imx->rx_only = ((t->tx_buf == NULL)
                        || (t->tx_buf == spi->controller->dummy_tx));

        if (is_imx53_ecspi(spi_imx) && spi_imx->target_mode) {
                spi_imx->rx = mx53_ecspi_rx_target;
                spi_imx->tx = mx53_ecspi_tx_target;
                spi_imx->target_burst = t->len;
        }

        spi_imx->devtype_data->prepare_transfer(spi_imx, spi);

        return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
        struct spi_controller *controller = spi_imx->controller;

        if (controller->dma_rx) {
                dma_release_channel(controller->dma_rx);
                controller->dma_rx = NULL;
        }

        if (controller->dma_tx) {
                dma_release_channel(controller->dma_tx);
                controller->dma_tx = NULL;
        }
}

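/*
 * Request the SDMA channels and set the default watermark level to half the
 * FIFO depth, e.g. 32 words for the 64-word eCSPI FIFO. The per-transfer DMA
 * path below may lower this to match the scatterlist length.
 */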
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
                             struct spi_controller *controller)
{
        int ret;

        spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

        /* Prepare for TX DMA: */
        controller->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(controller->dma_tx)) {
                ret = PTR_ERR(controller->dma_tx);
                dev_err_probe(dev, ret, "can't get the TX DMA channel!\n");
                controller->dma_tx = NULL;
                goto err;
        }

        /* Prepare for RX DMA: */
        controller->dma_rx = dma_request_chan(dev, "rx");
        if (IS_ERR(controller->dma_rx)) {
                ret = PTR_ERR(controller->dma_rx);
                dev_err_probe(dev, ret, "can't get the RX DMA channel!\n");
                controller->dma_rx = NULL;
                goto err;
        }

        init_completion(&spi_imx->dma_rx_completion);
        init_completion(&spi_imx->dma_tx_completion);
        controller->can_dma = spi_imx_can_dma;
        controller->max_dma_len = MAX_SDMA_BD_BYTES;
        spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
                                     SPI_CONTROLLER_MUST_TX;

        return 0;
err:
        spi_imx_sdma_exit(spi_imx);
        return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
        struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

        complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
        struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

        complete(&spi_imx->dma_tx_completion);
}

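/*
 * Worked example (illustrative numbers): for a 1000-byte transfer at a
 * 100 kHz bus clock, (8 + 4) * 1000 / 100000 truncates to 0 seconds, the
 * extra second for scheduling makes it 1, and doubling gives a 2 second
 * timeout passed to msecs_to_jiffies().
 */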
static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
{
        unsigned long timeout = 0;

        /* Time with actual data transfer and CS change delay related to HW */
        timeout = (8 + 4) * size / spi_imx->spi_bus_clk;

        /* Add extra second for scheduler related activities */
        timeout += 1;

        /* Double calculated timeout */
        return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}

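/*
 * The watermark level is re-derived per transfer from the last RX
 * scatterlist entry so that the final DMA burst never reads past the end of
 * the buffer. Illustrative example: with a 64-word FIFO and a 120-byte last
 * entry at one byte per word, the search below starts at 32 and settles on
 * 30, the largest divisor of 120 not exceeding half the FIFO.
 */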
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
                                struct spi_transfer *transfer)
{
        struct dma_async_tx_descriptor *desc_tx, *desc_rx;
        unsigned long transfer_timeout;
        unsigned long timeout;
        struct spi_controller *controller = spi_imx->controller;
        struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
        struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
        unsigned int bytes_per_word, i;
        int ret;

        /* Get the right burst length from the last sg to ensure no tail data */
        bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
        for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
                if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
                        break;
        }
        /* Use 1 as the wml if no suitable burst length was found */
        if (i == 0)
                i = 1;

        spi_imx->wml = i;

        ret = spi_imx_dma_configure(controller);
        if (ret)
                goto dma_failure_no_start;

        if (!spi_imx->devtype_data->setup_wml) {
                dev_err(spi_imx->dev, "No setup_wml()?\n");
                ret = -EINVAL;
                goto dma_failure_no_start;
        }
        spi_imx->devtype_data->setup_wml(spi_imx);

        /*
         * The TX DMA setup starts the transfer, so make sure RX is configured
         * before TX.
         */
        desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
                                rx->sgl, rx->nents, DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx) {
                ret = -EINVAL;
                goto dma_failure_no_start;
        }

        desc_rx->callback = spi_imx_dma_rx_callback;
        desc_rx->callback_param = (void *)spi_imx;
        dmaengine_submit(desc_rx);
        reinit_completion(&spi_imx->dma_rx_completion);
        dma_async_issue_pending(controller->dma_rx);

        desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
                                tx->sgl, tx->nents, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
                dmaengine_terminate_all(controller->dma_tx);
                dmaengine_terminate_all(controller->dma_rx);
                return -EINVAL;
        }

        desc_tx->callback = spi_imx_dma_tx_callback;
        desc_tx->callback_param = (void *)spi_imx;
        dmaengine_submit(desc_tx);
        reinit_completion(&spi_imx->dma_tx_completion);
        dma_async_issue_pending(controller->dma_tx);

        transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

        /* Wait for SDMA to finish the data transfer. */
        timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
                dmaengine_terminate_all(controller->dma_tx);
                dmaengine_terminate_all(controller->dma_rx);
                return -ETIMEDOUT;
        }

        timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(&controller->dev, "I/O Error in DMA RX\n");
                spi_imx->devtype_data->reset(spi_imx);
                dmaengine_terminate_all(controller->dma_rx);
                return -ETIMEDOUT;
        }

        return 0;
/* fallback to pio */
dma_failure_no_start:
        transfer->error |= SPI_TRANS_FAIL_NO_START;
        return ret;
}

static int spi_imx_pio_transfer(struct spi_device *spi,
                                struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        unsigned long transfer_timeout;
        unsigned long timeout;

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        reinit_completion(&spi_imx->xfer_done);

        spi_imx_push(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

        transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

        timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(&spi->dev, "I/O Error in PIO\n");
                spi_imx->devtype_data->reset(spi_imx);
                return -ETIMEDOUT;
        }

        return 0;
}

static int spi_imx_poll_transfer(struct spi_device *spi,
                                 struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        unsigned long timeout;

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        /* Fill in the FIFO before the timeout calculation: if we are
         * interrupted here, the data is already being transferred by
         * the HW while we are interrupted.
         */
        spi_imx_push(spi_imx);

        timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
        while (spi_imx->txfifo) {
                /* RX */
                while (spi_imx->txfifo &&
                       spi_imx->devtype_data->rx_available(spi_imx)) {
                        spi_imx->rx(spi_imx);
                        spi_imx->txfifo--;
                }

                /* TX */
                if (spi_imx->count) {
                        spi_imx_push(spi_imx);
                        continue;
                }

                if (spi_imx->txfifo &&
                    time_after(jiffies, timeout)) {

                        dev_err_ratelimited(&spi->dev,
                                            "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
                                            jiffies - timeout);

                        /* fall back to interrupt mode */
                        return spi_imx_pio_transfer(spi, transfer);
                }
        }

        return 0;
}

static int spi_imx_pio_transfer_target(struct spi_device *spi,
                                       struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        int ret = 0;

        if (is_imx53_ecspi(spi_imx) &&
            transfer->len > MX53_MAX_TRANSFER_BYTES) {
                dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
                        MX53_MAX_TRANSFER_BYTES);
                return -EMSGSIZE;
        }

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        reinit_completion(&spi_imx->xfer_done);
        spi_imx->target_aborted = false;

        spi_imx_push(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);

        if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
            spi_imx->target_aborted) {
                dev_dbg(&spi->dev, "interrupted\n");
                ret = -EINTR;
        }

        /* The eCSPI controller has a HW issue when working in target mode:
         * after 64 words are written to the TXFIFO, ECSPI_TXDATA keeps
         * shifting out the last word even once the TXFIFO becomes empty,
         * so we have to disable ECSPI after the transfer completes.
         */
        if (spi_imx->devtype_data->disable)
                spi_imx->devtype_data->disable(spi_imx);

        return ret;
}

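/*
 * Dispatch a single transfer: target mode and DMA take their own paths;
 * otherwise short transfers are polled and longer ones use interrupts. The
 * byte_limit math converts the polling time budget into a transfer length,
 * e.g. (illustrative) a 30 us polling limit at 10 MHz gives
 * hz_per_byte = 12 * 1000000 / 30 = 400000 and
 * byte_limit = 10000000 / 400000 = 25, so transfers shorter than 25 bytes
 * are polled.
 */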
static int spi_imx_transfer_one(struct spi_controller *controller,
                                struct spi_device *spi,
                                struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
        unsigned long hz_per_byte, byte_limit;

        spi_imx_setupxfer(spi, transfer);
        transfer->effective_speed_hz = spi_imx->spi_bus_clk;

        /* flush rxfifo before transfer */
        while (spi_imx->devtype_data->rx_available(spi_imx))
                readl(spi_imx->base + MXC_CSPIRXDATA);

        if (spi_imx->target_mode)
                return spi_imx_pio_transfer_target(spi, transfer);

        /*
         * If we decided in spi_imx_can_dma() that we want to do a DMA
         * transfer, the SPI transfer has already been mapped, so we
         * have to do the DMA transfer here.
         */
        if (spi_imx->usedma)
                return spi_imx_dma_transfer(spi_imx, transfer);

        /*
         * Calculate the estimated time in us the transfer runs. Find
         * the number of Hz per byte per polling limit.
         */
        hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
        byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;

        /* run in polling mode for short transfers */
        if (transfer->len < byte_limit)
                return spi_imx_poll_transfer(spi, transfer);

        return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
        dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
                 spi->mode, spi->bits_per_word, spi->max_speed_hz);

        return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

static int
spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
        int ret;

        ret = pm_runtime_resume_and_get(spi_imx->dev);
        if (ret < 0) {
                dev_err(spi_imx->dev, "failed to enable clock\n");
                return ret;
        }

        ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
        if (ret) {
                pm_runtime_mark_last_busy(spi_imx->dev);
                pm_runtime_put_autosuspend(spi_imx->dev);
        }

        return ret;
}

static int
spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

        pm_runtime_mark_last_busy(spi_imx->dev);
        pm_runtime_put_autosuspend(spi_imx->dev);
        return 0;
}

static int spi_imx_target_abort(struct spi_controller *controller)
{
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

        spi_imx->target_aborted = true;
        complete(&spi_imx->xfer_done);

        return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct spi_controller *controller;
        struct spi_imx_data *spi_imx;
        struct resource *res;
        int ret, irq, spi_drctl;
        const struct spi_imx_devtype_data *devtype_data =
                        of_device_get_match_data(&pdev->dev);
        bool target_mode;
        u32 val;

        target_mode = devtype_data->has_targetmode &&
                      of_property_read_bool(np, "spi-slave");
        if (target_mode)
                controller = spi_alloc_target(&pdev->dev,
                                              sizeof(struct spi_imx_data));
        else
                controller = spi_alloc_host(&pdev->dev,
                                            sizeof(struct spi_imx_data));
        if (!controller)
                return -ENOMEM;

        ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
        if ((ret < 0) || (spi_drctl >= 0x3)) {
                /* '11' is reserved */
                spi_drctl = 0;
        }

        platform_set_drvdata(pdev, controller);

        controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
        controller->bus_num = np ? -1 : pdev->id;
        controller->use_gpio_descriptors = true;

        spi_imx = spi_controller_get_devdata(controller);
        spi_imx->controller = controller;
        spi_imx->dev = &pdev->dev;
        spi_imx->target_mode = target_mode;

        spi_imx->devtype_data = devtype_data;

        /*
         * Get the number of chip selects from device properties. This can
         * come from the device tree or from board files; if it is not
         * defined, a default of 3 chip selects is used, as all the legacy
         * board files have <= 3 chip selects.
         */
        if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
                controller->num_chipselect = val;
        else
                controller->num_chipselect = 3;

        controller->transfer_one = spi_imx_transfer_one;
        controller->setup = spi_imx_setup;
        controller->cleanup = spi_imx_cleanup;
        controller->prepare_message = spi_imx_prepare_message;
        controller->unprepare_message = spi_imx_unprepare_message;
        controller->target_abort = spi_imx_target_abort;
        controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS |
                                SPI_MOSI_IDLE_LOW;

        if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
            is_imx53_ecspi(spi_imx))
                controller->mode_bits |= SPI_LOOP | SPI_READY;

        if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
                controller->mode_bits |= SPI_RX_CPHA_FLIP;

        if (is_imx51_ecspi(spi_imx) &&
            device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
                /*
                 * When using HW-CS implementing SPI_CS_WORD can be done by just
                 * setting the burst length to the word size. This is
                 * considerably faster than manually controlling the CS.
                 */
                controller->mode_bits |= SPI_CS_WORD;

        if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) {
                controller->max_native_cs = 4;
                controller->flags |= SPI_CONTROLLER_GPIO_SS;
        }

        spi_imx->spi_drctl = spi_drctl;

        init_completion(&spi_imx->xfer_done);

        spi_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(spi_imx->base)) {
                ret = PTR_ERR(spi_imx->base);
                goto out_controller_put;
        }
        spi_imx->base_phys = res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto out_controller_put;
        }

        ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
                               dev_name(&pdev->dev), spi_imx);
        if (ret) {
                dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
                goto out_controller_put;
        }

        spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(spi_imx->clk_ipg)) {
                ret = PTR_ERR(spi_imx->clk_ipg);
                goto out_controller_put;
        }

        spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
        if (IS_ERR(spi_imx->clk_per)) {
                ret = PTR_ERR(spi_imx->clk_per);
                goto out_controller_put;
        }

        ret = clk_prepare_enable(spi_imx->clk_per);
        if (ret)
                goto out_controller_put;

        ret = clk_prepare_enable(spi_imx->clk_ipg);
        if (ret)
                goto out_put_per;

        pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(spi_imx->dev);
        pm_runtime_get_noresume(spi_imx->dev);
        pm_runtime_set_active(spi_imx->dev);
        pm_runtime_enable(spi_imx->dev);

        spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
        /*
         * DMA is only validated on i.MX35 and i.MX6 so far; the constraint
         * can be removed once it is validated on other chips.
         */
        if (spi_imx->devtype_data->has_dmamode) {
                ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
                if (ret == -EPROBE_DEFER)
                        goto out_runtime_pm_put;

                if (ret < 0)
                        dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
                                ret);
        }

        spi_imx->devtype_data->reset(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, 0);

        controller->dev.of_node = pdev->dev.of_node;
        ret = spi_register_controller(controller);
        if (ret) {
                dev_err_probe(&pdev->dev, ret, "register controller failed\n");
                goto out_register_controller;
        }

        pm_runtime_mark_last_busy(spi_imx->dev);
        pm_runtime_put_autosuspend(spi_imx->dev);

        return ret;

out_register_controller:
        if (spi_imx->devtype_data->has_dmamode)
                spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
        pm_runtime_dont_use_autosuspend(spi_imx->dev);
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_disable(spi_imx->dev);

        clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
        clk_disable_unprepare(spi_imx->clk_per);
out_controller_put:
        spi_controller_put(controller);

        return ret;
}

static void spi_imx_remove(struct platform_device *pdev)
{
        struct spi_controller *controller = platform_get_drvdata(pdev);
        struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
        int ret;

        spi_unregister_controller(controller);

        ret = pm_runtime_get_sync(spi_imx->dev);
        if (ret >= 0)
                writel(0, spi_imx->base + MXC_CSPICTRL);
        else
                dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");

        pm_runtime_dont_use_autosuspend(spi_imx->dev);
        pm_runtime_put_sync(spi_imx->dev);
        pm_runtime_disable(spi_imx->dev);

        spi_imx_sdma_exit(spi_imx);
}

static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
{
        struct spi_controller *controller = dev_get_drvdata(dev);
        struct spi_imx_data *spi_imx;
        int ret;

        spi_imx = spi_controller_get_devdata(controller);

        ret = clk_prepare_enable(spi_imx->clk_per);
        if (ret)
                return ret;

        ret = clk_prepare_enable(spi_imx->clk_ipg);
        if (ret) {
                clk_disable_unprepare(spi_imx->clk_per);
                return ret;
        }

        return 0;
}

static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
{
        struct spi_controller *controller = dev_get_drvdata(dev);
        struct spi_imx_data *spi_imx;

        spi_imx = spi_controller_get_devdata(controller);

        clk_disable_unprepare(spi_imx->clk_per);
        clk_disable_unprepare(spi_imx->clk_ipg);

        return 0;
}

static int __maybe_unused spi_imx_suspend(struct device *dev)
{
        pinctrl_pm_select_sleep_state(dev);
        return 0;
}

static int __maybe_unused spi_imx_resume(struct device *dev)
{
        pinctrl_pm_select_default_state(dev);
        return 0;
}

static const struct dev_pm_ops imx_spi_pm = {
        SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
                           spi_imx_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};

static struct platform_driver spi_imx_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = spi_imx_dt_ids,
                .pm = &imx_spi_pm,
        },
        .probe = spi_imx_probe,
        .remove_new = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("i.MX SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);