Merge remote-tracking branch 'spi/for-5.14' into spi-next

Mark Brown 2021-06-25 14:08:26 +01:00
commit 1bee1ecf23
No known key found for this signature in database
GPG Key ID: 24D68B725D5487D0
57 changed files with 1249 additions and 862 deletions

View File

@ -1,11 +0,0 @@
Renesas RZ/N1 SPI Controller
This controller is based on the Synopsys DW Synchronous Serial Interface and
inherits all properties defined in snps,dw-apb-ssi.txt except for the
compatible property.
Required properties:
- compatible : The device specific string followed by the generic RZ/N1 string.
Therefore it must be one of:
"renesas,r9a06g032-spi", "renesas,rzn1-spi"
"renesas,r9a06g033-spi", "renesas,rzn1-spi"

View File

@ -67,6 +67,12 @@ properties:
const: baikal,bt1-sys-ssi
- description: Canaan Kendryte K210 SoC SPI Controller
const: canaan,k210-spi
- description: Renesas RZ/N1 SPI Controller
items:
- enum:
- renesas,r9a06g032-spi # RZ/N1D
- renesas,r9a06g033-spi # RZ/N1S
- const: renesas,rzn1-spi # RZ/N1
reg:
minItems: 1

View File

@ -1,30 +0,0 @@
Cadence SPI controller Device Tree Bindings
-------------------------------------------
Required properties:
- compatible : Should be "cdns,spi-r1p6" or "xlnx,zynq-spi-r1p6".
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
- clock-names : List of input clock names - "ref_clk", "pclk"
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
Optional properties:
- num-cs : Number of chip selects used.
If a decoder is used, this will be the number of
chip selects after the decoder.
- is-decoded-cs : Flag to indicate whether decoder is used or not.
Example:
spi@e0007000 {
compatible = "xlnx,zynq-spi-r1p6";
clock-names = "ref_clk", "pclk";
clocks = <&clkc 26>, <&clkc 35>;
interrupt-parent = <&intc>;
interrupts = <0 49 4>;
num-cs = <4>;
is-decoded-cs = <0>;
reg = <0xe0007000 0x1000>;
} ;

View File

@ -0,0 +1,66 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/spi-cadence.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Cadence SPI controller Device Tree Bindings
maintainers:
- Michal Simek <michal.simek@xilinx.com>
allOf:
- $ref: "spi-controller.yaml#"
properties:
compatible:
enum:
- cdns,spi-r1p6
- xlnx,zynq-spi-r1p6
reg:
maxItems: 1
interrupts:
maxItems: 1
clock-names:
items:
- const: ref_clk
- const: pclk
clocks:
maxItems: 2
num-cs:
description: |
Number of chip selects used. If a decoder is used,
this will be the number of chip selects after the
decoder.
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 4
default: 4
is-decoded-cs:
description: |
Flag to indicate whether decoder is used or not.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 0, 1 ]
default: 0
unevaluatedProperties: false
examples:
- |
spi@e0007000 {
compatible = "xlnx,zynq-spi-r1p6";
clock-names = "ref_clk", "pclk";
clocks = <&clkc 26>, <&clkc 35>;
interrupt-parent = <&intc>;
interrupts = <0 49 4>;
num-cs = <4>;
is-decoded-cs = <0>;
reg = <0xe0007000 0x1000>;
};
...

View File

@ -114,8 +114,11 @@ patternProperties:
Compatible of the SPI device.
reg:
minimum: 0
maximum: 256
minItems: 1
maxItems: 256
items:
minimum: 0
maximum: 256
description:
Chip select used by the device.

View File

@ -33,6 +33,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
- rockchip,rv1126-spi
- const: rockchip,rk3066-spi
reg:

View File

@ -1,23 +0,0 @@
Xilinx SPI controller Device Tree Bindings
-------------------------------------------------
Required properties:
- compatible : Should be "xlnx,xps-spi-2.00.a", "xlnx,xps-spi-2.00.b" or "xlnx,axi-quad-spi-1.00.a"
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
Optional properties:
- xlnx,num-ss-bits : Number of chip selects used.
- xlnx,num-transfer-bits : Number of bits per transfer. This will be 8 if not specified
Example:
axi_quad_spi@41e00000 {
compatible = "xlnx,xps-spi-2.00.a";
interrupt-parent = <&intc>;
interrupts = <0 31 1>;
reg = <0x41e00000 0x10000>;
xlnx,num-ss-bits = <0x1>;
xlnx,num-transfer-bits = <32>;
};

View File

@ -0,0 +1,57 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/spi-xilinx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx SPI controller Device Tree Bindings
maintainers:
- Michal Simek <michal.simek@xilinx.com>
allOf:
- $ref: "spi-controller.yaml#"
properties:
compatible:
enum:
- xlnx,xps-spi-2.00.a
- xlnx,xps-spi-2.00.b
- xlnx,axi-quad-spi-1.00.a
reg:
maxItems: 1
interrupts:
maxItems: 1
xlnx,num-ss-bits:
description: Number of chip selects used.
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 32
xlnx,num-transfer-bits:
description: Number of bits per transfer. This will be 8 if not specified.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [8, 16, 32]
default: 8
required:
- compatible
- reg
- interrupts
unevaluatedProperties: false
examples:
- |
spi0: spi@41e00000 {
compatible = "xlnx,xps-spi-2.00.a";
interrupt-parent = <&intc>;
interrupts = <0 31 1>;
reg = <0x41e00000 0x10000>;
xlnx,num-ss-bits = <0x1>;
xlnx,num-transfer-bits = <32>;
};
...

View File

@ -1,25 +0,0 @@
Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
-------------------------------------------------------------------
Required properties:
- compatible : Should be "xlnx,zynqmp-qspi-1.0".
- reg : Physical base address and size of GQSPI registers map.
- interrupts : Property with a value describing the interrupt
number.
- clock-names : List of input clock names - "ref_clk", "pclk"
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
Optional properties:
- num-cs : Number of chip selects used.
Example:
qspi: spi@ff0f0000 {
compatible = "xlnx,zynqmp-qspi-1.0";
clock-names = "ref_clk", "pclk";
clocks = <&misc_clk &misc_clk>;
interrupts = <0 15 4>;
interrupt-parent = <&gic>;
num-cs = <1>;
reg = <0x0 0xff0f0000 0x1000>,<0x0 0xc0000000 0x8000000>;
};

View File

@ -0,0 +1,51 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/spi-zynqmp-qspi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
maintainers:
- Michal Simek <michal.simek@xilinx.com>
allOf:
- $ref: "spi-controller.yaml#"
properties:
compatible:
const: xlnx,zynqmp-qspi-1.0
reg:
maxItems: 2
interrupts:
maxItems: 1
clock-names:
items:
- const: ref_clk
- const: pclk
clocks:
maxItems: 2
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/xlnx-zynqmp-clk.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
qspi: spi@ff0f0000 {
compatible = "xlnx,zynqmp-qspi-1.0";
clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
clock-names = "ref_clk", "pclk";
interrupts = <0 15 4>;
interrupt-parent = <&gic>;
reg = <0x0 0xff0f0000 0x0 0x1000>,
<0x0 0xc0000000 0x0 0x8000000>;
};
};

View File

@ -2,43 +2,47 @@
PXA2xx SPI on SSP driver HOWTO
==============================
This a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx
synchronous serial port into a SPI master controller
This is a mini HOWTO on the pxa2xx_spi driver. The driver turns a PXA2xx
synchronous serial port into an SPI master controller
(see Documentation/spi/spi-summary.rst). The driver has the following features
- Support for any PXA2xx SSP
- Support for any PXA2xx and compatible SSP.
- SSP PIO and SSP DMA data transfers.
- External and Internal (SSPFRM) chip selects.
- Per slave device (chip) configuration.
- Full suspend, freeze, resume support.
The driver is built around a "spi_message" fifo serviced by workqueue and a
tasklet. The workqueue, "pump_messages", drives message fifo and the tasklet
(pump_transfer) is responsible for queuing SPI transactions and setting up and
launching the dma/interrupt driven transfers.
The driver is built around a &struct spi_message FIFO serviced by a kernel
thread. The kernel thread, spi_pump_messages(), drives the message FIFO and
is responsible for queuing SPI transactions and setting up and launching
the DMA or interrupt driven transfers.
Declaring PXA2xx Master Controllers
-----------------------------------
Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
"platform device". The master configuration is passed to the driver via a table
found in include/linux/spi/pxa2xx_spi.h::
Typically, for a legacy platform, an SPI master is defined in the
arch/.../mach-*/board-*.c as a "platform device". The master configuration
is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h::
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
...
};
The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of
slave devices (chips) attached to this SPI master.
The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should
be used. This caused the driver to acquire two DMA channels: rx_channel and
tx_channel. The rx_channel has a higher DMA service priority the tx_channel.
be used. This causes the driver to acquire two DMA channels: Rx channel and
Tx channel. The Rx channel has a higher DMA service priority than the Tx channel.
See the "PXA2xx Developer Manual" section "DMA Controller".
For the new platforms the description of the controller and peripheral devices
comes from Device Tree or ACPI.
NSSP MASTER SAMPLE
------------------
Below is a sample configuration using the PXA255 NSSP::
Below is a sample configuration using the PXA255 NSSP for a legacy platform::
static struct resource pxa_spi_nssp_resources[] = {
[0] = {
@ -79,9 +83,10 @@ Below is a sample configuration using the PXA255 NSSP::
Declaring Slave Devices
-----------------------
Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
using the "spi_board_info" structure found in "linux/spi/spi.h". See
"Documentation/spi/spi-summary.rst" for additional information.
Typically, for a legacy platform, each SPI slave (chip) is defined in the
arch/.../mach-*/board-*.c using the "spi_board_info" structure found in
"linux/spi/spi.h". See "Documentation/spi/spi-summary.rst" for additional
information.
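A minimal, hypothetical board entry might look roughly like this (illustrative
only; the modalias, bus number and speed are made up)::

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "spidev",	/* driver to bind */
			.max_speed_hz	= 1000000,	/* 1 MHz */
			.bus_num	= 1,		/* which SSP port */
			.chip_select	= 0,
		},
	};

Such a table is normally handed to spi_register_board_info() during board
initialization.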
Each slave device attached to the PXA must provide slave specific configuration
information via the structure "pxa2xx_spi_chip" found in
@ -101,9 +106,9 @@ device. All fields are optional.
};
The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
used to configure the SSP hardware fifo. These fields are critical to the
used to configure the SSP hardware FIFO. These fields are critical to the
performance of the pxa2xx_spi driver and misconfiguration will result in rx
fifo overruns (especially in PIO mode transfers). Good default values are::
FIFO overruns (especially in PIO mode transfers). Good default values are::
.tx_threshold = 8,
.rx_threshold = 8,
@ -118,7 +123,7 @@ use a value of 8. The driver will determine a reasonable default if
dma_burst_size == 0.
The "pxa2xx_spi_chip.timeout" fields is used to efficiently handle
trailing bytes in the SSP receiver fifo. The correct value for this field is
trailing bytes in the SSP receiver FIFO. The correct value for this field is
dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
timeouts and must busy-wait any trailing bytes.
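Putting the fields above together, a per-chip configuration might look roughly
like this (an illustrative sketch; the values simply mirror the defaults
discussed above and are not mandated by this commit)::

	static struct pxa2xx_spi_chip board_chip_info = {
		.tx_threshold	= 8,	/* SSP hardware FIFO threshold */
		.rx_threshold	= 8,	/* SSP hardware FIFO threshold */
		.dma_burst_size	= 8,	/* byte wide transfers */
		.timeout	= 235,	/* trailing-byte timeout, bus dependent */
	};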
@ -131,19 +136,19 @@ testing.
The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
function for asserting/deasserting a slave device chip select. If the field is
NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
configured to use SSPFRM instead.
configured to use GPIO or SSPFRM instead.
NOTE: the SPI driver cannot control the chip select if SSPFRM is used, so the
chipselect is dropped after each spi_transfer. Most devices need chip select
asserted around the complete message. Use SSPFRM as a GPIO (through cs_control)
asserted around the complete message. Use SSPFRM as a GPIO (through a descriptor)
to accommodate these chips.
NSSP SLAVE SAMPLE
-----------------
The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
"spi_board_info.controller_data" field. Below is a sample configuration using
the PXA255 NSSP.
For a legacy platform or in some other cases, the pxa2xx_spi_chip structure
is passed to the pxa2xx_spi driver in the "spi_board_info.controller_data"
field. Below is a sample configuration using the PXA255 NSSP.
::
@ -212,7 +217,9 @@ DMA and PIO I/O Support
-----------------------
The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
transfers. The driver defaults to PIO mode and DMA transfers must be enabled
by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure. The DMA
by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure.
For the newer platforms, that are known to support DMA, the driver will enable
it automatically and try it first with a possible fallback to PIO. The DMA
mode supports both coherent and stream based DMA mappings.
The following logic is used to determine the type of I/O to be used on
@ -236,5 +243,4 @@ a per "spi_transfer" basis::
THANKS TO
---------
David Brownell and others for mentoring the development of this driver.

View File

@ -473,20 +473,26 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_wait(struct spinand_device *spinand, u8 *s)
static int spinand_wait(struct spinand_device *spinand,
unsigned long initial_delay_us,
unsigned long poll_delay_us,
u8 *s)
{
unsigned long timeo = jiffies + msecs_to_jiffies(400);
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
spinand->scratchbuf);
u8 status;
int ret;
do {
ret = spinand_read_status(spinand, &status);
if (ret)
return ret;
ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
initial_delay_us,
poll_delay_us,
SPINAND_WAITRDY_TIMEOUT_MS);
if (ret)
return ret;
if (!(status & STATUS_BUSY))
goto out;
} while (time_before(jiffies, timeo));
status = *spinand->scratchbuf;
if (!(status & STATUS_BUSY))
goto out;
/*
* Extra read, just in case the STATUS_READY bit has changed
@ -526,7 +532,10 @@ static int spinand_reset_op(struct spinand_device *spinand)
if (ret)
return ret;
return spinand_wait(spinand, NULL);
return spinand_wait(spinand,
SPINAND_RESET_INITIAL_DELAY_US,
SPINAND_RESET_POLL_DELAY_US,
NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@ -549,7 +558,10 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret)
return ret;
ret = spinand_wait(spinand, &status);
ret = spinand_wait(spinand,
SPINAND_READ_INITIAL_DELAY_US,
SPINAND_READ_POLL_DELAY_US,
&status);
if (ret < 0)
return ret;
@ -585,7 +597,10 @@ static int spinand_write_page(struct spinand_device *spinand,
if (ret)
return ret;
ret = spinand_wait(spinand, &status);
ret = spinand_wait(spinand,
SPINAND_WRITE_INITIAL_DELAY_US,
SPINAND_WRITE_POLL_DELAY_US,
&status);
if (!ret && (status & STATUS_PROG_FAILED))
return -EIO;
@ -768,7 +783,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
ret = spinand_wait(spinand, &status);
ret = spinand_wait(spinand,
SPINAND_ERASE_INITIAL_DELAY_US,
SPINAND_ERASE_POLL_DELAY_US,
&status);
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;

View File

@ -806,6 +806,7 @@ config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
depends on OF
depends on SPI_MEM
help
This enables support for the Quad SPI controller in master mode.
This driver does not support generic SPI. The implementation only

View File

@ -148,10 +148,8 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
base = devm_ioremap_resource(dev, &dfl_dev->mmio_res);
if (IS_ERR(base)) {
dev_err(dev, "%s get mem resource fail!\n", __func__);
if (IS_ERR(base))
return PTR_ERR(base);
}
config_spi_master(base, master);
dev_dbg(dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,

View File

@ -19,7 +19,6 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_data/spi-ath79.h>
#define DRV_NAME "ath79-spi"
@ -138,7 +137,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ath79_spi *sp;
struct ath79_spi_platform_data *pdata;
unsigned long rate;
int ret;
@ -152,15 +150,10 @@ static int ath79_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
pdata = dev_get_platdata(&pdev->dev);
master->use_gpio_descriptors = true;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->flags = SPI_MASTER_GPIO_SS;
if (pdata) {
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
}
master->num_chipselect = 3;
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;

View File

@ -700,7 +700,6 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
struct spi_transfer *xfer,
u32 *plen)
__must_hold(&as->lock)
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct dma_chan *rxchan = master->dma_rx;
@ -716,8 +715,6 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
if (!rxchan || !txchan)
return -ENODEV;
/* release lock for DMA operations */
atmel_spi_unlock(as);
*plen = xfer->len;
@ -786,15 +783,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
rxchan->device->device_issue_pending(rxchan);
txchan->device->device_issue_pending(txchan);
/* take back lock */
atmel_spi_lock(as);
return 0;
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
atmel_spi_stop_dma(master);
err_exit:
atmel_spi_lock(as);
return -ENOMEM;
}
@ -863,7 +857,6 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
* lock is held, spi irq is blocked
*/
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
struct spi_message *msg,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
@ -879,12 +872,12 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RPR, rx_dma);
spi_writel(as, TPR, tx_dma);
if (msg->spi->bits_per_word > 8)
if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RCR, len);
spi_writel(as, TCR, len);
dev_dbg(&msg->spi->dev,
dev_dbg(&master->dev,
" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@ -898,12 +891,12 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RNPR, rx_dma);
spi_writel(as, TNPR, tx_dma);
if (msg->spi->bits_per_word > 8)
if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RNCR, len);
spi_writel(as, TNCR, len);
dev_dbg(&msg->spi->dev,
dev_dbg(&master->dev,
" next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@ -1054,8 +1047,6 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
/* Interrupt
*
* No need for locking in this Interrupt handler: done_status is the
* only information modified.
*/
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
@ -1273,12 +1264,28 @@ static int atmel_spi_setup(struct spi_device *spi)
return 0;
}
static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
{
struct atmel_spi *as = spi_master_get_devdata(spi->master);
/* the core doesn't really pass us enable/disable, but CS HIGH vs CS LOW;
* since we already have routines for activate/deactivate, translate
* high/low to active/inactive
*/
enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
if (enable) {
cs_activate(as, spi);
} else {
cs_deactivate(as, spi);
}
}
static int atmel_spi_one_transfer(struct spi_master *master,
struct spi_message *msg,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct atmel_spi *as;
struct spi_device *spi = msg->spi;
u8 bits;
u32 len;
struct atmel_spi_device *asd;
@ -1288,11 +1295,6 @@ static int atmel_spi_one_transfer(struct spi_master *master,
as = spi_master_get_devdata(master);
if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
dev_dbg(&spi->dev, "missing rx or tx buf\n");
return -EINVAL;
}
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
if (bits != xfer->bits_per_word - 8) {
@ -1305,13 +1307,13 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
if ((!msg->is_dma_mapped)
if ((!master->cur_msg_mapped)
&& as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
atmel_spi_set_xfer_speed(as, msg->spi, xfer);
atmel_spi_set_xfer_speed(as, spi, xfer);
as->done_status = 0;
as->current_transfer = xfer;
@ -1320,7 +1322,9 @@ static int atmel_spi_one_transfer(struct spi_master *master,
reinit_completion(&as->xfer_completion);
if (as->use_pdc) {
atmel_spi_pdc_next_xfer(master, msg, xfer);
atmel_spi_lock(as);
atmel_spi_pdc_next_xfer(master, xfer);
atmel_spi_unlock(as);
} else if (atmel_spi_use_dma(as, xfer)) {
len = as->current_remaining_bytes;
ret = atmel_spi_next_xfer_dma_submit(master,
@ -1328,21 +1332,21 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (ret) {
dev_err(&spi->dev,
"unable to use DMA, fallback to PIO\n");
atmel_spi_next_xfer_pio(master, xfer);
as->done_status = ret;
break;
} else {
as->current_remaining_bytes -= len;
if (as->current_remaining_bytes < 0)
as->current_remaining_bytes = 0;
}
} else {
atmel_spi_lock(as);
atmel_spi_next_xfer_pio(master, xfer);
atmel_spi_unlock(as);
}
/* interrupts are disabled, so free the lock for schedule */
atmel_spi_unlock(as);
dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
SPI_DMA_TIMEOUT);
atmel_spi_lock(as);
if (WARN_ON(dma_timeout == 0)) {
dev_err(&spi->dev, "spi transfer timeout\n");
as->done_status = -EIO;
@ -1381,90 +1385,16 @@ static int atmel_spi_one_transfer(struct spi_master *master,
} else if (atmel_spi_use_dma(as, xfer)) {
atmel_spi_stop_dma(master);
}
if (!msg->is_dma_mapped
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
return 0;
} else {
/* only update length if no error */
msg->actual_length += xfer->len;
}
if (!msg->is_dma_mapped
if (!master->cur_msg_mapped
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
as->keep_cs = true;
} else {
cs_deactivate(as, msg->spi);
udelay(10);
cs_activate(as, msg->spi);
}
}
return 0;
}
static int atmel_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct atmel_spi *as;
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret = 0;
as = spi_master_get_devdata(master);
dev_dbg(&spi->dev, "new message %p submitted for %s\n",
msg, dev_name(&spi->dev));
atmel_spi_lock(as);
cs_activate(as, spi);
as->keep_cs = false;
msg->status = 0;
msg->actual_length = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
ret = atmel_spi_one_transfer(master, msg, xfer);
if (ret)
goto msg_done;
trace_spi_transfer_stop(msg, xfer);
}
if (as->use_pdc)
atmel_spi_disable_pdc_transfer(as);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
dev_dbg(&spi->dev,
" xfer %p: len %u tx %p/%pad rx %p/%pad\n",
xfer, xfer->len,
xfer->tx_buf, &xfer->tx_dma,
xfer->rx_buf, &xfer->rx_dma);
}
msg_done:
if (!as->keep_cs)
cs_deactivate(as, msg->spi);
atmel_spi_unlock(as);
msg->status = as->done_status;
spi_finalize_current_message(spi->master);
return ret;
return as->done_status;
}
static void atmel_spi_cleanup(struct spi_device *spi)
@ -1554,7 +1484,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
master->setup = atmel_spi_setup;
master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
master->transfer_one_message = atmel_spi_transfer_one_message;
master->transfer_one = atmel_spi_one_transfer;
master->set_cs = atmel_spi_set_cs;
master->cleanup = atmel_spi_cleanup;
master->auto_runtime_pm = true;
master->max_dma_len = SPI_MAX_DMA_XFER;

View File

@ -68,7 +68,6 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
#define BCM2835_SPI_NUM_CS 24 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@ -96,8 +95,6 @@ MODULE_PARM_DESC(polling_limit_us,
* @rx_prologue: bytes received without DMA if first RX sglist entry's
* length is not a multiple of 4 (to overcome hardware limitation)
* @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
* @prepare_cs: precalculated CS register value for ->prepare_message()
* (uses slave-specific clock polarity and phase settings)
* @debugfs_dir: the debugfs directory - needed to remove debugfs when
* unloading the module
* @count_transfer_polling: count of how often polling mode is used
@ -107,7 +104,7 @@ MODULE_PARM_DESC(polling_limit_us,
* These are counted as well in @count_transfer_polling and
* @count_transfer_irq
* @count_transfer_dma: count how often dma mode is used
* @chip_select: SPI slave currently selected
* @slv: SPI slave currently selected
* (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
* @tx_dma_active: whether a TX DMA descriptor is in progress
* @rx_dma_active: whether a RX DMA descriptor is in progress
@ -115,11 +112,6 @@ MODULE_PARM_DESC(polling_limit_us,
* @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
* (cyclically copies from zero page to TX FIFO)
* @fill_tx_addr: bus address of zero page
* @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
* (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
* @clear_rx_addr: bus address of @clear_rx_cs
* @clear_rx_cs: precalculated CS register value to clear RX FIFO
* (uses slave-specific clock polarity and phase settings)
*/
struct bcm2835_spi {
void __iomem *regs;
@ -134,7 +126,6 @@ struct bcm2835_spi {
int tx_prologue;
int rx_prologue;
unsigned int tx_spillover;
u32 prepare_cs[BCM2835_SPI_NUM_CS];
struct dentry *debugfs_dir;
u64 count_transfer_polling;
@ -142,14 +133,28 @@ struct bcm2835_spi {
u64 count_transfer_irq_after_polling;
u64 count_transfer_dma;
u8 chip_select;
struct bcm2835_spidev *slv;
unsigned int tx_dma_active;
unsigned int rx_dma_active;
struct dma_async_tx_descriptor *fill_tx_desc;
dma_addr_t fill_tx_addr;
struct dma_async_tx_descriptor *clear_rx_desc[BCM2835_SPI_NUM_CS];
};
/**
* struct bcm2835_spidev - BCM2835 SPI slave
* @prepare_cs: precalculated CS register value for ->prepare_message()
* (uses slave-specific clock polarity and phase settings)
* @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
* (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
* @clear_rx_addr: bus address of @clear_rx_cs
* @clear_rx_cs: precalculated CS register value to clear RX FIFO
* (uses slave-specific clock polarity and phase settings)
*/
struct bcm2835_spidev {
u32 prepare_cs;
struct dma_async_tx_descriptor *clear_rx_desc;
dma_addr_t clear_rx_addr;
u32 clear_rx_cs[BCM2835_SPI_NUM_CS] ____cacheline_aligned;
u32 clear_rx_cs ____cacheline_aligned;
};
#if defined(CONFIG_DEBUG_FS)
@ -624,8 +629,7 @@ static void bcm2835_spi_dma_tx_done(void *data)
/* busy-wait for TX FIFO to empty */
while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
bcm2835_wr(bs, BCM2835_SPI_CS,
bs->clear_rx_cs[bs->chip_select]);
bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);
bs->tx_dma_active = false;
smp_wmb();
@ -646,18 +650,18 @@ static void bcm2835_spi_dma_tx_done(void *data)
/**
* bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
* @ctlr: SPI master controller
* @spi: SPI slave
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @slv: BCM2835 SPI slave
* @is_tx: whether to submit DMA descriptor for TX or RX sglist
*
* Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
* Return 0 on success or a negative error number.
*/
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
struct bcm2835_spidev *slv,
bool is_tx)
{
struct dma_chan *chan;
@ -697,7 +701,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
} else if (!tfr->rx_buf) {
desc->callback = bcm2835_spi_dma_tx_done;
desc->callback_param = ctlr;
bs->chip_select = spi->chip_select;
bs->slv = slv;
}
/* submit it to DMA-engine */
@ -709,8 +713,8 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
/**
* bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
* @ctlr: SPI master controller
* @spi: SPI slave
* @tfr: SPI transfer
* @slv: BCM2835 SPI slave
* @cs: CS register
*
* For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
@ -754,8 +758,8 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
* performed at the end of an RX-only transfer.
*/
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
struct bcm2835_spidev *slv,
u32 cs)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
@ -773,7 +777,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
/* setup tx-DMA */
if (bs->tx_buf) {
ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true);
ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
} else {
cookie = dmaengine_submit(bs->fill_tx_desc);
ret = dma_submit_error(cookie);
@ -799,9 +803,9 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
* this saves 10us or more.
*/
if (bs->rx_buf) {
ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false);
ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
} else {
cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]);
cookie = dmaengine_submit(slv->clear_rx_desc);
ret = dma_submit_error(cookie);
}
if (ret) {
@ -850,8 +854,6 @@ static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
static void bcm2835_dma_release(struct spi_controller *ctlr,
struct bcm2835_spi *bs)
{
int i;
if (ctlr->dma_tx) {
dmaengine_terminate_sync(ctlr->dma_tx);
@ -870,17 +872,6 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
if (ctlr->dma_rx) {
dmaengine_terminate_sync(ctlr->dma_rx);
for (i = 0; i < BCM2835_SPI_NUM_CS; i++)
if (bs->clear_rx_desc[i])
dmaengine_desc_free(bs->clear_rx_desc[i]);
if (bs->clear_rx_addr)
dma_unmap_single(ctlr->dma_rx->device->dev,
bs->clear_rx_addr,
sizeof(bs->clear_rx_cs),
DMA_TO_DEVICE);
dma_release_channel(ctlr->dma_rx);
ctlr->dma_rx = NULL;
}
@ -892,7 +883,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
struct dma_slave_config slave_config;
const __be32 *addr;
dma_addr_t dma_reg_base;
int ret, i;
int ret;
/* base address in dma-space */
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
@ -972,35 +963,6 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
if (ret)
goto err_config;
bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
bs->clear_rx_cs,
sizeof(bs->clear_rx_cs),
DMA_TO_DEVICE);
if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
bs->clear_rx_addr = 0;
ret = -ENOMEM;
goto err_release;
}
for (i = 0; i < BCM2835_SPI_NUM_CS; i++) {
bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
bs->clear_rx_addr + i * sizeof(u32),
sizeof(u32), 0,
DMA_MEM_TO_DEV, 0);
if (!bs->clear_rx_desc[i]) {
dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
ret = -ENOMEM;
goto err_release;
}
ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]);
if (ret) {
dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n");
goto err_release;
}
}
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
@ -1082,9 +1044,10 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *tfr)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *slv = spi_get_ctldata(spi);
unsigned long spi_hz, clk_hz, cdiv;
unsigned long hz_per_byte, byte_limit;
u32 cs = bs->prepare_cs[spi->chip_select];
u32 cs = slv->prepare_cs;
/* set clock */
spi_hz = tfr->speed_hz;
@ -1133,7 +1096,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
* this 1 idle clock cycle pattern but runs the spi clock without gaps
*/
if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);
/* run in interrupt-mode */
return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
@ -1144,6 +1107,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
{
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *slv = spi_get_ctldata(spi);
int ret;
if (ctlr->can_dma) {
@ -1162,7 +1126,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
* Set up clock polarity before spi_transfer_one_message() asserts
* chip select to avoid a gratuitous clock signal edge.
*/
bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]);
bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);
return 0;
}
@ -1188,17 +1152,81 @@ static int chip_match_name(struct gpio_chip *chip, void *data)
return !strcmp(chip->label, data);
}
static void bcm2835_spi_cleanup(struct spi_device *spi)
{
struct bcm2835_spidev *slv = spi_get_ctldata(spi);
struct spi_controller *ctlr = spi->controller;
if (slv->clear_rx_desc)
dmaengine_desc_free(slv->clear_rx_desc);
if (slv->clear_rx_addr)
dma_unmap_single(ctlr->dma_rx->device->dev,
slv->clear_rx_addr,
sizeof(u32),
DMA_TO_DEVICE);
kfree(slv);
}
static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct bcm2835_spi *bs,
struct bcm2835_spidev *slv)
{
int ret;
if (!ctlr->dma_rx)
return 0;
slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
&slv->clear_rx_cs,
sizeof(u32),
DMA_TO_DEVICE);
if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
dev_err(&spi->dev, "cannot map clear_rx_cs\n");
slv->clear_rx_addr = 0;
return -ENOMEM;
}
slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
slv->clear_rx_addr,
sizeof(u32), 0,
DMA_MEM_TO_DEV, 0);
if (!slv->clear_rx_desc) {
dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
return -ENOMEM;
}
ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
if (ret) {
dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
return ret;
}
return 0;
}
static int bcm2835_spi_setup(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *slv = spi_get_ctldata(spi);
struct gpio_chip *chip;
int ret;
u32 cs;
if (spi->chip_select >= BCM2835_SPI_NUM_CS) {
dev_err(&spi->dev, "only %d chip-selects supported\n",
BCM2835_SPI_NUM_CS - 1);
return -EINVAL;
if (!slv) {
slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
GFP_KERNEL);
if (!slv)
return -ENOMEM;
spi_set_ctldata(spi, slv);
ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
if (ret)
goto err_cleanup;
}
/*
@ -1212,20 +1240,19 @@ static int bcm2835_spi_setup(struct spi_device *spi)
cs |= BCM2835_SPI_CS_CPOL;
if (spi->mode & SPI_CPHA)
cs |= BCM2835_SPI_CS_CPHA;
bs->prepare_cs[spi->chip_select] = cs;
slv->prepare_cs = cs;
/*
* Precalculate SPI slave's CS register value to clear RX FIFO
* in case of a TX-only DMA transfer.
*/
if (ctlr->dma_rx) {
bs->clear_rx_cs[spi->chip_select] = cs |
BCM2835_SPI_CS_TA |
BCM2835_SPI_CS_DMAEN |
BCM2835_SPI_CS_CLEAR_RX;
slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
BCM2835_SPI_CS_DMAEN |
BCM2835_SPI_CS_CLEAR_RX;
dma_sync_single_for_device(ctlr->dma_rx->device->dev,
bs->clear_rx_addr,
sizeof(bs->clear_rx_cs),
slv->clear_rx_addr,
sizeof(u32),
DMA_TO_DEVICE);
}
@ -1247,7 +1274,8 @@ static int bcm2835_spi_setup(struct spi_device *spi)
*/
dev_err(&spi->dev,
"setup: only two native chip-selects are supported\n");
return -EINVAL;
ret = -EINVAL;
goto err_cleanup;
}
/*
@ -1268,14 +1296,20 @@ static int bcm2835_spi_setup(struct spi_device *spi)
DRV_NAME,
GPIO_LOOKUP_FLAGS_DEFAULT,
GPIOD_OUT_LOW);
if (IS_ERR(spi->cs_gpiod))
return PTR_ERR(spi->cs_gpiod);
if (IS_ERR(spi->cs_gpiod)) {
ret = PTR_ERR(spi->cs_gpiod);
goto err_cleanup;
}
/* and set up the "mode" and level */
dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
spi->chip_select);
return 0;
err_cleanup:
bcm2835_spi_cleanup(spi);
return ret;
}
static int bcm2835_spi_probe(struct platform_device *pdev)
@ -1284,8 +1318,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
struct bcm2835_spi *bs;
int err;
ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
dma_get_cache_alignment()));
ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!ctlr)
return -ENOMEM;
@ -1296,6 +1329,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->num_chipselect = 3;
ctlr->setup = bcm2835_spi_setup;
ctlr->cleanup = bcm2835_spi_cleanup;
ctlr->transfer_one = bcm2835_spi_transfer_one;
ctlr->handle_err = bcm2835_spi_handle_err;
ctlr->prepare_message = bcm2835_spi_prepare_message;

View File

@ -384,7 +384,7 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
bs->pending = 0;
/* Calculate the estimated time in us the transfer runs. Note that
* there are are 2 idle clocks cycles after each chunk getting
* there are 2 idle clock cycles after each chunk getting
* transferred - in our case the chunk size is 3 bytes, so we
* approximate this by 9 cycles/byte. This is used to find the number
* of Hz per byte per polling limit. E.g., we can transfer 1 byte in

View File

@ -56,7 +56,7 @@ struct dw_spi_mscc {
/*
* The Designware SPI controller (referred to as master in the documentation)
* automatically deasserts chip select when the tx fifo is empty. The chip
* selects then needs to be either driven as GPIOs or, for the first 4 using the
* selects then needs to be either driven as GPIOs or, for the first 4 using
* the SPI boot controller registers. The final chip select is an OR gate
* between the Designware SPI controller and the SPI boot controller.
*/

View File

@ -639,8 +639,8 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
complete(&mas->abort_done);
/*
* It's safe or a good idea to Ack all of our our interrupts at the
* end of the function. Specifically:
* It's safe or a good idea to Ack all of our interrupts at the end
* of the function. Specifically:
* - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
* clearing Acks. Clearing at the end relies on nobody else having
* started a new transfer yet or else we could be clearing _their_

View File

@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@ -126,6 +127,7 @@ struct hisi_spi {
void __iomem *regs;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
u16 bus_num;
/* Current message transfer state info */
const void *tx;
@ -133,8 +135,49 @@ struct hisi_spi {
void *rx;
unsigned int rx_len;
u8 n_bytes; /* current is a 1/2/4 bytes op */
struct dentry *debugfs;
struct debugfs_regset32 regset;
};
#define HISI_SPI_DBGFS_REG(_name, _off) \
{ \
.name = _name, \
.offset = _off, \
}
static const struct debugfs_reg32 hisi_spi_regs[] = {
HISI_SPI_DBGFS_REG("CSCR", HISI_SPI_CSCR),
HISI_SPI_DBGFS_REG("CR", HISI_SPI_CR),
HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
HISI_SPI_DBGFS_REG("ICR", HISI_SPI_ICR),
HISI_SPI_DBGFS_REG("VERSION", HISI_SPI_VERSION),
};
static int hisi_spi_debugfs_init(struct hisi_spi *hs)
{
char name[32];
snprintf(name, 32, "hisi_spi%d", hs->bus_num);
hs->debugfs = debugfs_create_dir(name, NULL);
if (!hs->debugfs)
return -ENOMEM;
hs->regset.regs = hisi_spi_regs;
hs->regset.nregs = ARRAY_SIZE(hisi_spi_regs);
hs->regset.base = hs->regs;
debugfs_create_regset32("registers", 0400, hs->debugfs, &hs->regset);
return 0;
}
static u32 hisi_spi_busy(struct hisi_spi *hs)
{
return readl(hs->regs + HISI_SPI_SR) & SR_BUSY;
@ -424,6 +467,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
hs = spi_controller_get_devdata(master);
hs->dev = dev;
hs->irq = irq;
hs->bus_num = pdev->id;
hs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hs->regs))
@ -446,7 +490,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->bus_num = pdev->id;
master->bus_num = hs->bus_num;
master->setup = hisi_spi_setup;
master->cleanup = hisi_spi_cleanup;
master->transfer_one = hisi_spi_transfer_one;
@ -462,6 +506,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
return ret;
}
if (hisi_spi_debugfs_init(hs))
dev_info(dev, "failed to create debugfs dir\n");
ret = spi_register_controller(master);
if (ret) {
dev_err(dev, "failed to register spi master, ret=%d\n", ret);
@ -478,7 +525,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
static int hisi_spi_remove(struct platform_device *pdev)
{
struct spi_controller *master = platform_get_drvdata(pdev);
struct hisi_spi *hs = spi_controller_get_devdata(master);
debugfs_remove_recursive(hs->debugfs);
spi_unregister_controller(master);
return 0;

View File

@ -202,7 +202,7 @@ static void spi_lm70llp_attach(struct parport *p)
* the lm70 driver could verify it, reading the manf ID.
*/
master = spi_alloc_master(p->physport->dev, sizeof *pp);
master = spi_alloc_master(p->physport->dev, sizeof(*pp));
if (!master) {
status = -ENOMEM;
goto out_fail;

View File

@ -875,7 +875,7 @@ static int spi_test_run_iter(struct spi_device *spi,
test.transfers[i].len = len;
if (test.transfers[i].tx_buf)
test.transfers[i].tx_buf += tx_off;
if (test.transfers[i].tx_buf)
if (test.transfers[i].rx_buf)
test.transfers[i].rx_buf += rx_off;
}

View File

@ -6,6 +6,7 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@ -743,6 +744,91 @@ static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
static int spi_mem_read_status(struct spi_mem *mem,
const struct spi_mem_op *op,
u16 *status)
{
const u8 *bytes = (u8 *)op->data.buf.in;
int ret;
ret = spi_mem_exec_op(mem, op);
if (ret)
return ret;
if (op->data.nbytes > 1)
*status = ((u16)bytes[0] << 8) | bytes[1];
else
*status = bytes[0];
return 0;
}
/**
* spi_mem_poll_status() - Poll memory device status
* @mem: SPI memory device
* @op: the memory operation to execute
* @mask: status bitmask to check
* @match: (status & mask) expected value
* @initial_delay_us: delay in us before starting to poll
* @polling_delay_us: time to sleep between reads in us
* @timeout_ms: timeout in milliseconds
*
* This function polls a status register and returns when
* (status & mask) == match or when the timeout has expired.
*
* Return: 0 in case of success, -ETIMEDOUT in case of error,
* -EOPNOTSUPP if not supported.
*/
int spi_mem_poll_status(struct spi_mem *mem,
const struct spi_mem_op *op,
u16 mask, u16 match,
unsigned long initial_delay_us,
unsigned long polling_delay_us,
u16 timeout_ms)
{
struct spi_controller *ctlr = mem->spi->controller;
int ret = -EOPNOTSUPP;
int read_status_ret;
u16 status;
if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
op->data.dir != SPI_MEM_DATA_IN)
return -EINVAL;
if (ctlr->mem_ops && ctlr->mem_ops->poll_status) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
initial_delay_us, polling_delay_us,
timeout_ms);
spi_mem_access_end(mem);
}
if (ret == -EOPNOTSUPP) {
if (!spi_mem_supports_op(mem, op))
return ret;
if (initial_delay_us < 10)
udelay(initial_delay_us);
else
usleep_range((initial_delay_us >> 2) + 1,
initial_delay_us);
ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
(read_status_ret || ((status) & mask) == match),
polling_delay_us, timeout_ms * 1000, false, mem,
op, &status);
if (read_status_ret)
return read_status_ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
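/*
 * Illustrative sketch only -- not part of this commit. A caller such as a
 * SPI NAND driver can use spi_mem_poll_status() to wait for a ready bit.
 * EXAMPLE_STATUS_OP and EXAMPLE_STATUS_BUSY are hypothetical placeholders
 * for a device-specific "read status" operation and its busy bit; the
 * delay and timeout values are arbitrary.
 */
static int example_wait_ready(struct spi_mem *mem, u8 *scratchbuf)
{
	struct spi_mem_op op = EXAMPLE_STATUS_OP(scratchbuf);

	/*
	 * Poll until (status & EXAMPLE_STATUS_BUSY) == 0: wait 100 us before
	 * the first read, then poll every 20 us, and give up after 400 ms.
	 */
	return spi_mem_poll_status(mem, &op, EXAMPLE_STATUS_BUSY, 0,
				   100, 20, 400);
}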
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
@ -810,7 +896,7 @@ int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
/**
* spi_mem_driver_unregister_with_owner() - Unregister a SPI memory driver
* spi_mem_driver_unregister() - Unregister a SPI memory driver
* @memdrv: the SPI memory driver to unregister
*
* Unregisters a SPI memory driver.

View File

@ -725,7 +725,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(spicc->pclk);
if (ret) {
dev_err(&pdev->dev, "pclk clock enable failed\n");
goto out_master;
goto out_core_clk;
}
device_reset_optional(&pdev->dev);
@ -752,7 +752,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
ret = meson_spicc_clk_init(spicc);
if (ret) {
dev_err(&pdev->dev, "clock registration failed\n");
goto out_master;
goto out_clk;
}
ret = devm_spi_register_master(&pdev->dev, master);
@ -764,9 +764,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
return 0;
out_clk:
clk_disable_unprepare(spicc->core);
clk_disable_unprepare(spicc->pclk);
out_core_clk:
clk_disable_unprepare(spicc->core);
out_master:
spi_master_put(master);

View File

@ -369,7 +369,7 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
@ -491,7 +491,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
void *tempp;
struct clk *clk;
master = spi_alloc_master(dev, sizeof *mps);
master = spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;

View File

@ -265,7 +265,7 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@ -365,7 +365,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
struct spi_master *master;
int ret;
master = spi_alloc_master(dev, sizeof *mps);
master = spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;

View File

@ -415,7 +415,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
}
dev_dbg(&op->dev, "allocating spi_master struct\n");
master = spi_alloc_master(&op->dev, sizeof *ms);
master = spi_alloc_master(&op->dev, sizeof(*ms));
if (!master) {
rc = -ENOMEM;
goto err_alloc;

View File

@ -105,7 +105,7 @@ static void npcm_pspi_set_mode(struct spi_device *spi)
u16 regtemp;
u16 mode_val;
switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
mode_val = 0;
break;

View File

@ -86,7 +86,7 @@ static int tiny_spi_setup(struct spi_device *spi)
hw->speed_hz = spi->max_speed_hz;
hw->baud = tiny_spi_baud(spi, hw->speed_hz);
}
hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA);
hw->mode = spi->mode & SPI_MODE_X_MASK;
return 0;
}

View File

@ -6,7 +6,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
* Juha Yrjölä <juha.yrjola@nokia.com>
* Juha Yrjola <juha.yrjola@nokia.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
@ -241,7 +241,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi,
else
word_len = spi->bits_per_word;
if (spi->bits_per_word > 32)
if (word_len > 32)
return -EINVAL;
cs->word_len = word_len;
@ -296,7 +296,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
status = -EINVAL;
break;
}
status = omap1_spi100k_setup_transfer(spi, t);
@ -315,7 +314,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
m->actual_length += count;
if (count != t->len) {
status = -EIO;
break;
}
}

View File

@ -330,7 +330,7 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (spi->mode & SPI_CPOL)
flags |= UWIRE_CLK_INVERTED;
switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
case SPI_MODE_3:
flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
@ -460,7 +460,7 @@ static int uwire_probe(struct platform_device *pdev)
struct uwire_spi *uwire;
int status;
master = spi_alloc_master(&pdev->dev, sizeof *uwire);
master = spi_alloc_master(&pdev->dev, sizeof(*uwire));
if (!master)
return -ENODEV;

View File

@ -4,7 +4,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
* Juha Yrjölä <juha.yrjola@nokia.com>
* Juha Yrjola <juha.yrjola@nokia.com>
*/
#include <linux/kernel.h>
@ -1054,7 +1054,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
struct omap2_mcspi_cs *cs = spi->controller_state;
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = mcspi->base + spi->chip_select * 0x14;

View File

@ -288,7 +288,7 @@
#define SPI_POLLING_TIMEOUT 1000
/*
* The type of reading going on on this chip
* The type of reading going on this chip
*/
enum ssp_reading {
READING_NULL,
@ -298,7 +298,7 @@ enum ssp_reading {
};
/*
* The type of writing going on on this chip
* The type of writing going on this chip
*/
enum ssp_writing {
WRITING_NULL,

View File

@ -34,7 +34,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
@ -223,7 +223,7 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
}
if (cs == NULL) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@ -235,7 +235,7 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
*/
cs->mode = SPI_PPC4XX_MODE_SPE;
switch (spi->mode & (SPI_CPHA | SPI_CPOL)) {
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
cs->mode |= SPI_CLK_MODE0;
break;
@ -326,7 +326,7 @@ static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
{
/*
* On all 4xx PPC's the SPI bus is shared/multiplexed with
* the 2nd I2C bus. We need to enable the the SPI bus before
* the 2nd I2C bus. We need to enable the SPI bus before
* using it.
*/
@ -349,7 +349,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
int ret;
const unsigned int *clk;
master = spi_alloc_master(dev, sizeof *hw);
master = spi_alloc_master(dev, sizeof(*hw));
if (master == NULL)
return -ENOMEM;
master->dev.of_node = np;

View File

@ -2,18 +2,18 @@
/*
* PXA2xx SPI DMA engine support.
*
* Copyright (C) 2013, Intel Corporation
* Copyright (C) 2013, 2021 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include "spi-pxa2xx.h"
@ -26,7 +26,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
* It is possible that one CPU is handling the ROR interrupt and the other
* just gets DMA completion. Calling pump_transfers() twice for the
* same transfer leads to problems thus we prevent concurrent calls
* by using ->dma_running.
* by using dma_running.
*/
if (atomic_dec_and_test(&drv_data->dma_running)) {
/*
@ -34,25 +34,18 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
* might not know about the error yet. So we re-check the
* ROR bit here before we clear the status register.
*/
if (!error) {
u32 status = pxa2xx_spi_read(drv_data, SSSR)
& drv_data->mask_sr;
error = status & SSSR_ROR;
}
if (!error)
error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;
/* Clear status & disable interrupts */
pxa2xx_spi_write(drv_data, SSCR1,
pxa2xx_spi_read(drv_data, SSCR1)
& ~drv_data->dma_cr1);
clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
write_SSSR_CS(drv_data, drv_data->clear_sr);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
if (error) {
/* In case we got an error we disable the SSP now */
pxa2xx_spi_write(drv_data, SSCR0,
pxa2xx_spi_read(drv_data, SSCR0)
& ~SSCR0_SSE);
pxa_ssp_disable(drv_data->ssp);
msg->status = -EIO;
}
@ -94,14 +87,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = drv_data->ssdr_physical;
cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
cfg.dst_addr_width = width;
cfg.dst_maxburst = chip->dma_burst_size;
sgt = &xfer->tx_sg;
chan = drv_data->controller->dma_tx;
} else {
cfg.src_addr = drv_data->ssdr_physical;
cfg.src_addr = drv_data->ssp->phys_base + SSDR;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
@ -111,7 +104,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
return NULL;
}
@ -123,9 +116,9 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 status;
status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
status = read_SSSR_bits(drv_data, drv_data->mask_sr);
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
dev_err(drv_data->ssp->dev, "FIFO overrun\n");
dmaengine_terminate_async(drv_data->controller->dma_rx);
dmaengine_terminate_async(drv_data->controller->dma_tx);
@ -145,16 +138,14 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
if (!tx_desc) {
dev_err(&drv_data->pdev->dev,
"failed to get DMA TX descriptor\n");
dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
err = -EBUSY;
goto err_tx;
}
rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
if (!rx_desc) {
dev_err(&drv_data->pdev->dev,
"failed to get DMA RX descriptor\n");
dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
err = -EBUSY;
goto err_rx;
}
@ -191,8 +182,8 @@ void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
struct device *dev = &drv_data->pdev->dev;
struct spi_controller *controller = drv_data->controller;
struct device *dev = drv_data->ssp->dev;
dma_cap_mask_t mask;
dma_cap_zero(mask);

View File

@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* CE4100's SPI device is more or less the same one as found on PXA
* PCI glue driver for SPI PXA2xx compatible controllers.
* CE4100's SPI device is more or less the same one as found on PXA.
*
* Copyright (C) 2016, Intel Corporation
* Copyright (C) 2016, 2021 Intel Corporation
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dmaengine.h>
@ -178,7 +180,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.rx_param = &bsw2_rx_param,
},
[PORT_MRFLD] = {
.type = PXA27x_SSP,
.type = MRFLD_SSP,
.max_clk_rate = 25000000,
.setup = mrfld_spi_setup,
},
@ -239,6 +241,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
ssp->dev = &dev->dev;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
@ -254,7 +257,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0,
c->max_clk_rate);
if (IS_ERR(ssp->clk))
if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
memset(&pi, 0, sizeof(pi));

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
* Copyright (C) 2013, Intel Corporation
* Copyright (C) 2013, 2021 Intel Corporation
*/
#include <linux/acpi.h>
@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
@ -25,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
@ -38,11 +40,11 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define TIMOUT_DFLT 1000
/*
* for testing SSCR1 changes that require SSP restart, basically
* everything except the service and interrupt enables, the pxa270 developer
* For testing SSCR1 changes that require SSP restart, basically
* everything except the service and interrupt enables, the PXA270 developer
* manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
* list, but the PXA255 dev man says all bits without really meaning the
* service and interrupt enables
* list, but the PXA255 developer manual says all bits without really meaning
* the service and interrupt enables.
*/
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
@ -198,6 +200,17 @@ static bool is_mmp2_ssp(const struct driver_data *drv_data)
return drv_data->ssp_type == MMP2_SSP;
}
static bool is_mrfld_ssp(const struct driver_data *drv_data)
{
return drv_data->ssp_type == MRFLD_SSP;
}
static void pxa2xx_spi_update(const struct driver_data *drv_data, u32 reg, u32 mask, u32 value)
{
if ((pxa2xx_spi_read(drv_data, reg) & mask) != value)
pxa2xx_spi_write(drv_data, reg, value & mask);
}
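/*
 * A minimal usage sketch (illustrative only, using register names that appear
 * elsewhere in this driver): the helper reads the register, compares the
 * masked value and only issues the write when it actually changes, e.g.
 *
 *	pxa2xx_spi_update(drv_data, SSIRF, GENMASK(7, 0), chip->lpss_rx_threshold);
 *	pxa2xx_spi_update(drv_data, SSCR0, GENMASK(31, 0), cr0);
 *
 * which stands in for the open-coded read/compare/write sequences removed
 * later in this patch.
 */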
static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
@ -239,7 +252,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
break;
}
return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
return read_SSSR_bits(drv_data, mask) == mask;
}
static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@ -284,13 +297,11 @@ static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
case QUARK_X1000_SSP:
return clk_div
| QUARK_X1000_SSCR0_Motorola
| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
| SSCR0_SSE;
| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits);
default:
return clk_div
| SSCR0_Motorola
| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
| SSCR0_SSE
| (bits > 16 ? SSCR0_EDSS : 0);
}
}
@ -325,7 +336,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
u32 value;
config = lpss_get_config(drv_data);
drv_data->lpss_base = drv_data->ioaddr + config->offset;
drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
/* Enable software chip select control */
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
@ -421,7 +432,7 @@ static void cs_assert(struct spi_device *spi)
spi_controller_get_devdata(spi->controller);
if (drv_data->ssp_type == CE4100_SSP) {
pxa2xx_spi_write(drv_data, SSSR, chip->frm);
pxa2xx_spi_write(drv_data, SSSR, spi->chip_select);
return;
}
@ -430,11 +441,6 @@ static void cs_assert(struct spi_device *spi)
return;
}
if (chip->gpiod_cs) {
gpiod_set_value(chip->gpiod_cs, chip->gpio_cs_inverted);
return;
}
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, true);
}
@ -460,11 +466,6 @@ static void cs_deassert(struct spi_device *spi)
return;
}
if (chip->gpiod_cs) {
gpiod_set_value(chip->gpiod_cs, !chip->gpio_cs_inverted);
return;
}
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, false);
}
@ -482,7 +483,7 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
unsigned long limit = loops_per_jiffy << 1;
do {
while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
while (read_SSSR_bits(drv_data, SSSR_RNE))
pxa2xx_spi_read(drv_data, SSDR);
} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
write_SSSR_CS(drv_data, SSSR_ROR);
@ -496,8 +497,7 @@ static void pxa2xx_spi_off(struct driver_data *drv_data)
if (is_mmp2_ssp(drv_data))
return;
pxa2xx_spi_write(drv_data, SSCR0,
pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
pxa_ssp_disable(drv_data->ssp);
}
static int null_writer(struct driver_data *drv_data)
@ -518,8 +518,7 @@ static int null_reader(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
&& (drv_data->rx < drv_data->rx_end)) {
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += n_bytes;
}
@ -541,8 +540,7 @@ static int u8_writer(struct driver_data *drv_data)
static int u8_reader(struct driver_data *drv_data)
{
while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
&& (drv_data->rx < drv_data->rx_end)) {
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
++drv_data->rx;
}
@ -564,8 +562,7 @@ static int u16_writer(struct driver_data *drv_data)
static int u16_reader(struct driver_data *drv_data)
{
while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
&& (drv_data->rx < drv_data->rx_end)) {
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 2;
}
@ -587,8 +584,7 @@ static int u32_writer(struct driver_data *drv_data)
static int u32_reader(struct driver_data *drv_data)
{
while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
&& (drv_data->rx < drv_data->rx_end)) {
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 4;
}
@ -618,47 +614,51 @@ static void reset_sccr1(struct driver_data *drv_data)
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
static void int_error_stop(struct driver_data *drv_data, const char *msg)
static void int_stop_and_reset(struct driver_data *drv_data)
{
/* Stop and reset SSP */
/* Clear and disable interrupts */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
if (pxa25x_ssp_comp(drv_data))
return;
pxa2xx_spi_write(drv_data, SSTO, 0);
}
static void int_error_stop(struct driver_data *drv_data, const char *msg, int err)
{
int_stop_and_reset(drv_data);
pxa2xx_spi_flush(drv_data);
pxa2xx_spi_off(drv_data);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
dev_err(drv_data->ssp->dev, "%s\n", msg);
drv_data->controller->cur_msg->status = -EIO;
drv_data->controller->cur_msg->status = err;
spi_finalize_current_transfer(drv_data->controller);
}
static void int_transfer_complete(struct driver_data *drv_data)
{
/* Clear and disable interrupts */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
int_stop_and_reset(drv_data);
spi_finalize_current_transfer(drv_data->controller);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
u32 irq_status;
u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
irq_status = read_SSSR_bits(drv_data, drv_data->mask_sr);
if (!(pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE))
irq_status &= ~SSSR_TFS;
if (irq_status & SSSR_ROR) {
int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
int_error_stop(drv_data, "interrupt_transfer: FIFO overrun", -EIO);
return IRQ_HANDLED;
}
if (irq_status & SSSR_TUR) {
int_error_stop(drv_data, "interrupt_transfer: fifo underrun");
int_error_stop(drv_data, "interrupt_transfer: FIFO underrun", -EIO);
return IRQ_HANDLED;
}
@ -670,7 +670,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
}
}
/* Drain rx fifo, Fill tx fifo and prevent overruns */
/* Drain Rx FIFO, Fill Tx FIFO and prevent overruns */
do {
if (drv_data->read(drv_data)) {
int_transfer_complete(drv_data);
@ -691,8 +691,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
sccr1_reg &= ~SSCR1_TIE;
/*
* PXA25x_SSP has no timeout, set up rx threshould for the
* remaining RX bytes.
* PXA25x_SSP has no timeout, set up Rx threshold for
* the remaining Rx bytes.
*/
if (pxa25x_ssp_comp(drv_data)) {
u32 rx_thre;
@ -725,14 +725,12 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static void handle_bad_msg(struct driver_data *drv_data)
{
pxa2xx_spi_off(drv_data);
pxa2xx_spi_write(drv_data, SSCR1,
pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
clear_SSCR1_bits(drv_data, drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
write_SSSR_CS(drv_data, drv_data->clear_sr);
dev_err(&drv_data->pdev->dev,
"bad message state in interrupt handler\n");
dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
}
static irqreturn_t ssp_int(int irq, void *dev_id)
@ -748,7 +746,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
* the IRQ was not for us (we shouldn't be RPM suspended when the
* interrupt is enabled).
*/
if (pm_runtime_suspended(&drv_data->pdev->dev))
if (pm_runtime_suspended(drv_data->ssp->dev))
return IRQ_NONE;
/*
@ -916,7 +914,7 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
/*
* Calculate the divisor for the SCR (Serial Clock Rate), avoiding
* that the SSP transmission rate can be greater than the device rate
* that the SSP transmission rate can be greater than the device rate.
*/
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
@ -974,7 +972,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* Check if we can DMA this transfer */
if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
/* reject already-mapped transfers; PIO won't always work */
/* Reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&spi->dev,
@ -983,10 +981,10 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
return -EINVAL;
}
/* warn ... we force this to PIO mode */
/* Warn ... we force this to PIO mode */
dev_warn_ratelimited(&spi->dev,
"DMA disabled for transfer length %ld greater than %d\n",
(long)transfer->len, MAX_DMA_LEN);
"DMA disabled for transfer length %u greater than %d\n",
transfer->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
@ -1028,8 +1026,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
u32_writer : null_writer;
}
/*
* if bits/word is changed in dma mode, then must check the
* thresholds and burst also
* If bits per word is changed in DMA mode, then must check
* the thresholds and burst also.
*/
if (chip->enable_dma) {
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
@ -1080,47 +1078,45 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
dma_mapped ? "DMA" : "PIO");
if (is_lpss_ssp(drv_data)) {
if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
!= chip->lpss_rx_threshold)
pxa2xx_spi_write(drv_data, SSIRF,
chip->lpss_rx_threshold);
if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
!= chip->lpss_tx_threshold)
pxa2xx_spi_write(drv_data, SSITF,
chip->lpss_tx_threshold);
pxa2xx_spi_update(drv_data, SSIRF, GENMASK(7, 0), chip->lpss_rx_threshold);
pxa2xx_spi_update(drv_data, SSITF, GENMASK(15, 0), chip->lpss_tx_threshold);
}
if (is_quark_x1000_ssp(drv_data) &&
(pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
if (is_mrfld_ssp(drv_data)) {
u32 mask = SFIFOTT_RFT | SFIFOTT_TFT;
u32 thresh = 0;
/* see if we need to reload the config registers */
if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
|| (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
!= (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
if (!is_mmp2_ssp(drv_data))
pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* first set CR1 without interrupt and service enables */
pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
/* restart the SSP */
pxa2xx_spi_write(drv_data, SSCR0, cr0);
thresh |= SFIFOTT_RxThresh(chip->lpss_rx_threshold);
thresh |= SFIFOTT_TxThresh(chip->lpss_tx_threshold);
} else {
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
pxa2xx_spi_update(drv_data, SFIFOTT, mask, thresh);
}
if (is_quark_x1000_ssp(drv_data))
pxa2xx_spi_update(drv_data, DDS_RATE, GENMASK(23, 0), chip->dds_rate);
/* Stop the SSP */
if (!is_mmp2_ssp(drv_data))
pxa_ssp_disable(drv_data->ssp);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* First set CR1 without interrupt and service enables */
pxa2xx_spi_update(drv_data, SSCR1, change_mask, cr1);
/* See if we need to reload the configuration registers */
pxa2xx_spi_update(drv_data, SSCR0, GENMASK(31, 0), cr0);
/* Restart the SSP */
pxa_ssp_enable(drv_data->ssp);
if (is_mmp2_ssp(drv_data)) {
u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR)
& SSSR_TFL_MASK) >> 8;
u8 tx_level = read_SSSR_bits(drv_data, SSSR_TFL_MASK) >> 8;
if (tx_level) {
/* On MMP2, flipping SSE doesn't to empty TXFIFO. */
dev_warn(&spi->dev, "%d bytes of garbage in TXFIFO!\n",
tx_level);
/* On MMP2, flipping SSE doesn't empty the Tx FIFO. */
dev_warn(&spi->dev, "%u bytes of garbage in Tx FIFO!\n", tx_level);
if (tx_level > transfer->len)
tx_level = transfer->len;
drv_data->tx += tx_level;
@ -1139,7 +1135,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/*
* Release the data by enabling service requests and interrupts,
* without changing any mode bits
* without changing any mode bits.
*/
pxa2xx_spi_write(drv_data, SSCR1, cr1);
@ -1150,18 +1146,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Stop and reset SSP */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
pxa2xx_spi_off(drv_data);
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
drv_data->controller->cur_msg->status = -EINTR;
spi_finalize_current_transfer(drv_data->controller);
int_error_stop(drv_data, "transfer aborted", -EINTR);
return 0;
}
@ -1175,9 +1160,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
pxa2xx_spi_write(drv_data, SSCR1,
pxa2xx_spi_read(drv_data, SSCR1)
& ~(drv_data->int_cr1 | drv_data->dma_cr1));
clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
@ -1202,63 +1185,61 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
return 0;
}
static void cleanup_cs(struct spi_device *spi)
{
if (!gpio_is_valid(spi->cs_gpio))
return;
gpio_free(spi->cs_gpio);
spi->cs_gpio = -ENOENT;
}
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
struct pxa2xx_spi_chip *chip_info)
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
struct gpio_desc *gpiod;
int err = 0;
struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
if (chip == NULL)
return 0;
if (drv_data->cs_gpiods) {
gpiod = drv_data->cs_gpiods[spi->chip_select];
if (gpiod) {
chip->gpiod_cs = gpiod;
chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
gpiod_set_value(gpiod, chip->gpio_cs_inverted);
}
return 0;
}
if (chip_info == NULL)
return 0;
/* NOTE: setup() can be called multiple times, possibly with
* different chip_info, release previously requested GPIO
*/
if (chip->gpiod_cs) {
gpiod_put(chip->gpiod_cs);
chip->gpiod_cs = NULL;
}
if (drv_data->ssp_type == CE4100_SSP)
return 0;
/* If (*cs_control) is provided, ignore GPIO chip select */
/*
* NOTE: setup() can be called multiple times, possibly with
* different chip_info, release previously requested GPIO.
*/
cleanup_cs(spi);
/* If ->cs_control() is provided, ignore GPIO chip select */
if (chip_info->cs_control) {
chip->cs_control = chip_info->cs_control;
return 0;
}
if (gpio_is_valid(chip_info->gpio_cs)) {
err = gpio_request(chip_info->gpio_cs, "SPI_CS");
int gpio = chip_info->gpio_cs;
int err;
err = gpio_request(gpio, "SPI_CS");
if (err) {
dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
chip_info->gpio_cs);
dev_err(&spi->dev, "failed to request chip select GPIO%d\n", gpio);
return err;
}
gpiod = gpio_to_desc(chip_info->gpio_cs);
chip->gpiod_cs = gpiod;
chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
err = gpio_direction_output(gpio, !(spi->mode & SPI_CS_HIGH));
if (err) {
gpio_free(gpio);
return err;
}
err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
if (err)
gpiod_put(chip->gpiod_cs);
spi->cs_gpio = gpio;
}
return err;
return 0;
}
static int setup(struct spi_device *spi)
@ -1277,6 +1258,11 @@ static int setup(struct spi_device *spi)
tx_hi_thres = 0;
rx_thres = RX_THRESH_QUARK_X1000_DFLT;
break;
case MRFLD_SSP:
tx_thres = TX_THRESH_MRFLD_DFLT;
tx_hi_thres = 0;
rx_thres = RX_THRESH_MRFLD_DFLT;
break;
case CE4100_SSP:
tx_thres = TX_THRESH_CE4100_DFLT;
tx_hi_thres = 0;
@ -1305,7 +1291,7 @@ static int setup(struct spi_device *spi)
break;
}
/* Only alloc on first setup */
/* Only allocate on the first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
@ -1319,15 +1305,15 @@ static int setup(struct spi_device *spi)
kfree(chip);
return -EINVAL;
}
chip->frm = spi->chip_select;
}
chip->enable_dma = drv_data->controller_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
/* protocol drivers may change the chip settings, so...
* if chip_info exists, use it */
/*
* Protocol drivers may change the chip settings, so...
* if chip_info exists, use it.
*/
chip_info = spi->controller_data;
/* chip_info isn't always needed */
@ -1352,15 +1338,24 @@ static int setup(struct spi_device *spi)
chip->cr1 |= SSCR1_SPH;
}
chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
| SSITF_TxHiThresh(tx_hi_thres);
if (is_lpss_ssp(drv_data)) {
chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) |
SSITF_TxHiThresh(tx_hi_thres);
}
/* set dma burst and threshold outside of chip_info path so that if
* chip_info goes away after setting chip->enable_dma, the
* burst and threshold can still respond to changes in bits_per_word */
if (is_mrfld_ssp(drv_data)) {
chip->lpss_rx_threshold = rx_thres;
chip->lpss_tx_threshold = tx_thres;
}
/*
* Set DMA burst and threshold outside of chip_info path so that if
* chip_info goes away after setting chip->enable_dma, the burst and
* threshold can still respond to changes in bits_per_word.
*/
if (chip->enable_dma) {
/* set up legal burst and threshold for dma */
/* Set up legal burst and threshold for DMA */
if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
spi->bits_per_word,
&chip->dma_burst_size,
@ -1391,8 +1386,8 @@ static int setup(struct spi_device *spi)
}
chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
chip->cr1 |= ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) |
((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0);
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
@ -1426,16 +1421,8 @@ static int setup(struct spi_device *spi)
static void cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
if (!chip)
return;
if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
chip->gpiod_cs)
gpiod_put(chip->gpiod_cs);
cleanup_cs(spi);
kfree(chip);
}
@ -1652,7 +1639,7 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
if (has_acpi_companion(&drv_data->pdev->dev)) {
if (has_acpi_companion(drv_data->ssp->dev)) {
switch (drv_data->ssp_type) {
/*
* For Atoms the ACPI DeviceSelection used by the Windows
@ -1684,7 +1671,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
int status, count;
int status;
u32 tmp;
platform_info = dev_get_platdata(dev);
@ -1701,7 +1688,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
ssp = &platform_info->ssp;
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to get ssp\n");
dev_err(&pdev->dev, "failed to get SSP\n");
return -ENODEV;
}
@ -1712,17 +1699,18 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (!controller) {
dev_err(&pdev->dev, "cannot alloc spi_controller\n");
pxa_ssp_free(ssp);
return -ENOMEM;
status = -ENOMEM;
goto out_error_controller_alloc;
}
drv_data = spi_controller_get_devdata(controller);
drv_data->controller = controller;
drv_data->controller_info = platform_info;
drv_data->pdev = pdev;
drv_data->ssp = ssp;
controller->dev.of_node = pdev->dev.of_node;
/* the spi->mode bits understood by this driver: */
controller->dev.of_node = dev->of_node;
controller->dev.fwnode = dev->fwnode;
/* The spi->mode bits understood by this driver: */
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
controller->bus_num = ssp->port_id;
@ -1740,8 +1728,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->ssp_type = ssp->type;
drv_data->ioaddr = ssp->mmio_base;
drv_data->ssdr_physical = ssp->phys_base + SSDR;
if (pxa25x_ssp_comp(drv_data)) {
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@ -1803,15 +1789,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
controller->min_speed_hz =
DIV_ROUND_UP(controller->max_speed_hz, 512);
pxa_ssp_disable(ssp);
/* Load default SSP configuration */
pxa2xx_spi_write(drv_data, SSCR0, 0);
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
pxa2xx_spi_write(drv_data, SSCR1, tmp);
/* using the Motorola SPI protocol and use 8 bit frame */
/* Using the Motorola SPI protocol and use 8 bit frame */
tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
@ -1863,38 +1850,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
}
controller->num_chipselect = platform_info->num_chipselect;
count = gpiod_count(&pdev->dev, "cs");
if (count > 0) {
int i;
controller->num_chipselect = max_t(int, count,
controller->num_chipselect);
drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
controller->num_chipselect, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!drv_data->cs_gpiods) {
status = -ENOMEM;
goto out_error_clock_enabled;
}
for (i = 0; i < controller->num_chipselect; i++) {
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
if (IS_ERR(gpiod)) {
/* Means use native chip select */
if (PTR_ERR(gpiod) == -ENOENT)
continue;
status = PTR_ERR(gpiod);
goto out_error_clock_enabled;
} else {
drv_data->cs_gpiods[i] = gpiod;
}
}
}
controller->use_gpio_descriptors = true;
if (platform_info->is_slave) {
drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
@ -1913,8 +1869,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = spi_register_controller(controller);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi controller\n");
if (status) {
dev_err(&pdev->dev, "problem registering SPI controller\n");
goto out_error_pm_runtime_enabled;
}
@ -1945,7 +1901,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
spi_unregister_controller(drv_data->controller);
/* Disable the SSP at the peripheral and SOC level */
pxa2xx_spi_write(drv_data, SSCR0, 0);
pxa_ssp_disable(ssp);
clk_disable_unprepare(ssp->clk);
/* Release DMA */
@ -1972,9 +1928,10 @@ static int pxa2xx_spi_suspend(struct device *dev)
int status;
status = spi_controller_suspend(drv_data->controller);
if (status != 0)
if (status)
return status;
pxa2xx_spi_write(drv_data, SSCR0, 0);
pxa_ssp_disable(ssp);
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(ssp->clk);

View File

@ -1,28 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
* Copyright (C) 2013, Intel Corporation
* Copyright (C) 2013, 2021 Intel Corporation
*/
#ifndef SPI_PXA2XX_H
#define SPI_PXA2XX_H
#include <linux/atomic.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/pxa2xx_ssp.h>
struct gpio_desc;
struct pxa2xx_spi_controller;
struct spi_controller;
struct spi_device;
struct spi_transfer;
struct driver_data {
/* Driver model hookup */
struct platform_device *pdev;
/* SSP Info */
struct ssp_device *ssp;
@ -33,10 +31,6 @@ struct driver_data {
/* PXA hookup */
struct pxa2xx_spi_controller *controller_info;
/* SSP register addresses */
void __iomem *ioaddr;
phys_addr_t ssdr_physical;
/* SSP masks*/
u32 dma_cr1;
u32 int_cr1;
@ -59,9 +53,6 @@ struct driver_data {
void __iomem *lpss_base;
/* GPIOs for chip selects */
struct gpio_desc **cs_gpiods;
/* Optional slave FIFO ready signal */
struct gpio_desc *gpiod_ready;
};
@ -71,37 +62,32 @@ struct chip_data {
u32 dds_rate;
u32 timeout;
u8 n_bytes;
u8 enable_dma;
u32 dma_burst_size;
u32 threshold;
u32 dma_threshold;
u32 threshold;
u16 lpss_rx_threshold;
u16 lpss_tx_threshold;
u8 enable_dma;
union {
struct gpio_desc *gpiod_cs;
unsigned int frm;
};
int gpio_cs_inverted;
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
void (*cs_control)(u32 command);
};
static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
unsigned reg)
static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, u32 reg)
{
return __raw_readl(drv_data->ioaddr + reg);
return pxa_ssp_read_reg(drv_data->ssp, reg);
}
static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
unsigned reg, u32 val)
static inline void pxa2xx_spi_write(const struct driver_data *drv_data, u32 reg, u32 val)
{
__raw_writel(val, drv_data->ioaddr + reg);
pxa_ssp_write_reg(drv_data->ssp, reg, val);
}
#define DMA_ALIGNMENT 8
static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
static inline int pxa25x_ssp_comp(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
case PXA25x_SSP:
@ -113,11 +99,21 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
}
}
static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
static inline void clear_SSCR1_bits(const struct driver_data *drv_data, u32 bits)
{
pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~bits);
}
static inline u32 read_SSSR_bits(const struct driver_data *drv_data, u32 bits)
{
return pxa2xx_spi_read(drv_data, SSSR) & bits;
}
static inline void write_SSSR_CS(const struct driver_data *drv_data, u32 val)
{
if (drv_data->ssp_type == CE4100_SSP ||
drv_data->ssp_type == QUARK_X1000_SSP)
val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
val |= read_SSSR_bits(drv_data, SSSR_ALT_FRM_MASK);
pxa2xx_spi_write(drv_data, SSSR, val);
}
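/*
 * Illustrative only (SSSR_BSY comes from <linux/pxa2xx_ssp.h>): the new
 * accessors let status polls be written without open-coding the masking,
 * for example a busy-wait such as
 *
 *	while (read_SSSR_bits(drv_data, SSSR_BSY))
 *		cpu_relax();
 */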

View File

@ -107,6 +107,8 @@
#define CR0_OPM_MASTER 0x0
#define CR0_OPM_SLAVE 0x1
#define CR0_SOI_OFFSET 23
#define CR0_MTM_OFFSET 0x21
/* Bit fields in SER, 2bit */
@ -116,13 +118,14 @@
#define BAUDR_SCKDV_MIN 2
#define BAUDR_SCKDV_MAX 65534
/* Bit fields in SR, 5bit */
#define SR_MASK 0x1f
/* Bit fields in SR, 6bit */
#define SR_MASK 0x3f
#define SR_BUSY (1 << 0)
#define SR_TF_FULL (1 << 1)
#define SR_TF_EMPTY (1 << 2)
#define SR_RF_EMPTY (1 << 3)
#define SR_RF_FULL (1 << 4)
#define SR_SLAVE_TX_BUSY (1 << 5)
/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
#define INT_MASK 0x1f
@ -156,7 +159,8 @@
*/
#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
#define ROCKCHIP_SPI_MAX_CS_NUM 2
/* 2 for native cs, 2 for cs-gpio */
#define ROCKCHIP_SPI_MAX_CS_NUM 4
#define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
#define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
@ -197,13 +201,19 @@ static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}
static inline void wait_for_idle(struct rockchip_spi *rs)
static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5);
do {
if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
return;
if (slave_mode) {
if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
!((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
return;
} else {
if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
return;
}
} while (!time_after(jiffies, timeout));
dev_warn(rs->dev, "spi controller is in busy state!\n");
@ -228,7 +238,7 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
struct spi_controller *ctlr = spi->controller;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
bool cs_asserted = !enable;
bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
/* Return immediately for no-op */
if (cs_asserted == rs->cs_asserted[spi->chip_select])
@ -238,11 +248,15 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
/* Keep things powered as long as CS is asserted */
pm_runtime_get_sync(rs->dev);
ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
BIT(spi->chip_select));
if (spi->cs_gpiod)
ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
else
ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
} else {
ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
BIT(spi->chip_select));
if (spi->cs_gpiod)
ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
else
ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
/* Drop reference from when we first asserted CS */
pm_runtime_put(rs->dev);
@ -383,7 +397,7 @@ static void rockchip_spi_dma_txcb(void *data)
return;
/* Wait until the FIFO data has been transferred completely. */
wait_for_idle(rs);
wait_for_tx_idle(rs, ctlr->slave);
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
@ -495,6 +509,8 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
if (spi->mode & SPI_CS_HIGH)
cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf)
cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
@ -540,12 +556,12 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
* interrupt exactly when the fifo is full doesn't seem to work,
* so we need the strict inequality here
*/
if (xfer->len < rs->fifo_len)
writel_relaxed(xfer->len - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
if ((xfer->len / rs->n_bytes) < rs->fifo_len)
writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
else
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
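/*
 * Worked example with assumed numbers: for a 64-entry FIFO (fifo_len = 64),
 * 16-bit frames (n_bytes = 2) and a 32-byte transfer, 32 / 2 = 16 frames fit
 * in the FIFO, so RXFTLR is set to 16 - 1 = 15 and the Rx interrupt fires
 * only once the whole transfer has been received; longer transfers fall back
 * to the half-FIFO watermark of 64 / 2 - 1 = 31.
 */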
@ -783,6 +799,14 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->can_dma = rockchip_spi_can_dma;
}
switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
case ROCKCHIP_SPI_VER2_TYPE2:
ctlr->mode_bits |= SPI_CS_HIGH;
break;
default:
break;
}
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register controller\n");
@ -922,6 +946,7 @@ static const struct of_device_id rockchip_spi_dt_match[] = {
{ .compatible = "rockchip,rk3368-spi", },
{ .compatible = "rockchip,rk3399-spi", },
{ .compatible = "rockchip,rv1108-spi", },
{ .compatible = "rockchip,rv1126-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);

View File

@ -618,9 +618,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = -ETIMEDOUT;
}
if (tx)
dmaengine_terminate_all(rspi->ctlr->dma_tx);
dmaengine_terminate_sync(rspi->ctlr->dma_tx);
if (rx)
dmaengine_terminate_all(rspi->ctlr->dma_rx);
dmaengine_terminate_sync(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
@ -634,7 +634,7 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
no_dma_tx:
if (rx)
dmaengine_terminate_all(rspi->ctlr->dma_rx);
dmaengine_terminate_sync(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
dev_warn_once(&rspi->ctlr->dev,

View File

@ -853,10 +853,10 @@ stop_reset:
sh_msiof_spi_stop(p, rx);
stop_dma:
if (tx)
dmaengine_terminate_all(p->ctlr->dma_tx);
dmaengine_terminate_sync(p->ctlr->dma_tx);
no_dma_tx:
if (rx)
dmaengine_terminate_all(p->ctlr->dma_rx);
dmaengine_terminate_sync(p->ctlr->dma_rx);
sh_msiof_write(p, SIIER, 0);
return ret;
}

View File

@ -36,6 +36,7 @@
#define CR_FTIE BIT(18)
#define CR_SMIE BIT(19)
#define CR_TOIE BIT(20)
#define CR_APMS BIT(22)
#define CR_PRESC_MASK GENMASK(31, 24)
#define QSPI_DCR 0x04
@ -53,6 +54,7 @@
#define QSPI_FCR 0x0c
#define FCR_CTEF BIT(0)
#define FCR_CTCF BIT(1)
#define FCR_CSMF BIT(3)
#define QSPI_DLR 0x10
@ -91,7 +93,6 @@
#define STM32_AUTOSUSPEND_DELAY -1
struct stm32_qspi_flash {
struct stm32_qspi *qspi;
u32 cs;
u32 presc;
};
@ -107,6 +108,7 @@ struct stm32_qspi {
u32 clk_rate;
struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
struct completion data_completion;
struct completion match_completion;
u32 fmode;
struct dma_chan *dma_chtx;
@ -115,6 +117,7 @@ struct stm32_qspi {
u32 cr_reg;
u32 dcr_reg;
unsigned long status_timeout;
/*
* to protect device configuration, could be different between
@ -128,11 +131,20 @@ static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
u32 cr, sr;
cr = readl_relaxed(qspi->io_base + QSPI_CR);
sr = readl_relaxed(qspi->io_base + QSPI_SR);
if (cr & CR_SMIE && sr & SR_SMF) {
/* disable irq */
cr &= ~CR_SMIE;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
complete(&qspi->match_completion);
return IRQ_HANDLED;
}
if (sr & (SR_TEF | SR_TCF)) {
/* disable irq */
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_TCIE & ~CR_TEIE;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
complete(&qspi->data_completion);
@ -322,6 +334,24 @@ wait_nobusy:
return err;
}
static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
u32 cr;
reinit_completion(&qspi->match_completion);
cr = readl_relaxed(qspi->io_base + QSPI_CR);
writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);
if (!wait_for_completion_timeout(&qspi->match_completion,
msecs_to_jiffies(qspi->status_timeout)))
return -ETIMEDOUT;
writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);
return 0;
}
static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
{
if (buswidth == 4)
@ -335,7 +365,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
u32 ccr, cr;
int timeout, err = 0;
int timeout, err = 0, err_poll_status = 0;
dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
@ -381,6 +411,9 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
if (qspi->fmode == CCR_FMODE_APM)
err_poll_status = stm32_qspi_wait_poll_status(qspi, op);
err = stm32_qspi_tx(qspi, op);
/*
@ -390,7 +423,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
* byte of the device (device size - fifo size). Since the device size is not
* known, the prefetching is always stopped.
*/
if (err || qspi->fmode == CCR_FMODE_MM)
if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
goto abort;
/* wait end of tx in indirect mode */
@ -409,15 +442,49 @@ abort:
cr, !(cr & CR_ABORT), 1,
STM32_ABT_TIMEOUT_US);
writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);
writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);
if (err || timeout)
dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
__func__, err, timeout);
if (err || err_poll_status || timeout)
dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
__func__, err, err_poll_status, timeout);
return err;
}
static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
u16 mask, u16 match,
unsigned long initial_delay_us,
unsigned long polling_rate_us,
unsigned long timeout_ms)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
int ret;
if (!spi_mem_supports_op(mem, op))
return -EOPNOTSUPP;
ret = pm_runtime_get_sync(qspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(qspi->dev);
return ret;
}
mutex_lock(&qspi->lock);
writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
qspi->fmode = CCR_FMODE_APM;
qspi->status_timeout = timeout_ms;
ret = stm32_qspi_send(mem, op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
pm_runtime_put_autosuspend(qspi->dev);
return ret;
}
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
@ -525,12 +592,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
flash = &qspi->flash[spi->chip_select];
flash->qspi = qspi;
flash->cs = spi->chip_select;
flash->presc = presc;
mutex_lock(&qspi->lock);
qspi->cr_reg = 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
@ -610,6 +676,7 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
.exec_op = stm32_qspi_exec_op,
.dirmap_create = stm32_qspi_dirmap_create,
.dirmap_read = stm32_qspi_dirmap_read,
.poll_status = stm32_qspi_poll_status,
};
static int stm32_qspi_probe(struct platform_device *pdev)
@ -664,6 +731,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
}
init_completion(&qspi->data_completion);
init_completion(&qspi->match_completion);
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk)) {

View File

@ -1071,8 +1071,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
ret = wait_for_completion_timeout(&tspi->xfer_completion,
SPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tspi->dev,
"spi transfer timeout, err %d\n", ret);
dev_err(tspi->dev, "spi transfer timeout\n");
if (tspi->is_curr_dma_xfer &&
(tspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tspi->tx_dma_chan);

View File

@ -1028,7 +1028,7 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi
ret = wait_for_completion_timeout(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "transfer timeout: %d\n", ret);
dev_err(tqspi->dev, "transfer timeout\n");
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tqspi->tx_dma_chan);
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))

View File

@ -580,8 +580,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
if (data->pkt_tx_buff != NULL) {
data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
if (!data->pkt_rx_buff)
if (!data->pkt_rx_buff) {
kfree(data->pkt_tx_buff);
data->pkt_tx_buff = NULL;
}
}
if (!data->pkt_rx_buff) {

View File

@ -142,7 +142,7 @@ static void uniphier_spi_set_mode(struct spi_device *spi)
* FSTRT start frame timing
* 0: rising edge of clock, 1: falling edge of clock
*/
switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;

View File

@ -363,6 +363,10 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
const struct spi_device *spi = to_spi_device(dev);
int rc;
rc = of_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
@ -560,6 +564,55 @@ static void spi_cleanup(struct spi_device *spi)
spi->controller->cleanup(spi);
}
static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
return status;
}
/* Controller may unregister concurrently */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
!device_is_registered(&ctlr->dev)) {
return -ENODEV;
}
/* Descriptors take precedence */
if (ctlr->cs_gpiods)
spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
else if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
status = spi_setup(spi);
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
return status;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
spi_cleanup(spi);
} else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
}
return status;
}
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@ -590,54 +643,31 @@ int spi_add_device(struct spi_device *spi)
* its configuration. Lock against concurrent add() calls.
*/
mutex_lock(&spi_add_lock);
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
goto done;
}
/* Controller may unregister concurrently */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
!device_is_registered(&ctlr->dev)) {
status = -ENODEV;
goto done;
}
/* Descriptors take precedence */
if (ctlr->cs_gpiods)
spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
else if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
status = spi_setup(spi);
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
goto done;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
spi_cleanup(spi);
} else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
}
done:
status = __spi_add_device(spi);
mutex_unlock(&spi_add_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
static int spi_add_device_locked(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
/* Chipselects are numbered 0..max; validate. */
if (spi->chip_select >= ctlr->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
ctlr->num_chipselect);
return -EINVAL;
}
/* Set the bus ID string */
spi_dev_set_name(spi);
WARN_ON(!mutex_is_locked(&spi_add_lock));
return __spi_add_device(spi);
}
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
@ -804,6 +834,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
spi->controller->last_cs_enable = enable;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
@ -961,11 +993,15 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (ctlr->dma_tx)
tx_dev = ctlr->dma_tx->device->dev;
else if (ctlr->dma_map_dev)
tx_dev = ctlr->dma_map_dev;
else
tx_dev = ctlr->dev.parent;
if (ctlr->dma_rx)
rx_dev = ctlr->dma_rx->device->dev;
else if (ctlr->dma_map_dev)
rx_dev = ctlr->dma_map_dev;
else
rx_dev = ctlr->dev.parent;
@ -1132,10 +1168,20 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
if (!speed_hz)
speed_hz = 100000;
ms = 8LL * 1000LL * xfer->len;
/*
* For each byte we wait for 8 cycles of the SPI clock.
* Since speed is defined in Hz and we want milliseconds,
* use respective multiplier, but before the division,
* otherwise we may get 0 for short transfers.
*/
ms = 8LL * MSEC_PER_SEC * xfer->len;
do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
/*
* Increase it twice and add 200 ms tolerance, use
* predefined maximum in case of overflow.
*/
ms += ms + 200;
if (ms > UINT_MAX)
ms = UINT_MAX;
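/*
 * Worked example with assumed numbers: a 256 byte transfer at 1 MHz gives
 * 8 * 1000 * 256 / 1000000 = 2 ms, which after doubling and adding the
 * 200 ms tolerance results in a completion timeout of about 204 ms.
 */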
@ -1158,10 +1204,10 @@ static void _spi_transfer_delay_ns(u32 ns)
{
if (!ns)
return;
if (ns <= 1000) {
if (ns <= NSEC_PER_USEC) {
ndelay(ns);
} else {
u32 us = DIV_ROUND_UP(ns, 1000);
u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
if (us <= 10)
udelay(us);
@ -1181,21 +1227,25 @@ int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
switch (unit) {
case SPI_DELAY_UNIT_USECS:
delay *= 1000;
delay *= NSEC_PER_USEC;
break;
case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
case SPI_DELAY_UNIT_NSECS:
/* Nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
/* clock cycles need to be obtained from spi_transfer */
if (!xfer)
return -EINVAL;
/* if there is no effective speed know, then approximate
* by underestimating with half the requested hz
/*
* If there is unknown effective speed, approximate it
* by underestimating with half of the requested hz.
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
if (!hz)
return -EINVAL;
delay *= DIV_ROUND_UP(1000000000, hz);
/* Convert delay to nanoseconds */
delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
break;
default:
return -EINVAL;
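/*
 * Worked example with assumed numbers: a delay of 4 SCK cycles on a transfer
 * running at an effective 10 MHz converts to
 * 4 * DIV_ROUND_UP(NSEC_PER_SEC, 10000000) = 4 * 100 = 400 ns.
 */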
@ -1227,6 +1277,7 @@ EXPORT_SYMBOL_GPL(spi_delay_exec);
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
struct spi_transfer *xfer)
{
u32 default_delay_ns = 10 * NSEC_PER_USEC;
u32 delay = xfer->cs_change_delay.value;
u32 unit = xfer->cs_change_delay.unit;
int ret;
@ -1234,16 +1285,16 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg,
/* return early on "fast" mode - for everything but USECS */
if (!delay) {
if (unit == SPI_DELAY_UNIT_USECS)
_spi_transfer_delay_ns(10000);
_spi_transfer_delay_ns(default_delay_ns);
return;
}
ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
if (ret) {
dev_err_once(&msg->spi->dev,
"Use of unsupported delay unit %i, using default of 10us\n",
unit);
_spi_transfer_delay_ns(10000);
"Use of unsupported delay unit %i, using default of %luus\n",
unit, default_delay_ns / NSEC_PER_USEC);
_spi_transfer_delay_ns(default_delay_ns);
}
}
@ -2105,6 +2156,55 @@ static void of_register_spi_devices(struct spi_controller *ctlr)
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
/**
* spi_new_ancillary_device() - Register ancillary SPI device
* @spi: Pointer to the main SPI device registering the ancillary device
* @chip_select: Chip Select of the ancillary device
*
* Register an ancillary SPI device; for example some chips have a chip-select
* for normal device usage and another one for setup/firmware upload.
*
* This may only be called from main SPI device's probe routine.
*
* Return: 0 on success; negative errno on failure
*/
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
u8 chip_select)
{
struct spi_device *ancillary;
int rc = 0;
/* Alloc an spi_device */
ancillary = spi_alloc_device(spi->controller);
if (!ancillary) {
rc = -ENOMEM;
goto err_out;
}
strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
/* Use provided chip-select for ancillary device */
ancillary->chip_select = chip_select;
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
/* Register the new device */
rc = spi_add_device_locked(ancillary);
if (rc) {
dev_err(&spi->dev, "failed to register ancillary device\n");
goto err_out;
}
return ancillary;
err_out:
spi_dev_put(ancillary);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
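/*
 * A minimal sketch of a caller; the driver name and chip-select number below
 * are made up for illustration:
 */
static int foo_probe(struct spi_device *spi)
{
	struct spi_device *aux;
	/* Second chip select of the same chip, e.g. for firmware upload */
	aux = spi_new_ancillary_device(spi, 1);
	if (IS_ERR(aux))
		return PTR_ERR(aux);
	spi_set_drvdata(spi, aux);
	return 0;
}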
#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
struct spi_controller *ctlr;
@ -3442,8 +3542,10 @@ int spi_setup(struct spi_device *spi)
spi_set_thread_rt(spi->controller);
}
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
trace_spi_setup(spi, status);
dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
spi->mode & SPI_MODE_X_MASK,
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
(spi->mode & SPI_3WIRE) ? "3wire, " : "",
@ -3455,79 +3557,6 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
/**
* spi_set_cs_timing - configure CS setup, hold, and inactive delays
* @spi: the device that requires specific CS timing configuration
* @setup: CS setup time specified via @spi_delay
* @hold: CS hold time specified via @spi_delay
* @inactive: CS inactive delay between transfers specified via @spi_delay
*
* Return: zero on success, else a negative error code.
*/
int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
struct spi_delay *hold, struct spi_delay *inactive)
{
struct device *parent = spi->controller->dev.parent;
size_t len;
int status;
if (spi->controller->set_cs_timing &&
!(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
mutex_lock(&spi->controller->io_mutex);
if (spi->controller->auto_runtime_pm) {
status = pm_runtime_get_sync(parent);
if (status < 0) {
mutex_unlock(&spi->controller->io_mutex);
pm_runtime_put_noidle(parent);
dev_err(&spi->controller->dev, "Failed to power device: %d\n",
status);
return status;
}
status = spi->controller->set_cs_timing(spi, setup,
hold, inactive);
pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
} else {
status = spi->controller->set_cs_timing(spi, setup, hold,
inactive);
}
mutex_unlock(&spi->controller->io_mutex);
return status;
}
if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
(hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
(inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Clock-cycle delays for CS not supported in SW mode\n");
return -ENOTSUPP;
}
len = sizeof(struct spi_delay);
/* copy delays to controller */
if (setup)
memcpy(&spi->controller->cs_setup, setup, len);
else
memset(&spi->controller->cs_setup, 0, len);
if (hold)
memcpy(&spi->controller->cs_hold, hold, len);
else
memset(&spi->controller->cs_hold, 0, len);
if (inactive)
memcpy(&spi->controller->cs_inactive, inactive, len);
else
memset(&spi->controller->cs_inactive, 0, len);
return 0;
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);
static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
struct spi_device *spi)
{

View File

@ -59,7 +59,7 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
*
* REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
#define SPI_MODE_MASK (SPI_MODE_X_MASK | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \

View File

@ -170,6 +170,28 @@ struct spinand_op;
struct spinand_device;
#define SPINAND_MAX_ID_LEN 4
/*
* For erase, write and read operations, we have the following timings:
* tBERS (erase) 1ms to 4ms
* tPROG 300us to 400us
* tREAD 25us to 100us
* In order to minimize latency, the min value is divided by 4 for the
* initial delay and by 20 for the poll delay.
* For reset, 5us/10us/500us if the device is respectively
* reading/programming/erasing when the RESET occurs. Since we always
* issue a RESET when the device is IDLE, 5us is selected for both initial
* and poll delay.
*/
#define SPINAND_READ_INITIAL_DELAY_US 6
#define SPINAND_READ_POLL_DELAY_US 5
#define SPINAND_RESET_INITIAL_DELAY_US 5
#define SPINAND_RESET_POLL_DELAY_US 5
#define SPINAND_WRITE_INITIAL_DELAY_US 75
#define SPINAND_WRITE_POLL_DELAY_US 15
#define SPINAND_ERASE_INITIAL_DELAY_US 250
#define SPINAND_ERASE_POLL_DELAY_US 50
#define SPINAND_WAITRDY_TIMEOUT_MS 400
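/*
 * Sketch of how these constants pair with the new spi_mem_poll_status()
 * helper (names from the SPI NAND core are assumed; `op` would be a
 * GET FEATURE(status) operation built by the caller):
 *
 *	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
 *				  SPINAND_READ_INITIAL_DELAY_US,
 *				  SPINAND_READ_POLL_DELAY_US,
 *				  SPINAND_WAITRDY_TIMEOUT_MS);
 */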
/**
* struct spinand_id - SPI NAND id structure

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
*/
#ifndef _ATH79_SPI_PLATFORM_H
#define _ATH79_SPI_PLATFORM_H
struct ath79_spi_platform_data {
unsigned bus_num;
unsigned num_chipselect;
};
#endif /* _ATH79_SPI_PLATFORM_H */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This driver supports the following PXA CPU/SSP ports:-
*
@ -11,8 +11,8 @@
* PXA3xx SSP1, SSP2, SSP3, SSP4
*/
#ifndef __LINUX_SSP_H
#define __LINUX_SSP_H
#ifndef __LINUX_PXA2XX_SSP_H
#define __LINUX_PXA2XX_SSP_H
#include <linux/bits.h>
#include <linux/compiler_types.h>
@ -38,7 +38,6 @@ struct device_node;
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@ -60,7 +59,7 @@ struct device_node;
/* PXA27x, PXA3xx */
#define SSCR0_EDSS BIT(20) /* Extended data size select */
#define SSCR0_NCS BIT(21) /* Network clock select */
#define SSCR0_RIM BIT(22) /* Receive FIFO overrrun interrupt mask */
#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */
#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */
#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
@ -105,6 +104,9 @@ struct device_node;
#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
/* Intel Quark X1000 */
#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */
/* QUARK_X1000 SSCR0 bit definition */
#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */
#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
@ -124,7 +126,7 @@ struct device_node;
#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */
#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */
/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@ -181,6 +183,21 @@ struct device_node;
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */
/* Intel Merrifield SSP */
#define SFIFOL 0x68 /* FIFO level */
#define SFIFOTT 0x6c /* FIFO trigger threshold */
#define RX_THRESH_MRFLD_DFLT 16
#define TX_THRESH_MRFLD_DFLT 16
#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */
#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */
#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */
#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */
#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */
#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */
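/*
 * Example: the default 16-entry Rx/Tx trigger levels can be combined into a
 * single SFIFOTT value, similar to what the pxa2xx driver does for Merrifield
 * in this series:
 *
 *	u32 thresh = SFIFOTT_RxThresh(RX_THRESH_MRFLD_DFLT) |
 *		     SFIFOTT_TxThresh(TX_THRESH_MRFLD_DFLT);
 */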
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
#define SSITF_TxHiThresh(x) (((x) - 1) << 0)
@ -203,8 +220,10 @@ enum pxa_ssp_type {
MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
MRFLD_SSP,
QUARK_X1000_SSP,
LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
/* Keep LPSS types sorted with lpss_platforms[] */
LPSS_LPT_SSP,
LPSS_BYT_SSP,
LPSS_BSW_SSP,
LPSS_SPT_SSP,
@ -252,6 +271,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
static inline void pxa_ssp_enable(struct ssp_device *ssp)
{
u32 sscr0;
sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE;
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
}
static inline void pxa_ssp_disable(struct ssp_device *ssp)
{
u32 sscr0;
sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE;
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
}
#if IS_ENABLED(CONFIG_PXA_SSP)
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
@ -270,4 +305,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
#endif
#endif /* __LINUX_PXA2XX_SSP_H */
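The pxa_ssp_enable()/pxa_ssp_disable() inline helpers added above centralise the SSCR0_SSE toggling that drivers previously open-coded (see the removal in the driver code further down in this diff). A minimal usage sketch, assuming a valid port number and label; error handling is omitted and the values are illustrative:

struct ssp_device *ssp = pxa_ssp_request(1, "example");
u32 sscr0;

/* Quiesce the port before reprogramming SSCR0, then re-enable it */
pxa_ssp_disable(ssp);
sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
/* ... adjust sscr0 as required ... */
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
pxa_ssp_enable(ssp);

pxa_ssp_free(ssp);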

View File

@ -2,8 +2,10 @@
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*/
#ifndef __linux_pxa2xx_spi_h
#define __linux_pxa2xx_spi_h
#ifndef __LINUX_SPI_PXA2XX_SPI_H
#define __LINUX_SPI_PXA2XX_SPI_H
#include <linux/types.h>
#include <linux/pxa2xx_ssp.h>
@ -12,7 +14,10 @@
struct dma_chan;
/* device.platform_data for SSP controller devices */
/*
* The platform data for SSP controller devices
* (resides in device.platform_data).
*/
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
@ -28,8 +33,11 @@ struct pxa2xx_spi_controller {
struct ssp_device ssp;
};
/* spi_board_info.controller_data for SPI slave devices,
* copied to spi_device.platform_data ... mostly for dma tuning
/*
* The controller-specific data for SPI slave devices
* (resides in spi_board_info.controller_data),
* copied to spi_device.platform_data ... mostly for
* DMA tuning.
*/
struct pxa2xx_spi_chip {
u8 tx_threshold;
@ -49,4 +57,5 @@ struct pxa2xx_spi_chip {
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
#endif
#endif
#endif /* __LINUX_SPI_PXA2XX_SPI_H */
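For context, these platform-data structures are normally populated from legacy board code rather than device tree. A minimal sketch of such wiring, assuming board-file registration; the bus number, thresholds and spi_board_info values are illustrative only:

static struct pxa2xx_spi_controller example_spi_info = {
	.num_chipselect	= 1,
	.enable_dma	= 1,
};

static struct pxa2xx_spi_chip example_chip = {
	.tx_threshold	= 8,
	.rx_threshold	= 8,
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	 = "spidev",
		.max_speed_hz	 = 1000000,
		.bus_num	 = 1,
		.chip_select	 = 0,
		.controller_data = &example_chip,
	},
};

static void __init example_init(void)
{
	pxa2xx_set_spi_info(1, &example_spi_info);
	spi_register_board_info(example_board_info,
				ARRAY_SIZE(example_board_info));
}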

View File

@ -250,6 +250,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* the currently mapped area), and the caller of
* spi_mem_dirmap_write() is responsible for calling it again in
* this case.
* @poll_status: poll memory device status until (status & mask) == match,
* or until the timeout has expired. It fills the data buffer with
* the last status value.
*
* This interface should be implemented by SPI controllers providing a
* high-level interface to execute SPI memory operations, which is usually the
@ -274,6 +277,12 @@ struct spi_controller_mem_ops {
u64 offs, size_t len, void *buf);
ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
int (*poll_status)(struct spi_mem *mem,
const struct spi_mem_op *op,
u16 mask, u16 match,
unsigned long initial_delay_us,
unsigned long polling_rate_us,
unsigned long timeout_ms);
};
/**
@ -369,6 +378,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc);
int spi_mem_poll_status(struct spi_mem *mem,
const struct spi_mem_op *op,
u16 mask, u16 match,
unsigned long initial_delay_us,
unsigned long polling_delay_us,
u16 timeout_ms);
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
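As an illustration of the new polling interface, a memory driver can wait for a ready/busy bit without busy-looping in the core. A minimal sketch, assuming a NOR-style 0x05 Read Status command with the busy flag in bit 0; the opcode, mask and delays are illustrative, not taken from any particular driver:

static int example_wait_ready(struct spi_mem *mem)
{
	u8 status;
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, &status, 1));

	/*
	 * Poll until bit 0 clears; start after 10 us, then every 100 us,
	 * giving up after 100 ms. @status holds the last value read.
	 */
	return spi_mem_poll_status(mem, &op, BIT(0), 0, 10, 100, 100);
}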

View File

@ -299,6 +299,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
/* use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
__spi_register_driver(THIS_MODULE, driver)
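The spi_new_ancillary_device() declaration above gives a driver a second spi_device on the same controller, inheriting the parent's settings but using a different chip select. A minimal sketch of a hypothetical probe path; the chip-select number is illustrative and lifetime management is omitted for brevity:

static int example_probe(struct spi_device *spi)
{
	struct spi_device *ancillary;

	/* Claim chip select 1 alongside the main device */
	ancillary = spi_new_ancillary_device(spi, 1);
	if (IS_ERR(ancillary))
		return PTR_ERR(ancillary);

	spi_set_drvdata(spi, ancillary);
	return 0;
}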
@ -586,6 +588,7 @@ struct spi_controller {
bool (*can_dma)(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer);
struct device *dma_map_dev;
/*
* These hooks are for drivers that want to use the generic
@ -1108,11 +1111,6 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
extern int spi_set_cs_timing(struct spi_device *spi,
struct spi_delay *setup,
struct spi_delay *hold,
struct spi_delay *inactive);
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
extern int spi_async_locked(struct spi_device *spi,

View File

@ -42,6 +42,63 @@ DEFINE_EVENT(spi_controller, spi_controller_busy,
);
TRACE_EVENT(spi_setup,
TP_PROTO(struct spi_device *spi, int status),
TP_ARGS(spi, status),
TP_STRUCT__entry(
__field(int, bus_num)
__field(int, chip_select)
__field(unsigned long, mode)
__field(unsigned int, bits_per_word)
__field(unsigned int, max_speed_hz)
__field(int, status)
),
TP_fast_assign(
__entry->bus_num = spi->controller->bus_num;
__entry->chip_select = spi->chip_select;
__entry->mode = spi->mode;
__entry->bits_per_word = spi->bits_per_word;
__entry->max_speed_hz = spi->max_speed_hz;
__entry->status = status;
),
TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d",
__entry->bus_num, __entry->chip_select,
(__entry->mode & SPI_MODE_X_MASK),
(__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "",
(__entry->mode & SPI_3WIRE) ? "3wire, " : "",
(__entry->mode & SPI_LOOP) ? "loopback, " : "",
__entry->bits_per_word, __entry->max_speed_hz,
__entry->status)
);
TRACE_EVENT(spi_set_cs,
TP_PROTO(struct spi_device *spi, bool enable),
TP_ARGS(spi, enable),
TP_STRUCT__entry(
__field(int, bus_num)
__field(int, chip_select)
__field(unsigned long, mode)
__field(bool, enable)
),
TP_fast_assign(
__entry->bus_num = spi->controller->bus_num;
__entry->chip_select = spi->chip_select;
__entry->mode = spi->mode;
__entry->enable = enable;
),
TP_printk("spi%d.%d %s%s",
__entry->bus_num, __entry->chip_select,
__entry->enable ? "activate" : "deactivate",
(__entry->mode & SPI_CS_HIGH) ? ", cs_high" : "")
);
DECLARE_EVENT_CLASS(spi_message,
TP_PROTO(struct spi_message *msg),

View File

@ -61,22 +61,6 @@ static void dump_registers(struct ssp_device *ssp)
pxa_ssp_read_reg(ssp, SSACD));
}
static void pxa_ssp_enable(struct ssp_device *ssp)
{
uint32_t sscr0;
sscr0 = __raw_readl(ssp->mmio_base + SSCR0) | SSCR0_SSE;
__raw_writel(sscr0, ssp->mmio_base + SSCR0);
}
static void pxa_ssp_disable(struct ssp_device *ssp)
{
uint32_t sscr0;
sscr0 = __raw_readl(ssp->mmio_base + SSCR0) & ~SSCR0_SSE;
__raw_writel(sscr0, ssp->mmio_base + SSCR0);
}
static void pxa_ssp_set_dma_params(struct ssp_device *ssp, int width4,
int out, struct snd_dmaengine_dai_dma_data *dma)
{