Merge branch 'spi-5.6' into spi-next
commit 754a36a58c
@ -12,6 +12,7 @@ Required properties:
- clock-names: Should be "clk_apb5".
- pinctrl-names : a pinctrl state named "default" must be defined.
- pinctrl-0 : phandle referencing pin configuration of the device.
- resets : phandle to the reset control for this device.
- cs-gpios: Specifies the gpio pins to be used for chipselects.
            See: Documentation/devicetree/bindings/spi/spi-bus.txt

@ -19,16 +20,6 @@ Optional properties:
- clock-frequency : Input clock frequency to the PSPI block in Hz.
                    Default is 25000000 Hz.

Aliases:
- All the SPI controller nodes should be represented in the aliases node using
  the following format 'spi{n}' with the correct number in the "aliases" node.

Example:

aliases {
	spi0 = &spi0;
};

spi0: spi@f0200000 {
	compatible = "nuvoton,npcm750-pspi";
	reg = <0xf0200000 0x1000>;
@ -39,5 +30,6 @@ spi0: spi@f0200000 {
	interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&clk NPCM7XX_CLK_APB5>;
	clock-names = "clk_apb5";
	resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_PSPI1>;
	cs-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
};
@ -1,62 +0,0 @@
STMicroelectronics STM32 SPI Controller

The STM32 SPI controller is used to communicate with external devices using
the Serial Peripheral Interface. It supports full-duplex, half-duplex and
simplex synchronous serial communication with external devices. It supports
from 4 to 32-bit data size. Although it can be configured as master or slave,
only master is supported by the driver.

Required properties:
- compatible: Should be one of:
  "st,stm32h7-spi"
  "st,stm32f4-spi"
- reg: Offset and length of the device's register set.
- interrupts: Must contain the interrupt id.
- clocks: Must contain an entry for spiclk (which feeds the internal clock
  generator).
- #address-cells: Number of cells required to define a chip select address.
- #size-cells: Should be zero.

Optional properties:
- resets: Must contain the phandle to the reset controller.
- A pinctrl state named "default" may be defined to set pins in mode of
  operation for SPI transfer.
- dmas: DMA specifiers for tx and rx dma. DMA fifo mode must be used. See the
  STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
- dma-names: DMA request names should include "tx" and "rx" if present.
- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
  Documentation/devicetree/bindings/spi/spi-bus.txt


Child nodes represent devices on the SPI bus
See ../spi/spi-bus.txt

Optional properties:
- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time
		  delay in nanoseconds inserted between two consecutive data
		  frames.


Example:
	spi2: spi@40003800 {
		#address-cells = <1>;
		#size-cells = <0>;
		compatible = "st,stm32h7-spi";
		reg = <0x40003800 0x400>;
		interrupts = <36>;
		clocks = <&rcc SPI2_CK>;
		resets = <&rcc 1166>;
		dmas = <&dmamux1 0 39 0x400 0x01>,
		       <&dmamux1 1 40 0x400 0x01>;
		dma-names = "rx", "tx";
		pinctrl-0 = <&spi2_pins_b>;
		pinctrl-names = "default";
		cs-gpios = <&gpioa 11 0>;

		aardvark@0 {
			compatible = "totalphase,aardvark";
			reg = <0>;
			spi-max-frequency = <4000000>;
			st,spi-midi-ns = <4000>;
		};
	};
@ -1,7 +1,7 @@
Atmel SPI device

Required properties:
- compatible : should be "atmel,at91rm9200-spi".
- compatible : should be "atmel,at91rm9200-spi" or "microchip,sam9x60-spi".
- reg: Address and length of the register set for the device
- interrupts: Should contain spi interrupt
- cs-gpios: chipselects (optional for SPI controller version >= 2 with the

Documentation/devicetree/bindings/spi/st,stm32-spi.yaml (new file, 105 lines)
@ -0,0 +1,105 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/st,stm32-spi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: STMicroelectronics STM32 SPI Controller bindings

description: |
  The STM32 SPI controller is used to communicate with external devices using
  the Serial Peripheral Interface. It supports full-duplex, half-duplex and
  simplex synchronous serial communication with external devices. It supports
  from 4 to 32-bit data size.

maintainers:
  - Erwan Leray <erwan.leray@st.com>
  - Fabrice Gasnier <fabrice.gasnier@st.com>

allOf:
  - $ref: "spi-controller.yaml#"
  - if:
      properties:
        compatible:
          contains:
            const: st,stm32f4-spi

    then:
      properties:
        st,spi-midi-ns: false

properties:
  compatible:
    enum:
      - st,stm32f4-spi
      - st,stm32h7-spi

  reg:
    maxItems: 1

  clocks:
    maxItems: 1

  interrupts:
    maxItems: 1

  resets:
    maxItems: 1

  dmas:
    description: |
      DMA specifiers for tx and rx dma. DMA fifo mode must be used. See
      the STM32 DMA bindings Documentation/devicetree/bindings/dma/stm32-dma.txt.
    items:
      - description: rx DMA channel
      - description: tx DMA channel

  dma-names:
    items:
      - const: rx
      - const: tx

patternProperties:
  "^[a-zA-Z][a-zA-Z0-9,+\\-._]{0,63}@[0-9a-f]+$":
    type: object
    # SPI slave nodes must be children of the SPI master node and can
    # contain the following properties.
    properties:
      st,spi-midi-ns:
        description: |
          Only for STM32H7, (Master Inter-Data Idleness) minimum time
          delay in nanoseconds inserted between two consecutive data frames.

required:
  - compatible
  - reg
  - clocks
  - interrupts

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/clock/stm32mp1-clks.h>
    #include <dt-bindings/reset/stm32mp1-resets.h>
    spi@4000b000 {
      #address-cells = <1>;
      #size-cells = <0>;
      compatible = "st,stm32h7-spi";
      reg = <0x4000b000 0x400>;
      interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
      clocks = <&rcc SPI2_K>;
      resets = <&rcc SPI2_R>;
      dmas = <&dmamux1 0 39 0x400 0x05>,
             <&dmamux1 1 40 0x400 0x05>;
      dma-names = "rx", "tx";
      cs-gpios = <&gpioa 11 0>;

      aardvark@0 {
        compatible = "totalphase,aardvark";
        reg = <0>;
        spi-max-frequency = <4000000>;
        st,spi-midi-ns = <4000>;
      };
    };

...
@ -7498,6 +7498,12 @@ S:	Supported
F:	drivers/scsi/hisi_sas/
F:	Documentation/devicetree/bindings/scsi/hisilicon-sas.txt

HISILICON V3XX SPI NOR FLASH Controller Driver
M:	John Garry <john.garry@huawei.com>
W:	http://www.hisilicon.com
S:	Maintained
F:	drivers/spi/spi-hisi-sfc-v3xx.c

HISILICON QM AND ZIP Controller DRIVER
M:	Zhou Wang <wangzhou1@hisilicon.com>
L:	linux-crypto@vger.kernel.org
@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI
	  This controller does not support generic SPI messages. It only
	  supports the high-level SPI memory interface.

config SPI_HISI_SFC_V3XX
	tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
	depends on (ARM64 && ACPI) || COMPILE_TEST
	depends on HAS_IOMEM
	select MTD_SPI_NOR
	help
	  This enables support for HiSilicon v3xx SPI-NOR flash controller
	  found in hi16xx chipsets.

config SPI_NXP_FLEXSPI
	tristate "NXP Flex SPI controller"
	depends on ARCH_LAYERSCAPE || HAS_IOMEM
@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master,
|
||||
master->dma_tx = dma_request_chan(dev, "tx");
|
||||
if (IS_ERR(master->dma_tx)) {
|
||||
err = PTR_ERR(master->dma_tx);
|
||||
if (err == -EPROBE_DEFER) {
|
||||
dev_warn(dev, "no DMA channel available at the moment\n");
|
||||
goto error_clear;
|
||||
}
|
||||
dev_err(dev,
|
||||
"DMA TX channel not available, SPI unable to use DMA\n");
|
||||
err = -EBUSY;
|
||||
if (err != -EPROBE_DEFER)
|
||||
dev_err(dev, "No TX DMA channel, DMA is disabled\n");
|
||||
goto error_clear;
|
||||
}
|
||||
|
||||
/*
|
||||
* No reason to check EPROBE_DEFER here since we have already requested
|
||||
* tx channel. If it fails here, it's for another reason.
|
||||
*/
|
||||
master->dma_rx = dma_request_slave_channel(dev, "rx");
|
||||
|
||||
if (!master->dma_rx) {
|
||||
dev_err(dev,
|
||||
"DMA RX channel not available, SPI unable to use DMA\n");
|
||||
err = -EBUSY;
|
||||
master->dma_rx = dma_request_chan(dev, "rx");
|
||||
if (IS_ERR(master->dma_rx)) {
|
||||
err = PTR_ERR(master->dma_rx);
|
||||
/*
|
||||
* No reason to check EPROBE_DEFER here since we have already
|
||||
* requested tx channel.
|
||||
*/
|
||||
dev_err(dev, "No RX DMA channel, DMA is disabled\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
|
||||
|
||||
return 0;
|
||||
error:
|
||||
if (master->dma_rx)
|
||||
if (!IS_ERR(master->dma_rx))
|
||||
dma_release_channel(master->dma_rx);
|
||||
if (!IS_ERR(master->dma_tx))
|
||||
dma_release_channel(master->dma_tx);
|
||||
|
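For reference, the DMA-channel conversions in this series follow a common pattern: dma_request_chan() returns an ERR_PTR, so probe code can defer on -EPROBE_DEFER and otherwise fall back to PIO. A minimal sketch of that pattern follows; the "foo" names are placeholders and not taken from any of the drivers in this merge.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical helper illustrating the dma_request_chan() error handling
 * used throughout this series; "foo" names are placeholders.
 */
static int foo_request_tx_dma(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "tx");
	if (IS_ERR(*chan)) {
		int err = PTR_ERR(*chan);

		*chan = NULL;
		/* DMA provider not probed yet: retry the whole probe later. */
		if (err == -EPROBE_DEFER)
			return err;
		/* Any other failure: warn and fall back to PIO. */
		dev_warn(dev, "no TX DMA channel, falling back to PIO\n");
	}
	return 0;
}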
@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
|
||||
name = qspi_irq_tab[val].irq_name;
|
||||
if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
|
||||
/* get the l2 interrupts */
|
||||
irq = platform_get_irq_byname(pdev, name);
|
||||
irq = platform_get_irq_byname_optional(pdev, name);
|
||||
} else if (!num_ints && soc_intc) {
|
||||
/* all mspi, bspi intrs muxed to one L1 intr */
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
|
@ -68,7 +68,7 @@
|
||||
#define BCM2835_SPI_FIFO_SIZE 64
|
||||
#define BCM2835_SPI_FIFO_SIZE_3_4 48
|
||||
#define BCM2835_SPI_DMA_MIN_LENGTH 96
|
||||
#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */
|
||||
#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
|
||||
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
|
||||
| SPI_NO_CS | SPI_3WIRE)
|
||||
|
||||
@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
|
||||
}
|
||||
}
|
||||
|
||||
static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
|
||||
struct bcm2835_spi *bs)
|
||||
static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
|
||||
struct bcm2835_spi *bs)
|
||||
{
|
||||
struct dma_slave_config slave_config;
|
||||
const __be32 *addr;
|
||||
@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
|
||||
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
|
||||
if (!addr) {
|
||||
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
|
||||
goto err;
|
||||
/* Fall back to interrupt mode */
|
||||
return 0;
|
||||
}
|
||||
dma_reg_base = be32_to_cpup(addr);
|
||||
|
||||
/* get tx/rx dma */
|
||||
ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
|
||||
if (!ctlr->dma_tx) {
|
||||
ctlr->dma_tx = dma_request_chan(dev, "tx");
|
||||
if (IS_ERR(ctlr->dma_tx)) {
|
||||
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
|
||||
ret = PTR_ERR(ctlr->dma_tx);
|
||||
ctlr->dma_tx = NULL;
|
||||
goto err;
|
||||
}
|
||||
ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
|
||||
if (!ctlr->dma_rx) {
|
||||
ctlr->dma_rx = dma_request_chan(dev, "rx");
|
||||
if (IS_ERR(ctlr->dma_rx)) {
|
||||
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
|
||||
ret = PTR_ERR(ctlr->dma_rx);
|
||||
ctlr->dma_rx = NULL;
|
||||
goto err_release;
|
||||
}
|
||||
|
||||
@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
|
||||
/* all went well, so set can_dma */
|
||||
ctlr->can_dma = bcm2835_spi_can_dma;
|
||||
|
||||
return;
|
||||
return 0;
|
||||
|
||||
err_config:
|
||||
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
|
||||
@ -1005,7 +1010,14 @@ err_config:
|
||||
err_release:
|
||||
bcm2835_dma_release(ctlr, bs);
|
||||
err:
|
||||
return;
|
||||
/*
|
||||
* Only report error for deferred probing, otherwise fall back to
|
||||
* interrupt mode
|
||||
*/
|
||||
if (ret != -EPROBE_DEFER)
|
||||
ret = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
|
||||
@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
|
||||
bs->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(bs->clk)) {
|
||||
err = PTR_ERR(bs->clk);
|
||||
dev_err(&pdev->dev, "could not get clk: %d\n", err);
|
||||
if (err == -EPROBE_DEFER)
|
||||
dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
|
||||
else
|
||||
dev_err(&pdev->dev, "could not get clk: %d\n", err);
|
||||
goto out_controller_put;
|
||||
}
|
||||
|
||||
@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
|
||||
|
||||
clk_prepare_enable(bs->clk);
|
||||
|
||||
bcm2835_dma_init(ctlr, &pdev->dev, bs);
|
||||
err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
|
||||
if (err)
|
||||
goto out_clk_disable;
|
||||
|
||||
/* initialise the hardware with the default polarities */
|
||||
bcm2835_wr(bs, BCM2835_SPI_CS,
|
||||
@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
|
||||
dev_name(&pdev->dev), ctlr);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
|
||||
goto out_clk_disable;
|
||||
goto out_dma_release;
|
||||
}
|
||||
|
||||
err = devm_spi_register_controller(&pdev->dev, ctlr);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
|
||||
err);
|
||||
goto out_clk_disable;
|
||||
goto out_dma_release;
|
||||
}
|
||||
|
||||
bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
|
||||
|
||||
return 0;
|
||||
|
||||
out_dma_release:
|
||||
bcm2835_dma_release(ctlr, bs);
|
||||
out_clk_disable:
|
||||
clk_disable_unprepare(bs->clk);
|
||||
out_controller_put:
|
||||
|
@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
|
||||
int spi_bitbang_init(struct spi_bitbang *bitbang)
|
||||
{
|
||||
struct spi_master *master = bitbang->master;
|
||||
bool custom_cs;
|
||||
|
||||
if (!master || !bitbang->chipselect)
|
||||
if (!master)
|
||||
return -EINVAL;
|
||||
/*
|
||||
* We only need the chipselect callback if we are actually using it.
|
||||
* If we just use GPIO descriptors, it is surplus. If the
|
||||
* SPI_MASTER_GPIO_SS flag is set, we always need to call the
|
||||
* driver-specific chipselect routine.
|
||||
*/
|
||||
custom_cs = (!master->use_gpio_descriptors ||
|
||||
(master->flags & SPI_MASTER_GPIO_SS));
|
||||
|
||||
if (custom_cs && !bitbang->chipselect)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_init(&bitbang->lock);
|
||||
@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
|
||||
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
|
||||
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
|
||||
master->transfer_one = spi_bitbang_transfer_one;
|
||||
master->set_cs = spi_bitbang_set_cs;
|
||||
/*
|
||||
* When using GPIO descriptors, the ->set_cs() callback doesn't even
|
||||
* get called unless SPI_MASTER_GPIO_SS is set.
|
||||
*/
|
||||
if (custom_cs)
|
||||
master->set_cs = spi_bitbang_set_cs;
|
||||
|
||||
if (!bitbang->txrx_bufs) {
|
||||
bitbang->use_dma = 0;
|
||||
|
@ -472,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
|
||||
struct spi_controller *master;
|
||||
int ret;
|
||||
|
||||
BUG_ON(dws == NULL);
|
||||
if (!dws)
|
||||
return -EINVAL;
|
||||
|
||||
master = spi_alloc_master(dev, 0);
|
||||
if (!master)
|
||||
|
@ -396,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
|
||||
if (!dma)
|
||||
return -ENOMEM;
|
||||
|
||||
dma->chan_rx = dma_request_slave_channel(dev, "rx");
|
||||
if (!dma->chan_rx) {
|
||||
dma->chan_rx = dma_request_chan(dev, "rx");
|
||||
if (IS_ERR(dma->chan_rx)) {
|
||||
dev_err(dev, "rx dma channel not available\n");
|
||||
ret = -ENODEV;
|
||||
ret = PTR_ERR(dma->chan_rx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dma->chan_tx = dma_request_slave_channel(dev, "tx");
|
||||
if (!dma->chan_tx) {
|
||||
dma->chan_tx = dma_request_chan(dev, "tx");
|
||||
if (IS_ERR(dma->chan_tx)) {
|
||||
dev_err(dev, "tx dma channel not available\n");
|
||||
ret = -ENODEV;
|
||||
ret = PTR_ERR(dma->chan_tx);
|
||||
goto err_tx_channel;
|
||||
}
|
||||
|
||||
|
@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
|
||||
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
|
||||
|
||||
if (fsl_lpspi_can_dma(controller, spi, t))
|
||||
fsl_lpspi->usedma = 1;
|
||||
fsl_lpspi->usedma = true;
|
||||
else
|
||||
fsl_lpspi->usedma = 0;
|
||||
fsl_lpspi->usedma = false;
|
||||
|
||||
return fsl_lpspi_config(fsl_lpspi);
|
||||
}
|
||||
@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
|
||||
fsl_lpspi->dev = &pdev->dev;
|
||||
fsl_lpspi->is_slave = is_slave;
|
||||
|
||||
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
|
||||
controller->transfer_one = fsl_lpspi_transfer_one;
|
||||
controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
|
||||
controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
|
||||
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
|
||||
controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
|
||||
controller->dev.of_node = pdev->dev.of_node;
|
||||
controller->bus_num = pdev->id;
|
||||
controller->slave_abort = fsl_lpspi_slave_abort;
|
||||
|
||||
ret = devm_spi_register_controller(&pdev->dev, controller);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "spi_register_controller error.\n");
|
||||
goto out_controller_put;
|
||||
}
|
||||
|
||||
if (!fsl_lpspi->is_slave) {
|
||||
for (i = 0; i < controller->num_chipselect; i++) {
|
||||
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
|
||||
@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
|
||||
controller->prepare_message = fsl_lpspi_prepare_message;
|
||||
}
|
||||
|
||||
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
|
||||
controller->transfer_one = fsl_lpspi_transfer_one;
|
||||
controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
|
||||
controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
|
||||
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
|
||||
controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
|
||||
controller->dev.of_node = pdev->dev.of_node;
|
||||
controller->bus_num = pdev->id;
|
||||
controller->slave_abort = fsl_lpspi_slave_abort;
|
||||
|
||||
init_completion(&fsl_lpspi->xfer_done);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
|
||||
if (ret < 0)
|
||||
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
|
||||
|
||||
ret = devm_spi_register_controller(&pdev->dev, controller);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "spi_register_controller error.\n");
|
||||
goto out_controller_put;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_controller_put:
|
||||
|
@ -706,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
|
||||
struct device_node *np = ofdev->dev.of_node;
|
||||
struct spi_master *master;
|
||||
struct resource mem;
|
||||
int irq = 0, type;
|
||||
int ret = -ENOMEM;
|
||||
int irq, type;
|
||||
int ret;
|
||||
|
||||
ret = of_mpc8xxx_spi_probe(ofdev);
|
||||
if (ret)
|
||||
@ -722,10 +722,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
|
||||
|
||||
if (spisel_boot) {
|
||||
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
|
||||
if (!pinfo->immr_spi_cs) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
if (!pinfo->immr_spi_cs)
|
||||
return -ENOMEM;
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
@ -744,24 +742,15 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
|
||||
|
||||
ret = of_address_to_resource(np, 0, &mem);
|
||||
if (ret)
|
||||
goto err;
|
||||
return ret;
|
||||
|
||||
irq = platform_get_irq(ofdev, 0);
|
||||
if (irq < 0) {
|
||||
ret = irq;
|
||||
goto err;
|
||||
}
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
master = fsl_spi_probe(dev, &mem, irq);
|
||||
if (IS_ERR(master)) {
|
||||
ret = PTR_ERR(master);
|
||||
goto err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
return ret;
|
||||
return PTR_ERR_OR_ZERO(master);
|
||||
}
|
||||
|
||||
static int of_fsl_spi_remove(struct platform_device *ofdev)
|
||||
|
drivers/spi/spi-hisi-sfc-v3xx.c (new file, 284 lines)
@ -0,0 +1,284 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
//
|
||||
// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
|
||||
//
|
||||
// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
|
||||
// Author: John Garry <john.garry@huawei.com>
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/spi/spi-mem.h>
|
||||
|
||||
#define HISI_SFC_V3XX_VERSION (0x1f8)
|
||||
|
||||
#define HISI_SFC_V3XX_CMD_CFG (0x300)
|
||||
#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
|
||||
#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
|
||||
#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
|
||||
#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
|
||||
#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
|
||||
#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
|
||||
#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
|
||||
#define HISI_SFC_V3XX_CMD_INS (0x308)
|
||||
#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
|
||||
#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
|
||||
|
||||
struct hisi_sfc_v3xx_host {
|
||||
struct device *dev;
|
||||
void __iomem *regbase;
|
||||
int max_cmd_dword;
|
||||
};
|
||||
|
||||
#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
|
||||
#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
|
||||
|
||||
static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
|
||||
!(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
|
||||
HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
|
||||
HISI_SFC_V3XX_WAIT_TIMEOUT_US);
|
||||
}
|
||||
|
||||
static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
|
||||
struct spi_mem_op *op)
|
||||
{
|
||||
struct spi_device *spi = mem->spi;
|
||||
struct hisi_sfc_v3xx_host *host;
|
||||
uintptr_t addr = (uintptr_t)op->data.buf.in;
|
||||
int max_byte_count;
|
||||
|
||||
host = spi_controller_get_devdata(spi->master);
|
||||
|
||||
max_byte_count = host->max_cmd_dword * 4;
|
||||
|
||||
if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
|
||||
op->data.nbytes = 4 - (addr % 4);
|
||||
else if (op->data.nbytes > max_byte_count)
|
||||
op->data.nbytes = max_byte_count;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
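A hedged walk-through of the size adjustment above, using hypothetical values that are not taken from the patch:

/*
 * Example (hypothetical values): a 100-byte read into a buffer at an
 * address ending in ...2 is first trimmed to 4 - (2 % 4) = 2 bytes, so
 * the remainder of the transfer starts 32-bit aligned; later chunks are
 * only clamped to max_cmd_dword * 4 bytes (16 * 4 = 64 by default,
 * 64 * 4 = 256 on hardware version 0x351).
 */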
/*
|
||||
 * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for the
 * DATABUF registers - so use __io{read,write}32_copy when possible. For
|
||||
* trailing bytes, copy them byte-by-byte from the DATABUF register, as we
|
||||
* can't clobber outside the source/dest buffer.
|
||||
*
|
||||
* For efficient data read/write, we try to put any start 32b unaligned data
|
||||
* into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
|
||||
*/
|
||||
static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
|
||||
u8 *to, unsigned int len)
|
||||
{
|
||||
void __iomem *from;
|
||||
int i;
|
||||
|
||||
from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
|
||||
|
||||
if (IS_ALIGNED((uintptr_t)to, 4)) {
|
||||
int words = len / 4;
|
||||
|
||||
__ioread32_copy(to, from, words);
|
||||
|
||||
len -= words * 4;
|
||||
if (len) {
|
||||
u32 val;
|
||||
|
||||
to += words * 4;
|
||||
from += words * 4;
|
||||
|
||||
val = __raw_readl(from);
|
||||
|
||||
for (i = 0; i < len; i++, val >>= 8, to++)
|
||||
*to = (u8)val;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
|
||||
u32 val = __raw_readl(from);
|
||||
int j;
|
||||
|
||||
for (j = 0; j < 4 && (j + (i * 4) < len);
|
||||
to++, val >>= 8, j++)
|
||||
*to = (u8)val;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
|
||||
const u8 *from, unsigned int len)
|
||||
{
|
||||
void __iomem *to;
|
||||
int i;
|
||||
|
||||
to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
|
||||
|
||||
if (IS_ALIGNED((uintptr_t)from, 4)) {
|
||||
int words = len / 4;
|
||||
|
||||
__iowrite32_copy(to, from, words);
|
||||
|
||||
len -= words * 4;
|
||||
if (len) {
|
||||
u32 val = 0;
|
||||
|
||||
to += words * 4;
|
||||
from += words * 4;
|
||||
|
||||
for (i = 0; i < len; i++, from++)
|
||||
val |= *from << i * 8;
|
||||
__raw_writel(val, to);
|
||||
}
|
||||
|
||||
} else {
|
||||
for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
|
||||
u32 val = 0;
|
||||
int j;
|
||||
|
||||
for (j = 0; j < 4 && (j + (i * 4) < len);
|
||||
from++, j++)
|
||||
val |= *from << j * 8;
|
||||
__raw_writel(val, to);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
|
||||
const struct spi_mem_op *op,
|
||||
u8 chip_select)
|
||||
{
|
||||
int ret, len = op->data.nbytes;
|
||||
u32 config = 0;
|
||||
|
||||
if (op->addr.nbytes)
|
||||
config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
|
||||
|
||||
if (op->data.dir != SPI_MEM_NO_DATA) {
|
||||
config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
|
||||
config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
|
||||
}
|
||||
|
||||
if (op->data.dir == SPI_MEM_DATA_OUT)
|
||||
hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
|
||||
else if (op->data.dir == SPI_MEM_DATA_IN)
|
||||
config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
|
||||
|
||||
config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
|
||||
chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
|
||||
HISI_SFC_V3XX_CMD_CFG_START_MSK;
|
||||
|
||||
writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
|
||||
writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
|
||||
|
||||
writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
|
||||
|
||||
ret = hisi_sfc_v3xx_wait_cmd_idle(host);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (op->data.dir == SPI_MEM_DATA_IN)
|
||||
hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
|
||||
const struct spi_mem_op *op)
|
||||
{
|
||||
struct hisi_sfc_v3xx_host *host;
|
||||
struct spi_device *spi = mem->spi;
|
||||
u8 chip_select = spi->chip_select;
|
||||
|
||||
host = spi_controller_get_devdata(spi->master);
|
||||
|
||||
return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
|
||||
}
|
||||
|
||||
static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
|
||||
.adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
|
||||
.exec_op = hisi_sfc_v3xx_exec_op,
|
||||
};
|
||||
|
||||
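For context only, a sketch (not part of the driver) of the kind of operation the spi-mem core hands to the exec_op callback registered above; the JEDEC READ ID opcode 0x9f and the helper name are illustrative assumptions.

#include <linux/spi/spi-mem.h>

/* Illustrative helper: a 3-byte ID read described as a spi_mem_op. The
 * cmd/addr/dummy/data phases map onto CMD_INS, CMD_ADDR, the DUMMY_CNT
 * field and DATABUF0 in hisi_sfc_v3xx_generic_exec_op() above.
 */
static int example_read_id(struct spi_mem *mem, u8 *id)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(3, id, 1));

	return spi_mem_exec_op(mem, &op);
}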
static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct hisi_sfc_v3xx_host *host;
|
||||
struct spi_controller *ctlr;
|
||||
u32 version;
|
||||
int ret;
|
||||
|
||||
ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
|
||||
if (!ctlr)
|
||||
return -ENOMEM;
|
||||
|
||||
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
|
||||
SPI_TX_DUAL | SPI_TX_QUAD;
|
||||
|
||||
host = spi_controller_get_devdata(ctlr);
|
||||
host->dev = dev;
|
||||
|
||||
platform_set_drvdata(pdev, host);
|
||||
|
||||
host->regbase = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(host->regbase)) {
|
||||
ret = PTR_ERR(host->regbase);
|
||||
goto err_put_master;
|
||||
}
|
||||
|
||||
ctlr->bus_num = -1;
|
||||
ctlr->num_chipselect = 1;
|
||||
ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
|
||||
|
||||
version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
|
||||
|
||||
switch (version) {
|
||||
case 0x351:
|
||||
host->max_cmd_dword = 64;
|
||||
break;
|
||||
default:
|
||||
host->max_cmd_dword = 16;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = devm_spi_register_controller(dev, ctlr);
|
||||
if (ret)
|
||||
goto err_put_master;
|
||||
|
||||
dev_info(&pdev->dev, "hw version 0x%x\n", version);
|
||||
|
||||
return 0;
|
||||
|
||||
err_put_master:
|
||||
spi_master_put(ctlr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
|
||||
{"HISI0341", 0},
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
|
||||
#endif
|
||||
|
||||
static struct platform_driver hisi_sfc_v3xx_spi_driver = {
|
||||
.driver = {
|
||||
.name = "hisi-sfc-v3xx",
|
||||
.acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
|
||||
},
|
||||
.probe = hisi_sfc_v3xx_probe,
|
||||
};
|
||||
|
||||
module_platform_driver(hisi_sfc_v3xx_spi_driver);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
|
||||
MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
|
@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev)
|
||||
master->unprepare_message = img_spfi_unprepare;
|
||||
master->handle_err = img_spfi_handle_err;
|
||||
|
||||
spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
|
||||
spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
|
||||
spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
|
||||
if (IS_ERR(spfi->tx_ch)) {
|
||||
ret = PTR_ERR(spfi->tx_ch);
|
||||
spfi->tx_ch = NULL;
|
||||
if (ret == -EPROBE_DEFER)
|
||||
goto disable_pm;
|
||||
}
|
||||
|
||||
spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
|
||||
if (IS_ERR(spfi->rx_ch)) {
|
||||
ret = PTR_ERR(spfi->rx_ch);
|
||||
spfi->rx_ch = NULL;
|
||||
if (ret == -EPROBE_DEFER)
|
||||
goto disable_pm;
|
||||
}
|
||||
|
||||
if (!spfi->tx_ch || !spfi->rx_ch) {
|
||||
if (spfi->tx_ch)
|
||||
dma_release_channel(spfi->tx_ch);
|
||||
|
@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi,
|
||||
}
|
||||
|
||||
if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
|
||||
spi_imx->usedma = 1;
|
||||
spi_imx->usedma = true;
|
||||
else
|
||||
spi_imx->usedma = 0;
|
||||
spi_imx->usedma = false;
|
||||
|
||||
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
|
||||
spi_imx->rx = mx53_ecspi_rx_slave;
|
||||
|
@ -19,7 +19,6 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/gpio.h>
|
||||
|
||||
/*
|
||||
* The Meson SPICC controller could support DMA based transfers, but is not
|
||||
@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
|
||||
|
||||
static int meson_spicc_setup(struct spi_device *spi)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!spi->controller_state)
|
||||
spi->controller_state = spi_master_get_devdata(spi->master);
|
||||
else if (gpio_is_valid(spi->cs_gpio))
|
||||
goto out_gpio;
|
||||
else if (spi->cs_gpio == -ENOENT)
|
||||
return 0;
|
||||
|
||||
if (gpio_is_valid(spi->cs_gpio)) {
|
||||
ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
|
||||
if (ret) {
|
||||
dev_err(&spi->dev, "failed to request cs gpio\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
out_gpio:
|
||||
ret = gpio_direction_output(spi->cs_gpio,
|
||||
!(spi->mode & SPI_CS_HIGH));
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void meson_spicc_cleanup(struct spi_device *spi)
|
||||
{
|
||||
if (gpio_is_valid(spi->cs_gpio))
|
||||
gpio_free(spi->cs_gpio);
|
||||
|
||||
spi->controller_state = NULL;
|
||||
}
|
||||
|
||||
@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
|
||||
master->prepare_message = meson_spicc_prepare_message;
|
||||
master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
|
||||
master->transfer_one = meson_spicc_transfer_one;
|
||||
master->use_gpio_descriptors = true;
|
||||
|
||||
/* Setup max rate according to the Meson GX datasheet */
|
||||
if ((rate >> 2) > SPICC_MAX_FREQ)
|
||||
|
@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
goto out_master_free;
|
||||
|
||||
ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
|
||||
if (!ssp->dmach) {
|
||||
ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
|
||||
if (IS_ERR(ssp->dmach)) {
|
||||
dev_err(ssp->dev, "Failed to request DMA\n");
|
||||
ret = -ENODEV;
|
||||
ret = PTR_ERR(ssp->dmach);
|
||||
goto out_master_free;
|
||||
}
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/reset.h>
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
@ -20,7 +21,7 @@
|
||||
|
||||
struct npcm_pspi {
|
||||
struct completion xfer_done;
|
||||
struct regmap *rst_regmap;
|
||||
struct reset_control *reset;
|
||||
struct spi_master *master;
|
||||
unsigned int tx_bytes;
|
||||
unsigned int rx_bytes;
|
||||
@ -59,12 +60,6 @@ struct npcm_pspi {
|
||||
#define NPCM_PSPI_MIN_CLK_DIVIDER 4
|
||||
#define NPCM_PSPI_DEFAULT_CLK 25000000
|
||||
|
||||
/* reset register */
|
||||
#define NPCM7XX_IPSRST2_OFFSET 0x24
|
||||
|
||||
#define NPCM7XX_PSPI1_RESET BIT(22)
|
||||
#define NPCM7XX_PSPI2_RESET BIT(23)
|
||||
|
||||
static inline unsigned int bytes_per_word(unsigned int bits)
|
||||
{
|
||||
return bits <= 8 ? 1 : 2;
|
||||
@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
|
||||
priv->mode = spi->mode;
|
||||
}
|
||||
|
||||
/*
|
||||
* If transfer is even length, and 8 bits per word transfer,
|
||||
* then implement 16 bits-per-word transfer.
|
||||
*/
|
||||
if (priv->bits_per_word == 8 && !(t->len & 0x1))
|
||||
t->bits_per_word = 16;
|
||||
|
||||
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
|
||||
npcm_pspi_set_transfer_size(priv, t->bits_per_word);
|
||||
priv->bits_per_word = t->bits_per_word;
|
||||
@ -286,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
|
||||
|
||||
static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
|
||||
{
|
||||
regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
|
||||
NPCM7XX_PSPI1_RESET << priv->id);
|
||||
regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
|
||||
reset_control_assert(priv->reset);
|
||||
udelay(5);
|
||||
reset_control_deassert(priv->reset);
|
||||
}
|
||||
|
||||
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
|
||||
@ -352,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
||||
if (num_cs < 0)
|
||||
return num_cs;
|
||||
|
||||
pdev->id = of_alias_get_id(np, "spi");
|
||||
if (pdev->id < 0)
|
||||
pdev->id = 0;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
|
||||
if (!master)
|
||||
return -ENOMEM;
|
||||
@ -365,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
||||
priv = spi_master_get_devdata(master);
|
||||
priv->master = master;
|
||||
priv->is_save_param = false;
|
||||
priv->id = pdev->id;
|
||||
|
||||
priv->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(priv->base)) {
|
||||
@ -390,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
||||
goto out_disable_clk;
|
||||
}
|
||||
|
||||
priv->rst_regmap =
|
||||
syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
|
||||
if (IS_ERR(priv->rst_regmap)) {
|
||||
dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
|
||||
return PTR_ERR(priv->rst_regmap);
|
||||
priv->reset = devm_reset_control_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(priv->reset)) {
|
||||
ret = PTR_ERR(priv->reset);
|
||||
goto out_disable_clk;
|
||||
}
|
||||
|
||||
/* reset SPI-HW block */
|
||||
@ -415,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
||||
master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
|
||||
master->mode_bits = SPI_CPHA | SPI_CPOL;
|
||||
master->dev.of_node = pdev->dev.of_node;
|
||||
master->bus_num = pdev->id;
|
||||
master->bus_num = -1;
|
||||
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
|
||||
master->transfer_one = npcm_pspi_transfer_one;
|
||||
master->prepare_transfer_hardware =
|
||||
@ -448,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
goto out_disable_clk;
|
||||
|
||||
pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
|
||||
pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -20,7 +20,6 @@
|
||||
#include <linux/spi/spi_bitbang.h>
|
||||
#include <linux/spi/spi_oc_tiny.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#define DRV_NAME "spi_oc_tiny"
|
||||
@ -50,8 +49,6 @@ struct tiny_spi {
|
||||
unsigned int txc, rxc;
|
||||
const u8 *txp;
|
||||
u8 *rxp;
|
||||
int gpio_cs_count;
|
||||
int *gpio_cs;
|
||||
};
|
||||
|
||||
static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
|
||||
@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
|
||||
return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
|
||||
}
|
||||
|
||||
static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
|
||||
{
|
||||
struct tiny_spi *hw = tiny_spi_to_hw(spi);
|
||||
|
||||
if (hw->gpio_cs_count > 0) {
|
||||
gpio_set_value(hw->gpio_cs[spi->chip_select],
|
||||
(spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
|
||||
}
|
||||
}
|
||||
|
||||
static int tiny_spi_setup_transfer(struct spi_device *spi,
|
||||
struct spi_transfer *t)
|
||||
{
|
||||
@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct tiny_spi *hw = platform_get_drvdata(pdev);
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
unsigned int i;
|
||||
u32 val;
|
||||
|
||||
if (!np)
|
||||
return 0;
|
||||
hw->gpio_cs_count = of_gpio_count(np);
|
||||
if (hw->gpio_cs_count > 0) {
|
||||
hw->gpio_cs = devm_kcalloc(&pdev->dev,
|
||||
hw->gpio_cs_count, sizeof(unsigned int),
|
||||
GFP_KERNEL);
|
||||
if (!hw->gpio_cs)
|
||||
return -ENOMEM;
|
||||
}
|
||||
for (i = 0; i < hw->gpio_cs_count; i++) {
|
||||
hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
|
||||
if (hw->gpio_cs[i] < 0)
|
||||
return -ENODEV;
|
||||
}
|
||||
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
|
||||
if (!of_property_read_u32(np, "clock-frequency", &val))
|
||||
hw->freq = val;
|
||||
@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
||||
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
|
||||
struct tiny_spi *hw;
|
||||
struct spi_master *master;
|
||||
unsigned int i;
|
||||
int err = -ENODEV;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
|
||||
@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
||||
|
||||
/* setup the master state. */
|
||||
master->bus_num = pdev->id;
|
||||
master->num_chipselect = 255;
|
||||
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
|
||||
master->setup = tiny_spi_setup;
|
||||
master->use_gpio_descriptors = true;
|
||||
|
||||
hw = spi_master_get_devdata(master);
|
||||
platform_set_drvdata(pdev, hw);
|
||||
@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
||||
/* setup the state for the bitbang driver */
|
||||
hw->bitbang.master = master;
|
||||
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
|
||||
hw->bitbang.chipselect = tiny_spi_chipselect;
|
||||
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
|
||||
|
||||
/* find and map our resources */
|
||||
@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
||||
}
|
||||
/* find platform data */
|
||||
if (platp) {
|
||||
hw->gpio_cs_count = platp->gpio_cs_count;
|
||||
hw->gpio_cs = platp->gpio_cs;
|
||||
if (platp->gpio_cs_count && !platp->gpio_cs) {
|
||||
err = -EBUSY;
|
||||
goto exit;
|
||||
}
|
||||
hw->freq = platp->freq;
|
||||
hw->baudwidth = platp->baudwidth;
|
||||
} else {
|
||||
@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
||||
if (err)
|
||||
goto exit;
|
||||
}
|
||||
for (i = 0; i < hw->gpio_cs_count; i++) {
|
||||
err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
|
||||
if (err)
|
||||
goto exit_gpio;
|
||||
gpio_direction_output(hw->gpio_cs[i], 1);
|
||||
}
|
||||
hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
|
||||
|
||||
/* register our spi controller */
|
||||
err = spi_bitbang_start(&hw->bitbang);
|
||||
@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
||||
|
||||
return 0;
|
||||
|
||||
exit_gpio:
|
||||
while (i-- > 0)
|
||||
gpio_free(hw->gpio_cs[i]);
|
||||
exit:
|
||||
spi_master_put(master);
|
||||
return err;
|
||||
@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct tiny_spi *hw = platform_get_drvdata(pdev);
|
||||
struct spi_master *master = hw->bitbang.master;
|
||||
unsigned int i;
|
||||
|
||||
spi_bitbang_stop(&hw->bitbang);
|
||||
for (i = 0; i < hw->gpio_cs_count; i++)
|
||||
gpio_free(hw->gpio_cs[i]);
|
||||
spi_master_put(master);
|
||||
return 0;
|
||||
}
|
||||
|
@ -137,7 +137,7 @@ enum qspi_clocks {
|
||||
struct qcom_qspi {
|
||||
void __iomem *base;
|
||||
struct device *dev;
|
||||
struct clk_bulk_data clks[QSPI_NUM_CLKS];
|
||||
struct clk_bulk_data *clks;
|
||||
struct qspi_xfer xfer;
|
||||
/* Lock to protect xfer and IRQ accessed registers */
|
||||
spinlock_t lock;
|
||||
@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
|
||||
goto exit_probe_master_put;
|
||||
}
|
||||
|
||||
ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
|
||||
sizeof(*ctrl->clks), GFP_KERNEL);
|
||||
if (!ctrl->clks) {
|
||||
ret = -ENOMEM;
|
||||
goto exit_probe_master_put;
|
||||
}
|
||||
|
||||
ctrl->clks[QSPI_CLK_CORE].id = "core";
|
||||
ctrl->clks[QSPI_CLK_IFACE].id = "iface";
|
||||
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
|
||||
|
@ -159,7 +159,7 @@
|
||||
#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
|
||||
#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
|
||||
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
|
||||
#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
|
||||
#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
|
||||
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
|
||||
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
|
||||
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
|
||||
@ -242,6 +242,7 @@ struct spi_ops {
|
||||
u16 mode_bits;
|
||||
u16 flags;
|
||||
u16 fifo_size;
|
||||
u8 num_hw_ss;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
|
||||
return n;
|
||||
}
|
||||
|
||||
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
|
||||
|
||||
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
|
||||
{
|
||||
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
|
||||
@ -620,9 +619,8 @@ no_dma_tx:
|
||||
dmaengine_terminate_all(rspi->ctlr->dma_rx);
|
||||
no_dma_rx:
|
||||
if (ret == -EAGAIN) {
|
||||
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
|
||||
dev_driver_string(&rspi->ctlr->dev),
|
||||
dev_name(&rspi->ctlr->dev));
|
||||
dev_warn_once(&rspi->ctlr->dev,
|
||||
"DMA not available, falling back to PIO\n");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
|
||||
if (spi->mode & SPI_CPHA)
|
||||
rspi->spcmd |= SPCMD_CPHA;
|
||||
|
||||
/* Configure slave signal to assert */
|
||||
rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
|
||||
: spi->chip_select);
|
||||
|
||||
/* CMOS output mode and MOSI signal from previous transfer */
|
||||
rspi->sppcr = 0;
|
||||
if (spi->mode & SPI_LOOP)
|
||||
rspi->sppcr |= SPPCR_SPLP;
|
||||
|
||||
set_config_register(rspi, 8);
|
||||
rspi->ops->set_config_register(rspi, 8);
|
||||
|
||||
if (msg->spi->mode &
|
||||
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
|
||||
@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = {
|
||||
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
|
||||
.flags = SPI_CONTROLLER_MUST_TX,
|
||||
.fifo_size = 8,
|
||||
.num_hw_ss = 2,
|
||||
};
|
||||
|
||||
static const struct spi_ops rspi_rz_ops = {
|
||||
@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = {
|
||||
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
|
||||
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
|
||||
.fifo_size = 8, /* 8 for TX, 32 for RX */
|
||||
.num_hw_ss = 1,
|
||||
};
|
||||
|
||||
static const struct spi_ops qspi_ops = {
|
||||
@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = {
|
||||
SPI_RX_DUAL | SPI_RX_QUAD,
|
||||
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
|
||||
.fifo_size = 32,
|
||||
.num_hw_ss = 1,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev)
|
||||
ctlr->mode_bits = ops->mode_bits;
|
||||
ctlr->flags = ops->flags;
|
||||
ctlr->dev.of_node = pdev->dev.of_node;
|
||||
ctlr->use_gpio_descriptors = true;
|
||||
ctlr->max_native_cs = rspi->ops->num_hw_ss;
|
||||
|
||||
ret = platform_get_irq_byname_optional(pdev, "rx");
|
||||
if (ret < 0) {
|
||||
@ -1314,8 +1321,6 @@ error1:
|
||||
|
||||
static const struct platform_device_id spi_driver_ids[] = {
|
||||
{ "rspi", (kernel_ulong_t)&rspi_ops },
|
||||
{ "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
|
||||
{ "qspi", (kernel_ulong_t)&qspi_ops },
|
||||
{},
|
||||
};
|
||||
|
||||
|
@ -14,8 +14,6 @@
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
@ -55,7 +53,6 @@ struct sh_msiof_spi_priv {
|
||||
void *rx_dma_page;
|
||||
dma_addr_t tx_dma_addr;
|
||||
dma_addr_t rx_dma_addr;
|
||||
unsigned short unused_ss;
|
||||
bool native_cs_inited;
|
||||
bool native_cs_high;
|
||||
bool slave_aborted;
|
||||
@ -63,140 +60,140 @@ struct sh_msiof_spi_priv {
|
||||
|
||||
#define MAX_SS 3 /* Maximum number of native chip selects */
|
||||
|
||||
#define TMDR1 0x00 /* Transmit Mode Register 1 */
|
||||
#define TMDR2 0x04 /* Transmit Mode Register 2 */
|
||||
#define TMDR3 0x08 /* Transmit Mode Register 3 */
|
||||
#define RMDR1 0x10 /* Receive Mode Register 1 */
|
||||
#define RMDR2 0x14 /* Receive Mode Register 2 */
|
||||
#define RMDR3 0x18 /* Receive Mode Register 3 */
|
||||
#define TSCR 0x20 /* Transmit Clock Select Register */
|
||||
#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
|
||||
#define CTR 0x28 /* Control Register */
|
||||
#define FCTR 0x30 /* FIFO Control Register */
|
||||
#define STR 0x40 /* Status Register */
|
||||
#define IER 0x44 /* Interrupt Enable Register */
|
||||
#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
|
||||
#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
|
||||
#define TFDR 0x50 /* Transmit FIFO Data Register */
|
||||
#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
|
||||
#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
|
||||
#define RFDR 0x60 /* Receive FIFO Data Register */
|
||||
#define SITMDR1 0x00 /* Transmit Mode Register 1 */
|
||||
#define SITMDR2 0x04 /* Transmit Mode Register 2 */
|
||||
#define SITMDR3 0x08 /* Transmit Mode Register 3 */
|
||||
#define SIRMDR1 0x10 /* Receive Mode Register 1 */
|
||||
#define SIRMDR2 0x14 /* Receive Mode Register 2 */
|
||||
#define SIRMDR3 0x18 /* Receive Mode Register 3 */
|
||||
#define SITSCR 0x20 /* Transmit Clock Select Register */
|
||||
#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
|
||||
#define SICTR 0x28 /* Control Register */
|
||||
#define SIFCTR 0x30 /* FIFO Control Register */
|
||||
#define SISTR 0x40 /* Status Register */
|
||||
#define SIIER 0x44 /* Interrupt Enable Register */
|
||||
#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
|
||||
#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
|
||||
#define SITFDR 0x50 /* Transmit FIFO Data Register */
|
||||
#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
|
||||
#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
|
||||
#define SIRFDR 0x60 /* Receive FIFO Data Register */
|
||||
|
||||
/* TMDR1 and RMDR1 */
|
||||
#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
|
||||
#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
|
||||
#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
|
||||
#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
|
||||
#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
|
||||
#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
|
||||
#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
|
||||
#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
|
||||
#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
|
||||
#define MDR1_FLD_SHIFT 2
|
||||
#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
|
||||
/* TMDR1 */
|
||||
#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
|
||||
#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
|
||||
#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
|
||||
/* SITMDR1 and SIRMDR1 */
|
||||
#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
|
||||
#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
|
||||
#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
|
||||
#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
|
||||
#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
|
||||
#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
|
||||
#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
|
||||
#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
|
||||
#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
|
||||
#define SIMDR1_FLD_SHIFT 2
|
||||
#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
|
||||
/* SITMDR1 */
|
||||
#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
|
||||
#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
|
||||
#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
|
||||
|
||||
/* TMDR2 and RMDR2 */
|
||||
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
|
||||
#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
|
||||
#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
|
||||
/* SITMDR2 and SIRMDR2 */
|
||||
#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
|
||||
#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
|
||||
#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
|
||||
|
||||
/* TSCR and RSCR */
|
||||
#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
|
||||
#define SCR_BRPS(i) (((i) - 1) << 8)
|
||||
#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
|
||||
#define SCR_BRDV_DIV_2 0
|
||||
#define SCR_BRDV_DIV_4 1
|
||||
#define SCR_BRDV_DIV_8 2
|
||||
#define SCR_BRDV_DIV_16 3
|
||||
#define SCR_BRDV_DIV_32 4
|
||||
#define SCR_BRDV_DIV_1 7
|
||||
/* SITSCR and SIRSCR */
|
||||
#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
|
||||
#define SISCR_BRPS(i) (((i) - 1) << 8)
#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
#define SISCR_BRDV_DIV_2 0
#define SISCR_BRDV_DIV_4 1
#define SISCR_BRDV_DIV_8 2
#define SISCR_BRDV_DIV_16 3
#define SISCR_BRDV_DIV_32 4
#define SISCR_BRDV_DIV_1 7

/* CTR */
#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
#define CTR_TXE BIT(9) /* Transmit Enable */
#define CTR_RXE BIT(8) /* Receive Enable */
#define CTR_TXRST BIT(1) /* Transmit Reset */
#define CTR_RXRST BIT(0) /* Receive Reset */
/* SICTR */
#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
#define SICTR_TXE BIT(9) /* Transmit Enable */
#define SICTR_RXE BIT(8) /* Receive Enable */
#define SICTR_TXRST BIT(1) /* Transmit Reset */
#define SICTR_RXRST BIT(0) /* Receive Reset */

/* FCTR */
#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
#define FCTR_TFUA_SHIFT 20
#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
#define FCTR_RFUA_SHIFT 4
#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
/* SIFCTR */
#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
#define SIFCTR_TFUA_SHIFT 20
#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
#define SIFCTR_RFUA_SHIFT 4
#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)

/* STR */
#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
#define STR_TEOF BIT(23) /* Frame Transmission End */
#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
#define STR_RFFUL BIT(13) /* Receive FIFO Full */
#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
#define STR_REOF BIT(7) /* Frame Reception End */
#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
/* SISTR */
#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
#define SISTR_TEOF BIT(23) /* Frame Transmission End */
#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
#define SISTR_REOF BIT(7) /* Frame Reception End */
#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */

/* IER */
#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
#define IER_REOFE BIT(7) /* Frame Reception End Enable */
#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
/* SIIER */
#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */

static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
case TSCR:
case RSCR:
case SITSCR:
case SIRSCR:
return ioread16(p->mapbase + reg_offs);
default:
return ioread32(p->mapbase + reg_offs);
@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
u32 value)
{
switch (reg_offs) {
case TSCR:
case RSCR:
case SITSCR:
case SIRSCR:
iowrite16(value, p->mapbase + reg_offs);
break;
default:
@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
u32 mask = clr | set;
u32 data;

data = sh_msiof_read(p, CTR);
data = sh_msiof_read(p, SICTR);
data &= ~clr;
data |= set;
sh_msiof_write(p, CTR, data);
sh_msiof_write(p, SICTR, data);

return readl_poll_timeout_atomic(p->mapbase + CTR, data,
return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
(data & mask) == set, 1, 100);
}

@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
struct sh_msiof_spi_priv *p = data;

/* just disable the interrupt and wake up */
sh_msiof_write(p, IER, 0);
sh_msiof_write(p, SIIER, 0);
complete(&p->done);

return IRQ_HANDLED;
@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)

static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
u32 mask = CTR_TXRST | CTR_RXRST;
u32 mask = SICTR_TXRST | SICTR_RXRST;
u32 data;

data = sh_msiof_read(p, CTR);
data = sh_msiof_read(p, SICTR);
data |= mask;
sh_msiof_write(p, CTR, data);
sh_msiof_write(p, SICTR, data);

readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
100);
}

static const u32 sh_msiof_spi_div_array[] = {
SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
};

static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,

div = DIV_ROUND_UP(parent_rate, spi_hz);
if (div <= 1024) {
/* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
/* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
if (!div_pow && div <= 32 && div > 2)
div_pow = 1;

@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
brps = 32;
}

scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
sh_msiof_write(p, TSCR, scr);
scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, RSCR, scr);
sh_msiof_write(p, SIRSCR, scr);
}

static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return 0;
}

val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;

return val;
}
@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_slave(p->ctlr)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
sh_msiof_write(p, TMDR1,
tmp | MDR1_TRMD | TMDR1_PCON |
(ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
sh_msiof_write(p, SITMDR1,
tmp | SIMDR1_TRMD | SITMDR1_PCON |
(ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
sh_msiof_write(p, RMDR1, tmp);
sh_msiof_write(p, SIRMDR1, tmp);

tmp = 0;
tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;

edge = cpol ^ !cpha;

tmp |= edge << CTR_TEDG_SHIFT;
tmp |= edge << CTR_REDG_SHIFT;
tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
sh_msiof_write(p, CTR, tmp);
tmp |= edge << SICTR_TEDG_SHIFT;
tmp |= edge << SICTR_REDG_SHIFT;
tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
sh_msiof_write(p, SICTR, tmp);
}

static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);

if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
sh_msiof_write(p, SITMDR2, dr2);
else
sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);

if (rx_buf)
sh_msiof_write(p, RMDR2, dr2);
sh_msiof_write(p, SIRMDR2, dr2);
}

static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
sh_msiof_write(p, STR,
sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
sh_msiof_write(p, SISTR,
sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
}

static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, buf_8[k] << fs);
sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}

static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, buf_16[k] << fs);
sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}

static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}

static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, buf_32[k] << fs);
sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}

static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}

static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}

static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}

static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}

static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}

static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}

static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;

for (k = 0; k < words; k++)
put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
}

static int sh_msiof_spi_setup(struct spi_device *spi)
@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;

/* Configure native chip select mode/polarity early */
clr = MDR1_SYNCMD_MASK;
set = MDR1_SYNCMD_SPI;
clr = SIMDR1_SYNCMD_MASK;
set = SIMDR1_SYNCMD_SPI;
if (spi->mode & SPI_CS_HIGH)
clr |= BIT(MDR1_SYNCAC_SHIFT);
clr |= BIT(SIMDR1_SYNCAC_SHIFT);
else
set |= BIT(MDR1_SYNCAC_SHIFT);
set |= BIT(SIMDR1_SYNCAC_SHIFT);
pm_runtime_get_sync(&p->pdev->dev);
tmp = sh_msiof_read(p, TMDR1) & ~clr;
sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
tmp = sh_msiof_read(p, RMDR1) & ~clr;
sh_msiof_write(p, RMDR1, tmp | set);
tmp = sh_msiof_read(p, SITMDR1) & ~clr;
sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
sh_msiof_write(p, SIRMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
p->native_cs_high = spi->mode & SPI_CS_HIGH;
p->native_cs_inited = true;
@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,

/* Configure pins before asserting CS */
if (spi->cs_gpiod) {
ss = p->unused_ss;
ss = ctlr->unused_native_cs;
cs_high = p->native_cs_high;
} else {
ss = spi->chip_select;
@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)

/* setup clock and rx/tx signals */
if (!slave)
ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
if (!ret)
ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);

/* start by setting frame bit */
if (!ret && !slave)
ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);

return ret;
}
@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)

/* shut down frame, rx/tx and clock signals */
if (!slave)
ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
if (!ret)
ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
if (!ret && !slave)
ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);

return ret;
}
@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
fifo_shift = 32 - bits;

/* default FIFO watermarks for PIO */
sh_msiof_write(p, FCTR, 0);
sh_msiof_write(p, SIFCTR, 0);

/* setup msiof transfer mode registers */
sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);

/* write tx fifo */
if (tx_buf)
@ -731,7 +728,7 @@ stop_reset:
sh_msiof_reset_str(p);
sh_msiof_spi_stop(p, rx_buf);
stop_ier:
sh_msiof_write(p, IER, 0);
sh_msiof_write(p, SIIER, 0);
return ret;
}

@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,

/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
ier_bits |= IER_RDREQE | IER_RDMAE;
ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}

if (tx) {
ier_bits |= IER_TDREQE | IER_TDMAE;
ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}

/* 1 stage FIFO watermarks for DMA */
sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);

/* setup msiof transfer mode registers (32-bit words) */
sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);

sh_msiof_write(p, IER, ier_bits);
sh_msiof_write(p, SIIER, ier_bits);

reinit_completion(&p->done);
if (tx)
@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
if (ret)
goto stop_reset;

sh_msiof_write(p, IER, 0);
sh_msiof_write(p, SIIER, 0);
} else {
/* wait for tx fifo to be emptied */
sh_msiof_write(p, IER, IER_TEOFE);
sh_msiof_write(p, SIIER, SIIER_TEOFE);
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
@ -856,7 +853,7 @@ stop_dma:
no_dma_tx:
if (rx)
dmaengine_terminate_all(p->ctlr->dma_rx);
sh_msiof_write(p, IER, 0);
sh_msiof_write(p, SIIER, 0);
return ret;
}

@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif

static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
{
struct device *dev = &p->pdev->dev;
unsigned int used_ss_mask = 0;
unsigned int cs_gpios = 0;
unsigned int num_cs, i;
int ret;

ret = gpiod_count(dev, "cs");
if (ret <= 0)
return 0;

num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
for (i = 0; i < num_cs; i++) {
struct gpio_desc *gpiod;

gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
if (!IS_ERR(gpiod)) {
devm_gpiod_put(dev, gpiod);
cs_gpios++;
continue;
}

if (PTR_ERR(gpiod) != -ENOENT)
return PTR_ERR(gpiod);

if (i >= MAX_SS) {
dev_err(dev, "Invalid native chip select %d\n", i);
return -EINVAL;
}
used_ss_mask |= BIT(i);
}
p->unused_ss = ffz(used_ss_mask);
if (cs_gpios && p->unused_ss >= MAX_SS) {
dev_err(dev, "No unused native chip select available\n");
return -EINVAL;
}
return 0;
}

static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)

ctlr = p->ctlr;
ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
dma_tx_id, res->start + TFDR);
dma_tx_id, res->start + SITFDR);
if (!ctlr->dma_tx)
return -ENODEV;

ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
dma_rx_id, res->start + RFDR);
dma_rx_id, res->start + SIRFDR);
if (!ctlr->dma_rx)
goto free_tx_chan;

@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;

/* Setup GPIO chip selects */
ctlr->num_chipselect = p->info->num_chipselect;
ret = sh_msiof_get_cs_gpios(p);
if (ret)
goto err1;

/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = p->info->num_chipselect;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ctlr->use_gpio_descriptors = true;
ctlr->max_native_cs = MAX_SS;

ret = sh_msiof_request_dma(p);
if (ret < 0)

@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

/* request DMA channels */
sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
if (!sspi->rx_chan) {
sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR(sspi->rx_chan)) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
ret = -ENODEV;
ret = PTR_ERR(sspi->rx_chan);
goto free_master;
}
sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
if (!sspi->tx_chan) {
sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR(sspi->tx_chan)) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
ret = -ENODEV;
ret = PTR_ERR(sspi->tx_chan);
goto free_rx_dma;
}

@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
return 0;
}

static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
struct dma_slave_config dma_cfg;
struct device *dev = qspi->dev;
int ret = 0;

memset(&dma_cfg, 0, sizeof(dma_cfg));

@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
dma_cfg.src_maxburst = 4;
dma_cfg.dst_maxburst = 4;

qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
if (qspi->dma_chrx) {
qspi->dma_chrx = dma_request_chan(dev, "rx");
if (IS_ERR(qspi->dma_chrx)) {
ret = PTR_ERR(qspi->dma_chrx);
qspi->dma_chrx = NULL;
if (ret == -EPROBE_DEFER)
goto out;
} else {
if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
dev_err(dev, "dma rx config failed\n");
dma_release_channel(qspi->dma_chrx);
@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}

qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
if (qspi->dma_chtx) {
qspi->dma_chtx = dma_request_chan(dev, "tx");
if (IS_ERR(qspi->dma_chtx)) {
ret = PTR_ERR(qspi->dma_chtx);
qspi->dma_chtx = NULL;
} else {
if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
dev_err(dev, "dma tx config failed\n");
dma_release_channel(qspi->dma_chtx);
@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}

out:
init_completion(&qspi->dma_completion);

if (ret != -EPROBE_DEFER)
ret = 0;

return ret;
}

static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev)

qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
stm32_qspi_dma_setup(qspi);
ret = stm32_qspi_dma_setup(qspi);
if (ret)
goto err;

mutex_init(&qspi->lock);

ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD

@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@ -973,29 +972,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
}

/**
* stm32_spi_setup - setup device chip select
*/
static int stm32_spi_setup(struct spi_device *spi_dev)
{
int ret = 0;

if (!gpio_is_valid(spi_dev->cs_gpio)) {
dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
spi_dev->cs_gpio);
return -EINVAL;
}

dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
spi_dev->cs_gpio,
(spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");

ret = gpio_direction_output(spi_dev->cs_gpio,
!(spi_dev->mode & SPI_CS_HIGH));

return ret;
}

/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
*/
@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct stm32_spi *spi;
struct resource *res;
int i, ret;
int ret;

master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
if (!master) {
@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
master->setup = stm32_spi_setup;
master->use_gpio_descriptors = true;
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;

spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
if (!spi->dma_tx)
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
else
master->dma_tx = spi->dma_tx;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
ret = PTR_ERR(spi->dma_tx);
spi->dma_tx = NULL;
if (ret == -EPROBE_DEFER)
goto err_clk_disable;

dev_warn(&pdev->dev, "failed to request tx dma channel\n");
} else {
master->dma_tx = spi->dma_tx;
}

spi->dma_rx = dma_request_chan(spi->dev, "rx");
if (IS_ERR(spi->dma_rx)) {
ret = PTR_ERR(spi->dma_rx);
spi->dma_rx = NULL;
if (ret == -EPROBE_DEFER)
goto err_dma_release;

spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
if (!spi->dma_rx)
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
else
} else {
master->dma_rx = spi->dma_rx;
}

if (spi->dma_tx || spi->dma_rx)
master->can_dma = stm32_spi_can_dma;
@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "spi master registration failed: %d\n",
ret);
goto err_dma_release;
goto err_pm_disable;
}

if (!master->cs_gpios) {
if (!master->cs_gpiods) {
dev_err(&pdev->dev, "no CS gpios available\n");
ret = -EINVAL;
goto err_dma_release;
}

for (i = 0; i < master->num_chipselect; i++) {
if (!gpio_is_valid(master->cs_gpios[i])) {
dev_err(&pdev->dev, "%i is not a valid gpio\n",
master->cs_gpios[i]);
ret = -EINVAL;
goto err_dma_release;
}

ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev, "can't get CS gpio %i\n",
master->cs_gpios[i]);
goto err_dma_release;
}
goto err_pm_disable;
}

dev_info(&pdev->dev, "driver initialized\n");

return 0;

err_pm_disable:
pm_runtime_disable(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
if (spi->dma_rx)
dma_release_channel(spi->dma_rx);

pm_runtime_disable(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(spi->clk);
err_master_put:

@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param(

if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
tspi->is_packed = 1;
tspi->is_packed = true;
tspi->words_per_32bit = 32/bits_per_word;
} else {
tspi->is_packed = 0;
tspi->is_packed = false;
tspi->words_per_32bit = 1;
}

@ -80,8 +80,6 @@ struct ti_qspi {

#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)

#define QSPI_FCLK 192000000

/* Clock Control */
#define QSPI_CLK_EN (1 << 31)
#define QSPI_CLK_DIV_MAX 0xffff
@ -316,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
{
int wlen;
unsigned int cmd;
u32 rx;
u8 rxlen, rx_wlen;
u8 *rxbuf;

rxbuf = t->rx_buf;
@ -332,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
break;
}
wlen = t->bits_per_word >> 3; /* in bytes */
rx_wlen = wlen;

while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
if (qspi_is_busy(qspi))
return -EBUSY;

switch (wlen) {
case 1:
/*
* Optimize the 8-bit words transfers, as used by
* the SPI flash devices.
*/
if (count >= QSPI_WLEN_MAX_BYTES) {
rxlen = QSPI_WLEN_MAX_BYTES;
} else {
rxlen = min(count, 4);
}
rx_wlen = rxlen << 3;
cmd &= ~QSPI_WLEN_MASK;
cmd |= QSPI_WLEN(rx_wlen);
break;
default:
rxlen = wlen;
break;
}

ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}

switch (wlen) {
case 1:
*rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
/*
* Optimize the 8-bit words transfers, as used by
* the SPI flash devices.
*/
if (count >= QSPI_WLEN_MAX_BYTES) {
u32 *rxp = (u32 *) rxbuf;
rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
*rxp++ = be32_to_cpu(rx);
rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
*rxp++ = be32_to_cpu(rx);
rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
*rxp++ = be32_to_cpu(rx);
rx = readl(qspi->base + QSPI_SPI_DATA_REG);
*rxp++ = be32_to_cpu(rx);
} else {
u8 *rxp = rxbuf;
rx = readl(qspi->base + QSPI_SPI_DATA_REG);
if (rx_wlen >= 8)
*rxp++ = rx >> (rx_wlen - 8);
if (rx_wlen >= 16)
*rxp++ = rx >> (rx_wlen - 16);
if (rx_wlen >= 24)
*rxp++ = rx >> (rx_wlen - 24);
if (rx_wlen >= 32)
*rxp++ = rx;
}
break;
case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
@ -354,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
break;
}
rxbuf += wlen;
count -= wlen;
rxbuf += rxlen;
count -= rxlen;
}

return 0;
@ -527,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
QSPI_SPI_SETUP_REG(spi->chip_select));
}

static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
size_t max_len;

if (op->data.dir == SPI_MEM_DATA_IN) {
if (op->addr.val < qspi->mmap_size) {
/* Limit MMIO to the mmaped region */
if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
max_len = qspi->mmap_size - op->addr.val;
op->data.nbytes = min((size_t) op->data.nbytes,
max_len);
}
} else {
/*
* Use fallback mode (SW generated transfers) above the
* mmaped region.
* Adjust size to comply with the QSPI max frame length.
*/
max_len = QSPI_FRAME;
max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
op->data.nbytes = min((size_t) op->data.nbytes,
max_len);
}
}

return 0;
}

static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
@ -577,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,

static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.exec_op = ti_qspi_exec_mem_op,
.adjust_op_size = ti_qspi_adjust_op_size,
};

static int ti_qspi_start_transfer_one(struct spi_master *master,

@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);

@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@ -23,6 +24,7 @@

struct uniphier_spi_priv {
void __iomem *base;
dma_addr_t base_dma_addr;
struct clk *clk;
struct spi_master *master;
struct completion xfer_done;
@ -32,6 +34,7 @@ struct uniphier_spi_priv {
unsigned int rx_bytes;
const u8 *tx_buf;
u8 *rx_buf;
atomic_t dma_busy;

bool is_save_param;
u8 bits_per_word;
@ -61,11 +64,16 @@ struct uniphier_spi_priv {
#define SSI_FPS_FSTRT BIT(14)

#define SSI_SR 0x14
#define SSI_SR_BUSY BIT(7)
#define SSI_SR_RNE BIT(0)

#define SSI_IE 0x18
#define SSI_IE_TCIE BIT(4)
#define SSI_IE_RCIE BIT(3)
#define SSI_IE_TXRE BIT(2)
#define SSI_IE_RXRE BIT(1)
#define SSI_IE_RORIE BIT(0)
#define SSI_IE_ALL_MASK GENMASK(4, 0)

#define SSI_IS 0x1c
#define SSI_IS_RXRS BIT(9)
@ -87,15 +95,19 @@ struct uniphier_spi_priv {
#define SSI_RXDR 0x24

#define SSI_FIFO_DEPTH 8U
#define SSI_FIFO_BURST_NUM 1

#define SSI_DMA_RX_BUSY BIT(1)
#define SSI_DMA_TX_BUSY BIT(0)

static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}

static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
u32 mask)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;

val = readl(priv->base + SSI_IE);
@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
writel(val, priv->base + SSI_IE);
}

static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
u32 mask)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;

val = readl(priv->base + SSI_IE);
@ -334,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}

static bool uniphier_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned int bpw = bytes_per_word(priv->bits_per_word);

if ((!master->dma_tx && !master->dma_rx)
|| (!master->dma_tx && t->tx_buf)
|| (!master->dma_rx && t->rx_buf))
return false;

return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
}

static void uniphier_spi_dma_rxcb(void *data)
{
struct spi_master *master = data;
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);

uniphier_spi_irq_disable(priv, SSI_IE_RXRE);

if (!(state & SSI_DMA_TX_BUSY))
spi_finalize_current_transfer(master);
}

static void uniphier_spi_dma_txcb(void *data)
{
struct spi_master *master = data;
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);

uniphier_spi_irq_disable(priv, SSI_IE_TXRE);

if (!(state & SSI_DMA_RX_BUSY))
spi_finalize_current_transfer(master);
}

static int uniphier_spi_transfer_one_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
int buswidth;

atomic_set(&priv->dma_busy, 0);

uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);

if (priv->bits_per_word <= 8)
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
else if (priv->bits_per_word <= 16)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
else
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

if (priv->rx_buf) {
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = priv->base_dma_addr + SSI_RXDR,
.src_addr_width = buswidth,
.src_maxburst = SSI_FIFO_BURST_NUM,
};

dmaengine_slave_config(master->dma_rx, &rxconf);

rxdesc = dmaengine_prep_slave_sg(
master->dma_rx,
t->rx_sg.sgl, t->rx_sg.nents,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto out_err_prep;

rxdesc->callback = uniphier_spi_dma_rxcb;
rxdesc->callback_param = master;

uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);

dmaengine_submit(rxdesc);
dma_async_issue_pending(master->dma_rx);
}

if (priv->tx_buf) {
struct dma_slave_config txconf = {
.direction = DMA_MEM_TO_DEV,
.dst_addr = priv->base_dma_addr + SSI_TXDR,
.dst_addr_width = buswidth,
.dst_maxburst = SSI_FIFO_BURST_NUM,
};

dmaengine_slave_config(master->dma_tx, &txconf);

txdesc = dmaengine_prep_slave_sg(
master->dma_tx,
t->tx_sg.sgl, t->tx_sg.nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto out_err_prep;

txdesc->callback = uniphier_spi_dma_txcb;
txdesc->callback_param = master;

uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);

dmaengine_submit(txdesc);
dma_async_issue_pending(master->dma_tx);
}

/* signal that we need to wait for completion */
return (priv->tx_buf || priv->rx_buf);

out_err_prep:
if (rxdesc)
dmaengine_terminate_sync(master->dma_rx);

return -EINVAL;
}

static int uniphier_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
@ -346,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,

uniphier_spi_fill_tx_fifo(priv);

uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

time_left = wait_for_completion_timeout(&priv->xfer_done,
msecs_to_jiffies(SSI_TIMEOUT_MS));

uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

if (!time_left) {
dev_err(dev, "transfer timeout.\n");
@ -395,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned long threshold;
bool use_dma;

/* Terminate and return success for 0 byte length transfer */
if (!t->len)
@ -402,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master,

uniphier_spi_setup_transfer(spi, t);

use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
if (use_dma)
return uniphier_spi_transfer_one_dma(master, spi, t);

/*
* If the transfer operation will take longer than
* SSI_POLL_TIMEOUT_US, it should use irq.
@ -432,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
return 0;
}

static void uniphier_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
u32 val;

/* stop running spi transfer */
writel(0, priv->base + SSI_CTL);

/* reset FIFOs */
val = SSI_FC_TXFFL | SSI_FC_RXFFL;
writel(val, priv->base + SSI_FC);

uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);

if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
dmaengine_terminate_async(master->dma_tx);
atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
}

if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
dmaengine_terminate_async(master->dma_rx);
atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
}
}

static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
struct uniphier_spi_priv *priv = dev_id;
@ -477,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
struct resource *res;
struct dma_slave_caps caps;
u32 dma_tx_burst = 0, dma_rx_burst = 0;
unsigned long clk_rate;
int irq;
int ret;
@ -491,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev)
priv->master = master;
priv->is_save_param = false;

priv->base = devm_platform_ioremap_resource(pdev, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
}
priv->base_dma_addr = res->start;

priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
@ -538,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev)
= uniphier_spi_prepare_transfer_hardware;
master->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
master->handle_err = uniphier_spi_handle_err;
master->can_dma = uniphier_spi_can_dma;

master->num_chipselect = 1;
master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;

master->dma_tx = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR_OR_NULL(master->dma_tx)) {
if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
goto out_disable_clk;
master->dma_tx = NULL;
dma_tx_burst = INT_MAX;
} else {
ret = dma_get_slave_caps(master->dma_tx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
ret);
goto out_disable_clk;
}
dma_tx_burst = caps.max_burst;
}

master->dma_rx = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR_OR_NULL(master->dma_rx)) {
if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
goto out_disable_clk;
master->dma_rx = NULL;
dma_rx_burst = INT_MAX;
} else {
ret = dma_get_slave_caps(master->dma_rx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
ret);
goto out_disable_clk;
}
dma_rx_burst = caps.max_burst;
}

master->max_dma_len = min(dma_tx_burst, dma_rx_burst);

ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
@ -558,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);

if (priv->master->dma_tx)
dma_release_channel(priv->master->dma_tx);
if (priv->master->dma_rx)
dma_release_channel(priv->master->dma_rx);

clk_disable_unprepare(priv->clk);

return 0;

@ -1674,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}

if (unlikely(ctlr->ptp_sts_supported)) {
list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
}
}

spi_unmap_msg(ctlr, mesg);

if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@ -2451,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
unsigned long native_cs_mask = 0;
unsigned int num_cs_gpios = 0;

nb = gpiod_count(dev, "cs");
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@ -2492,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
num_cs_gpios++;
continue;
}

if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
dev_err(dev, "Invalid native chip select %d\n", i);
return -EINVAL;
}
native_cs_mask |= BIT(i);
}

ctlr->unused_native_cs = ffz(native_cs_mask);
if (num_cs_gpios && ctlr->max_native_cs &&
ctlr->unused_native_cs >= ctlr->max_native_cs) {
dev_err(dev, "No unused native chip select available\n");
return -EINVAL;
}

return 0;

@ -423,6 +423,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* GPIO descriptors rather than using global GPIO numbers grabbed by the
* driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
* and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
* @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
* fill in this field with the first unused native CS, to be used by SPI
* controller drivers that need to drive a native CS when using GPIO CS.
* @max_native_cs: When cs_gpiods is used, and this field is filled in,
* spi_register_controller() will validate all native CS (including the
* unused native CS) against this value.
* @statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
@ -624,6 +630,8 @@ struct spi_controller {
int *cs_gpios;
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
u8 unused_native_cs;
u8 max_native_cs;

/* statistics */
struct spi_statistics statistics;

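For context, the @unused_native_cs and @max_native_cs kernel-doc above describes fields that the SPI core fills in and that controller drivers are expected to consume much like the sh-msiof prepare_message hunk earlier in this diff. The sketch below is illustrative only and not part of this commit; my_drv_priv and my_drv_select_ss are made-up names standing in for a driver's own private data and chip-select helper.

/*
 * Illustrative sketch, not code from this patch: when a GPIO chip select
 * is in use, drive the spare native CS that the SPI core reserved in
 * ctlr->unused_native_cs; otherwise use the device's own chip_select.
 */
static int my_drv_prepare_message(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	struct my_drv_priv *p = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	unsigned int ss;

	if (spi->cs_gpiod)
		ss = ctlr->unused_native_cs;	/* core-picked spare native CS */
	else
		ss = spi->chip_select;

	return my_drv_select_ss(p, ss);	/* hypothetical hardware helper */
}
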
@ -6,16 +6,12 @@
* struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI
* @freq: input clock freq to the core.
* @baudwidth: baud rate divider width of the core.
* @gpio_cs_count: number of gpio pins used for chipselect.
* @gpio_cs: array of gpio pins used for chipselect.
*
* freq and baudwidth are used only if the divider is programmable.
*/
struct tiny_spi_platform_data {
unsigned int freq;
unsigned int baudwidth;
unsigned int gpio_cs_count;
int *gpio_cs;
};

#endif /* _LINUX_SPI_SPI_OC_TINY_H */