Merge master.kernel.org:/home/rmk/linux-2.6-mmc

* master.kernel.org:/home/rmk/linux-2.6-mmc:
  [MMC] Always use a sector size of 512 bytes
  [MMC] Cleanup 385e3227d4
  [ARM] 3751/1: i.MX/MX1 SD/MMC use 512 bytes request for SCR read
  [MMC] Fix SD timeout calculation
  [MMC] constify mmc_host_ops
commit e004876c3b
drivers/mmc/imxmmc.c

@@ -91,6 +91,8 @@ struct imxmci_host {
 	int			dma_allocated;
 
 	unsigned char		actual_bus_width;
+
+	int			prev_cmd_code;
 };
 
 #define IMXMCI_PEND_IRQ_b	0
@@ -248,16 +250,14 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
 	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
 	 * This is required for SCR read at least.
 	 */
-	if (datasz < 64) {
+	if (datasz < 512) {
 		host->dma_size = datasz;
 		if (data->flags & MMC_DATA_READ) {
 			host->dma_dir = DMA_FROM_DEVICE;
 
 			/* Hack to enable read SCR */
-			if(datasz < 16) {
-				MMC_NOB = 1;
-				MMC_BLK_LEN = 16;
-			}
+			MMC_NOB = 1;
+			MMC_BLK_LEN = 512;
 		} else {
 			host->dma_dir = DMA_TO_DEVICE;
 		}
@@ -409,6 +409,9 @@ static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	if(req && req->cmd)
+		host->prev_cmd_code = req->cmd->opcode;
+
 	host->req = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
@@ -553,7 +556,6 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
 {
 	int i;
 	int burst_len;
-	int flush_len;
 	int trans_done = 0;
 	unsigned int stat = *pstat;
 
@@ -566,44 +568,43 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
 	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
 		stat);
 
+	udelay(20);	/* required for clocks < 8MHz*/
+
 	if(host->dma_dir == DMA_FROM_DEVICE) {
 		imxmci_busy_wait_for_status(host, &stat,
 				STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE,
-				20, "imxmci_cpu_driven_data read");
+				50, "imxmci_cpu_driven_data read");
 
 		while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
-		      (host->data_cnt < host->dma_size)) {
-			if(burst_len >= host->dma_size - host->data_cnt) {
-				flush_len = burst_len;
-				burst_len = host->dma_size - host->data_cnt;
-				flush_len -= burst_len;
-				host->data_cnt = host->dma_size;
-				trans_done = 1;
-			} else {
-				flush_len = 0;
-				host->data_cnt += burst_len;
-			}
+		      (host->data_cnt < 512)) {
+
+			udelay(20);	/* required for clocks < 8MHz*/
 
 			for(i = burst_len; i>=2 ; i-=2) {
-				*(host->data_ptr++) = MMC_BUFFER_ACCESS;
-				udelay(20);	/* required for clocks < 8MHz*/
+				u16 data;
+				data = MMC_BUFFER_ACCESS;
+				udelay(10);	/* required for clocks < 8MHz*/
+				if(host->data_cnt+2 <= host->dma_size) {
+					*(host->data_ptr++) = data;
+				} else {
+					if(host->data_cnt < host->dma_size)
+						*(u8*)(host->data_ptr) = data;
+				}
+				host->data_cnt += 2;
 			}
 
-			if(i == 1)
-				*(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS;
-
 			stat = MMC_STATUS;
 
-			/* Flush extra bytes from FIFO */
-			while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
-				i = MMC_BUFFER_ACCESS;
-				stat = MMC_STATUS;
-				stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
-			}
-
-			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n",
-				burst_len, stat);
+			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
+				host->data_cnt, burst_len, stat);
+
 		}
 
+		if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
+			trans_done = 1;
+
+		if(host->dma_size & 0x1ff)
+			stat &= ~STATUS_CRC_READ_ERR;
+
 	} else {
 		imxmci_busy_wait_for_status(host, &stat,
 				STATUS_APPL_BUFF_FE,
@@ -692,8 +693,8 @@ static void imxmci_tasklet_fnc(unsigned long data)
 			what, stat, MMC_INT_MASK);
 		dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
 			MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
-		dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n",
-			host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size);
+		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
+			host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
 	}
 
 	if(!host->present || timeout)
drivers/mmc/mmc.c

@@ -247,6 +247,55 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
 
 EXPORT_SYMBOL(mmc_wait_for_app_cmd);
 
+/**
+ *	mmc_set_data_timeout - set the timeout for a data command
+ *	@data: data phase for command
+ *	@card: the MMC card associated with the data transfer
+ *	@write: flag to differentiate reads from writes
+ */
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
+	int write)
+{
+	unsigned int mult;
+
+	/*
+	 * SD cards use a 100 multiplier rather than 10
+	 */
+	mult = mmc_card_sd(card) ? 100 : 10;
+
+	/*
+	 * Scale up the multiplier (and therefore the timeout) by
+	 * the r2w factor for writes.
+	 */
+	if (write)
+		mult <<= card->csd.r2w_factor;
+
+	data->timeout_ns = card->csd.tacc_ns * mult;
+	data->timeout_clks = card->csd.tacc_clks * mult;
+
+	/*
+	 * SD cards also have an upper limit on the timeout.
+	 */
+	if (mmc_card_sd(card)) {
+		unsigned int timeout_us, limit_us;
+
+		timeout_us = data->timeout_ns / 1000;
+		timeout_us += data->timeout_clks * 1000 /
+			(card->host->ios.clock / 1000);
+
+		if (write)
+			limit_us = 250000;
+		else
+			limit_us = 100000;
+
+		if (timeout_us > limit_us) {
+			data->timeout_ns = limit_us * 1000;
+			data->timeout_clks = 0;
+		}
+	}
+}
+
+EXPORT_SYMBOL(mmc_set_data_timeout);
+
 static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
 
 /**
@@ -908,11 +957,9 @@ static void mmc_read_scrs(struct mmc_host *host)
 {
 	int err;
 	struct mmc_card *card;
-
 	struct mmc_request mrq;
 	struct mmc_command cmd;
 	struct mmc_data data;
-
 	struct scatterlist sg;
 
 	list_for_each_entry(card, &host->cards, node) {
@@ -947,8 +994,8 @@ static void mmc_read_scrs(struct mmc_host *host)
 
 		memset(&data, 0, sizeof(struct mmc_data));
 
-		data.timeout_ns = card->csd.tacc_ns * 10;
-		data.timeout_clks = card->csd.tacc_clks * 10;
+		mmc_set_data_timeout(&data, card, 0);
+
 		data.blksz_bits = 3;
 		data.blksz = 1 << 3;
 		data.blocks = 1;
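As a rough illustration of the timeout policy the new mmc_set_data_timeout() helper applies, here is a standalone sketch with made-up CSD values (the TAAC and R2W_FACTOR numbers below are hypothetical, not taken from the patch):

/* sketch only: mirrors the new timeout arithmetic with invented values */
#include <stdio.h>

int main(void)
{
	unsigned int tacc_ns = 1000000;	/* hypothetical CSD TAAC: 1 ms */
	unsigned int r2w_factor = 2;	/* hypothetical CSD R2W_FACTOR */
	int write = 1;			/* data direction: write */

	unsigned int mult = 100;	/* SD card multiplier; MMC would use 10 */
	if (write)
		mult <<= r2w_factor;	/* scale writes by the r2w factor: 400 */

	unsigned int timeout_ns = tacc_ns * mult;	/* 400 ms for this card */
	unsigned int timeout_us = timeout_ns / 1000;	/* 400000 us */
	unsigned int limit_us = write ? 250000 : 100000;	/* SD upper limits */

	if (timeout_us > limit_us)
		timeout_us = limit_us;	/* clamped to 250 ms, as the helper does */

	printf("timeout: %u us\n", timeout_us);
	return 0;
}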
@ -30,6 +30,7 @@
|
|||||||
#include <linux/mutex.h>
|
#include <linux/mutex.h>
|
||||||
|
|
||||||
#include <linux/mmc/card.h>
|
#include <linux/mmc/card.h>
|
||||||
|
#include <linux/mmc/host.h>
|
||||||
#include <linux/mmc/protocol.h>
|
#include <linux/mmc/protocol.h>
|
||||||
|
|
||||||
#include <asm/system.h>
|
#include <asm/system.h>
|
||||||
@@ -171,8 +172,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 		brq.cmd.arg = req->sector << 9;
 		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq.data.timeout_ns = card->csd.tacc_ns * 10;
-		brq.data.timeout_clks = card->csd.tacc_clks * 10;
 		brq.data.blksz_bits = md->block_bits;
 		brq.data.blksz = 1 << md->block_bits;
 		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
@@ -180,6 +179,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.arg = 0;
 		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
 
+		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
+
 		if (rq_data_dir(req) == READ) {
 			brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
 			brq.data.flags |= MMC_DATA_READ;
@@ -187,12 +188,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			brq.cmd.opcode = MMC_WRITE_BLOCK;
 			brq.data.flags |= MMC_DATA_WRITE;
 			brq.data.blocks = 1;
-
-			/*
-			 * Scale up the timeout by the r2w factor
-			 */
-			brq.data.timeout_ns <<= card->csd.r2w_factor;
-			brq.data.timeout_clks <<= card->csd.r2w_factor;
 		}
 
 		if (brq.data.blocks > 1) {
@@ -324,52 +319,11 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->read_only = mmc_blk_readonly(card);
 
 	/*
-	 * Figure out a workable block size. MMC cards have:
-	 *  - two block sizes, one for read and one for write.
-	 *  - may support partial reads and/or writes
-	 *    (allows block sizes smaller than specified)
+	 * Both SD and MMC specifications state (although a bit
+	 * unclearly in the MMC case) that a block size of 512
+	 * bytes must always be supported by the card.
 	 */
-	md->block_bits = card->csd.read_blkbits;
-	if (card->csd.write_blkbits != card->csd.read_blkbits) {
-		if (card->csd.write_blkbits < card->csd.read_blkbits &&
-		    card->csd.read_partial) {
-			/*
-			 * write block size is smaller than read block
-			 * size, but we support partial reads, so choose
-			 * the smaller write block size.
-			 */
-			md->block_bits = card->csd.write_blkbits;
-		} else if (card->csd.write_blkbits > card->csd.read_blkbits &&
-			   card->csd.write_partial) {
-			/*
-			 * read block size is smaller than write block
-			 * size, but we support partial writes. Use read
-			 * block size.
-			 */
-		} else {
-			/*
-			 * We don't support this configuration for writes.
-			 */
-			printk(KERN_ERR "%s: unable to select block size for "
-				"writing (rb%u wb%u rp%u wp%u)\n",
-				mmc_card_id(card),
-				1 << card->csd.read_blkbits,
-				1 << card->csd.write_blkbits,
-				card->csd.read_partial,
-				card->csd.write_partial);
-			md->read_only = 1;
-		}
-	}
-
-	/*
-	 * Refuse to allow block sizes smaller than 512 bytes.
-	 */
-	if (md->block_bits < 9) {
-		printk(KERN_ERR "%s: unable to support block size %u\n",
-			mmc_card_id(card), 1 << md->block_bits);
-		ret = -EINVAL;
-		goto err_kfree;
-	}
+	md->block_bits = 9;
 
 	md->disk = alloc_disk(1 << MMC_SHIFT);
 	if (md->disk == NULL) {
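One consequence of pinning md->block_bits to 9 (an observation about the change, not text from the patch): the request sizing in mmc_blk_issue_rq() reduces to one block per 512-byte sector. A tiny sketch with a hypothetical 4 KiB request:

/* sketch only: block/sector arithmetic with block_bits fixed at 9 */
#include <stdio.h>

int main(void)
{
	unsigned int block_bits = 9;				/* always 512-byte blocks now */
	unsigned int nr_sectors = 8;				/* hypothetical 4 KiB request */
	unsigned int blksz  = 1u << block_bits;			/* 512 */
	unsigned int blocks = nr_sectors >> (block_bits - 9);	/* shift by zero: 8 blocks */

	printf("blksz=%u blocks=%u\n", blksz, blocks);
	return 0;
}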
include/linux/mmc/host.h

@@ -77,7 +77,7 @@ struct mmc_host {
 	struct device		*dev;
 	struct class_device	class_dev;
 	int			index;
-	struct mmc_host_ops	*ops;
+	const struct mmc_host_ops *ops;
 	unsigned int		f_min;
 	unsigned int		f_max;
 	u32			ocr_avail;
include/linux/mmc/mmc.h

@@ -105,6 +105,8 @@ extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
 extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int,
 	struct mmc_command *, int);
 
+extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *, int);
+
 extern int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card);
 
 static inline void mmc_claim_host(struct mmc_host *host)