mirror of https://github.com/torvalds/linux.git
commit 2e18e04798

Merge branch 'sh/dmaengine'

Conflicts:
	arch/sh/drivers/dma/dma-sh.c
@@ -20,7 +20,7 @@ Description:
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]

base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
fsmagic:= hex value
uid:= decimal value
@@ -40,11 +40,11 @@ Description:

measure func=BPRM_CHECK
measure func=FILE_MMAP mask=MAY_EXEC
measure func=INODE_PERM mask=MAY_READ uid=0
measure func=FILE_CHECK mask=MAY_READ uid=0

The default policy measures all executables in bprm_check,
all files mmapped executable in file_mmap, and all files
open for read by root in inode_permission.
open for read by root in do_filp_open.

Examples of LSM specific definitions:

@@ -54,8 +54,8 @@ Description:

dont_measure obj_type=var_log_t
dont_measure obj_type=auditd_log_t
measure subj_user=system_u func=INODE_PERM mask=MAY_READ
measure subj_role=system_r func=INODE_PERM mask=MAY_READ
measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
measure subj_role=system_r func=FILE_CHECK mask=MAY_READ

Smack:
measure subj_user=_ func=INODE_PERM mask=MAY_READ
measure subj_user=_ func=FILE_CHECK mask=MAY_READ
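The hunks above rename the IMA hook from INODE_PERM/inode_permission to FILE_CHECK/do_filp_open. As a rough illustration only (not part of this commit), a complete policy written against the new name could look like:

    # measure all executables, executable mappings, and
    # everything root opens for read, skipping procfs
    dont_measure fsmagic=0x9fa0
    measure func=BPRM_CHECK
    measure func=FILE_MMAP mask=MAY_EXEC
    measure func=FILE_CHECK mask=MAY_READ uid=0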
Makefile
@@ -1,7 +1,7 @@

VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 33
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Man-Eating Seals of Antiquity

# *DOCUMENTATION*
@@ -52,11 +52,14 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
*
* iterations to complete the transfer.
*/
static unsigned int ts_shift[] = TS_SHIFT;
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT];
return ts_shift[cnt];
}

/*
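After this change calc_xmit_shift() reassembles the transfer-size index from two CHCR bit-fields before looking up ts_shift[]. A minimal standalone model of that lookup, using the SH-3 constants from the header changes below (the high field is unused there, so this is only a sketch of the common case):

    #include <assert.h>

    #define CHCR_TS_LOW_MASK   0x18
    #define CHCR_TS_LOW_SHIFT  3
    #define CHCR_TS_HIGH_MASK  0
    #define CHCR_TS_HIGH_SHIFT 0

    enum { XMIT_SZ_8BIT, XMIT_SZ_16BIT, XMIT_SZ_32BIT, XMIT_SZ_128BIT };

    static unsigned int ts_shift[] = {
            [XMIT_SZ_8BIT] = 0, [XMIT_SZ_16BIT] = 1,
            [XMIT_SZ_32BIT] = 2, [XMIT_SZ_128BIT] = 4,
    };

    static unsigned int calc_xmit_shift(unsigned int chcr)
    {
            int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
                      ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
            return ts_shift[cnt];
    }

    int main(void)
    {
            /* CHCR = 0x10 selects a 32-bit unit: index 2, so the
             * transfer count register works in 1 << 2 = 4-byte steps */
            assert(calc_xmit_shift(0x10) == 2);
            return 0;
    }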
@@ -64,8 +64,10 @@ static int dmte_irq_map[] __maybe_unused = {
#define ACK_L 0x00010000
#define DM_INC 0x00004000
#define DM_DEC 0x00008000
#define DM_FIX 0x0000c000
#define SM_INC 0x00001000
#define SM_DEC 0x00002000
#define SM_FIX 0x00003000
#define RS_IN 0x00000200
#define RS_OUT 0x00000300
#define TS_BLK 0x00000040
@@ -83,7 +85,7 @@ static int dmte_irq_map[] __maybe_unused = {
* Define the default configuration for dual address memory-memory transfer.
* The 0x400 value represents auto-request, external->external.
*/
#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32)
#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))

/* DMA base address */
static u32 dma_base_addr[] __maybe_unused = {
@@ -123,10 +125,47 @@ static u32 dma_base_addr[] __maybe_unused = {
*/
#define SHDMA_MIX_IRQ (1 << 1)
#define SHDMA_DMAOR1 (1 << 2)
#define SHDMA_DMAE1 (1 << 3)

enum sh_dmae_slave_chan_id {
SHDMA_SLAVE_SCIF0_TX,
SHDMA_SLAVE_SCIF0_RX,
SHDMA_SLAVE_SCIF1_TX,
SHDMA_SLAVE_SCIF1_RX,
SHDMA_SLAVE_SCIF2_TX,
SHDMA_SLAVE_SCIF2_RX,
SHDMA_SLAVE_SCIF3_TX,
SHDMA_SLAVE_SCIF3_RX,
SHDMA_SLAVE_SCIF4_TX,
SHDMA_SLAVE_SCIF4_RX,
SHDMA_SLAVE_SCIF5_TX,
SHDMA_SLAVE_SCIF5_RX,
SHDMA_SLAVE_SIUA_TX,
SHDMA_SLAVE_SIUA_RX,
SHDMA_SLAVE_SIUB_TX,
SHDMA_SLAVE_SIUB_RX,
SHDMA_SLAVE_NUMBER, /* Must stay last */
};

struct sh_dmae_slave_config {
enum sh_dmae_slave_chan_id slave_id;
dma_addr_t addr;
u32 chcr;
char mid_rid;
};

struct sh_dmae_pdata {
unsigned int mode;
struct sh_dmae_slave_config *config;
int config_num;
};

struct device;

struct sh_dmae_slave {
enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */
struct device *dma_dev; /* Set by the platform */
struct sh_dmae_slave_config *config; /* Set by the driver */
};

#endif /* __DMA_SH_H */
@@ -20,8 +20,10 @@
#define TS_32 0x00000010
#define TS_128 0x00000018

#define CHCR_TS_MASK 0x18
#define CHCR_TS_SHIFT 3
#define CHCR_TS_LOW_MASK 0x18
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0

#define DMAOR_INIT DMAOR_DME

@@ -36,11 +38,13 @@ enum {
XMIT_SZ_128BIT,
};

static unsigned int ts_shift[] __maybe_unused = {
[XMIT_SZ_8BIT] = 0,
[XMIT_SZ_16BIT] = 1,
[XMIT_SZ_32BIT] = 2,
[XMIT_SZ_128BIT] = 4,
};
#define TS_SHIFT { \
[XMIT_SZ_8BIT] = 0, \
[XMIT_SZ_16BIT] = 1, \
[XMIT_SZ_32BIT] = 2, \
[XMIT_SZ_128BIT] = 4, \
}

#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT)

#endif /* __ASM_CPU_SH3_DMA_H */
@@ -2,22 +2,38 @@
#define __ASM_SH_CPU_SH4_DMA_SH7780_H

#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \
defined(CONFIG_CPU_SUBTYPE_SH7722) || \
defined(CONFIG_CPU_SUBTYPE_SH7730)
#define DMTE0_IRQ 48
#define DMTE4_IRQ 76
#define DMAE0_IRQ 78 /* DMA Error IRQ*/
#define SH_DMAC_BASE0 0xFE008020
#define SH_DMARS_BASE 0xFE009000
#define SH_DMARS_BASE0 0xFE009000
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
#define DMTE0_IRQ 48
#define DMTE4_IRQ 76
#define DMAE0_IRQ 78 /* DMA Error IRQ*/
#define SH_DMAC_BASE0 0xFE008020
#define SH_DMARS_BASE0 0xFE009000
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0x00300000
#define CHCR_TS_HIGH_SHIFT 20
#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_CPU_SUBTYPE_SH7764)
#define DMTE0_IRQ 34
#define DMTE4_IRQ 44
#define DMAE0_IRQ 38
#define SH_DMAC_BASE0 0xFF608020
#define SH_DMARS_BASE 0xFF609000
#elif defined(CONFIG_CPU_SUBTYPE_SH7723) || \
defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_DMARS_BASE0 0xFF609000
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
#define DMTE0_IRQ 48 /* DMAC0A*/
#define DMTE4_IRQ 76 /* DMAC0B */
#define DMTE6_IRQ 40
@@ -29,7 +45,29 @@
#define DMAE1_IRQ 74 /* DMA Error IRQ*/
#define SH_DMAC_BASE0 0xFE008020
#define SH_DMAC_BASE1 0xFDC08020
#define SH_DMARS_BASE 0xFDC09000
#define SH_DMARS_BASE0 0xFDC09000
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
#define DMTE0_IRQ 48 /* DMAC0A*/
#define DMTE4_IRQ 76 /* DMAC0B */
#define DMTE6_IRQ 40
#define DMTE8_IRQ 42 /* DMAC1A */
#define DMTE9_IRQ 43
#define DMTE10_IRQ 72 /* DMAC1B */
#define DMTE11_IRQ 73
#define DMAE0_IRQ 78 /* DMA Error IRQ*/
#define DMAE1_IRQ 74 /* DMA Error IRQ*/
#define SH_DMAC_BASE0 0xFE008020
#define SH_DMAC_BASE1 0xFDC08020
#define SH_DMARS_BASE0 0xFE009000
#define SH_DMARS_BASE1 0xFDC09000
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0x00600000
#define CHCR_TS_HIGH_SHIFT 21
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
#define DMTE0_IRQ 34
#define DMTE4_IRQ 44
@@ -41,7 +79,11 @@
#define DMAE0_IRQ 38 /* DMA Error IRQ */
#define SH_DMAC_BASE0 0xFC808020
#define SH_DMAC_BASE1 0xFC818020
#define SH_DMARS_BASE 0xFC809000
#define SH_DMARS_BASE0 0xFC809000
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#else /* SH7785 */
#define DMTE0_IRQ 33
#define DMTE4_IRQ 37
@@ -54,18 +96,17 @@
#define DMAE1_IRQ 58 /* DMA Error IRQ1 */
#define SH_DMAC_BASE0 0xFC808020
#define SH_DMAC_BASE1 0xFCC08020
#define SH_DMARS_BASE 0xFC809000
#define SH_DMARS_BASE0 0xFC809000
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#endif

#define REQ_HE 0x000000C0
#define REQ_H 0x00000080
#define REQ_LE 0x00000040
#define TM_BURST 0x0000020
#define TS_8 0x00000000
#define TS_16 0x00000008
#define TS_32 0x00000010
#define TS_16BLK 0x00000018
#define TS_32BLK 0x00100000
#define REQ_HE 0x000000C0
#define REQ_H 0x00000080
#define REQ_LE 0x00000040
#define TM_BURST 0x00000020

/*
* The SuperH DMAC supports a number of transmit sizes, we list them here,
@@ -74,22 +115,31 @@
* Defaults to a 64-bit transfer size.
*/
enum {
XMIT_SZ_8BIT,
XMIT_SZ_16BIT,
XMIT_SZ_32BIT,
XMIT_SZ_128BIT,
XMIT_SZ_256BIT,
XMIT_SZ_8BIT = 0,
XMIT_SZ_16BIT = 1,
XMIT_SZ_32BIT = 2,
XMIT_SZ_64BIT = 7,
XMIT_SZ_128BIT = 3,
XMIT_SZ_256BIT = 4,
XMIT_SZ_128BIT_BLK = 0xb,
XMIT_SZ_256BIT_BLK = 0xc,
};

/*
* The DMA count is defined as the number of bytes to transfer.
*/
static unsigned int ts_shift[] __maybe_unused = {
[XMIT_SZ_8BIT] = 0,
[XMIT_SZ_16BIT] = 1,
[XMIT_SZ_32BIT] = 2,
[XMIT_SZ_128BIT] = 4,
[XMIT_SZ_256BIT] = 5,
};
#define TS_SHIFT { \
[XMIT_SZ_8BIT] = 0, \
[XMIT_SZ_16BIT] = 1, \
[XMIT_SZ_32BIT] = 2, \
[XMIT_SZ_64BIT] = 3, \
[XMIT_SZ_128BIT] = 4, \
[XMIT_SZ_256BIT] = 5, \
[XMIT_SZ_128BIT_BLK] = 4, \
[XMIT_SZ_256BIT_BLK] = 5, \
}

#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))

#endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
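The SH7780-family header now spreads a transfer-size index across two CHCR fields. A small illustrative program (not from the patch; it borrows the SH7724 shift values above) shows what TS_INDEX2VAL() produces:

    #include <stdio.h>

    #define CHCR_TS_LOW_SHIFT  3
    #define CHCR_TS_HIGH_SHIFT 21

    #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
                             ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))

    int main(void)
    {
            /* XMIT_SZ_32BIT = 2: fits in the low field, bits 4:3 */
            printf("0x%08x\n", TS_INDEX2VAL(2));   /* prints 0x00000010 */
            /* XMIT_SZ_256BIT_BLK = 0xc: low bits 0, high bits 3 */
            printf("0x%08x\n", TS_INDEX2VAL(0xc)); /* prints 0x00600000 */
            return 0;
    }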
@@ -6,8 +6,6 @@
#ifdef CONFIG_CPU_SH4A

#define DMAOR_INIT (DMAOR_DME)
#define CHCR_TS_MASK 0x18
#define CHCR_TS_SHIFT 3

#include <cpu/dma-sh4a.h>
#else /* CONFIG_CPU_SH4A */
@@ -29,8 +27,10 @@
#define TS_32 0x00000030
#define TS_64 0x00000000

#define CHCR_TS_MASK 0x70
#define CHCR_TS_SHIFT 4
#define CHCR_TS_LOW_MASK 0x70
#define CHCR_TS_LOW_SHIFT 4
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0

#define DMAOR_COD 0x00000008

@@ -41,23 +41,26 @@
* Defaults to a 64-bit transfer size.
*/
enum {
XMIT_SZ_64BIT,
XMIT_SZ_8BIT,
XMIT_SZ_16BIT,
XMIT_SZ_32BIT,
XMIT_SZ_256BIT,
XMIT_SZ_8BIT = 1,
XMIT_SZ_16BIT = 2,
XMIT_SZ_32BIT = 3,
XMIT_SZ_64BIT = 0,
XMIT_SZ_256BIT = 4,
};

/*
* The DMA count is defined as the number of bytes to transfer.
*/
static unsigned int ts_shift[] __maybe_unused = {
[XMIT_SZ_64BIT] = 3,
[XMIT_SZ_8BIT] = 0,
[XMIT_SZ_16BIT] = 1,
[XMIT_SZ_32BIT] = 2,
[XMIT_SZ_256BIT] = 5,
};
#define TS_SHIFT { \
[XMIT_SZ_8BIT] = 0, \
[XMIT_SZ_16BIT] = 1, \
[XMIT_SZ_32BIT] = 2, \
[XMIT_SZ_64BIT] = 3, \
[XMIT_SZ_256BIT] = 5, \
}

#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT)

#endif

#endif /* __ASM_CPU_SH4_DMA_H */
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
},
.driver_data = "F.23", /* cutoff BIOS version */
},
/*
* Acer eMachines G725 has the same problem. BIOS
* V1.03 is known to be broken. V3.04 is known to
* work. Inbetween, there are V1.06, V2.06 and V3.03
* that we don't have much idea about. For now,
* blacklist anything older than V3.04.
*/
{
.ident = "G725",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
},
.driver_data = "V3.04", /* cutoff BIOS version */
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* write indication (used for PIO/DMA setup), result TF is
* copied back and we don't whine too much about its failure.
*/
tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE)
tf->flags |= ATA_TFLAG_WRITE;
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}

if (!do_write)
flush_dcache_page(page);

qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
pid = task_pid(current);
type = PIDTYPE_PID;
}
retval = __f_setown(filp, pid, type, 0);
get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
retval = __f_setown(filp, pid, type, 0);
put_pid(pid);
if (retval)
goto out;
} else {
@@ -48,23 +48,20 @@ enum sh_dmae_desc_status {
*/
#define RS_DEFAULT (RS_DUAL)

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
u32 chcr = RS_DEFAULT; /* default is DUAL mode */
sh_dmae_writel(sh_chan, chcr, CHCR);
return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

/*
@@ -95,27 +92,30 @@ static int sh_dmae_rst(int id)
return 0;
}

static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
if (chcr & CHCR_DE) {
if (!(chcr & CHCR_TE))
return -EBUSY; /* working */
}
return 0; /* waiting */

if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
return true; /* working */

return false; /* waiting */
}

static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
static unsigned int ts_shift[] = TS_SHIFT;
static inline unsigned int calc_xmit_shift(u32 chcr)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

return ts_shift[cnt];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
sh_dmae_writel(sh_chan, hw->sar, SAR);
sh_dmae_writel(sh_chan, hw->dar, DAR);
sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -123,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
u32 chcr = sh_dmae_readl(sh_chan, CHCR);

chcr |= CHCR_DE | CHCR_IE;
sh_dmae_writel(sh_chan, chcr, CHCR);
sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
@@ -134,55 +134,50 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
u32 chcr = RS_DEFAULT; /* default is DUAL mode */
sh_chan->xmit_shift = calc_xmit_shift(chcr);
sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
int ret = dmae_is_busy(sh_chan);
/* When DMA was working, can not set data to CHCR */
if (ret)
return ret;
if (dmae_is_busy(sh_chan))
return -EBUSY;

sh_chan->xmit_shift = calc_xmit_shift(val);
sh_dmae_writel(sh_chan, val, CHCR);

return 0;
}

#define DMARS1_ADDR 0x04
#define DMARS2_ADDR 0x08
#define DMARS_SHIFT 8
#define DMARS_CHAN_MSK 0x01
#define DMARS_SHIFT 8
#define DMARS_CHAN_MSK 0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
u32 addr;
int shift = 0;
int ret = dmae_is_busy(sh_chan);
if (ret)
return ret;

if (dmae_is_busy(sh_chan))
return -EBUSY;

if (sh_chan->id & DMARS_CHAN_MSK)
shift = DMARS_SHIFT;

switch (sh_chan->id) {
/* DMARS0 */
case 0:
case 1:
addr = SH_DMARS_BASE;
break;
/* DMARS1 */
case 2:
case 3:
addr = (SH_DMARS_BASE + DMARS1_ADDR);
break;
/* DMARS2 */
case 4:
case 5:
addr = (SH_DMARS_BASE + DMARS2_ADDR);
break;
default:
if (sh_chan->id < 6)
/* DMA0RS0 - DMA0RS2 */
addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
#ifdef SH_DMARS_BASE1
else if (sh_chan->id < 12)
/* DMA1RS0 - DMA1RS2 */
addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
#endif
else
return -EINVAL;
}

ctrl_outw((val << shift) |
(ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
addr);
ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);

return 0;
}
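Each 16-bit DMARS register packs two channels: the even channel lives in the low byte, the odd channel in the high byte, which is what the new `0xFF00 >> shift` keep-mask expresses. A standalone sketch of the addressing and read-modify-write above (the array stand-in for MMIO is ours):

    #include <stdint.h>
    #include <stdio.h>

    #define DMARS_SHIFT    8
    #define DMARS_CHAN_MSK 0x01

    static uint16_t dmars_regs[3];  /* stand-in for DMARS0..DMARS2 MMIO */

    static void dmae_set_dmars(int id, uint8_t val)
    {
            int shift = (id & DMARS_CHAN_MSK) ? DMARS_SHIFT : 0;
            uint16_t *addr = &dmars_regs[id / 2];  /* BASE0 + (id / 2) * 4 */

            /* keep the partner channel's byte, replace ours */
            *addr = (val << shift) | (*addr & (0xFF00 >> shift));
    }

    int main(void)
    {
            dmae_set_dmars(2, 0x21);  /* even: low byte of DMARS1 */
            dmae_set_dmars(3, 0x43);  /* odd:  high byte of DMARS1 */
            printf("DMARS1 = 0x%04x\n", dmars_regs[1]);  /* 0x4321 */
            return 0;
    }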
@@ -250,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
return NULL;
}

static struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
{
struct dma_device *dma_dev = sh_chan->common.device;
struct sh_dmae_device *shdev = container_of(dma_dev,
struct sh_dmae_device, common);
struct sh_dmae_pdata *pdata = &shdev->pdata;
int i;

if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
return NULL;

for (i = 0; i < pdata->config_num; i++)
if (pdata->config[i].slave_id == slave_id)
return pdata->config + i;

return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
struct sh_desc *desc;
struct sh_dmae_slave *param = chan->private;

/*
* This relies on the guarantee from dmaengine that alloc_chan_resources
* never runs concurrently with itself or free_chan_resources.
*/
if (param) {
struct sh_dmae_slave_config *cfg;

cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
if (!cfg)
return -EINVAL;

if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
return -EBUSY;

param->config = cfg;

dmae_set_dmars(sh_chan, cfg->mid_rid);
dmae_set_chcr(sh_chan, cfg->chcr);
} else {
if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
dmae_set_chcr(sh_chan, RS_DEFAULT);
}

spin_lock_bh(&sh_chan->desc_lock);
while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -286,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
struct sh_desc *desc, *_desc;
LIST_HEAD(list);

dmae_halt(sh_chan);

/* Prepared and not submitted descriptors can still be on the queue */
if (!list_empty(&sh_chan->ld_queue))
sh_dmae_chan_ld_cleanup(sh_chan, true);

if (chan->private) {
/* The caller is holding dma_list_mutex */
struct sh_dmae_slave *param = chan->private;
clear_bit(param->slave_id, sh_dmae_slave_used);
}

spin_lock_bh(&sh_chan->desc_lock);

list_splice_init(&sh_chan->ld_free, &list);
@@ -301,23 +347,97 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
kfree(desc);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
size_t len, unsigned long flags)
/**
* sh_dmae_add_desc - get, set up and return one transfer descriptor
* @sh_chan: DMA channel
* @flags: DMA transfer flags
* @dest: destination DMA address, incremented when direction equals
* DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
* @src: source DMA address, incremented when direction equals
* DMA_TO_DEVICE or DMA_BIDIRECTIONAL
* @len: DMA transfer length
* @first: if NULL, set to the current descriptor and cookie set to -EBUSY
* @direction: needed for slave DMA to decide which address to keep constant,
* equals DMA_BIDIRECTIONAL for MEMCPY
* Returns 0 or an error
* Locks: called with desc_lock held
*/
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
struct sh_desc **first, enum dma_data_direction direction)
{
struct sh_dmae_chan *sh_chan;
struct sh_desc *first = NULL, *prev = NULL, *new;
struct sh_desc *new;
size_t copy_size;

if (!*len)
return NULL;

/* Allocate the link descriptor from the free list */
new = sh_dmae_get_desc(sh_chan);
if (!new) {
dev_err(sh_chan->dev, "No free link descriptor available\n");
return NULL;
}

copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

new->hw.sar = *src;
new->hw.dar = *dest;
new->hw.tcr = copy_size;

if (!*first) {
/* First desc */
new->async_tx.cookie = -EBUSY;
*first = new;
} else {
/* Other desc - invisible to the user */
new->async_tx.cookie = -EINVAL;
}

dev_dbg(sh_chan->dev,
"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
copy_size, *len, *src, *dest, &new->async_tx,
new->async_tx.cookie, sh_chan->xmit_shift);

new->mark = DESC_PREPARED;
new->async_tx.flags = flags;
new->direction = direction;

*len -= copy_size;
if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
*src += copy_size;
if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
*dest += copy_size;

return new;
}

/*
* sh_dmae_prep_sg - prepare transfer descriptors from an SG list
*
* Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
* converted to scatter-gather to guarantee consistent locking and a correct
* list manipulation. For slave DMA direction carries the usual meaning, and,
* logically, the SG list is RAM and the addr variable contains slave address,
* e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
* and the SG list contains only one element and points at the source buffer.
*/
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
enum dma_data_direction direction, unsigned long flags)
{
struct scatterlist *sg;
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
LIST_HEAD(tx_list);
int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
int chunks = 0;
int i;

if (!chan)
if (!sg_len)
return NULL;

if (!len)
return NULL;

sh_chan = to_sh_chan(chan);
for_each_sg(sgl, sg, sg_len, i)
chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
(SH_DMA_TCR_MAX + 1);

/* Have to lock the whole loop to protect against concurrent release */
spin_lock_bh(&sh_chan->desc_lock);
@@ -333,49 +453,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
* only during this function, then they are immediately spliced
* back onto the free list in form of a chain
*/
do {
/* Allocate the link descriptor from the free list */
new = sh_dmae_get_desc(sh_chan);
if (!new) {
dev_err(sh_chan->dev,
"No free memory for link descriptor\n");
list_for_each_entry(new, &tx_list, node)
new->mark = DESC_IDLE;
list_splice(&tx_list, &sh_chan->ld_free);
spin_unlock_bh(&sh_chan->desc_lock);
return NULL;
}
for_each_sg(sgl, sg, sg_len, i) {
dma_addr_t sg_addr = sg_dma_address(sg);
size_t len = sg_dma_len(sg);

copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
if (!len)
goto err_get_desc;

new->hw.sar = dma_src;
new->hw.dar = dma_dest;
new->hw.tcr = copy_size;
if (!first) {
/* First desc */
new->async_tx.cookie = -EBUSY;
first = new;
} else {
/* Other desc - invisible to the user */
new->async_tx.cookie = -EINVAL;
}
do {
dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
i, sg, len, (unsigned long long)sg_addr);

dev_dbg(sh_chan->dev,
"chaining %u of %u with %p, dst %x, cookie %d\n",
copy_size, len, &new->async_tx, dma_dest,
new->async_tx.cookie);
if (direction == DMA_FROM_DEVICE)
new = sh_dmae_add_desc(sh_chan, flags,
&sg_addr, addr, &len, &first,
direction);
else
new = sh_dmae_add_desc(sh_chan, flags,
addr, &sg_addr, &len, &first,
direction);
if (!new)
goto err_get_desc;

new->mark = DESC_PREPARED;
new->async_tx.flags = flags;
new->chunks = chunks--;

prev = new;
len -= copy_size;
dma_src += copy_size;
dma_dest += copy_size;
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &tx_list);
} while (len);
new->chunks = chunks--;
list_add_tail(&new->node, &tx_list);
} while (len);
}

if (new != first)
new->async_tx.cookie = -ENOSPC;
@@ -386,6 +489,77 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
spin_unlock_bh(&sh_chan->desc_lock);

return &first->async_tx;

err_get_desc:
list_for_each_entry(new, &tx_list, node)
new->mark = DESC_IDLE;
list_splice(&tx_list, &sh_chan->ld_free);

spin_unlock_bh(&sh_chan->desc_lock);

return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
size_t len, unsigned long flags)
{
struct sh_dmae_chan *sh_chan;
struct scatterlist sg;

if (!chan || !len)
return NULL;

chan->private = NULL;

sh_chan = to_sh_chan(chan);

sg_init_table(&sg, 1);
sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
offset_in_page(dma_src));
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;

return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_data_direction direction, unsigned long flags)
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;

if (!chan)
return NULL;

sh_chan = to_sh_chan(chan);
param = chan->private;

/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
__func__, param, sg_len, param ? param->slave_id : -1);
return NULL;
}

/*
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
*/
return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
direction, flags);
}

static void sh_dmae_terminate_all(struct dma_chan *chan)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

if (!chan)
return;

sh_dmae_chan_ld_cleanup(sh_chan, true);
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -419,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
cookie = tx->cookie;

if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
if (sh_chan->completed_cookie != desc->cookie - 1)
dev_dbg(sh_chan->dev,
"Completing cookie %d, expected %d\n",
desc->cookie,
sh_chan->completed_cookie + 1);
sh_chan->completed_cookie = desc->cookie;
}
@@ -492,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
return;
}

/* Find the first un-transfer desciptor */
/* Find the first not transferred desciptor */
list_for_each_entry(sd, &sh_chan->ld_queue, node)
if (sd->mark == DESC_SUBMITTED) {
/* Get the ld start address from ld_queue */
@@ -559,7 +737,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)

/* IRQ Multi */
if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
int cnt = 0;
int __maybe_unused cnt = 0;
switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
case DMTE6_IRQ:
@@ -596,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data)
struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
struct sh_desc *desc;
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

spin_lock(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
desc->mark == DESC_SUBMITTED) {
if (desc->mark == DESC_SUBMITTED &&
((desc->direction == DMA_FROM_DEVICE &&
(desc->hw.dar + desc->hw.tcr) == dar_buf) ||
(desc->hw.sar + desc->hw.tcr) == sar_buf)) {
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
desc->async_tx.cookie, &desc->async_tx,
desc->hw.dar);
@@ -673,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
}

snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
"sh-dmae%d", new_sh_chan->id);

/* set up channel irq */
err = request_irq(irq, &sh_dmae_interrupt, irqflags,
@@ -684,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
goto err_no_irq;
}

/* CHCR register control function */
new_sh_chan->set_chcr = dmae_set_chcr;
/* DMARS register control function */
new_sh_chan->set_dmars = dmae_set_dmars;

shdev->chan[id] = new_sh_chan;
return 0;
@@ -759,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&shdev->common.channels);

dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

shdev->common.device_alloc_chan_resources
= sh_dmae_alloc_chan_resources;
shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
shdev->common.device_is_tx_complete = sh_dmae_is_complete;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

/* Compulsory for DMA_SLAVE fields */
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
shdev->common.device_terminate_all = sh_dmae_terminate_all;

shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = 5;
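With DMA_SLAVE now advertised above, a peripheral driver can claim one of the slave channels enumerated in dma-sh.h. A rough client-side sketch, based on the chan->private convention that sh_dmae_alloc_chan_resources() reads (the filter function and device-pointer handling here are illustrative, not from this commit):

    #include <linux/dmaengine.h>

    static bool sh_chan_filter(struct dma_chan *chan, void *arg)
    {
            struct sh_dmae_slave *param = arg;

            /* seen later by sh_dmae_alloc_chan_resources() */
            chan->private = param;
            return true;
    }

    static struct dma_chan *request_scif0_tx(struct device *dmac_dev)
    {
            static struct sh_dmae_slave param = {
                    .slave_id = SHDMA_SLAVE_SCIF0_TX,
            };
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            param.dma_dev = dmac_dev;  /* must match the DMAC device */
            return dma_request_channel(mask, sh_chan_filter, &param);
    }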
@@ -29,6 +29,7 @@ struct sh_desc {
struct sh_dmae_regs hw;
struct list_head node;
struct dma_async_tx_descriptor async_tx;
enum dma_data_direction direction;
dma_cookie_t cookie;
int chunks;
int mark;
@@ -45,13 +46,9 @@ struct sh_dmae_chan {
struct device *dev; /* Channel device */
struct tasklet_struct tasklet; /* Tasklet */
int descs_allocated; /* desc count */
int xmit_shift; /* log_2(bytes_per_xfer) */
int id; /* Raw id of this channel */
char dev_id[16]; /* unique name per DMAC of channel */

/* Set chcr */
int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
/* Set DMA resource */
int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
};

struct sh_dmae_device {
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga

if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
gart_info->table_mask);
(unsigned long long)gart_info->table_mask);
ret = 1;
goto done;
}
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {

const static struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
.has_pipe_cxsr = 1,
.need_gfx_hws = 1,
.has_hotplug = 1,
};
@@ -3564,6 +3564,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t reloc_count = 0, i;
int ret = 0;

if (relocs == NULL)
return 0;

for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
int unwritten;
@@ -3653,7 +3656,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs;
struct drm_i915_gem_relocation_entry *relocs = NULL;
int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
@@ -3722,6 +3725,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3730,6 +3735,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3926,6 +3933,7 @@ err:

mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3948,6 @@ err:
ret = ret2;
}

pre_mutex_err:
drm_free_large(object_list);
kfree(cliprects);
|
||||
if (de_iir & DE_GSE)
|
||||
ironlake_opregion_gse_intr(dev);
|
||||
|
||||
if (de_iir & DE_PLANEA_FLIP_DONE)
|
||||
intel_prepare_page_flip(dev, 0);
|
||||
|
||||
if (de_iir & DE_PLANEB_FLIP_DONE)
|
||||
intel_prepare_page_flip(dev, 1);
|
||||
|
||||
if (de_iir & DE_PIPEA_VBLANK) {
|
||||
drm_handle_vblank(dev, 0);
|
||||
intel_finish_page_flip(dev, 0);
|
||||
}
|
||||
|
||||
if (de_iir & DE_PIPEB_VBLANK) {
|
||||
drm_handle_vblank(dev, 1);
|
||||
intel_finish_page_flip(dev, 1);
|
||||
}
|
||||
|
||||
/* check event from PCH */
|
||||
if ((de_iir & DE_PCH_EVENT) &&
|
||||
(pch_iir & SDE_HOTPLUG_MASK)) {
|
||||
@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
|
||||
if (!(pipeconf & PIPEACONF_ENABLE))
|
||||
return -EINVAL;
|
||||
|
||||
if (IS_IRONLAKE(dev))
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
|
||||
if (IS_I965G(dev))
|
||||
if (IS_IRONLAKE(dev))
|
||||
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
|
||||
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
|
||||
else if (IS_I965G(dev))
|
||||
i915_enable_pipestat(dev_priv, pipe,
|
||||
PIPE_START_VBLANK_INTERRUPT_ENABLE);
|
||||
else
|
||||
@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
|
||||
if (IS_IRONLAKE(dev))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
|
||||
i915_disable_pipestat(dev_priv, pipe,
|
||||
PIPE_VBLANK_INTERRUPT_ENABLE |
|
||||
PIPE_START_VBLANK_INTERRUPT_ENABLE);
|
||||
if (IS_IRONLAKE(dev))
|
||||
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
|
||||
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
|
||||
else
|
||||
i915_disable_pipestat(dev_priv, pipe,
|
||||
PIPE_VBLANK_INTERRUPT_ENABLE |
|
||||
PIPE_START_VBLANK_INTERRUPT_ENABLE);
|
||||
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
|
||||
}
|
||||
|
||||
@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
/* enable kind of interrupts always enabled */
|
||||
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
|
||||
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
|
||||
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
|
||||
u32 render_mask = GT_USER_INTERRUPT;
|
||||
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
|
||||
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
|
||||
|
||||
dev_priv->irq_mask_reg = ~display_mask;
|
||||
dev_priv->de_irq_enable_reg = display_mask;
|
||||
dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
|
||||
|
||||
/* should always can generate irq */
|
||||
I915_WRITE(DEIIR, I915_READ(DEIIR));
|
||||
|
@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
|
||||
adpa = I915_READ(PCH_ADPA);
|
||||
|
||||
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
|
||||
/* disable HPD first */
|
||||
I915_WRITE(PCH_ADPA, adpa);
|
||||
(void)I915_READ(PCH_ADPA);
|
||||
|
||||
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
|
||||
ADPA_CRT_HOTPLUG_WARMUP_10MS |
|
||||
|
@ -1638,6 +1638,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
|
||||
|
||||
drm_vblank_off(dev, pipe);
|
||||
/* Disable display plane */
|
||||
temp = I915_READ(dspcntr_reg);
|
||||
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
|
||||
@ -2519,6 +2520,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
|
||||
sr_entries = roundup(sr_entries / cacheline_size, 1);
|
||||
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
|
||||
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
|
||||
} else {
|
||||
/* Turn off self refresh if both pipes are enabled */
|
||||
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
|
||||
& ~FW_BLC_SELF_EN);
|
||||
}
|
||||
|
||||
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
|
||||
@ -2562,6 +2567,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
|
||||
srwm = 1;
|
||||
srwm &= 0x3f;
|
||||
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
|
||||
} else {
|
||||
/* Turn off self refresh if both pipes are enabled */
|
||||
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
|
||||
& ~FW_BLC_SELF_EN);
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
|
||||
@ -2630,6 +2639,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
|
||||
if (srwm < 0)
|
||||
srwm = 1;
|
||||
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
|
||||
} else {
|
||||
/* Turn off self refresh if both pipes are enabled */
|
||||
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
|
||||
& ~FW_BLC_SELF_EN);
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
|
||||
@ -3984,6 +3997,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
work = intel_crtc->unpin_work;
|
||||
if (work == NULL || !work->pending) {
|
||||
if (work && !work->pending) {
|
||||
obj_priv = work->obj->driver_private;
|
||||
DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
|
||||
obj_priv,
|
||||
atomic_read(&obj_priv->pending_flip));
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
return;
|
||||
}
|
||||
@ -4005,7 +4024,10 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
||||
obj_priv = work->obj->driver_private;
|
||||
if (atomic_dec_and_test(&obj_priv->pending_flip))
|
||||
|
||||
/* Initial scanout buffer will have a 0 pending flip count */
|
||||
if ((atomic_read(&obj_priv->pending_flip) == 0) ||
|
||||
atomic_dec_and_test(&obj_priv->pending_flip))
|
||||
DRM_WAKEUP(&dev_priv->pending_flip_queue);
|
||||
schedule_work(&work->work);
|
||||
}
|
||||
@ -4018,8 +4040,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
if (intel_crtc->unpin_work)
|
||||
if (intel_crtc->unpin_work) {
|
||||
intel_crtc->unpin_work->pending = 1;
|
||||
} else {
|
||||
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
|
||||
@ -4053,6 +4078,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
||||
/* We borrow the event spin lock for protecting unpin_work */
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
if (intel_crtc->unpin_work) {
|
||||
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
kfree(work);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
@ -4066,7 +4092,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
||||
|
||||
ret = intel_pin_and_fence_fb_obj(dev, obj);
|
||||
if (ret != 0) {
|
||||
DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
|
||||
obj->driver_private);
|
||||
kfree(work);
|
||||
intel_crtc->unpin_work = NULL;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
|
||||
{
|
||||
.ident = "Samsung SX20S",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
|
||||
},
|
||||
},
|
||||
@ -622,6 +622,13 @@ static const struct dmi_system_id bad_lid_status[] = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.ident = "Aspire 1810T",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.ident = "PC-81005",
|
||||
.matches = {
|
||||
@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
|
||||
{
|
||||
enum drm_connector_status status = connector_status_connected;
|
||||
|
||||
if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
|
||||
if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
|
||||
status = connector_status_disconnected;
|
||||
|
||||
return status;
|
||||
|
@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
|
||||
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
|
||||
intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
|
||||
(1 << INTEL_ANALOG_CLONE_BIT);
|
||||
} else if (flags & SDVO_OUTPUT_CVBS0) {
|
||||
|
||||
sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
|
||||
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
|
||||
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
|
||||
sdvo_priv->is_tv = true;
|
||||
intel_output->needs_tv_clock = true;
|
||||
intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
|
||||
} else if (flags & SDVO_OUTPUT_LVDS0) {
|
||||
|
||||
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
|
||||
|
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

/* Who ever call radeon_fence_emit should call ring_lock and ask
* for enough space (today caller are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Who ever call radeon_fence_emit should call ring_lock and ask
* for enough space (today caller are ib schedule and buffer move) */
/* We have to make sure that caches are flushed before
* CPU might read something from VRAM. */
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)

void r100_fini(struct radeon_device *rdev)
{
r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)

/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;

tmp = RREG32(RADEON_MEM_CNTL);
if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
rdev->mc.vram_width = 128;
} else {
rdev->mc.vram_width = 64;
tmp &= R300_MEM_NUM_CHANNELS_MASK;
switch (tmp) {
case 0: rdev->mc.vram_width = 64; break;
case 1: rdev->mc.vram_width = 128; break;
case 2: rdev->mc.vram_width = 256; break;
default: rdev->mc.vram_width = 128; break;
}

r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)

void r300_fini(struct radeon_device *rdev)
{
r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r420_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.align_mask = 16 - 1;
}

void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
radeon_ring_fini(rdev);
}

/*
* GPU scratch registers helpers function.
@@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
/* pin copy shader into vram */
if (rdev->r600_blit.shader_obj) {
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -2045,19 +2057,15 @@ int r600_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
return r;
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}

rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
r600_suspend(rdev);
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_wb_fini(rdev);
radeon_ring_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
@@ -2083,20 +2091,17 @@ int r600_init(struct radeon_device *rdev)

void r600_fini(struct radeon_device *rdev)
{
/* Suspend operations */
r600_suspend(rdev);

r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2900,3 +2905,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
return 0;
#endif
}

/**
* r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
* rdev: radeon device structure
* bo: buffer object struct which userspace is waiting for idle
*
* Some R6XX/R7XX doesn't seems to take into account HDP flush performed
* through ring buffer, this leads to corruption in rendering, see
* http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
* directly perform HDP flush by writing register through MMIO.
*/
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
@@ -35,7 +35,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return rdev->family >= CHIP_R600
return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -661,6 +661,13 @@ struct radeon_asic {
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
/* ioctl hw specific callback. Some hw might want to perform special
* operation on specific ioctl. For instance on wait idle some hw
* might want to perform and HDP flush through MMIO as it seems that
* some R6XX/R7XX hw doesn't take HDP flush into account if programmed
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
};

/*
@@ -1143,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

/*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

/*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);

static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
};

/*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
};

#endif
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 			   lvds->native_mode.vdisplay);

 		lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
-		if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
-			lvds->panel_vcc_delay = 2000;
+		lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);

 		lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
 		lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
@@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(dev,
 		radeon_connector->dac_load_detect = false;
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.load_detect_property,
-					      1);
+					      radeon_connector->dac_load_detect);
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.tv_std_property,
 					      radeon_combios_get_tv_info(rdev));
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gobj->driver_private;
 	r = radeon_bo_wait(robj, NULL, false);
+	/* callback hw specific functions if any */
+	if (robj->rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
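The hunk above is the dispatch side of the new hook: the ioctl only calls ioctl_wait_idle when the ASIC table provides one, so NULL entries cost nothing. A minimal standalone C sketch of this optional-callback pattern (all names here are illustrative, not taken from the radeon code):

    #include <stdio.h>

    struct device;                          /* opaque stand-in for the real device */

    /* per-chip operation table; a NULL slot means "no special handling" */
    struct chip_ops {
            const char *name;
            void (*wait_idle_quirk)(struct device *dev);
    };

    static void mmio_hdp_flush(struct device *dev)
    {
            (void)dev;
            printf("HDP flush via MMIO\n"); /* stands in for the register write */
    }

    static const struct chip_ops r100_ops = { "r100", NULL };
    static const struct chip_ops r600_ops = { "r600", mmio_hdp_flush };

    static void wait_idle_ioctl(const struct chip_ops *ops, struct device *dev)
    {
            /* ... wait for the buffer object to go idle ... */
            if (ops->wait_idle_quirk)       /* dispatch only when a hook exists */
                    ops->wait_idle_quirk(dev);
    }

    int main(void)
    {
            wait_idle_ioctl(&r100_ops, NULL); /* no quirk: nothing extra happens */
            wait_idle_ioctl(&r600_ops, NULL); /* prints the flush */
            return 0;
    }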
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	return 0;
 }

+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(0x0150);
+		if (tmp & (1 << 2)) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
 void rs400_gpu_init(struct radeon_device *rdev)
 {
 	/* FIXME: HDP same place on rs400 ? */
 	r100_hdp_reset(rdev);
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
-	if (r300_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
+	if (rs400_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
		       "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
 	}
 }
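rs400_mc_wait_for_idle() is the standard bounded-poll idiom: read a status register, test the idle bit, delay, and give up after a fixed iteration budget. A hedged standalone model of the same loop (the register read and the delay are stand-ins, not the driver's):

    #include <stdio.h>

    #define MC_IDLE_BIT (1u << 2)

    static unsigned int fake_reads;         /* stands in for RREG32(0x0150) */

    static unsigned int read_mc_status(void)
    {
            return ++fake_reads >= 5 ? MC_IDLE_BIT : 0;
    }

    /* returns 0 once the idle bit is seen, -1 if the budget runs out */
    static int mc_wait_for_idle(unsigned int usec_timeout)
    {
            unsigned int i;

            for (i = 0; i < usec_timeout; i++) {
                    if (read_mc_status() & MC_IDLE_BIT)
                            return 0;
                    /* the driver sleeps 1 us here (DRM_UDELAY); omitted */
            }
            return -1;
    }

    int main(void)
    {
            printf("wait: %d\n", mc_wait_for_idle(100000));
            return 0;
    }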
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
 	r100_mc_stop(rdev, &save);

 	/* Wait for mc idle */
-	if (r300_mc_wait_for_idle(rdev))
-		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	if (rs400_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
 	WREG32(R_000148_MC_FB_LOCATION,
 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)

 void rs400_fini(struct radeon_device *rdev)
 {
-	rs400_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
 	if (r) {
 		/* Something went wrong with the accel init; stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs400_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)

 void rs600_fini(struct radeon_device *rdev)
 {
-	rs600_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
 	if (r) {
 		/* Something went wrong with the accel init; stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs600_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)

 void rs690_fini(struct radeon_device *rdev)
 {
-	rs690_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
 	if (r) {
 		/* Something went wrong with the accel init; stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs690_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)

 void rv515_fini(struct radeon_device *rdev)
 {
-	rv515_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
 	if (r) {
 		/* Something went wrong with the accel init; stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rv515_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv370_pcie_gart_fini(rdev);
 		radeon_agp_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
@@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev)
 		return r;
 	}
 	rv770_gpu_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
 	/* pin copy shader into vram */
 	if (rdev->r600_blit.shader_obj) {
 		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev)
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
-	r = r600_blit_init(rdev);
-	if (r) {
-		r600_blit_fini(rdev);
-		rdev->asic->copy = NULL;
-		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-	}

 	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
-		rv770_suspend(rdev);
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
 		r600_wb_fini(rdev);
-		radeon_ring_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv770_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
@@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev)

 void rv770_fini(struct radeon_device *rdev)
 {
-	rv770_suspend(rdev);
-
 	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_ring_fini(rdev);
-	r600_wb_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
  *
  * Some, but not all, of these voltages have low/high limits.
  */
-#define ADT7462_VOLT_COUNT	12
+#define ADT7462_VOLT_COUNT	13

 #define ADT7462_VENDOR		0x41
 #define ADT7462_DEVICE		0x62
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
 static int __init lm78_isa_found(unsigned short address)
 {
 	int val, save, found = 0;
+	int port;

-	/* We have to request the region in two parts because some
-	   boards declare base+4 to base+7 as a PNP device */
-	if (!request_region(address, 4, "lm78")) {
-		pr_debug("lm78: Failed to request low part of region\n");
-		return 0;
-	}
-	if (!request_region(address + 4, 4, "lm78")) {
-		pr_debug("lm78: Failed to request high part of region\n");
-		release_region(address, 4);
-		return 0;
+	/* Some boards declare base+0 to base+7 as a PNP device, some base+4
+	 * to base+7 and some base+5 to base+6. So we better request each port
+	 * individually for the probing phase. */
+	for (port = address; port < address + LM78_EXTENT; port++) {
+		if (!request_region(port, 1, "lm78")) {
+			pr_debug("lm78: Failed to request port 0x%x\n", port);
+			goto release;
+		}
 	}

 #define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
 		val & 0x80 ? "LM79" : "LM78", (int)address);

 release:
-	release_region(address + 4, 4);
-	release_region(address, 4);
+	for (port--; port >= address; port--)
+		release_region(port, 1);
 	return found;
 }
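The probing change replaces two block-sized request_region() calls with one request per port, so a single port already claimed by a PNP device no longer sinks the whole probe; on failure, everything acquired so far is released in reverse order. A standalone sketch of that request-with-rollback shape (request_one_port/release_one_port are stand-ins for request_region/release_region):

    #include <stdio.h>

    #define EXTENT 8

    static int request_one_port(unsigned short port)
    {
            /* pretend the port at base+5 is already claimed elsewhere */
            return port % EXTENT != 5;
    }

    static void release_one_port(unsigned short port)
    {
            printf("released 0x%x\n", port);
    }

    /* request each port individually; on failure, roll back what we got */
    static int probe_region(unsigned short address)
    {
            unsigned short port;

            for (port = address; port < address + EXTENT; port++)
                    if (!request_one_port(port))
                            goto release;
            return 1;
    release:
            for (port--; port >= address; port--)
                    release_one_port(port);
            return 0;
    }

    int main(void)
    {
            printf("probe: %d\n", probe_region(0x290));
            return 0;
    }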
@@ -1793,17 +1793,17 @@ static int __init
 w83781d_isa_found(unsigned short address)
 {
 	int val, save, found = 0;
+	int port;

-	/* We have to request the region in two parts because some
-	   boards declare base+4 to base+7 as a PNP device */
-	if (!request_region(address, 4, "w83781d")) {
-		pr_debug("w83781d: Failed to request low part of region\n");
-		return 0;
-	}
-	if (!request_region(address + 4, 4, "w83781d")) {
-		pr_debug("w83781d: Failed to request high part of region\n");
-		release_region(address, 4);
-		return 0;
+	/* Some boards declare base+0 to base+7 as a PNP device, some base+4
+	 * to base+7 and some base+5 to base+6. So we better request each port
+	 * individually for the probing phase. */
+	for (port = address; port < address + W83781D_EXTENT; port++) {
+		if (!request_region(port, 1, "w83781d")) {
+			pr_debug("w83781d: Failed to request port 0x%x\n",
+				 port);
+			goto release;
+		}
 	}

 #define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
 		val == 0x30 ? "W83782D" : "W83781D", (int)address);

 release:
-	release_region(address + 4, 4);
-	release_region(address, 4);
+	for (port--; port >= address; port--)
+		release_region(port, 1);
 	return found;
 }
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);

+/*
+ * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
+ * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
+ * BAR0 should be 8 bytes; instead, it may be set to something like 8k
+ * (which conflicts w/ BAR1's memory range).
+ */
+static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
+{
+	if (pci_resource_len(dev, 0) != 8) {
+		struct resource *res = &dev->resource[0];
+		res->end = res->start + 8 - 1;
+		dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
+				"(incorrect header); workaround applied.\n");
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
+
 static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
 	unsigned size, int nr, const char *name)
 {
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
 	brelse(bh);

 unacquire_priv_sbp:
+	kfree(befs_sb->mount_opts.iocharset);
 	kfree(sb->s_fs_info);

 unacquire_none:
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 	if (!sb)
 		goto out;
 	if (sb->s_flags & MS_RDONLY) {
-		deactivate_locked_super(sb);
+		sb->s_frozen = SB_FREEZE_TRANS;
+		up_write(&sb->s_umount);
 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
 		return sb;
 	}
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
 	BUG_ON(sb->s_bdev != bdev);
 	down_write(&sb->s_umount);
 	if (sb->s_flags & MS_RDONLY)
-		goto out_deactivate;
+		goto out_unfrozen;

 	if (sb->s_op->unfreeze_fs) {
 		error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
 		}
 	}

+out_unfrozen:
 	sb->s_frozen = SB_UNFROZEN;
 	smp_wmb();
 	wake_up(&sb->s_wait_unfrozen);

-out_deactivate:
 	if (sb)
 		deactivate_locked_super(sb);
 out_unlock:
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,

 	if (!(sb->s_flags & MS_RDONLY)) {
 		ret = btrfs_recover_relocation(tree_root);
-		BUG_ON(ret);
+		if (ret < 0) {
+			printk(KERN_WARNING
+			       "btrfs: failed to recover relocation\n");
+			err = -EINVAL;
+			goto fail_trans_kthread;
+		}
 	}

 	location.objectid = BTRFS_FS_TREE_OBJECTID;
@@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 	int ret;

 	while (level >= 0) {
-		if (path->slots[level] >=
-		    btrfs_header_nritems(path->nodes[level]))
-			break;
-
 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
 		if (ret > 0)
 			break;
@@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		if (level == 0)
 			break;

+		if (path->slots[level] >=
+		    btrfs_header_nritems(path->nodes[level]))
+			break;
+
 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
 		if (ret > 0) {
 			path->slots[level]++;
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		spin_unlock(&tree->buffer_lock);
 		goto free_eb;
 	}
-	spin_unlock(&tree->buffer_lock);
-
 	/* add one reference for the tree */
 	atomic_inc(&eb->refs);
+	spin_unlock(&tree->buffer_lock);
 	return eb;

 free_eb:
@@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
 	}
 	mutex_lock(&dentry->d_inode->i_mutex);
 out:
-	return ret > 0 ? EIO : ret;
+	return ret > 0 ? -EIO : ret;
 }

 static const struct vm_operations_struct btrfs_file_vm_ops = {
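The one-character btrfs fix above matters because kernel code signals failure with negative errno values; a bare EIO is a positive number that callers would not treat as an error. A tiny userspace illustration using the same errno constant:

    #include <errno.h>
    #include <stdio.h>

    static int sync_result(int ret)
    {
            return ret > 0 ? -EIO : ret; /* negative errno signals failure */
    }

    int main(void)
    {
            printf("%d\n", sync_result(1)); /* prints -5 on typical Linux */
            return 0;
    }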
@@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
  * before we start the transaction.  It limits the amount of btree
  * reads required while inside the transaction.
  */
-static noinline void reada_csum(struct btrfs_root *root,
-				struct btrfs_path *path,
-				struct btrfs_ordered_extent *ordered_extent)
-{
-	struct btrfs_ordered_sum *sum;
-	u64 bytenr;
-
-	sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
-			 list);
-	bytenr = sum->sums[0].bytenr;
-
-	/*
-	 * we don't care about the results, the point of this search is
-	 * just to get the btree leaves into ram
-	 */
-	btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
-}
-
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_ordered_extent *ordered_extent = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	struct btrfs_path *path;
 	int compressed = 0;
 	int ret;

@@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	if (!ret)
 		return 0;

-	/*
-	 * before we join the transaction, try to do some of our IO.
-	 * This will limit the amount of IO that we have to do with
-	 * the transaction running.  We're unlikely to need to do any
-	 * IO if the file extents are new, the disk_i_size checks
-	 * covers the most common case.
-	 */
-	if (start < BTRFS_I(inode)->disk_i_size) {
-		path = btrfs_alloc_path();
-		if (path) {
-			ret = btrfs_lookup_file_extent(NULL, root, path,
-						       inode->i_ino,
-						       start, 0);
-			ordered_extent = btrfs_lookup_ordered_extent(inode,
-								     start);
-			if (!list_empty(&ordered_extent->list)) {
-				btrfs_release_path(root, path);
-				reada_csum(root, path, ordered_extent);
-			}
-			btrfs_free_path(path);
-		}
-	}
-
-	if (!ordered_extent)
-		ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
 	BUG_ON(!ordered_extent);

 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
 		BUG_ON(!list_empty(&ordered_extent->list));
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
 		inode->i_ctime = CURRENT_TIME;
 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-		    cur_offset > inode->i_size) {
+		    (actual_len > inode->i_size) &&
+		    (cur_offset > inode->i_size)) {

 			if (cur_offset > actual_len)
 				i_size = actual_len;
 			else
@@ -3764,7 +3764,8 @@ out:
 					       BTRFS_DATA_RELOC_TREE_OBJECTID);
 		if (IS_ERR(fs_root))
 			err = PTR_ERR(fs_root);
-		btrfs_orphan_cleanup(fs_root);
+		else
+			btrfs_orphan_cleanup(fs_root);
 	}
 	return err;
 }
@@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
 			int force)
 {
-	unsigned long flags;
-
-	write_lock_irqsave(&filp->f_owner.lock, flags);
+	write_lock_irq(&filp->f_owner.lock);
 	if (force || !filp->f_owner.pid) {
 		put_pid(filp->f_owner.pid);
 		filp->f_owner.pid = get_pid(pid);
@@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
 			filp->f_owner.euid = cred->euid;
 		}
 	}
-	write_unlock_irqrestore(&filp->f_owner.lock, flags);
+	write_unlock_irq(&filp->f_owner.lock);
 }

 int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
@@ -253,6 +253,7 @@ void __fput(struct file *file)
 	if (file->f_op && file->f_op->release)
 		file->f_op->release(inode, file);
 	security_file_free(file);
+	ima_file_free(file);
 	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
 		cdev_put(inode->i_cdev);
 	fops_put(file->f_op);
@@ -1736,8 +1736,7 @@ do_last:
 		if (nd.root.mnt)
 			path_put(&nd.root);
 		if (!IS_ERR(filp)) {
-			error = ima_path_check(&filp->f_path, filp->f_mode &
-					       (MAY_READ | MAY_WRITE | MAY_EXEC));
+			error = ima_file_check(filp, acc_mode);
 			if (error) {
 				fput(filp);
 				filp = ERR_PTR(error);
@@ -1797,8 +1796,7 @@ ok:
 	}
 	filp = nameidata_to_filp(&nd);
 	if (!IS_ERR(filp)) {
-		error = ima_path_check(&filp->f_path, filp->f_mode &
-				       (MAY_READ | MAY_WRITE | MAY_EXEC));
+		error = ima_file_check(filp, acc_mode);
 		if (error) {
 			fput(filp);
 			filp = ERR_PTR(error);
@@ -752,6 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 			    flags, current_cred());
 	if (IS_ERR(*filp))
 		host_err = PTR_ERR(*filp);
+	host_err = ima_file_check(*filp, access);
 out_nfserr:
 	err = nfserrno(host_err);
 out:
@@ -2127,7 +2128,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
 	 */
 	path.mnt = exp->ex_path.mnt;
 	path.dentry = dentry;
-	err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
 nfsd_out:
 	return err? nfserrno(err) : 0;
 }
@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
 	return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
 }

-static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
+static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
 {
-	return id[ATA_ID_SECTOR_SIZE] & 0xf;
+	return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
 }

 static inline int ata_id_has_lba48(const u16 *id)
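The ata.h change reflects that ATA identify word 106 stores logical-sectors-per-physical as a log2 exponent in its low nibble, so the raw field must be expanded with a shift, and the expanded value (up to 2^15) no longer fits in a u8. A small standalone check (the example word value is made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* word 106 low nibble = log2(logical sectors per physical sector) */
    static uint16_t logical_per_physical(uint16_t id_word_106)
    {
            return 1u << (id_word_106 & 0xf);
    }

    int main(void)
    {
            printf("%u\n", logical_per_physical(0x6003)); /* 2^3 = 8 */
            return 0;
    }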
@@ -15,6 +15,7 @@
 # define __acquire(x)	__context__(x,1)
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu	__attribute__((noderef, address_space(3)))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __acquire(x) (void)0
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
+# define __percpu
 #endif

 #ifdef __KERNEL__
@@ -17,7 +17,7 @@ struct linux_binprm;
 extern int ima_bprm_check(struct linux_binprm *bprm);
 extern int ima_inode_alloc(struct inode *inode);
 extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask);
+extern int ima_file_check(struct file *file, int mask);
 extern void ima_file_free(struct file *file);
 extern int ima_file_mmap(struct file *file, unsigned long prot);
 extern void ima_counts_get(struct file *file);
@@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
 	return;
 }

-static inline int ima_path_check(struct path *path, int mask)
+static inline int ima_file_check(struct file *file, int mask)
 {
 	return 0;
 }
@@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void)
 	proc_caches_init();
 	buffer_init();
 	key_init();
+	radix_tree_init();
 	security_init();
 	vfs_caches_init(totalram_pages);
-	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
@@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
 			goto out_pm;

 		err = -ENODEV;
+		if (node < 0 || node >= MAX_NUMNODES)
+			goto out_pm;
+
 		if (!node_state(node, N_HIGH_MEMORY))
 			goto out_pm;
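The added check validates the user-supplied node number against MAX_NUMNODES before it is ever used to index node state. A standalone sketch of that validate-then-use ordering (the array and constant are stand-ins for the kernel's node-state bitmaps):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_NUMNODES 64

    static int node_online[MAX_NUMNODES];      /* stands in for node_state() */

    static int check_node(int node)
    {
            if (node < 0 || node >= MAX_NUMNODES)  /* bound the untrusted index first */
                    return -ENODEV;
            if (!node_online[node])                /* only then is the lookup safe */
                    return -ENODEV;
            return 0;
    }

    int main(void)
    {
            node_online[1] = 1;
            printf("%d %d %d\n", check_node(-5), check_node(1), check_node(999));
            return 0;
    }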
@@ -65,7 +65,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
 			 const char *cause, int result, int info);

 /* Internal IMA function definitions */
-void ima_iintcache_init(void);
 int ima_init(void);
 void ima_cleanup(void);
 int ima_fs_init(void);
@@ -131,7 +130,7 @@ void iint_free(struct kref *kref);
 void iint_rcu_free(struct rcu_head *rcu);

 /* IMA policy related functions */
-enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };

 int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
 void ima_init_policy(void);
@@ -95,12 +95,12 @@ err_out:
  * ima_must_measure - measure decision based on policy.
  * @inode: pointer to inode to measure
  * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
  *
  * The policy is defined in terms of keypairs:
  *	subj=, obj=, type=, func=, mask=, fsmagic=
  *	subj, obj, and type: are LSM specific.
- *	func: PATH_CHECK | BPRM_CHECK | FILE_MMAP
+ *	func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
  *	mask: contains the permission mask
  *	fsmagic: hex value
  *
@@ -52,9 +52,6 @@ int ima_inode_alloc(struct inode *inode)
 	struct ima_iint_cache *iint = NULL;
 	int rc = 0;

-	if (!ima_initialized)
-		return 0;
-
 	iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
 	if (!iint)
 		return -ENOMEM;
@@ -118,8 +115,6 @@ void ima_inode_free(struct inode *inode)
 {
 	struct ima_iint_cache *iint;

-	if (!ima_initialized)
-		return;
 	spin_lock(&ima_iint_lock);
 	iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
 	spin_unlock(&ima_iint_lock);
@@ -141,9 +136,11 @@ static void init_once(void *foo)
 	kref_set(&iint->refcount, 1);
 }

-void __init ima_iintcache_init(void)
+static int __init ima_iintcache_init(void)
 {
 	iint_cache =
 	    kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
 			      SLAB_PANIC, init_once);
+	return 0;
 }
+security_initcall(ima_iintcache_init);
@@ -14,7 +14,7 @@
  *
  * File: ima_main.c
  *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
- *	and ima_path_check.
+ *	and ima_file_check.
  */
 #include <linux/module.h>
 #include <linux/file.h>
@@ -84,6 +84,36 @@ out:
 	return found;
 }

+/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
+ *
+ * When opening a file for read, if the file is already open for write,
+ * the file could change, resulting in a file measurement error.
+ *
+ * Opening a file for write, if the file is already open for read, results
+ * in a time of measure, time of use (ToMToU) error.
+ *
+ * In either case invalidate the PCR.
+ */
+enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
+static void ima_read_write_check(enum iint_pcr_error error,
+				 struct ima_iint_cache *iint,
+				 struct inode *inode,
+				 const unsigned char *filename)
+{
+	switch (error) {
+	case TOMTOU:
+		if (iint->readcount > 0)
+			ima_add_violation(inode, filename, "invalid_pcr",
+					  "ToMToU");
+		break;
+	case OPEN_WRITERS:
+		if (iint->writecount > 0)
+			ima_add_violation(inode, filename, "invalid_pcr",
+					  "open_writers");
+		break;
+	}
+}
+
 /*
  * Update the counts given an fmode_t
  */
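The whole violation decision above hangs off two counters kept per inode: readers present when a writer opens means a ToMToU error, writers present when a reader opens means the measurement may already be stale, and either invalidates the PCR. A standalone model of that bookkeeping (simplified; no locking or PCR interaction shown):

    #include <stdio.h>

    struct iint { int readcount, writecount; };

    /* record an open and report any read/write conflict it creates */
    static void open_check(struct iint *i, int for_write)
    {
            if (for_write && i->readcount > 0)
                    printf("violation: ToMToU\n");
            if (!for_write && i->writecount > 0)
                    printf("violation: open_writers\n");
            if (for_write)
                    i->writecount++;
            else
                    i->readcount++;
    }

    int main(void)
    {
            struct iint i = { 0, 0 };
            open_check(&i, 0); /* reader: fine */
            open_check(&i, 1); /* writer while read: ToMToU */
            open_check(&i, 0); /* reader while written: open_writers */
            return 0;
    }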
@@ -98,6 +128,47 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
 	iint->writecount++;
 }

+/*
+ * ima_counts_get - increment file counts
+ *
+ * Maintain read/write counters for all files, but only
+ * invalidate the PCR for measured files:
+ *	- Opening a file for write when already open for read,
+ *	  results in a time of measure, time of use (ToMToU) error.
+ *	- Opening a file for read when already open for write,
+ *	  could result in a file measurement error.
+ *
+ */
+void ima_counts_get(struct file *file)
+{
+	struct dentry *dentry = file->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
+	fmode_t mode = file->f_mode;
+	struct ima_iint_cache *iint;
+	int rc;
+
+	if (!ima_initialized || !S_ISREG(inode->i_mode))
+		return;
+	iint = ima_iint_find_get(inode);
+	if (!iint)
+		return;
+	mutex_lock(&iint->mutex);
+	rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
+	if (rc < 0)
+		goto out;
+
+	if (mode & FMODE_WRITE) {
+		ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
+		goto out;
+	}
+	ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
+out:
+	ima_inc_counts(iint, file->f_mode);
+	mutex_unlock(&iint->mutex);
+
+	kref_put(&iint->refcount, iint_free);
+}
+
 /*
  * Decrement ima counts
  */
@@ -153,123 +224,6 @@ void ima_file_free(struct file *file)
 	kref_put(&iint->refcount, iint_free);
 }

-/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
- *
- * When opening a file for read, if the file is already open for write,
- * the file could change, resulting in a file measurement error.
- *
- * Opening a file for write, if the file is already open for read, results
- * in a time of measure, time of use (ToMToU) error.
- *
- * In either case invalidate the PCR.
- */
-enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
-static void ima_read_write_check(enum iint_pcr_error error,
-				 struct ima_iint_cache *iint,
-				 struct inode *inode,
-				 const unsigned char *filename)
-{
-	switch (error) {
-	case TOMTOU:
-		if (iint->readcount > 0)
-			ima_add_violation(inode, filename, "invalid_pcr",
-					  "ToMToU");
-		break;
-	case OPEN_WRITERS:
-		if (iint->writecount > 0)
-			ima_add_violation(inode, filename, "invalid_pcr",
-					  "open_writers");
-		break;
-	}
-}
-
-static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
-				const unsigned char *filename)
-{
-	int rc = 0;
-
-	ima_inc_counts(iint, file->f_mode);
-
-	rc = ima_collect_measurement(iint, file);
-	if (!rc)
-		ima_store_measurement(iint, file, filename);
-	return rc;
-}
-
-/**
- * ima_path_check - based on policy, collect/store measurement.
- * @path: contains a pointer to the path to be measured
- * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
- *
- * Measure the file being open for readonly, based on the
- * ima_must_measure() policy decision.
- *
- * Keep read/write counters for all files, but only
- * invalidate the PCR for measured files:
- *	- Opening a file for write when already open for read,
- *	  results in a time of measure, time of use (ToMToU) error.
- *	- Opening a file for read when already open for write,
- *	  could result in a file measurement error.
- *
- * Always return 0 and audit dentry_open failures.
- * (Return code will be based upon measurement appraisal.)
- */
-int ima_path_check(struct path *path, int mask)
-{
-	struct inode *inode = path->dentry->d_inode;
-	struct ima_iint_cache *iint;
-	struct file *file = NULL;
-	int rc;
-
-	if (!ima_initialized || !S_ISREG(inode->i_mode))
-		return 0;
-	iint = ima_iint_find_get(inode);
-	if (!iint)
-		return 0;
-
-	mutex_lock(&iint->mutex);
-
-	rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
-	if (rc < 0)
-		goto out;
-
-	if ((mask & MAY_WRITE) || (mask == 0))
-		ima_read_write_check(TOMTOU, iint, inode,
-				     path->dentry->d_name.name);
-
-	if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
-		goto out;
-
-	ima_read_write_check(OPEN_WRITERS, iint, inode,
-			     path->dentry->d_name.name);
-	if (!(iint->flags & IMA_MEASURED)) {
-		struct dentry *dentry = dget(path->dentry);
-		struct vfsmount *mnt = mntget(path->mnt);
-
-		file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
-				   current_cred());
-		if (IS_ERR(file)) {
-			int audit_info = 0;
-
-			integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
-					    dentry->d_name.name,
-					    "add_measurement",
-					    "dentry_open failed",
-					    1, audit_info);
-			file = NULL;
-			goto out;
-		}
-		rc = get_path_measurement(iint, file, dentry->d_name.name);
-	}
-out:
-	mutex_unlock(&iint->mutex);
-	if (file)
-		fput(file);
-	kref_put(&iint->refcount, iint_free);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ima_path_check);
-
 static int process_measurement(struct file *file, const unsigned char *filename,
 				int mask, int function)
 {
@@ -297,33 +251,6 @@ out:
 	return rc;
 }

-/*
- * ima_counts_get - increment file counts
- *
- * - for IPC shm and shmat file.
- * - for nfsd exported files.
- *
- * Increment the counts for these files to prevent unnecessary
- * imbalance messages.
- */
-void ima_counts_get(struct file *file)
-{
-	struct inode *inode = file->f_dentry->d_inode;
-	struct ima_iint_cache *iint;
-
-	if (!ima_initialized || !S_ISREG(inode->i_mode))
-		return;
-	iint = ima_iint_find_get(inode);
-	if (!iint)
-		return;
-	mutex_lock(&iint->mutex);
-	ima_inc_counts(iint, file->f_mode);
-	mutex_unlock(&iint->mutex);
-
-	kref_put(&iint->refcount, iint_free);
-}
-EXPORT_SYMBOL_GPL(ima_counts_get);
-
 /**
  * ima_file_mmap - based on policy, collect/store measurement.
  * @file: pointer to the file to be measured (May be NULL)
@@ -369,11 +296,31 @@ int ima_bprm_check(struct linux_binprm *bprm)
 	return 0;
 }

+/**
+ * ima_file_check - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured
+ * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
+ *
+ * Measure files based on the ima_must_measure() policy decision.
+ *
+ * Always return 0 and audit dentry_open failures.
+ * (Return code will be based upon measurement appraisal.)
+ */
+int ima_file_check(struct file *file, int mask)
+{
+	int rc;
+
+	rc = process_measurement(file, file->f_dentry->d_name.name,
+				 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+				 FILE_CHECK);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ima_file_check);
+
 static int __init init_ima(void)
 {
 	int error;

-	ima_iintcache_init();
 	error = ima_init();
 	ima_initialized = 1;
 	return error;
@@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = {
 	 .flags = IMA_FUNC | IMA_MASK},
 	{.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
 	 .flags = IMA_FUNC | IMA_MASK},
-	{.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0,
+	{.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
 	 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
 };
@@ -282,8 +282,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
 		break;
 	case Opt_func:
 		audit_log_format(ab, "func=%s ", args[0].from);
-		if (strcmp(args[0].from, "PATH_CHECK") == 0)
-			entry->func = PATH_CHECK;
+		if (strcmp(args[0].from, "FILE_CHECK") == 0)
+			entry->func = FILE_CHECK;
+		/* PATH_CHECK is for backwards compat */
+		else if (strcmp(args[0].from, "PATH_CHECK") == 0)
+			entry->func = FILE_CHECK;
 		else if (strcmp(args[0].from, "FILE_MMAP") == 0)
 			entry->func = FILE_MMAP;
 		else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
@@ -666,8 +666,6 @@ int security_file_alloc(struct file *file)
 void security_file_free(struct file *file)
 {
 	security_ops->file_free_security(file);
-	if (file->f_dentry)
-		ima_file_free(file);
 }

 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -166,18 +166,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)

 static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
 {
-	struct ct_vm *vm;
-	void *kvirt_addr;
-	unsigned long phys_addr;
-
-	vm = atc->vm;
-	kvirt_addr = vm->get_ptp_virt(vm, index);
-	if (kvirt_addr == NULL)
-		phys_addr = (~0UL);
-	else
-		phys_addr = virt_to_phys(kvirt_addr);
-
-	return phys_addr;
+	return atc->vm->get_ptp_phys(atc->vm, index);
 }

 static unsigned int convert_format(snd_pcm_format_t snd_format)
@@ -1669,7 +1658,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
 	}

 	/* Set up device virtual memory management object */
-	err = ct_vm_create(&atc->vm);
+	err = ct_vm_create(&atc->vm, pci);
 	if (err < 0)
 		goto error1;
@@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
 		return NULL;
 	}

-	ptp = vm->ptp[0];
+	ptp = (unsigned long *)vm->ptp[0].area;
 	pte_start = (block->addr >> CT_PAGE_SHIFT);
 	pages = block->size >> CT_PAGE_SHIFT;
 	for (i = 0; i < pages; i++) {
@@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
 }

 /* *
- * return the host (kmalloced) addr of the @index-th device
- * page talbe page on success, or NULL on failure.
- * The first returned NULL indicates the termination.
+ * return the host physical addr of the @index-th device
+ * page table page on success, or ~0UL on failure.
+ * The first returned ~0UL indicates the termination.
  * */
-static void *
-ct_get_ptp_virt(struct ct_vm *vm, int index)
+static dma_addr_t
+ct_get_ptp_phys(struct ct_vm *vm, int index)
 {
-	void *addr;
+	dma_addr_t addr;

-	addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];
+	addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;

 	return addr;
 }

-int ct_vm_create(struct ct_vm **rvm)
+int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
 {
 	struct ct_vm *vm;
 	struct ct_vm_block *block;
-	int i;
+	int i, err = 0;

 	*rvm = NULL;

@@ -188,23 +188,21 @@ int ct_vm_create(struct ct_vm **rvm)

 	/* Allocate page table pages */
 	for (i = 0; i < CT_PTP_NUM; i++) {
-		vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!vm->ptp[i])
+		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
+					  snd_dma_pci_data(pci),
+					  PAGE_SIZE, &vm->ptp[i]);
+		if (err < 0)
 			break;
 	}
-	if (!i) {
+	if (err < 0) {
 		/* no page table pages are allocated */
-		kfree(vm);
+		ct_vm_destroy(vm);
 		return -ENOMEM;
 	}
 	vm->size = CT_ADDRS_PER_PAGE * i;
-	/* Initialise remaining ptps */
-	for (; i < CT_PTP_NUM; i++)
-		vm->ptp[i] = NULL;
-
 	vm->map = ct_vm_map;
 	vm->unmap = ct_vm_unmap;
-	vm->get_ptp_virt = ct_get_ptp_virt;
+	vm->get_ptp_phys = ct_get_ptp_phys;
 	INIT_LIST_HEAD(&vm->unused);
 	INIT_LIST_HEAD(&vm->used);
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
@@ -242,7 +240,7 @@ void ct_vm_destroy(struct ct_vm *vm)

 	/* free allocated page table pages */
 	for (i = 0; i < CT_PTP_NUM; i++)
-		kfree(vm->ptp[i]);
+		snd_dma_free_pages(&vm->ptp[i]);

 	vm->size = 0;
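The ctvmem rework swaps kmalloc'd page-table pages for snd_dma_alloc_pages() buffers because the hardware walks these pages itself: the driver must hand the device a bus/DMA address, not a kernel-virtual pointer, and struct snd_dma_buffer carries both views together. A standalone model of keeping the two addresses paired (the DMA address here is faked from the CPU pointer purely for illustration; real code gets it from the DMA allocator):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* CPU view and device view of one buffer, kept side by side */
    struct dma_buffer {
            void     *area; /* what the CPU dereferences */
            uint64_t  addr; /* what the device is programmed with */
    };

    static int alloc_dma_buffer(size_t size, struct dma_buffer *buf)
    {
            buf->area = malloc(size);  /* stands in for the coherent allocation */
            if (!buf->area)
                    return -1;
            /* fake device address: real code must use the allocator's value */
            buf->addr = (uint64_t)(uintptr_t)buf->area;
            return 0;
    }

    int main(void)
    {
            struct dma_buffer ptp;

            if (alloc_dma_buffer(4096, &ptp))
                    return 1;
            printf("cpu %p dev 0x%llx\n", ptp.area,
                   (unsigned long long)ptp.addr);
            free(ptp.area);
            return 0;
    }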
@@ -22,6 +22,8 @@

 #include <linux/mutex.h>
 #include <linux/list.h>
+#include <linux/pci.h>
+#include <sound/memalloc.h>

 /* The chip can handle the page table of 4k pages
  * (emu20k1 can handle even 8k pages, but we don't use it right now)
@@ -41,7 +43,7 @@ struct snd_pcm_substream;

 /* Virtual memory management object for card device */
 struct ct_vm {
-	void *ptp[CT_PTP_NUM];		/* Device page table pages */
+	struct snd_dma_buffer ptp[CT_PTP_NUM];	/* Device page table pages */
 	unsigned int size;		/* Available addr space in bytes */
 	struct list_head unused;	/* List of unused blocks */
 	struct list_head used;		/* List of used blocks */
@@ -52,10 +54,10 @@ struct ct_vm {
 		   int size);
 	/* Unmap device logical addr area. */
 	void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
-	void *(*get_ptp_virt)(struct ct_vm *vm, int index);
+	dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
 };

-int ct_vm_create(struct ct_vm **rvm);
+int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
 void ct_vm_destroy(struct ct_vm *vm);

 #endif /* CTVMEM_H */
@@ -426,6 +426,7 @@ struct azx {

 	/* flags */
 	int position_fix;
+	int poll_count;
 	unsigned int running :1;
 	unsigned int initialized :1;
 	unsigned int single_cmd :1;
@@ -506,7 +507,7 @@ static char *driver_short_names[] __devinitdata = {
 #define get_azx_dev(substream) (substream->runtime->private_data)

 static int azx_acquire_irq(struct azx *chip, int do_disconnect);
-
+static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
 /*
  * Interface for HD codec
  */
@@ -664,11 +665,12 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 {
 	struct azx *chip = bus->private_data;
 	unsigned long timeout;
+	int do_poll = 0;

 again:
 	timeout = jiffies + msecs_to_jiffies(1000);
 	for (;;) {
-		if (chip->polling_mode) {
+		if (chip->polling_mode || do_poll) {
 			spin_lock_irq(&chip->reg_lock);
 			azx_update_rirb(chip);
 			spin_unlock_irq(&chip->reg_lock);
@@ -676,6 +678,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		if (!chip->rirb.cmds[addr]) {
 			smp_rmb();
 			bus->rirb_error = 0;
+
+			if (!do_poll)
+				chip->poll_count = 0;
 			return chip->rirb.res[addr]; /* the last value */
 		}
 		if (time_after(jiffies, timeout))
@@ -688,6 +693,16 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		}
 	}

+	if (!chip->polling_mode && chip->poll_count < 2) {
+		snd_printdd(SFX "azx_get_response timeout, "
+			    "polling the codec once: last cmd=0x%08x\n",
+			    chip->last_cmd[addr]);
+		do_poll = 1;
+		chip->poll_count++;
+		goto again;
+	}
+
+
 	if (!chip->polling_mode) {
 		snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
 			   "switching to polling mode: last cmd=0x%08x\n",
@@ -2043,7 +2058,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect)
 {
 	if (request_irq(chip->pci->irq, azx_interrupt,
 			chip->msi ? 0 : IRQF_SHARED,
-			"HDA Intel", chip)) {
+			"hda_intel", chip)) {
 		printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
 		       "disabling device\n", chip->pci->irq);
 		if (do_disconnect)
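The RIRB change implements a two-stage fallback: on a response timeout, poll the codec once and retry; only after repeated strikes (poll_count reaching 2) does the driver give up on interrupts and switch to polling permanently, while a clean interrupt-path response resets the strike counter. A standalone sketch of that escalation logic (the response wait is faked so the example terminates):

    #include <stdio.h>

    static int wait_for_response(int use_polling)
    {
            /* pretend interrupt delivery is broken but polling works */
            return use_polling ? 0 : -1;
    }

    static int get_response(void)
    {
            int do_poll = 0, poll_count = 0, polling_mode = 0;

    again:
            if (wait_for_response(polling_mode || do_poll) == 0) {
                    if (!do_poll)
                            poll_count = 0; /* clean IRQ response clears the strikes */
                    return 0;
            }
            if (!polling_mode && poll_count < 2) {
                    do_poll = 1;            /* one-shot: poll once and retry */
                    poll_count++;
                    goto again;
            }
            if (!polling_mode) {
                    polling_mode = 1;       /* IRQs look dead: poll from now on */
                    goto again;
            }
            return -1;                      /* polling failed too */
    }

    int main(void)
    {
            printf("%d\n", get_response());
            return 0;
    }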
@@ -703,11 +703,13 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho
 {
 	unsigned char nvol;

-	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
+	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) {
 		nvol = 0;
-	else
+	} else {
 		nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) /
 		       WM_VOL_MAX;
+		nvol += 0x1b;
+	}

 	wm_put(ice, index, nvol);
 	wm_put_nocache(ice, index, 0x180 | nvol);
@@ -778,7 +780,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
 	for (ch = 0; ch < 2; ch++) {
 		unsigned int vol = ucontrol->value.integer.value[ch];
 		if (vol > WM_VOL_MAX)
-			continue;
+			vol = WM_VOL_MAX;
 		vol |= spec->master[ch] & WM_VOL_MUTE;
 		if (vol != spec->master[ch]) {
 			int dac;
@@ -834,8 +836,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *
 	for (i = 0; i < voices; i++) {
 		unsigned int vol = ucontrol->value.integer.value[i];
 		if (vol > WM_VOL_MAX)
-			continue;
-		vol |= spec->vol[ofs+i];
+			vol = WM_VOL_MAX;
+		vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
 		if (vol != spec->vol[ofs+i]) {
 			spec->vol[ofs+i] = vol;
 			idx  = WM_DAC_ATTEN + ofs + i;
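Both volume handlers now clamp out-of-range values instead of `continue`-ing past the channel, which had silently left the control and the hardware out of sync. The clamp in miniature:

    #include <stdio.h>

    #define VOL_MAX 255

    static unsigned int clamp_vol(unsigned int vol)
    {
            return vol > VOL_MAX ? VOL_MAX : vol; /* clamp, don't skip */
    }

    int main(void)
    {
            printf("%u %u\n", clamp_vol(300), clamp_vol(42));
            return 0;
    }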
@@ -145,6 +145,7 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = {
 };

 static const struct snd_soc_dapm_route omap3pandora_out_map[] = {
+	{"PCM DAC", NULL, "APLL Enable"},
 	{"Headphone Amplifier", NULL, "PCM DAC"},
 	{"Line Out", NULL, "PCM DAC"},
 	{"Headphone Jack", NULL, "Headphone Amplifier"},