ARM: i.MX: Fix SSI clock associations for i.MX25/i.MX35
Merge tag 'imx-fixes' of git://git.pengutronix.de/git/imx/linux-2.6 into fixes

ARM: i.MX: Fix SSI clock associations for i.MX25/i.MX35

* tag 'imx-fixes' of git://git.pengutronix.de/git/imx/linux-2.6:
  ARM: clk-imx35: Fix SSI clock registration
  ARM: clk-imx25: Fix SSI clock registration

+ Linux 3.6-rc5

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQIcBAABCAAGBQJQTwn8AAoJEPFlmONMx+ezcuQP/iSJjSTecLvWH8c7cD9To+nv
ItpQakHtabQZVEtQgfCb2uy8u1vKbIZcJ4FlqwDz6Q9AiV0SngXjHfR+PPO9mDuj
4JmnF7ZnKtYcjBC/rhMAmiU+cVqrjY7WLwqz2FTEUc4OVC8m9xFIh0nhRSvQbngj
MQdp4kBRriroaoF6ZgoanjhD/Xw0kEeERiAMbjlq7je+ZAZWYXj38tHg8BYp3Hrf
ERVMXGoYgwglwNyooJHs5lAWTqKjSrXy+RAIQhV8yXOWE1Q+Jd7NE6ZOtn3VE20D
8iO0WgjITi3xm+ewCzJT5UVcQmktGjpYTD5ePvbi0eilX1D8sL704QiOauJETcQq
j4EHjgb+o53V9hyJt5Z3SWmJiyCsQRxlKagmbpJoRXWII08qR+Tgh2I8jgmCEAnw
HTrA+IPZ1hFCUyQT7JZzDLak6nwOLQAc1S2H/GxyCOPhURZsFpLBdi9dqIyiMUdw
KFw8T8GE5NgqXKB9osa3CGtjBfDtNWBTTRF+akQg9k1656HyWKxl24oSbX4kXsnC
O9N4SSxjx7mL87elGV+jzIE61O2cTH5cTGA6Z7HAKQLAckLdF4+hd/NS8oOPtNOm
K0r0EIt2U127I9OlpxvzosGjDgMzytGBP4NH0OmEovdvTX2Xh3uAyaiA/7rGWdoH
/Tec1NLs0VbDv+jM2rKx
=1cS3
-----END PGP SIGNATURE-----
commit 2bc733e8b4
@@ -210,3 +210,15 @@ Users:
 		firmware assigned instance number of the PCI
 		device that can help in understanding the firmware
 		intended order of the PCI device.
+
+What:		/sys/bus/pci/devices/.../d3cold_allowed
+Date:		July 2012
+Contact:	Huang Ying <ying.huang@intel.com>
+Description:
+		d3cold_allowed is bit to control whether the corresponding PCI
+		device can be put into D3Cold state. If it is cleared, the
+		device will never be put into D3Cold state. If it is set, the
+		device may be put into D3Cold state if other requirements are
+		satisfied too. Reading this attribute will show the current
+		value of d3cold_allowed bit. Writing this attribute will set
+		the value of d3cold_allowed bit.
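As a usage note (not part of the commit): the new attribute can be toggled from userspace. A minimal illustrative C snippet follows; the PCI device address in the path is a placeholder, not taken from this commit.

/* Illustrative only; the device address below is a placeholder. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed", "w");

	if (!f) {
		perror("fopen d3cold_allowed");
		return 1;
	}
	fputc('0', f);	/* clear the bit: device will never enter D3cold */
	return fclose(f) ? 1 : 0;
}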
Makefile | 2 +-
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -6,7 +6,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
-	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
@@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
+/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
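For context, a minimal sketch of a caller of the new hook, using a hypothetical board name; the Kirkwood hunk further down is the real in-tree user added by this merge.

/* Hypothetical board code: must run before postcore_initcall(),
 * e.g. from the machine descriptor's ->init_early hook. */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <asm/sizes.h>

static void __init myboard_init_early(void)
{
	/* Grow the atomic coherent pool from the 256KiB default to 1MiB. */
	init_dma_coherent_pool_size(SZ_1M);
}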
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
 	clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
 	clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
 	clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-	clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
 	clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
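Why the connection IDs move to NULL: the imx-ssi driver of this era requests its clock without a connection name, so the lookup only matches a clkdev entry registered with a NULL con_id. A hedged consumer-side sketch, assuming that lookup style (this is not the driver's actual source):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: on device "imx-ssi.0", this lookup now resolves to ssi1_ipg. */
static int ssi_clk_example(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* no NULL-con_id clkdev entry found */
	return clk_prepare_enable(clk);
}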
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
 	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
 	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
 	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
 	/* i.mx35 has the i.mx21 type uart */
 	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
 void __init kirkwood_init_early(void)
 {
 	orion_time_set_base(TIMER_VIRT_BASE);
+
+	/*
+	 * Some Kirkwood devices allocate their coherent buffers from atomic
+	 * context. Increase size of atomic coherent pool to make sure such
+	 * the allocations won't fail.
+	 */
+	init_dma_coherent_pool_size(SZ_1M);
 }
 
 int kirkwood_tclk;
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 	vunmap(cpu_addr);
 }
 
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+
 struct dma_pool {
 	size_t size;
 	spinlock_t lock;
 	unsigned long *bitmap;
 	unsigned long nr_pages;
 	void *vaddr;
-	struct page *page;
+	struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
-	.size = SZ_256K,
+	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };
 
 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+	/*
+	 * Catch any attempt to set the pool size too late.
+	 */
+	BUG_ON(atomic_pool.vaddr);
+
+	/*
+	 * Set architecture specific coherent pool size only if
+	 * it has not been changed by kernel command line parameter.
+	 */
+	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool.size = size;
+}
+
 /*
  * Initialise the coherent pool for atomic allocations.
  */
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
+	struct page **pages;
 	void *ptr;
 	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
 	if (!bitmap)
 		goto no_bitmap;
 
+	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto no_pages;
+
 	if (IS_ENABLED(CONFIG_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
 	else
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
 					   &page, NULL);
 	if (ptr) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++)
+			pages[i] = page + i;
+
 		spin_lock_init(&pool->lock);
 		pool->vaddr = ptr;
-		pool->page = page;
+		pool->pages = pages;
 		pool->bitmap = bitmap;
 		pool->nr_pages = nr_pages;
 		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
 			(unsigned)pool->size / 1024);
 		return 0;
 	}
+no_pages:
 	kfree(bitmap);
 no_bitmap:
 	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->page + pageno;
+		*ret_page = pool->pages[pageno];
+	} else {
+		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+			    "Please increase it with coherent_pool= kernel parameter!\n",
+			    (unsigned)pool->size / 1024);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	struct dma_pool *pool = &atomic_pool;
+	void *end = start + size;
+	void *pool_start = pool->vaddr;
+	void *pool_end = pool->vaddr + pool->size;
+
+	if (start < pool_start || start > pool_end)
+		return false;
+
+	if (end <= pool_end)
+		return true;
+
+	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+	     start, end - 1, pool_start, pool_end - 1);
+
+	return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
 	struct dma_pool *pool = &atomic_pool;
 	unsigned long pageno, count;
 	unsigned long flags;
 
-	if (start < pool->vaddr || start > pool->vaddr + pool->size)
+	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	if (start + size > pool->vaddr + pool->size) {
-		WARN(1, "freeing wrong coherent size from pool\n");
-		return 0;
-	}
-
 	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
 	count = size >> PAGE_SHIFT;
 
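For reference, the pr_err_once() above points at the existing "coherent_pool" early_param: booting with, e.g., coherent_pool=2M (size chosen arbitrarily here) resizes the pool before it is allocated, in addition to the new init_dma_coherent_pool_size() hook.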
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 	return 0;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
 	struct vm_struct *area;
 
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 		return cpu_addr;
 
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 	return NULL;
 }
 
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle)
+{
+	struct page *page;
+	void *addr;
+
+	addr = __alloc_from_pool(size, &page);
+	if (!addr)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, &page, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__free_from_pool(addr, size);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
+	if (gfp & GFP_ATOMIC)
+		return __iommu_alloc_atomic(dev, size, handle);
+
 	pages = __iommu_alloc_buffer(dev, size, gfp);
 	if (!pages)
 		return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, pages, handle, size);
+		return;
+	}
+
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
 		unmap_kernel_range((unsigned long)cpu_addr, size);
 		vunmap(cpu_addr);
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
 		args->op.cmd = MMUEXT_INVLPG_MULTI;
 		args->op.arg1.linear_addr = start;
 	}
@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
 	if (p2m_index(set_pfn))
 		return false;
 
-	for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
 		topidx = p2m_top_index(pfn);
 
 		if (!p2m_top[topidx])
@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 		return -EINVAL;
 
 	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
@@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
 	struct hid_driver *hdrv = hid->driver;
 	int ret;
 
-	hid_dump_input(hid, usage, value);
+	if (!list_empty(&hid->debug_list))
+		hid_dump_input(hid, usage, value);
 
 	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
 		ret = hdrv->event(hid, field, usage, value);
@@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+#endif
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
@@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
 	/* Inhibit KDI and KRI interrupts. */
 	reg_val = readw(keypad->mmio_base + KPSR);
 	reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE);
+	reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD;
 	writew(reg_val, keypad->mmio_base + KPSR);
 
 	/* Colums as open drain and disable all rows */
@@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
 	input_set_drvdata(input_dev, keypad);
 
 	/* Ensure that the keypad will stay dormant until opened */
+	clk_enable(keypad->clk);
 	imx_keypad_inhibit(keypad);
+	clk_disable(keypad->clk);
 
 	error = request_irq(irq, imx_keypad_irq_handler, 0,
 			    pdev->name, keypad);
@@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
 		},
 	},
+	{
+		/* Gigabyte T1005 - defines wrong chassis type ("Other") */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+		},
+	},
+	{
+		/* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+		},
+	},
 	{
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A =
 	{ "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
 	  63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xF4 =
 	{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
+	  63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xF8 =
+	{ "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
 	  63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0x3F =
 	{ "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
@@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = {
 	{ USB_DEVICE_WACOM(0xEF) },
 	{ USB_DEVICE_WACOM(0x47) },
 	{ USB_DEVICE_WACOM(0xF4) },
+	{ USB_DEVICE_WACOM(0xF8) },
 	{ USB_DEVICE_WACOM(0xFA) },
 	{ USB_DEVICE_LENOVO(0x6004) },
 	{ }
@@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
 {
 	if (tsdata->debug_dir)
 		debugfs_remove_recursive(tsdata->debug_dir);
+	kfree(tsdata->raw_buffer);
 }
 
 #else
@@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
 	if (gpio_is_valid(pdata->reset_pin))
 		gpio_free(pdata->reset_pin);
 
-	kfree(tsdata->raw_buffer);
 	kfree(tsdata);
 
 	return 0;
@@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi)
 {
 	struct drv_dev_and_id *ddi = _ddi;
 	struct device *dev = &ddi->dev->dev;
+	struct device *parent = dev->parent;
 	int rc;
 
+	/* The parent bridge must be in active state when probing */
+	if (parent)
+		pm_runtime_get_sync(parent);
 	/* Unbound PCI devices are always set to disabled and suspended.
 	 * During probe, the device is set to enabled and active and the
 	 * usage count is incremented. If the driver supports runtime PM,
@@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi)
 		pm_runtime_set_suspended(dev);
 		pm_runtime_put_noidle(dev);
 	}
+	if (parent)
+		pm_runtime_put(parent);
 	return rc;
 }
 
|
@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
|
|||||||
}
|
}
|
||||||
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
|
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
|
||||||
|
|
||||||
|
static void
|
||||||
|
pci_config_pm_runtime_get(struct pci_dev *pdev)
|
||||||
|
{
|
||||||
|
struct device *dev = &pdev->dev;
|
||||||
|
struct device *parent = dev->parent;
|
||||||
|
|
||||||
|
if (parent)
|
||||||
|
pm_runtime_get_sync(parent);
|
||||||
|
pm_runtime_get_noresume(dev);
|
||||||
|
/*
|
||||||
|
* pdev->current_state is set to PCI_D3cold during suspending,
|
||||||
|
* so wait until suspending completes
|
||||||
|
*/
|
||||||
|
pm_runtime_barrier(dev);
|
||||||
|
/*
|
||||||
|
* Only need to resume devices in D3cold, because config
|
||||||
|
* registers are still accessible for devices suspended but
|
||||||
|
* not in D3cold.
|
||||||
|
*/
|
||||||
|
if (pdev->current_state == PCI_D3cold)
|
||||||
|
pm_runtime_resume(dev);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
pci_config_pm_runtime_put(struct pci_dev *pdev)
|
||||||
|
{
|
||||||
|
struct device *dev = &pdev->dev;
|
||||||
|
struct device *parent = dev->parent;
|
||||||
|
|
||||||
|
pm_runtime_put(dev);
|
||||||
|
if (parent)
|
||||||
|
pm_runtime_put_sync(parent);
|
||||||
|
}
|
||||||
|
|
||||||
static ssize_t
|
static ssize_t
|
||||||
pci_read_config(struct file *filp, struct kobject *kobj,
|
pci_read_config(struct file *filp, struct kobject *kobj,
|
||||||
struct bin_attribute *bin_attr,
|
struct bin_attribute *bin_attr,
|
||||||
@@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
 		size = count;
 	}
 
+	pci_config_pm_runtime_get(dev);
+
 	if ((off & 1) && size) {
 		u8 val;
 		pci_user_read_config_byte(dev, off, &val);
@@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
 		--size;
 	}
 
+	pci_config_pm_runtime_put(dev);
+
 	return count;
 }
 
@@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
 		count = size;
 	}
 
+	pci_config_pm_runtime_get(dev);
+
 	if ((off & 1) && size) {
 		pci_user_write_config_byte(dev, off, data[off - init_off]);
 		off++;
@@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
 		--size;
 	}
 
+	pci_config_pm_runtime_put(dev);
+
 	return count;
 }
 
@@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev)
 	dev->pm_cap = pm;
 	dev->d3_delay = PCI_PM_D3_WAIT;
 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+	dev->d3cold_allowed = true;
 
 	dev->d1_support = false;
 	dev->d2_support = false;
@@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev)
 {
 	return 0;
 }
+
+static int pcie_port_runtime_idle(struct device *dev)
+{
+	/* Delay for a short while to prevent too frequent suspend/resume */
+	pm_schedule_suspend(dev, 10);
+	return -EBUSY;
+}
 #else
 #define pcie_port_runtime_suspend	NULL
 #define pcie_port_runtime_resume	NULL
+#define pcie_port_runtime_idle	NULL
 #endif
 
 static const struct dev_pm_ops pcie_portdrv_pm_ops = {
@@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
 	.resume_noirq	= pcie_port_resume_noirq,
 	.runtime_suspend = pcie_port_runtime_suspend,
 	.runtime_resume = pcie_port_runtime_resume,
+	.runtime_idle	= pcie_port_runtime_idle,
 };
 
 #define PCIE_PORTDRV_PM_OPS	(&pcie_portdrv_pm_ops)
@@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
 		return status;
 
 	pci_save_state(dev);
+	/*
+	 * D3cold may not work properly on some PCIe port, so disable
+	 * it by default.
+	 */
+	dev->d3cold_allowed = false;
 	if (!pci_match_id(port_runtime_pm_black_list, dev))
 		pm_runtime_put_noidle(&dev->dev);
 
@@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
 		break;
 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
-		dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
+		/* 1M mem BAR treated as 32-bit BAR */
 		break;
 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
 		flags |= IORESOURCE_MEM_64;
 		break;
 	default:
-		dev_warn(&dev->dev,
-			 "mem unknown type %x treated as 32-bit BAR\n",
-			 mem_type);
+		/* mem unknown type treated as 32-bit BAR */
 		break;
 	}
 	return flags;
@@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	u32 l, sz, mask;
 	u16 orig_cmd;
 	struct pci_bus_region region;
+	bool bar_too_big = false, bar_disabled = false;
 
 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 
+	/* No printks while decoding is disabled! */
 	if (!dev->mmio_always_on) {
 		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
 		pci_write_config_word(dev, PCI_COMMAND,
@@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 			goto fail;
 
 		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
-			dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
-				pos);
+			bar_too_big = true;
 			goto fail;
 		}
 
@@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 			region.start = 0;
 			region.end = sz64;
 			pcibios_bus_to_resource(dev, res, &region);
+			bar_disabled = true;
 		} else {
 			region.start = l64;
 			region.end = l64 + sz64;
 			pcibios_bus_to_resource(dev, res, &region);
-			dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
-				   pos, res);
 		}
 	} else {
 		sz = pci_size(l, sz, mask);
@@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		region.start = l;
 		region.end = l + sz;
 		pcibios_bus_to_resource(dev, res, &region);
-
-		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
 	}
 
-out:
+	goto out;
+
+
+fail:
+	res->flags = 0;
+out:
 	if (!dev->mmio_always_on)
 		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
 
+	if (bar_too_big)
+		dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
+	if (res->flags && !bar_disabled)
+		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
+
 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
-fail:
-	res->flags = 0;
-	goto out;
 }
 
 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
@@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		return ret;
 
 	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = hwdev->coherent_dma_mask;
+		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
 	phys = virt_to_phys(ret);
 	dev_addr = xen_phys_to_bus(phys);
@@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
 	if (err)
 		goto config_release;
 
-	dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
-	__pci_reset_function_locked(dev);
-
 	/* We need the device active to save the state. */
 	dev_dbg(&dev->dev, "save state of device\n");
 	pci_save_state(dev);
 	dev_data->pci_saved_state = pci_store_saved_state(dev);
 	if (!dev_data->pci_saved_state)
 		dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
+	else {
+		dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
+		__pci_reset_function_locked(dev);
+	}
 	/* Now disable the device (this also ensures some private device
 	 * data is setup before we export)
 	 */