Merge branch 'sched/locking' into sched/core

Merge reason: the rq locking changes are stable,
              propagate them into the .40 queue.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author: Ingo Molnar
Date: 2011-04-18 14:53:18 +02:00
commit 6ddafdaab3
216 changed files with 2087 additions and 2469 deletions
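
A recurring change throughout this merge is the conversion of architecture reschedule-IPI handlers from "nothing to do here, rescheduling happens on the interrupt return path" into an explicit scheduler_ipi() call. A minimal sketch of that pattern follows; the handler name is illustrative only and is not taken from any one architecture in this diff:

/* Illustrative only; mirrors the per-architecture hunks below. */
#include <linux/interrupt.h>
#include <linux/sched.h>	/* scheduler_ipi() */

static irqreturn_t example_reschedule_ipi(int irq, void *dev_id)
{
	/* before this series the body was empty; the reschedule was
	 * left to the interrupt-return path */
	scheduler_ipi();
	return IRQ_HANDLED;
}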


@ -387,26 +387,6 @@ Who: Tejun Heo <tj@kernel.org>
----------------------------
What: Support for lcd_switch and display_get in asus-laptop driver
When: March 2010
Why: These two features use non-standard interfaces. These are the
only features that really need multiple paths to guess what's
the right method name on a specific laptop.
Removing them will allow us to remove a lot of code and significantly
clean up the drivers.
This will affect the backlight code, which won't be able to know
if the backlight is on or off. The platform display file will also be
write only (like the one in eeepc-laptop).
This shouldn't affect many users, because they usually know
when their display is on or off.
Who: Corentin Chary <corentin.chary@gmail.com>
----------------------------
What: sysfs-class-rfkill state file
When: Feb 2014
Files: net/rfkill/core.c


@ -6916,6 +6916,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
S: Maintained
F: drivers/platform/x86
XEN NETWORK BACKEND DRIVER
M: Ian Campbell <ian.campbell@citrix.com>
L: xen-devel@lists.xensource.com (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/xen-netback/*
XEN PCI SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xensource.com (moderated for non-subscribers)


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 39
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*


@ -585,8 +585,7 @@ handle_ipi(struct pt_regs *regs)
switch (which) {
case IPI_RESCHEDULE:
/* Reschedule callback. Everything to be done
is done by the interrupt return path. */
scheduler_ipi();
break;
case IPI_CALL_FUNC:


@ -560,10 +560,7 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_RESCHEDULE:
/*
* nothing more to do - eveything is
* done on the interrupt return path
*/
scheduler_ipi();
break;
case IPI_CALL_FUNC:


@ -94,6 +94,13 @@ struct tag_ethernet {
#define ETH_INVALID_PHY 0xff
/* board information */
#define ATAG_BOARDINFO 0x54410008
struct tag_boardinfo {
u32 board_number;
};
struct tag {
struct tag_header hdr;
union {
@ -102,6 +109,7 @@ struct tag {
struct tag_cmdline cmdline;
struct tag_clock clock;
struct tag_ethernet ethernet;
struct tag_boardinfo boardinfo;
} u;
};
@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
extern resource_size_t fbmem_start;
extern resource_size_t fbmem_size;
extern u32 board_number;
void setup_processor(void);


@ -390,6 +390,21 @@ static int __init parse_tag_clock(struct tag *tag)
}
__tagtable(ATAG_CLOCK, parse_tag_clock);
/*
* The board_number corresponds to the bd->bi_board_number in U-Boot. This
* parameter is only available during initialisation and can be used for
* some kind of board identification.
*/
u32 __initdata board_number;
static int __init parse_tag_boardinfo(struct tag *tag)
{
board_number = tag->u.boardinfo.board_number;
return 0;
}
__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
/*
* Scan the tag table for this tag, and call its parse function. The
* tag table is built by the linker from all the __tagtable
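
The boardinfo comment above describes board_number as a bootloader-provided identifier. A hypothetical consumer in board init code could look like the sketch below; example_identify_board() and the compared value are made up for illustration and are not part of this commit:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/setup.h>		/* extern u32 board_number; */

static int __init example_identify_board(void)
{
	/* board_number was filled in from ATAG_BOARDINFO during early boot */
	if (board_number == 0x1234)	/* illustrative value only */
		pr_info("detected board variant 0x%x\n", board_number);
	return 0;
}
arch_initcall(example_identify_board);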


@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
info.si_code = code;
info.si_addr = (void __user *)addr;
force_sig_info(signr, &info, current);
/*
* Init gets no signals that it doesn't have a handler for.
* That's all very well, but if it has caused a synchronous
* exception and we ignore the resulting signal, it will just
* generate the same exception over and over again and we get
* nowhere. Better to kill it and let the kernel panic.
*/
if (is_global_init(current)) {
__sighandler_t handler;
spin_lock_irq(&current->sighand->siglock);
handler = current->sighand->action[signr-1].sa.sa_handler;
spin_unlock_irq(&current->sighand->siglock);
if (handler == SIG_DFL) {
/* init has generated a synchronous exception
and it doesn't have a handler for the signal */
printk(KERN_CRIT "init has generated signal %ld "
"but has no handler for it\n", signr);
do_exit(signr);
}
}
}
asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)


@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
spin_unlock(&clk_list_lock);
}
static struct clk *__clk_get(struct device *dev, const char *id)
{
struct clk *clk;
list_for_each_entry(clk, &at32_clock_list, list) {
if (clk->dev == dev && strcmp(id, clk->name) == 0) {
return clk;
}
}
return ERR_PTR(-ENOENT);
}
struct clk *clk_get(struct device *dev, const char *id)
{
struct clk *clk;
spin_lock(&clk_list_lock);
list_for_each_entry(clk, &at32_clock_list, list) {
if (clk->dev == dev && strcmp(id, clk->name) == 0) {
spin_unlock(&clk_list_lock);
return clk;
}
}
clk = __clk_get(dev, id);
spin_unlock(&clk_list_lock);
return ERR_PTR(-ENOENT);
return clk;
}
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
spin_lock(&clk_list_lock);
/* show clock tree as derived from the three oscillators */
clk = clk_get(NULL, "osc32k");
clk = __clk_get(NULL, "osc32k");
dump_clock(clk, &r);
clk_put(clk);
clk = clk_get(NULL, "osc0");
clk = __clk_get(NULL, "osc0");
dump_clock(clk, &r);
clk_put(clk);
clk = clk_get(NULL, "osc1");
clk = __clk_get(NULL, "osc1");
dump_clock(clk, &r);
clk_put(clk);


@ -61,34 +61,34 @@ struct eic {
static struct eic *nmi_eic;
static bool nmi_enabled;
static void eic_ack_irq(struct irq_chip *d)
static void eic_ack_irq(struct irq_data *d)
{
struct eic *eic = irq_data_get_irq_chip_data(data);
struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
}
static void eic_mask_irq(struct irq_chip *d)
static void eic_mask_irq(struct irq_data *d)
{
struct eic *eic = irq_data_get_irq_chip_data(data);
struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
}
static void eic_mask_ack_irq(struct irq_chip *d)
static void eic_mask_ack_irq(struct irq_data *d)
{
struct eic *eic = irq_data_get_irq_chip_data(data);
struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
}
static void eic_unmask_irq(struct irq_chip *d)
static void eic_unmask_irq(struct irq_data *d)
{
struct eic *eic = irq_data_get_irq_chip_data(data);
struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
}
static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
struct eic *eic = irq_data_get_irq_chip_data(data);
struct eic *eic = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq;
unsigned int i = irq - eic->first_irq;
u32 mode, edge, level;
@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int_irq = platform_get_irq(pdev, 0);
if (!regs || !int_irq) {
if (!regs || (int)int_irq <= 0) {
dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
return -ENXIO;
}


@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
pio_writel(pio, IDR, 1 << (gpio & 0x1f));
}
static void gpio_irq_unmask(struct irq_data *d))
static void gpio_irq_unmask(struct irq_data *d)
{
unsigned gpio = irq_to_gpio(d->irq);
struct pio_device *pio = &pio_dev[gpio >> 5];


@ -53,7 +53,7 @@ cpu_enter_idle:
st.w r8[TI_flags], r9
unmask_interrupts
sleep CPU_SLEEP_IDLE
.size cpu_idle_sleep, . - cpu_idle_sleep
.size cpu_enter_idle, . - cpu_enter_idle
/*
* Common return path for PM functions that don't run from


@ -164,6 +164,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
while (msg_queue->count) {
msg = &msg_queue->ipi_message[msg_queue->head];
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
scheduler_ipi();
break;
case BFIN_IPI_CALL_FUNC:
spin_unlock_irqrestore(&msg_queue->lock, flags);
ipi_call_function(cpu, msg);


@ -342,15 +342,18 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
if (ipi.vector & IPI_SCHEDULE) {
scheduler_ipi();
}
if (ipi.vector & IPI_CALL) {
func(info);
func(info);
}
if (ipi.vector & IPI_FLUSH_TLB) {
if (flush_mm == FLUSH_ALL)
__flush_tlb_all();
else if (flush_vma == FLUSH_ALL)
if (flush_mm == FLUSH_ALL)
__flush_tlb_all();
else if (flush_vma == FLUSH_ALL)
__flush_tlb_mm(flush_mm);
else
else
__flush_tlb_page(flush_vma, flush_addr);
}


@ -31,6 +31,7 @@
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>
#include <asm/delay.h>
#include <asm/intrinsics.h>
@ -496,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
smp_local_flush_tlb();
kstat_incr_irqs_this_cpu(irq, desc);
} else if (unlikely(IS_RESCHEDULE(vector))) {
scheduler_ipi();
kstat_incr_irqs_this_cpu(irq, desc);
} else {
ia64_setreg(_IA64_REG_CR_TPR, vector);


@ -92,6 +92,8 @@ static unsigned short saved_irq_cnt;
static int xen_slab_ready;
#ifdef CONFIG_SMP
#include <linux/sched.h>
/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
* it ends up issuing several memory accesses on percpu data and
* thus adds unnecessary traffic to other paths.
@ -99,7 +101,13 @@ static int xen_slab_ready;
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
static irqreturn_t
xen_resched_handler(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}
@ -110,7 +118,7 @@ static struct irqaction xen_ipi_irqaction = {
};
static struct irqaction xen_resched_irqaction = {
.handler = xen_dummy_handler,
.handler = xen_resched_handler,
.flags = IRQF_DISABLED,
.name = "resched"
};


@ -122,8 +122,6 @@ void smp_send_reschedule(int cpu_id)
*
* Description: This routine executes on CPU which received
* 'RESCHEDULE_IPI'.
* Rescheduling is processed at the exit of interrupt
* operation.
*
* Born on Date: 2002.02.05
*
@ -138,7 +136,7 @@ void smp_send_reschedule(int cpu_id)
*==========================================================================*/
void smp_reschedule_interrupt(void)
{
/* nothing to do */
scheduler_ipi();
}
/*==========================================================================*


@ -343,10 +343,14 @@
#define __NR_fanotify_init 337
#define __NR_fanotify_mark 338
#define __NR_prlimit64 339
#define __NR_name_to_handle_at 340
#define __NR_open_by_handle_at 341
#define __NR_clock_adjtime 342
#define __NR_syncfs 343
#ifdef __KERNEL__
#define NR_syscalls 340
#define NR_syscalls 344
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR


@ -750,4 +750,8 @@ sys_call_table:
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
.long sys_name_to_handle_at /* 340 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs


@ -358,6 +358,10 @@ ENTRY(sys_call_table)
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
.long sys_name_to_handle_at /* 340 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall


@ -44,6 +44,8 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
/* Check if we've been told to flush the icache */
if (action & SMP_ICACHE_FLUSH)


@ -929,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
static void ipi_resched_interrupt(void)
{
/* Return from interrupt should be enough to cause scheduler check */
scheduler_ipi();
}
static void ipi_call_interrupt(void)


@ -309,6 +309,8 @@ static void ipi_call_dispatch(void)
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}


@ -55,6 +55,8 @@ void titan_mailbox_irq(void)
if (status & 0x2)
smp_call_function_interrupt();
if (status & 0x4)
scheduler_ipi();
break;
case 1:
@ -63,6 +65,8 @@ void titan_mailbox_irq(void)
if (status & 0x2)
smp_call_function_interrupt();
if (status & 0x4)
scheduler_ipi();
break;
}
}


@ -147,8 +147,10 @@ static void ip27_do_irq_mask0(void)
#ifdef CONFIG_SMP
if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
scheduler_ipi();
} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
scheduler_ipi();
} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
smp_call_function_interrupt();


@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
@ -189,10 +190,8 @@ void bcm1480_mailbox_interrupt(void)
/* Clear the mailbox to clear the interrupt */
__raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
/*
* Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
* interrupt will do the reschedule for us
*/
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();


@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
@ -177,10 +178,8 @@ void sb1250_mailbox_interrupt(void)
/* Clear the mailbox to clear the interrupt */
____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
/*
* Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
* interrupt will do the reschedule for us
*/
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();


@ -494,14 +494,11 @@ void smp_send_stop(void)
* @irq: The interrupt number.
* @dev_id: The device ID.
*
* We need do nothing here, since the scheduling will be effected on our way
* back through entry.S.
*
* Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
*/
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
/* do nothing */
scheduler_ipi();
return IRQ_HANDLED;
}


@ -155,10 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
case IPI_RESCHEDULE:
smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
/*
* Reschedule callback. Everything to be
* done is done by the interrupt return path.
*/
scheduler_ipi();
break;
case IPI_CALL_FUNC:


@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int ibmebus_bus_pm_freeze(struct device *dev)
{
@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
return ret;
}
#else /* !CONFIG_HIBERNATION */
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define ibmebus_bus_pm_freeze NULL
#define ibmebus_bus_pm_thaw NULL
@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
#define ibmebus_bus_pm_poweroff_noirq NULL
#define ibmebus_bus_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATION */
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
.prepare = ibmebus_bus_pm_prepare,


@ -116,7 +116,7 @@ void smp_message_recv(int msg)
generic_smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
/* we notice need_resched on exit */
scheduler_ipi();
break;
case PPC_MSG_CALL_FUNC_SINGLE:
generic_smp_call_function_single_interrupt();
@ -146,7 +146,7 @@ static irqreturn_t call_function_action(int irq, void *data)
static irqreturn_t reschedule_action(int irq, void *data)
{
/* we just need the return path side effect of checking need_resched */
scheduler_ipi();
return IRQ_HANDLED;
}


@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
/*
* handle bit signal external calls
*
* For the ec_schedule signal we have to do nothing. All the work
* is done automatically when we return from the interrupt.
*/
bits = xchg(&S390_lowcore.ext_call_fast, 0);
if (test_bit(ec_schedule, &bits))
scheduler_ipi();
if (test_bit(ec_call_function, &bits))
generic_smp_call_function_interrupt();


@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
@ -323,6 +324,7 @@ void smp_message_recv(unsigned int msg)
generic_smp_call_function_interrupt();
break;
case SMP_MSG_RESCHEDULE:
scheduler_ipi();
break;
case SMP_MSG_FUNCTION_SINGLE:
generic_smp_call_function_single_interrupt();


@ -125,7 +125,9 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
void smp_send_reschedule(int cpu)
{
/* See sparc64 */
/*
* XXX missing reschedule IPI, see scheduler_ipi()
*/
}
void smp_send_stop(void)


@ -1368,6 +1368,7 @@ void smp_send_reschedule(int cpu)
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
scheduler_ipi();
}
/* This is a nop because we capture all other cpus


@ -189,12 +189,8 @@ void flush_icache_range(unsigned long start, unsigned long end)
/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
/*
* Nothing to do here; when we return from interrupt, the
* rescheduling will occur there. But do bump the interrupt
* profiler count in the meantime.
*/
__get_cpu_var(irq_stat).irq_resched_count++;
scheduler_ipi();
return IRQ_HANDLED;
}


@ -173,7 +173,7 @@ void IPI_handler(int cpu)
break;
case 'R':
set_tsk_need_resched(current);
scheduler_ipi();
break;
case 'S':


@ -194,14 +194,13 @@ static void native_stop_other_cpus(int wait)
}
/*
* Reschedule call back. Nothing to do,
* all the work is done automatically when
* we return from the interrupt.
* Reschedule call back.
*/
void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
inc_irq_stat(irq_resched_count);
scheduler_ipi();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/


@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY
config XEN_SAVE_RESTORE
bool
depends on XEN
select HIBERNATE_CALLBACKS
default y
config XEN_DEBUG_FS


@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
static __init void xen_init_cpuid_mask(void)
{
unsigned int ax, bx, cx, dx;
unsigned int xsave_mask;
cpuid_leaf1_edx_mask =
~((1 << X86_FEATURE_MCE) | /* disable MCE */
@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void)
cpuid_leaf1_edx_mask &=
~((1 << X86_FEATURE_APIC) | /* disable local APIC */
(1 << X86_FEATURE_ACPI)); /* disable ACPI */
ax = 1;
cx = 0;
xen_cpuid(&ax, &bx, &cx, &dx);
/* cpuid claims we support xsave; try enabling it to see what happens */
if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
unsigned long cr4;
xsave_mask =
(1 << (X86_FEATURE_XSAVE % 32)) |
(1 << (X86_FEATURE_OSXSAVE % 32));
set_in_cr4(X86_CR4_OSXSAVE);
cr4 = read_cr4();
if ((cr4 & X86_CR4_OSXSAVE) == 0)
cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
clear_in_cr4(X86_CR4_OSXSAVE);
}
/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
if ((cx & xsave_mask) != xsave_mask)
cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
}
static void xen_set_debugreg(int reg, unsigned long val)


@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
if (io_page &&
(xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
WARN(addr != other_addr,
WARN_ONCE(addr != other_addr,
"0x%lx is using VM_IO, but it is 0x%lx!\n",
(unsigned long)addr, (unsigned long)other_addr);
} else {
pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
other_addr = (_pte.pte & PTE_PFN_MASK);
WARN((addr == other_addr) && (!io_page) && (!iomap_set),
WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
"0x%lx is missing VM_IO (and wasn't fixed)!\n",
(unsigned long)addr);
}


@ -46,13 +46,12 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
/*
* Reschedule call back. Nothing to do,
* all the work is done automatically when
* we return from the interrupt.
* Reschedule call back.
*/
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
inc_irq_stat(irq_resched_count);
scheduler_ipi();
return IRQ_HANDLED;
}


@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int amba_pm_freeze(struct device *dev)
{
@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev)
return ret;
}
#else /* !CONFIG_HIBERNATION */
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev)
#define amba_pm_poweroff_noirq NULL
#define amba_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATION */
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM


@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev)
of_device_node_put(&pa->pdev.dev);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
kfree(pa);
}
@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int platform_pm_freeze(struct device *dev)
{
@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev)
return ret;
}
#else /* !CONFIG_HIBERNATION */
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev)
#define platform_pm_poweroff_noirq NULL
#define platform_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATION */
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM_RUNTIME


@ -233,7 +233,7 @@ static int pm_op(struct device *dev,
}
break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze) {
@ -260,7 +260,7 @@ static int pm_op(struct device *dev,
suspend_report_result(ops->restore, error);
}
break;
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_HIBERNATE_CALLBACKS */
default:
error = -EINVAL;
}
@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev,
}
break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze_noirq) {
@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev,
suspend_report_result(ops->restore_noirq, error);
}
break;
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_HIBERNATE_CALLBACKS */
default:
error = -EINVAL;
}
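
The PM hunks above (ibmebus, amba, the platform bus, and drivers/base/power/main.c) all follow the same shape: hibernation callback definitions previously guarded by CONFIG_HIBERNATION are now guarded by CONFIG_HIBERNATE_CALLBACKS, which CONFIG_HIBERNATION selects in kernel/power/Kconfig (not shown in this diff) and which, as the Xen Kconfig hunk above shows, Xen save/restore now selects as well. A schematic, hypothetical bus example of the resulting guard; this is not code from any file in this diff:

#include <linux/device.h>

#ifdef CONFIG_HIBERNATE_CALLBACKS
static int examplebus_pm_freeze(struct device *dev)
{
	/* a real bus would invoke the driver's ->freeze() callback here */
	return 0;
}
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define examplebus_pm_freeze	NULL
#endif /* CONFIG_HIBERNATE_CALLBACKS */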


@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
{}
};
static struct of_platform_driver fsldma_of_driver = {
static struct platform_driver fsldma_of_driver = {
.driver = {
.name = "fsl-elo-dma",
.owner = THIS_MODULE,


@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val |= (1 << nr);
else
reg_val &= ~(1 << nr);
iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
mutex_unlock(&chip->lock);


@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
goto out_failed;
goto out_failed_irq;
if (pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
return 0;
out_failed:
out_failed_irq:
pca953x_irq_teardown(chip);
out_failed:
kfree(chip->dyn_pdata);
kfree(chip);
return ret;


@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val |= (1 << nr);
else
reg_val &= ~(1 << nr);
iowrite32(reg_val, &chip->reg->po);
mutex_unlock(&chip->lock);


@ -96,6 +96,7 @@ config DRM_I915
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI


@ -269,7 +269,7 @@ struct init_tbl_entry {
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
#define MACRO_INDEX_SIZE 2
#define MACRO_SIZE 8
@ -2010,6 +2010,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 3;
}
static int
init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* INIT_JUMP opcode: 0x5C ('\')
*
* offset (8 bit): opcode
* offset + 1 (16 bit): offset (in bios)
*
* Continue execution of init table from 'offset'
*/
uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
if (!iexec->execute)
return 3;
BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
return jmp_offset - offset;
}
static int
init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
{ "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
{ "INIT_JUMP" , 0x5C, init_jump },
{ "INIT_I2C_IF" , 0x5E, init_i2c_if },
{ "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
{ "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = {
#define MAX_TABLE_OPS 1000
static int
parse_init_table(struct nvbios *bios, unsigned int offset,
struct init_exec *iexec)
parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* Parses all commands in an init table.
@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
/* XFX GT-240X-YA
*
* So many things wrong here, replace the entire encoder table..
*/
if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
if (idx == 0) {
*conn = 0x02001300; /* VGA, connector 1 */
*conf = 0x00000028;
} else
if (idx == 1) {
*conn = 0x01010312; /* DVI, connector 0 */
*conf = 0x00020030;
} else
if (idx == 2) {
*conn = 0x01010310; /* VGA, connector 0 */
*conf = 0x00000028;
} else
if (idx == 3) {
*conn = 0x02022362; /* HDMI, connector 2 */
*conf = 0x00020010;
} else {
*conn = 0x0000000e; /* EOL */
*conf = 0x00000000;
}
}
return true;
}


@ -1190,7 +1190,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
extern void nv86_graph_tlb_flush(struct drm_device *dev);
extern void nv84_graph_tlb_flush(struct drm_device *dev);
extern struct nouveau_enum nv50_data_error_names[];
/* nvc0_graph.c */


@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
u8 tRC; /* Byte 9 */
u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
u8 magic_number = 0; /* Yeah... sorry*/
u8 *mem = NULL, *entry;
int i, recordlen, entries;
@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev)
if (!memtimings->timing)
return;
/* Get "some number" from the timing reg for NV_40
* Used in calculations later */
if(dev_priv->card_type == NV_40) {
magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
}
entry = mem + mem[1];
for (i = 0; i < entries; i++, entry += recordlen) {
struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev)
/* XXX: I don't trust the -1's and +1's... they must come
* from somewhere! */
timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
tUNK_18 << 16 |
(tUNK_1 + tUNK_19 + 1) << 8 |
(tUNK_2 - 1));
(tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
if(dev_priv->chipset == 0xa8) {
timing->reg_100224 |= (tUNK_2 - 1);
} else {
timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
}
timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
if(recordlen > 19) {
timing->reg_100228 += (tUNK_19 - 1) << 24;
}/* I cannot back-up this else-statement right now
else {
timing->reg_100228 += tUNK_12 << 24;
}*/
if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
timing->reg_100228 |= (tUNK_19 - 1) << 24;
}
/* XXX: reg_10022c */
timing->reg_10022c = tUNK_2 - 1;
if(dev_priv->card_type == NV_40) {
/* NV40: don't know what the rest of the regs are..
* And don't need to know either */
timing->reg_100228 |= 0x20200000 | magic_number << 24;
} else if(dev_priv->card_type >= NV_50) {
/* XXX: reg_10022c */
timing->reg_10022c = tUNK_2 - 1;
timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
tUNK_13 << 8 | tUNK_13);
timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
tUNK_13 << 8 | tUNK_13);
/* XXX: +6? */
timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
timing->reg_100234 = (tRAS << 24 | tRC);
timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
/* XXX; reg_100238, reg_10023c
* reg: 0x00??????
* reg_10023c:
* 0 for pre-NV50 cards
* 0x????0202 for NV50+ cards (empirical evidence) */
if(dev_priv->card_type >= NV_50) {
if(dev_priv->chipset < 0xa3) {
timing->reg_100234 |= (tUNK_2 + 2) << 8;
} else {
/* XXX: +6? */
timing->reg_100234 |= (tUNK_19 + 6) << 8;
}
/* XXX; reg_100238, reg_10023c
* reg_100238: 0x00??????
* reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
timing->reg_10023c = 0x202;
if(dev_priv->chipset < 0xa3) {
timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
} else {
/* currently unknown
* 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
}
}
NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
timing->reg_100238, timing->reg_10023c);
}
memtimings->nr_timing = entries;
memtimings->nr_timing = entries;
memtimings->supported = true;
}


@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev)
case 0x13:
case 0x15:
perflvl->fanspeed = entry[55];
perflvl->voltage = entry[56];
perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
perflvl->core = ROM32(entry[1]) * 10;
perflvl->memory = ROM32(entry[5]) * 20;
break;


@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.unload_context = nv50_graph_unload_context;
if (dev_priv->chipset != 0x86)
if (dev_priv->chipset == 0x50 ||
dev_priv->chipset == 0xac)
engine->graph.tlb_flush = nv50_graph_tlb_flush;
else {
/* from what i can see nvidia do this on every
* pre-NVA3 board except NVAC, but, we've only
* ever seen problems on NV86
*/
engine->graph.tlb_flush = nv86_graph_tlb_flush;
}
else
engine->graph.tlb_flush = nv84_graph_tlb_flush;
engine->fifo.channels = 128;
engine->fifo.init = nv50_fifo_init;
engine->fifo.takedown = nv50_fifo_takedown;


@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
int head = nv_encoder->restore.head;
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
if (native_mode)
call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
native_mode->clock);
else
NV_ERROR(dev, "Not restoring LVDS without native mode\n");
struct nouveau_connector *connector =
nouveau_encoder_connector_get(nv_encoder);
if (connector && connector->native_mode)
call_lvds_script(dev, nv_encoder->dcb, head,
LVDS_PANEL_ON,
connector->native_mode->clock);
} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
int clock = nouveau_hw_pllvals_to_clk


@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc)
start = ptimer->read(dev);
do {
nv_wr32(dev, 0x61002c, 0x370);
nv_wr32(dev, 0x000140, 1);
if (nv_ro32(disp->ntfy, 0x000))
return 0;
} while (ptimer->read(dev) - start < 2000000000ULL);


@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
evo->dma.max = (4096/4) - 2;
evo->dma.max &= ~7;
evo->dma.put = 0;
evo->dma.cur = evo->dma.put;
evo->dma.free = evo->dma.max - evo->dma.cur;


@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev)
}
void
nv86_graph_tlb_flush(struct drm_device *dev)
nv84_graph_tlb_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;


@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
u32 r100c80, engine;
u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
pinstmem->flush(vm->dev);
if (vm == dev_priv->chan_vm)
engine = 1;
else
engine = 5;
spin_lock(&dev_priv->ramin_lock);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
r100c80 = nv_rd32(dev, 0x100c80);
/* looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc the more it decreases
*/
if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
nv_rd32(dev, 0x100c80), engine);
}
nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
/* wait for flush to be queued? */
if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
nv_rd32(dev, 0x100c80), engine);
}
}
spin_unlock(&dev_priv->ramin_lock);
}


@ -32,6 +32,7 @@
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "radeon.h"
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
@ -101,7 +102,9 @@ static void debug_print_spaces(int n)
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
uint32_t index, uint32_t data)
{
struct radeon_device *rdev = ctx->card->dev->dev_private;
uint32_t temp = 0xCDCDCDCD;
while (1)
switch (CU8(base)) {
case ATOM_IIO_NOP:
@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
if (rdev->family == CHIP_RV515)
(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;


@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
if ((rdev->family == CHIP_R600) ||
(rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV630) ||
(rdev->family == CHIP_RV670))
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
pll->flags |= RADEON_PLL_LEGACY;


@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage);
if (voltage->type == VOLTAGE_SW) {
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
rdev->pm.current_vddci = voltage->vddci;
DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
}
}
}
@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
r = radeon_dummy_page_init(rdev);
if (r)
return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)


@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage);
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev)
{
int r;
r = radeon_dummy_page_init(rdev);
if (r)
return r;
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}


@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
@ -767,7 +767,9 @@ struct radeon_voltage {
u8 vddci_id; /* index into vddci voltage table */
bool vddci_enabled;
/* r6xx+ sw */
u32 voltage;
u16 voltage;
/* evergreen+ vddci */
u16 vddci;
};
/* clock mode flags */
@ -835,10 +837,12 @@ struct radeon_pm {
int default_power_state_index;
u32 current_sclk;
u32 current_mclk;
u32 current_vddc;
u16 current_vddc;
u16 current_vddci;
u32 default_sclk;
u32 default_mclk;
u32 default_vddc;
u16 default_vddc;
u16 default_vddci;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;


@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}


@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
}
}
static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
u16 *vddc, u16 *vddci)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
u8 frev, crev;
u16 data_offset;
union firmware_info *firmware_info;
u16 vddc = 0;
*vddc = 0;
*vddci = 0;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
if ((frev == 2) && (crev >= 2))
*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
}
return vddc;
}
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
int j;
u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
u16 vddc = radeon_atombios_get_default_vddc(rdev);
u16 vddc, vddci;
radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
} else {
/* patch the table values with the default slck/mclk from firmware info */
for (j = 0; j < mode_index; j++) {
@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
le16_to_cpu(clock_info->evergreen.usVDDC);
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
le16_to_cpu(clock_info->evergreen.usVDDCI);
} else {
sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
sclk |= clock_info->r600.ucEngineClockHigh << 16;
@ -2577,25 +2585,25 @@ union set_voltage {
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
};
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
u8 frev, crev, volt_index = level;
u8 frev, crev, volt_index = voltage_level;
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
switch (crev) {
case 1:
args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
args.v1.ucVoltageType = voltage_type;
args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
args.v1.ucVoltageIndex = volt_index;
break;
case 2:
args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
args.v2.usVoltageLevel = cpu_to_le16(level);
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);


@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
seq = rdev->wb.wb[scratch_index/4];
seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {


@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
rdev->gart.ttm_alloced = NULL;
radeon_dummy_page_fini(rdev);
}


@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
*val = in_buf[0];
DRM_DEBUG("val = 0x%02x\n", *val);
} else {
DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
addr, *val);
}
}
@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
out_buf[1] = val;
if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
addr, val);
}


@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.disable = radeon_legacy_encoder_disable,
};
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
#define MAX_RADEON_LEVEL 0xFF

View File

@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_vddci)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)


@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
void radeon_ring_free_size(struct radeon_device *rdev)
{
if (rdev->wb.enabled)
rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
else {
if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);


@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
udelay(voltage->delay);
}
} else if (voltage->type == VOLTAGE_VDDC)
radeon_atom_set_voltage(rdev, voltage->vddc_id);
radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);


@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage);
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
}
@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
r = radeon_dummy_page_init(rdev);
if (r)
return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)


@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
void *addr;
addr = dma_alloc_coherent(NULL, PAGE_SIZE,
&dma_address[r],
gfp_flags);
if (addr == NULL)
return -ENOMEM;
p = virt_to_page(addr);
} else
p = alloc_page(gfp_flags);
p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}
list_add(&p->lru, pages);
}
return 0;
@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p, *tmp;
unsigned r;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
r = page_count-1;
list_for_each_entry_safe(p, tmp, pages, lru) {
if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
void *addr = page_address(p);
WARN_ON(!addr || !dma_address[r]);
if (addr)
dma_free_coherent(NULL, PAGE_SIZE,
addr,
dma_address[r]);
dma_address[r] = 0;
} else
__free_page(p);
r--;
__free_page(p);
}
/* Make the pages list empty */
INIT_LIST_HEAD(pages);


@ -5,6 +5,7 @@ config STUB_POULSBO
# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select THERMAL if ACPI


@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev)
}
EXPORT_SYMBOL(mfd_cell_disable);
static int mfd_platform_add_cell(struct platform_device *pdev,
const struct mfd_cell *cell)
{
if (!cell)
return 0;
pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
if (!pdev->mfd_cell)
return -ENOMEM;
return 0;
}
static int mfd_add_device(struct device *parent, int id,
const struct mfd_cell *cell,
struct resource *mem_base,
@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.parent = parent;
ret = platform_device_add_data(pdev, cell, sizeof(*cell));
ret = mfd_platform_add_cell(pdev, cell);
if (ret)
goto fail_res;
@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id,
return 0;
/* platform_device_del(pdev); */
fail_res:
kfree(res);
fail_device:


@ -154,7 +154,7 @@ struct be_eq_obj {
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
u8 msix_vec_idx;
u8 eq_idx;
struct napi_struct napi;
};
@ -291,7 +291,7 @@ struct be_adapter {
u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
u8 msix_vec_next_idx;
u8 eq_next_idx;
struct be_drv_stats drv_stats;
struct vlan_group *vlan_grp;


@ -1497,7 +1497,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
/* Alloc TX eth compl queue */
@ -1590,7 +1590,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
if (rc)
goto err;
rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
/* CQ */
cq = &rxo->cq;
@ -1666,11 +1666,11 @@ static irqreturn_t be_intx(int irq, void *dev)
if (!isr)
return IRQ_NONE;
if ((1 << adapter->tx_eq.msix_vec_idx & isr))
if ((1 << adapter->tx_eq.eq_idx & isr))
event_handle(adapter, &adapter->tx_eq);
for_all_rx_queues(adapter, rxo, i) {
if ((1 << rxo->rx_eq.msix_vec_idx & isr))
if ((1 << rxo->rx_eq.eq_idx & isr))
event_handle(adapter, &rxo->rx_eq);
}
}
@ -1951,7 +1951,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eq_obj)
{
return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
@ -2345,6 +2345,7 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
adapter->eq_next_idx = 0;
if (be_physfn(adapter) && adapter->sriov_enabled)
for (vf = 0; vf < num_vfs; vf++)
@ -3141,12 +3142,14 @@ static int be_resume(struct pci_dev *pdev)
static void be_shutdown(struct pci_dev *pdev)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
if (netif_running(netdev))
if (!adapter)
return;
if (netif_running(adapter->netdev))
cancel_delayed_work_sync(&adapter->work);
netif_device_detach(netdev);
netif_device_detach(adapter->netdev);
be_cmd_reset_function(adapter);

View File

@ -2219,13 +2219,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
u16 bdf;
bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
ioc->pcidev.device_id);
pr_crit("Firmware heartbeat failure at %d", bdf);
BUG_ON(1);
pr_crit("Heart Beat of IOC has failed\n");
bfa_ioc_stats(ioc, ioc_hbfails);
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void


@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net)
priv->tx_len = 0;
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
if (pdata->transceiver_enable)


@ -345,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err = mlx4_en_init_allocator(priv, ring);
if (err) {
en_err(priv, "Failed initializing ring allocator\n");
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
ring_ind--;
goto err_allocator;
}
@ -369,6 +371,8 @@ err_buffers:
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
ring_ind--;
}


@ -944,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
}
for (port = 1; port <= dev->caps.num_ports; port++) {
enum mlx4_port_type port_type = 0;
mlx4_SENSE_PORT(dev, port, &port_type);
if (port_type)
dev->caps.port_type[port] = port_type;
ib_port_default_caps = 0;
err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
if (err)
@ -958,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_mcg_table_free;
}
}
mlx4_set_port_mask(dev);
return 0;


@ -431,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_handle_catas_err(struct mlx4_dev *dev);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *stype,
enum mlx4_port_type *defaults);


@ -38,8 +38,8 @@
#include "mlx4.h"
static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type)
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type)
{
u64 out_param;
int err = 0;


@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev)
lock_sock(sk);
if (po->pppoe_dev == dev &&
sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_ZOMBIE;
sk->sk_state_change(sk);


@ -1818,6 +1818,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
SMSC_TRACE(PROBE, "PHY will be autodetected.");
spin_lock_init(&pdata->dev_lock);
spin_lock_init(&pdata->mac_lock);
if (pdata->ioaddr == 0) {
SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000");
@ -1895,8 +1896,11 @@ static int __devinit smsc911x_init(struct net_device *dev)
/* workaround for platforms without an eeprom, where the mac address
* is stored elsewhere and set by the bootloader. This saves the
* mac address before resetting the device */
if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
spin_lock_irq(&pdata->mac_lock);
smsc911x_read_mac_address(dev);
spin_unlock_irq(&pdata->mac_lock);
}
/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata))
@ -2059,8 +2063,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name);
}
spin_lock_init(&pdata->mac_lock);
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
SMSC_WARNING(PROBE,

View File

@ -1313,6 +1313,21 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0424, 0x9909),
.driver_info = (unsigned long) &smsc95xx_info,
},
{
/* SMSC LAN9530 USB Ethernet Device */
USB_DEVICE(0x0424, 0x9530),
.driver_info = (unsigned long) &smsc95xx_info,
},
{
/* SMSC LAN9730 USB Ethernet Device */
USB_DEVICE(0x0424, 0x9730),
.driver_info = (unsigned long) &smsc95xx_info,
},
{
/* SMSC LAN89530 USB Ethernet Device */
USB_DEVICE(0x0424, 0x9E08),
.driver_info = (unsigned long) &smsc95xx_info,
},
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);


@ -2546,6 +2546,7 @@ static struct {
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
{ AR_SREV_VERSION_9300, "9300" },
{ AR_SREV_VERSION_9485, "9485" },
};
/* For devices with external radios */


@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
if (unlikely(len > ring->rx_buffersize)) {
if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers


@ -163,7 +163,7 @@ struct b43_dmadesc_generic {
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
#define B43_RXRING_SLOTS 64
#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN
#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
/* Pointer poison */
#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))


@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr {
/* 6x00 Specific */
#define EEPROM_6000_TX_POWER_VERSION (4)
#define EEPROM_6000_EEPROM_VERSION (0x434)
#define EEPROM_6000_EEPROM_VERSION (0x423)
/* 6x50 Specific */
#define EEPROM_6050_TX_POWER_VERSION (4)

View File

@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */
{USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
{USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
{USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */
{USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
{USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
{USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
{USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
{USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */
{USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */
{USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */
{USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */


@ -1062,8 +1062,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Stop all work.
*/
cancel_work_sync(&rt2x00dev->intf_work);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
if (rt2x00_is_usb(rt2x00dev)) {
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
}
destroy_workqueue(rt2x00dev->workqueue);
/*


@ -685,7 +685,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
u8 efuse_data, word_cnts = 0;
u16 efuse_addr = 0;
u8 hworden;
u8 hworden = 0;
u8 tmpdata[8];
if (data == NULL)

View File

@ -303,7 +303,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
u16 box_reg, box_extreg;
u8 u1b_tmp;
bool isfw_read = false;
u8 buf_index;
u8 buf_index = 0;
bool bwrite_sucess = false;
u8 wait_h2c_limmit = 100;
u8 wait_writeh2c_limmit = 100;


@ -246,7 +246,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw);
mutex_destroy(&rtlpriv->io.bb_mutex);
}


@ -340,7 +340,7 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
MODULE_FIRMWARE(WL1271_AP_FW_NAME);


@ -487,7 +487,7 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
MODULE_FIRMWARE(WL1271_AP_FW_NAME);

Some files were not shown because too many files have changed in this diff.