Merge branches 'core-fixes-for-linus', 'perf-fixes-for-linus', 'sched-fixes-for-linus', 'timer-fixes-for-linus' and 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  futex: Set FLAGS_HAS_TIMEOUT during futex_wait restart setup

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf_event: Fix cgrp event scheduling bug in perf_enable_on_exec()
  perf: Fix a build error with some GCC versions

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Fix erroneous all_pinned logic
  sched: Fix sched-domain avg_load calculation

* 'timer-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  RTC: rtc-mrst: follow on to the change of rtc_device_register()
  RTC: add missing "return 0" in new alarm func for rtc-bfin.c
  RTC: Fix s3c compile error due to missing s3c_rtc_setpie
  RTC: Fix early irqs caused by calling rtc_set_alarm too early

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, amd: Disable GartTlbWlkErr when BIOS forgets it
  x86, NUMA: Fix fakenuma boot failure
  x86/mrst: Fix boot crash caused by incorrect pin to irq mapping
  x86/ce4100: Add reg property to bridges
commit fdfc552abe
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -96,11 +96,15 @@
 #define MSR_IA32_MC0_ADDR		0x00000402
 #define MSR_IA32_MC0_MISC		0x00000403
 
+#define MSR_AMD64_MC0_MASK		0xc0010044
+
 #define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
 #define MSR_IA32_MCx_STATUS(x)		(MSR_IA32_MC0_STATUS + 4*(x))
 #define MSR_IA32_MCx_ADDR(x)		(MSR_IA32_MC0_ADDR + 4*(x))
 #define MSR_IA32_MCx_MISC(x)		(MSR_IA32_MC0_MISC + 4*(x))
 
+#define MSR_AMD64_MCx_MASK(x)		(MSR_AMD64_MC0_MASK + (x))
+
 /* These are consecutive and not in the normal 4er MCE bank block */
 #define MSR_IA32_MC0_CTL2		0x00000280
 #define MSR_IA32_MCx_CTL2(x)		(MSR_IA32_MC0_CTL2 + (x))

--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	/* As a rule processors have APIC timer running in deep C states */
 	if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
 		set_cpu_cap(c, X86_FEATURE_ARAT);
+
+	/*
+	 * Disable GART TLB Walk Errors on Fam10h. We do this here
+	 * because this is always needed when GART is enabled, even in a
+	 * kernel which has no MCE support built in.
+	 */
+	if (c->x86 == 0x10) {
+		/*
+		 * BIOS should disable GartTlbWlk Errors itself. If it
+		 * doesn't, do it here as suggested by the BKDG.
+		 *
+		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+		 */
+		u64 mask;
+
+		rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+		mask |= (1 << 10);
+		wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+	}
 }
 
 #ifdef CONFIG_X86_32

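A note on the macro arithmetic above: MC bank 4 is the AMD northbridge bank, so MSR_AMD64_MCx_MASK(4) resolves to 0xc0010048, and bit 10 of that mask register disables GartTlbWlkErr reporting. A minimal user-space sketch of the computation (not kernel code; the plain variable stands in for rdmsrl()/wrmsrl()):

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_AMD64_MC0_MASK     0xc0010044
    #define MSR_AMD64_MCx_MASK(x)  (MSR_AMD64_MC0_MASK + (x))

    int main(void)
    {
            /* MC bank 4 is the northbridge; its mask MSR is base + 4 */
            uint32_t msr = MSR_AMD64_MCx_MASK(4);   /* 0xc0010048 */
            uint64_t mask = 0;                      /* stand-in for rdmsrl(msr, mask) */

            mask |= (1 << 10);      /* GartTlbWlkErr reporting disable bit */
            printf("wrmsrl(0x%x, 0x%llx)\n", (unsigned)msr,
                   (unsigned long long)mask);
            return 0;
    }
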
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id)
 		identify_secondary_cpu(c);
 }
 
+static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
+{
+	int node1 = early_cpu_to_node(cpu1);
+	int node2 = early_cpu_to_node(cpu2);
+
+	/*
+	 * Our CPU scheduler assumes all logical cpus in the same physical cpu
+	 * share the same node. But buggy ACPI or NUMA emulation might assign
+	 * them to different nodes. Fix it.
+	 */
+	if (node1 != node2) {
+		pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
+			   cpu1, node1, cpu2, node2, node2);
+
+		numa_remove_cpu(cpu1);
+		numa_set_node(cpu1, node2);
+		numa_add_cpu(cpu1);
+	}
+}
+
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
 	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
@@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
 	cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
 	cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
+	check_cpu_siblings_on_same_node(cpu1, cpu2);
 }
 
 
@@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 			cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
 			cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
+			check_cpu_siblings_on_same_node(cpu, i);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
 			cpumask_set_cpu(cpu, cpu_core_mask(i));
+			check_cpu_siblings_on_same_node(cpu, i);
 			/*
 			 *  Does this new cpu bringup a new core?
 			 */

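The failure mode this guards against: under NUMA emulation (for example booting with numa=fake=2 on a single-socket hyperthreaded machine), two logical CPUs of the same physical core can land on different emulated nodes, and sched-domain construction then trips over siblings that do not share a node. A toy user-space model of the check, where cpu_to_node[] is a hypothetical stand-in for the kernel's early_cpu_to_node() mapping:

    #include <stdio.h>

    /* Hypothetical stand-in for early_cpu_to_node() under numa=fake */
    static int cpu_to_node[2] = { 0, 1 };  /* HT siblings split across fake nodes */

    static void check_siblings_on_same_node(int cpu1, int cpu2)
    {
            if (cpu_to_node[cpu1] != cpu_to_node[cpu2]) {
                    printf("cpu%d: node %d -> node %d (sibling of cpu%d)\n",
                           cpu1, cpu_to_node[cpu1], cpu_to_node[cpu2], cpu2);
                    cpu_to_node[cpu1] = cpu_to_node[cpu2];  /* force same node */
            }
    }

    int main(void)
    {
            check_siblings_on_same_node(0, 1);  /* cpu0 and cpu1 share a core */
            return 0;
    }
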
--- a/arch/x86/platform/ce4100/falconfalls.dts
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -74,6 +74,7 @@
 				compatible = "intel,ce4100-pci", "pci";
 				device_type = "pci";
 				bus-range = <1 1>;
+				reg = <0x0800 0x0 0x0 0x0 0x0>;
 				ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
 
 				interrupt-parent = <&ioapic2>;
@@ -412,6 +413,7 @@
 				#address-cells = <2>;
 				#size-cells = <1>;
 				compatible = "isa";
+				reg = <0xf800 0x0 0x0 0x0 0x0>;
 				ranges = <1 0 0 0 0 0x100>;
 
 				rtc@70 {

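For readers unfamiliar with the PCI device-tree binding: the first cell of such a reg property encodes the device's config-space address as bus << 16 | device << 11 | function << 8, so 0x0800 names device 1, function 0 and 0xf800 names device 31, function 0. A small illustrative decoder (plain C, not from this tree):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the first cell (phys.hi) of a PCI 'reg' property:
     * 0x0800 -> 00:01.0, 0xf800 -> 00:1f.0
     */
    static void decode_pci_reg(uint32_t phys_hi)
    {
            unsigned bus = (phys_hi >> 16) & 0xff;
            unsigned dev = (phys_hi >> 11) & 0x1f;
            unsigned fn  = (phys_hi >> 8) & 0x7;

            printf("%02x:%02x.%x\n", bus, dev, fn);
    }

    int main(void)
    {
            decode_pci_reg(0x0800);  /* the ce4100-pci bridge above */
            decode_pci_reg(0xf800);  /* the isa bridge above */
            return 0;
    }
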
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
 			pentry->freq_hz, pentry->irq);
 			if (!pentry->irq)
 				continue;
-			mp_irq.type = MP_IOAPIC;
+			mp_irq.type = MP_INTSRC;
 			mp_irq.irqtype = mp_INT;
 /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
 			mp_irq.irqflag = 5;
-			mp_irq.srcbus = 0;
+			mp_irq.srcbus = MP_BUS_ISA;
 			mp_irq.srcbusirq = pentry->irq;	/* IRQ */
 			mp_irq.dstapic = MP_APIC_ALL;
 			mp_irq.dstirq = pentry->irq;
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
 	for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
 		pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
 			totallen, (u32)pentry->phys_addr, pentry->irq);
-		mp_irq.type = MP_IOAPIC;
+		mp_irq.type = MP_INTSRC;
 		mp_irq.irqtype = mp_INT;
 		mp_irq.irqflag = 0xf;	/* level trigger and active low */
-		mp_irq.srcbus = 0;
+		mp_irq.srcbus = MP_BUS_ISA;
 		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
 		mp_irq.dstapic = MP_APIC_ALL;
 		mp_irq.dstirq = pentry->irq;
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void)
 	/* Avoid searching for BIOS MP tables */
 	x86_init.mpparse.find_smp_config = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
+	set_bit(MP_BUS_ISA, mp_bus_not_pci);
 }
 
 /*

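The irqflag values follow the MP-specification encoding the comment refers to: bits 0-1 carry polarity and bits 2-3 carry trigger mode, with 01 meaning active high/edge and 11 meaning active low/level. A quick illustrative check (the macro names here are ad hoc, not from this kernel):

    #include <stdio.h>

    /* Ad-hoc names for the MP-spec irqflag fields */
    #define POL_ACTIVE_HIGH  0x1         /* bits 0-1 = 01 */
    #define POL_ACTIVE_LOW   0x3         /* bits 0-1 = 11 */
    #define TRIG_EDGE        (0x1 << 2)  /* bits 2-3 = 01 */
    #define TRIG_LEVEL       (0x3 << 2)  /* bits 2-3 = 11 */

    int main(void)
    {
            printf("timer irqflag = %d\n", TRIG_EDGE | POL_ACTIVE_HIGH);    /* 5 */
            printf("vRTC  irqflag = 0x%x\n", TRIG_LEVEL | POL_ACTIVE_LOW);  /* 0xf */
            return 0;
    }
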
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
 	err = __rtc_read_alarm(rtc, &alrm);
 
 	if (!err && !rtc_valid_tm(&alrm.time))
-		rtc_set_alarm(rtc, &alrm);
+		rtc_initialize_alarm(rtc, &alrm);
 
 	strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
 	dev_set_name(&rtc->dev, "rtc%d", id);

--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
+/* Called once per device from rtc_device_register */
+int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+	int err;
+
+	err = rtc_valid_tm(&alarm->time);
+	if (err != 0)
+		return err;
+
+	err = mutex_lock_interruptible(&rtc->ops_lock);
+	if (err)
+		return err;
+
+	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+	rtc->aie_timer.period = ktime_set(0, 0);
+	if (alarm->enabled) {
+		rtc->aie_timer.enabled = 1;
+		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+	}
+	mutex_unlock(&rtc->ops_lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
+
+
+
 int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 {
 	int err = mutex_lock_interruptible(&rtc->ops_lock);

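The point of the new helper, compared with rtc_set_alarm(): it only seeds the kernel-side aie_timer from an alarm the hardware is already carrying, and it never calls back into the driver, so no alarm interrupt can be armed while rtc_device_register() is still mid-flight. A toy user-space model of the race it avoids (names are illustrative, not kernel API):

    #include <stdio.h>

    struct toy_rtc {
            int registered;     /* set at the end of device registration */
            long aie_expires;   /* kernel-side alarm timer state */
    };

    /* rtc_set_alarm()-like path: pokes the "hardware", may raise an irq */
    static void toy_set_alarm(struct toy_rtc *rtc, long when)
    {
            if (!rtc->registered)
                    printf("BUG: alarm irq armed before registration\n");
            rtc->aie_expires = when;
    }

    /* rtc_initialize_alarm()-like path: record state only, no hw poke */
    static void toy_initialize_alarm(struct toy_rtc *rtc, long when)
    {
            rtc->aie_expires = when;  /* hw already holds this alarm */
    }

    int main(void)
    {
            struct toy_rtc rtc = { 0, 0 };

            toy_initialize_alarm(&rtc, 1000);  /* safe during registration */
            rtc.registered = 1;
            toy_set_alarm(&rtc, 2000);         /* normal path afterwards */
            return 0;
    }
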
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 		bfin_rtc_int_set_alarm(rtc);
 	else
 		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+
+	return 0;
 }
 
 static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)

--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev)
 
 	/* do not clear AIE here, it may be needed for wake */
 
-	s3c_rtc_setpie(dev, 0);
 	free_irq(s3c_rtc_alarmno, rtc_dev);
 	free_irq(s3c_rtc_tickno, rtc_dev);
 }
@@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
 	platform_set_drvdata(dev, NULL);
 	rtc_device_unregister(rtc);
 
-	s3c_rtc_setpie(&dev->dev, 0);
 	s3c_rtc_setaie(&dev->dev, 0);
 
 	clk_disable(rtc_clk);

--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc,
 			struct rtc_wkalrm *alrm);
 extern int rtc_set_alarm(struct rtc_device *rtc,
 				struct rtc_wkalrm *alrm);
+extern int rtc_initialize_alarm(struct rtc_device *rtc,
+				struct rtc_wkalrm *alrm);
 extern void rtc_update_irq(struct rtc_device *rtc,
 			unsigned long num, unsigned long events);
 

--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
 	restart->futex.val = val;
 	restart->futex.time = abs_time->tv64;
 	restart->futex.bitset = bitset;
-	restart->futex.flags = flags;
+	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
 	ret = -ERESTART_RESTARTBLOCK;
 

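Why the flag matters: when futex_wait() returns -ERESTART_RESTARTBLOCK, the syscall is re-entered through futex_wait_restart(), which only rebuilds the ktime_t timeout when FLAGS_HAS_TIMEOUT is set; without it, a restarted timed wait silently became an infinite one. A simplified sketch of the consuming side (condensed from the futex code of this era, not a complete quote):

    static long futex_wait_restart(struct restart_block *restart)
    {
            u32 __user *uaddr = restart->futex.uaddr;
            ktime_t t, *tp = NULL;

            /* Only restore a timeout the original call actually had. */
            if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
                    t.tv64 = restart->futex.time;
                    tp = &t;
            }
            restart->fn = do_no_restart_syscall;

            return (long)futex_wait(uaddr, restart->futex.flags,
                                    restart->futex.val, tp,
                                    restart->futex.bitset);
    }
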
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 			}
 
 			if (mode & PERF_CGROUP_SWIN) {
+				WARN_ON_ONCE(cpuctx->cgrp);
 				/* set cgrp before ctxsw in to
 				 * allow event_filter_match() to not
 				 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);

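The WARN_ON_ONCE makes the invariant explicit: cgroup switch-in must never find cpuctx->cgrp already set. perf_event_enable_on_exec() used to violate it by scheduling the context back in (which also switches cgroup events in) without having switched the cgroup events out first. The fixed ordering, reduced to its shape (a sketch, not the full function):

    /* perf_event_enable_on_exec(), simplified shape after the fix: */
    perf_cgroup_sched_out(current);          /* cgroup events out first */
    task_ctx_sched_out(ctx, EVENT_ALL);      /* task context out */
    /* ... flip events to enabled under ctx->lock ... */
    perf_event_context_sched_in(ctx, ctx->task);  /* back in; this also
                                                   * switches cgroup events in */
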
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	int loops = 0, pulled = 0, pinned = 0;
+	int loops = 0, pulled = 0;
 	long rem_load_move = max_load_move;
 	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
-	pinned = 1;
-
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate)
 			break;
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
+				      all_pinned))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2152,6 @@ out:
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
-
 	return max_load_move - rem_load_move;
 }
 
@@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
@@ -3340,6 +3337,7 @@ redo:
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
+		all_pinned = 1;
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,

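Two separate fixes are folded together here. First, the all_pinned contract is inverted: the load_balance() caller now starts from all_pinned = 1 and lets can_migrate_task() clear it, so an early bail-out with max_load_move == 0 can no longer report "all pinned" without ever examining a task. Second, sds.avg_load is computed before the busiest-group imbalance checks, because the force-balance paths can consume it before the point where it used to be calculated. The caller-side shape, roughly:

    /* load_balance(), simplified: assume the worst, let the walk disprove it */
    all_pinned = 1;
    ld_moved = move_tasks(this_rq, this_cpu, busiest, imbalance,
                          sd, idle, &all_pinned);
    /* can_migrate_task() clears *all_pinned the moment it sees one task
     * allowed to run on this_cpu, whether or not it gets migrated */
    if (!ld_moved && all_pinned)
            goto out_one_pinned;    /* truly nothing movable on busiest */
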
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -13,7 +13,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
 {
 	FILE *fp;
 	char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
-	char *token, *saved_ptr;
+	char *token, *saved_ptr = NULL;
 	int found = 0;
 
 	fp = fopen("/proc/mounts", "r");

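This is the "build error with some GCC versions" fix: under -Werror, some GCC releases flag saved_ptr as possibly used uninitialized. strtok_r() never reads *saveptr on the initial call (the one with a non-NULL first argument), so the code was already correct; the = NULL simply makes that visible to the compiler. The usual pattern:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[] = "cgroup /sys/fs/cgroup cgroup rw 0 0";
            char *token, *saved_ptr = NULL;  /* NULL only to placate GCC */

            /* First call passes the string; saved_ptr is write-only there. */
            for (token = strtok_r(line, " ", &saved_ptr);
                 token != NULL;
                 token = strtok_r(NULL, " ", &saved_ptr))
                    printf("token: %s\n", token);

            return 0;
    }
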