Merge branch 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue / powernow-k8 fix from Tejun Heo:
 "This is the fix for the bug where cpufreq/powernow-k8 was tripping
  BUG_ON() in try_to_wake_up_local() by migrating the workqueue worker
  to a different CPU.

    https://bugzilla.kernel.org/show_bug.cgi?id=47301

  As discussed, the fix now comes in two parts: one reimplements
  work_on_cpu() so that it doesn't create a new kthread on each call,
  and the actual fix makes powernow-k8 use work_on_cpu() instead of
  performing the migration manually.

  While this is pretty late in the merge cycle, both changes are on the
  safer side.  Jiri and I verified the two existing users of
  work_on_cpu(), and Duncan confirmed that the powernow-k8 fix survived
  about 18 hours of testing."

* 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  cpufreq/powernow-k8: workqueue user shouldn't migrate the kworker to another CPU
  workqueue: reimplement work_on_cpu() using system_wq
This commit is contained in commit c5c473e29c.
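For context, work_on_cpu() runs a caller-supplied function on a given CPU
and blocks until it returns; that is what lets powernow-k8 drop its manual
set_cpus_allowed_ptr() migration in the diff below. A minimal sketch of how
a caller might use it (the function and variable names here are
illustrative, not part of the patch):

    #include <linux/smp.h>
    #include <linux/workqueue.h>

    /* Runs in kworker context, already bound to the CPU given to work_on_cpu(). */
    static long example_on_cpu_fn(void *arg)
    {
            unsigned int *want = arg;

            /* The worker is pinned, so this cannot migrate mid-function. */
            return raw_smp_processor_id() == *want ? 0 : -EIO;
    }

    static long example_run_on(unsigned int cpu)
    {
            /* Blocks until example_on_cpu_fn() has finished on @cpu. */
            return work_on_cpu(cpu, example_on_cpu_fn, &cpu);
    }

Because the reimplemented work_on_cpu() reuses the bound per-CPU kworkers
instead of spawning a kthread per call, callers like cpufreq no longer pay
thread-creation overhead on every frequency transition.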
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -35,7 +35,6 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/cpumask.h>
-#include <linux/sched.h>        /* for current / set_cpus_allowed() */
 #include <linux/io.h>
 #include <linux/delay.h>
 
@@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
         return res;
 }
 
-/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
-                unsigned targfreq, unsigned relation)
+struct powernowk8_target_arg {
+        struct cpufreq_policy *pol;
+        unsigned targfreq;
+        unsigned relation;
+};
+
+static long powernowk8_target_fn(void *arg)
 {
-        cpumask_var_t oldmask;
+        struct powernowk8_target_arg *pta = arg;
+        struct cpufreq_policy *pol = pta->pol;
+        unsigned targfreq = pta->targfreq;
+        unsigned relation = pta->relation;
         struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
         u32 checkfid;
         u32 checkvid;
         unsigned int newstate;
-        int ret = -EIO;
+        int ret;
 
         if (!data)
                 return -EINVAL;
@@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
         checkfid = data->currfid;
         checkvid = data->currvid;
 
-        /* only run on specific CPU from here on. */
-        /* This is poor form: use a workqueue or smp_call_function_single */
-        if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
-                return -ENOMEM;
-
-        cpumask_copy(oldmask, tsk_cpus_allowed(current));
-        set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
-
-        if (smp_processor_id() != pol->cpu) {
-                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-                goto err_out;
-        }
-
         if (pending_bit_stuck()) {
                 printk(KERN_ERR PFX "failing targ, change pending bit set\n");
-                goto err_out;
+                return -EIO;
         }
 
         pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
                 pol->cpu, targfreq, pol->min, pol->max, relation);
 
         if (query_current_values_with_pending_wait(data))
-                goto err_out;
+                return -EIO;
 
         if (cpu_family != CPU_HW_PSTATE) {
                 pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
@@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
         if (cpufreq_frequency_table_target(pol, data->powernow_table,
                                 targfreq, relation, &newstate))
-                goto err_out;
+                return -EIO;
 
         mutex_lock(&fidvid_mutex);
 
@@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
                 ret = transition_frequency_fidvid(data, newstate);
                 if (ret) {
                         printk(KERN_ERR PFX "transition frequency failed\n");
-                        ret = 1;
                         mutex_unlock(&fidvid_mutex);
-                        goto err_out;
+                        return 1;
                 }
                 mutex_unlock(&fidvid_mutex);
 
@@ -1220,12 +1212,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
                         data->powernow_table[newstate].index);
         else
                 pol->cur = find_khz_freq_from_fid(data->currfid);
-        ret = 0;
 
-err_out:
-        set_cpus_allowed_ptr(current, oldmask);
-        free_cpumask_var(oldmask);
-        return ret;
+        return 0;
+}
+
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol,
+                unsigned targfreq, unsigned relation)
+{
+        struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
+                                             .relation = relation };
+
+        /*
+         * Must run on @pol->cpu.  cpufreq core is responsible for ensuring
+         * that we're bound to the current CPU and pol->cpu stays online.
+         */
+        if (smp_processor_id() == pol->cpu)
+                return powernowk8_target_fn(&pta);
+        else
+                return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
 }
 
 /* Driver entry point to verify the policy and range of frequencies */

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3576,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
-        struct completion completion;
+        struct work_struct work;
         long (*fn)(void *);
         void *arg;
         long ret;
 };
 
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
 {
-        struct work_for_cpu *wfc = _wfc;
+        struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
         wfc->ret = wfc->fn(wfc->arg);
-        complete(&wfc->completion);
-        return 0;
 }
 
 /**
@@ -3602,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-        struct task_struct *sub_thread;
-        struct work_for_cpu wfc = {
-                .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
-                .fn = fn,
-                .arg = arg,
-        };
+        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-        sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
-        if (IS_ERR(sub_thread))
-                return PTR_ERR(sub_thread);
-        kthread_bind(sub_thread, cpu);
-        wake_up_process(sub_thread);
-        wait_for_completion(&wfc.completion);
+        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+        schedule_work_on(cpu, &wfc.work);
+        flush_work(&wfc.work);
         return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
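A note on the workqueue side of the patch: the work item in the new
work_on_cpu() lives on the caller's stack, so it must be set up with
INIT_WORK_ONSTACK() and flushed before the stack frame is unwound. A
generic sketch of that on-stack work idiom, with illustrative names (it
mirrors what the reimplemented work_on_cpu() does, but is not part of the
patch itself):

    #include <linux/workqueue.h>

    struct onstack_job {
            struct work_struct work;
            long ret;                       /* filled in by the worker */
    };

    static void onstack_job_fn(struct work_struct *work)
    {
            struct onstack_job *job = container_of(work, struct onstack_job, work);

            job->ret = 0;                   /* the actual per-CPU work goes here */
    }

    static long run_onstack_job(int cpu)
    {
            struct onstack_job job;

            INIT_WORK_ONSTACK(&job.work, onstack_job_fn);
            schedule_work_on(cpu, &job.work);       /* queue on @cpu's kworker */
            flush_work(&job.work);                  /* wait before 'job' goes out of scope */
            return job.ret;
    }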