commit 2d06d8c49a

With dynamic debug having gained the capability to report debug messages
during the boot process as well, it offers a far superior interface for
debug messages than the custom cpufreq infrastructure. As a first step,
remove the old cpufreq_debug_printk() function and replace it with a call
to the generic pr_debug() function.

How can dynamic debug be used on cpufreq? You need a kernel which has
CONFIG_DYNAMIC_DEBUG enabled.

To enable debugging during runtime, mount debugfs and

   $ echo -n 'module cpufreq +p' > /sys/kernel/debug/dynamic_debug/control

for debugging the complete "cpufreq" module.

To achieve the same goal during boot, append

   ddebug_query="module cpufreq +p"

as a boot parameter to the kernel of your choice.

For more detailed instructions, please see
Documentation/dynamic-debug-howto.txt

Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Dave Jones <davej@redhat.com>
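As a rough sketch of the conversion this message describes, here is what the change looks like at a typical call site in a file like this one; the "before" line illustrates the style of the removed custom helper and is not quoted verbatim from the previous revision:

	/* before: debug output went through cpufreq's own helper */
	cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table",
			     "table entry %u is invalid, skipping\n", i);

	/* after: a plain pr_debug(), which dynamic debug can switch on and
	 * off per call site, per file or per module at run time */
	pr_debug("table entry %u is invalid, skipping\n", i);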
/*
 * linux/drivers/cpufreq/freq_table.c
 *
 * Copyright (C) 2002 - 2003 Dominik Brodowski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

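/**
 * cpufreq_frequency_table_cpuinfo - derive cpuinfo limits from a frequency table
 * @policy: the cpufreq policy to update
 * @table: the driver's frequency table, terminated by CPUFREQ_TABLE_END
 *
 * Walks @table, skipping CPUFREQ_ENTRY_INVALID entries, and sets the
 * policy's (and cpuinfo's) minimum and maximum frequency to the smallest
 * and largest valid entries found.  Returns -EINVAL if the table contains
 * no valid frequency, 0 otherwise.
 */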
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	unsigned int min_freq = ~0;
	unsigned int max_freq = 0;
	unsigned int i;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID) {
			pr_debug("table entry %u is invalid, skipping\n", i);

			continue;
		}
		pr_debug("table entry %u: %u kHz, %u index\n",
					i, freq, table[i].index);
		if (freq < min_freq)
			min_freq = freq;
		if (freq > max_freq)
			max_freq = freq;
	}

	policy->min = policy->cpuinfo.min_freq = min_freq;
	policy->max = policy->cpuinfo.max_freq = max_freq;

	if (policy->min == ~0)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);


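/**
 * cpufreq_frequency_table_verify - clamp a policy to the frequency table
 * @policy: the cpufreq policy to verify
 * @table: the driver's frequency table, terminated by CPUFREQ_TABLE_END
 *
 * Clips policy->min and policy->max to the hardware limits and, if no
 * valid table entry lies within the requested range, raises policy->max
 * to the next larger valid frequency so that at least one entry remains
 * usable.  Returns -EINVAL if the CPU is offline, 0 otherwise.
 */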
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0;
	unsigned int i;
	unsigned int count = 0;

	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
					policy->min, policy->max, policy->cpu);

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq >= policy->min) && (freq <= policy->max))
			count++;
		else if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	if (!count)
		policy->max = next_larger;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
				policy->min, policy->max, policy->cpu);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);


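/**
 * cpufreq_frequency_table_target - pick the table entry matching a target
 * @policy: the cpufreq policy to act on
 * @table: the driver's frequency table, terminated by CPUFREQ_TABLE_END
 * @target_freq: the desired frequency in kHz
 * @relation: CPUFREQ_RELATION_L to pick the lowest frequency at or above
 *            @target_freq, CPUFREQ_RELATION_H to pick the highest frequency
 *            at or below it
 * @index: on success, set to the index of the chosen table entry
 *
 * The search keeps two candidates: "optimal", the best entry on the
 * requested side of @target_freq, and "suboptimal", the closest entry on
 * the other side, which is used only if no optimal entry exists within
 * the policy's limits.  Returns -EINVAL if the CPU is offline or no
 * suitable entry is found.
 */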
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table,
				   unsigned int target_freq,
				   unsigned int relation,
				   unsigned int *index)
{
	struct cpufreq_frequency_table optimal = {
		.index = ~0,
		.frequency = 0,
	};
	struct cpufreq_frequency_table suboptimal = {
		.index = ~0,
		.frequency = 0,
	};
	unsigned int i;

	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
					target_freq, relation, policy->cpu);

	switch (relation) {
	case CPUFREQ_RELATION_H:
		suboptimal.frequency = ~0;
		break;
	case CPUFREQ_RELATION_L:
		optimal.frequency = ~0;
		break;
	}

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq < policy->min) || (freq > policy->max))
			continue;
		switch (relation) {
		case CPUFREQ_RELATION_H:
			if (freq <= target_freq) {
				if (freq >= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				if (freq <= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		case CPUFREQ_RELATION_L:
			if (freq >= target_freq) {
				if (freq <= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				if (freq >= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		}
	}
	if (optimal.index > i) {
		if (suboptimal.index > i)
			return -EINVAL;
		*index = suboptimal.index;
	} else
		*index = optimal.index;

	pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
		table[*index].index);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);

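/*
 * Per-CPU pointer to the driver's frequency table, registered via
 * cpufreq_frequency_table_get_attr() and used by the sysfs
 * "scaling_available_frequencies" attribute below.
 */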
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
/**
 * show_available_freqs - show available frequencies for the specified CPU
 */
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
{
	unsigned int i = 0;
	unsigned int cpu = policy->cpu;
	ssize_t count = 0;
	struct cpufreq_frequency_table *table;

	if (!per_cpu(cpufreq_show_table, cpu))
		return -ENODEV;

	table = per_cpu(cpufreq_show_table, cpu);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		count += sprintf(&buf[count], "%d ", table[i].frequency);
	}
	count += sprintf(&buf[count], "\n");

	return count;

}

struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
	.attr = { .name = "scaling_available_frequencies",
		  .mode = 0444,
		},
	.show = show_available_freqs,
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);

/*
 * if you use these, you must assure that the frequency table is valid
 * all the time between get_attr and put_attr!
 */
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
				      unsigned int cpu)
{
	pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
	per_cpu(cpufreq_show_table, cpu) = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);

void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
	pr_debug("clearing show_table for cpu %u\n", cpu);
	per_cpu(cpufreq_show_table, cpu) = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	return per_cpu(cpufreq_show_table, cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
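
/*
 * Typical usage (sketch only; the "foo" driver names are hypothetical and
 * not part of this file): a cpufreq driver hands its table to the helpers
 * in its ->init() callback and releases it in ->exit(), keeping the table
 * memory valid for the whole time in between.
 *
 *	static int foo_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		int ret = cpufreq_frequency_table_cpuinfo(policy, foo_freq_table);
 *		if (ret)
 *			return ret;
 *		cpufreq_frequency_table_get_attr(foo_freq_table, policy->cpu);
 *		return 0;
 *	}
 *
 *	static int foo_cpu_exit(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_frequency_table_put_attr(policy->cpu);
 *		return 0;
 *	}
 */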

MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL");