lib: logic_pio: Fix RCU usage

The traversal of io_range_list with list_for_each_entry_rcu() is not
properly protected by rcu_read_lock() and rcu_read_unlock(), so add
them.

These functions mark the read-side critical section: while a reader is
inside it, the list entries it can observe will not be reclaimed. An
updater - in this case, the logical PIO registration code - may still
modify the list concurrently, but it must not free a removed entry until
every such reader has exited its critical section.
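
For reference, a minimal sketch of the RCU list pattern being relied on
here (the names item, item_list and item_mutex are illustrative only,
not the logic_pio code itself):

  #include <linux/rculist.h>
  #include <linux/rcupdate.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>

  struct item {
  	int key;
  	struct list_head list;
  };

  static LIST_HEAD(item_list);		/* read under RCU, written under item_mutex */
  static DEFINE_MUTEX(item_mutex);

  /* Reader: may run concurrently with writers and never blocks. */
  static struct item *item_find(int key)
  {
  	struct item *it, *found = NULL;

  	rcu_read_lock();
  	list_for_each_entry_rcu(it, &item_list, list) {
  		if (it->key == key) {
  			found = it;
  			break;
  		}
  	}
  	rcu_read_unlock();

  	/*
  	 * Returning the pointer out of the critical section is only safe
  	 * if entries outlive the caller's use (as registered logical PIO
  	 * ranges do); otherwise copy what is needed before unlocking.
  	 */
  	return found;
  }

  /*
   * Updater: removal is serialized by the mutex; the entry is freed only
   * after a grace period, i.e. once all pre-existing readers have exited.
   */
  static void item_del(struct item *it)
  {
  	mutex_lock(&item_mutex);
  	list_del_rcu(&it->list);
  	mutex_unlock(&item_mutex);

  	synchronize_rcu();
  	kfree(it);
  }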

In addition, the list traversal in logic_pio_register_range() does not
need the RCU variant: io_range_mutex already guarantees mutual exclusion
between writers mutating the list, so plain list_for_each_entry() is
sufficient there.
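
Continuing the sketch above, the write side can look like this - all
writers serialize on the mutex, so the plain iterator is enough and only
the insertion needs the RCU publish primitive (again illustrative names,
not the exact logic_pio_register_range() body):

  #include <linux/errno.h>

  /* Writer: traversal is protected by the mutex, not RCU. */
  static int item_add(struct item *new)
  {
  	struct item *it;

  	mutex_lock(&item_mutex);
  	list_for_each_entry(it, &item_list, list) {	/* no _rcu needed */
  		if (it->key == new->key) {
  			mutex_unlock(&item_mutex);
  			return -EEXIST;
  		}
  	}
  	list_add_tail_rcu(&new->list, &item_list);	/* publish to readers */
  	mutex_unlock(&item_mutex);

  	return 0;
  }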

Cc: stable@vger.kernel.org
Fixes: 031e360186 ("lib: Add generic PIO mapping method")
Signed-off-by: John Garry <john.garry@huawei.com>
Signed-off-by: Wei Xu <xuwei5@hisilicon.com>
commit 06709e81c6
parent 5f9e832c13
Author:    John Garry, 2019-07-30 21:29:52 +08:00
Committer: Wei Xu

@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 	end = new_range->hw_start + new_range->size;
 
 	mutex_lock(&io_range_mutex);
-	list_for_each_entry_rcu(range, &io_range_list, list) {
+	list_for_each_entry(range, &io_range_list, list) {
 		if (range->fwnode == new_range->fwnode) {
 			/* range already there */
 			goto end_register;
@@ -108,26 +108,38 @@ end_register:
  */
 struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
 {
-	struct logic_pio_hwaddr *range;
+	struct logic_pio_hwaddr *range, *found_range = NULL;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(range, &io_range_list, list) {
-		if (range->fwnode == fwnode)
-			return range;
+		if (range->fwnode == fwnode) {
+			found_range = range;
+			break;
+		}
 	}
-	return NULL;
+	rcu_read_unlock();
+
+	return found_range;
 }
 
 /* Return a registered range given an input PIO token */
 static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
 {
-	struct logic_pio_hwaddr *range;
+	struct logic_pio_hwaddr *range, *found_range = NULL;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(range, &io_range_list, list) {
-		if (in_range(pio, range->io_start, range->size))
-			return range;
+		if (in_range(pio, range->io_start, range->size)) {
+			found_range = range;
+			break;
+		}
 	}
-	pr_err("PIO entry token %lx invalid\n", pio);
-	return NULL;
+	rcu_read_unlock();
+
+	if (!found_range)
+		pr_err("PIO entry token 0x%lx invalid\n", pio);
+
+	return found_range;
 }
 
 /**
@@ -180,14 +192,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
 {
 	struct logic_pio_hwaddr *range;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(range, &io_range_list, list) {
 		if (range->flags != LOGIC_PIO_CPU_MMIO)
 			continue;
-		if (in_range(addr, range->hw_start, range->size))
-			return addr - range->hw_start + range->io_start;
+		if (in_range(addr, range->hw_start, range->size)) {
+			unsigned long cpuaddr;
+
+			cpuaddr = addr - range->hw_start + range->io_start;
+
+			rcu_read_unlock();
+			return cpuaddr;
+		}
 	}
-	pr_err("addr %llx not registered in io_range_list\n",
-	       (unsigned long long) addr);
+	rcu_read_unlock();
+
+	pr_err("addr %pa not registered in io_range_list\n", &addr);
+
 	return ~0UL;
 }
 