iio: core: move @info_exist_lock to struct iio_dev_opaque
This lock is only of interest to the IIO core, so make it only visible there.

Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
Link: https://lore.kernel.org/r/20210426174911.397061-7-jic23@kernel.org
This commit is contained in:
parent 62f4f36cdf
commit b804e2b76a
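Every hunk below applies the same mechanical change: obtain the core-private wrapper with to_iio_dev_opaque() and take info_exist_lock through it, rather than through the public struct iio_dev. The following is a minimal sketch of the resulting pattern, for orientation only; the function name example_guarded_call is illustrative and not part of the commit.

/*
 * Illustrative sketch, not part of the patch: how IIO core code now
 * guards against the underlying driver going away.
 */
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>

static int example_guarded_call(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	/* Take the core-private lock before touching indio_dev->info. */
	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!indio_dev->info) {
		/* Driver has been removed; nothing to call into. */
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Safe to use indio_dev->info callbacks while the lock is held. */
	ret = 0;

out_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);
	return ret;
}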
drivers/iio/industrialio-buffer.c

@@ -1150,12 +1150,13 @@ int iio_update_buffers(struct iio_dev *indio_dev,
 		       struct iio_buffer *insert_buffer,
 		       struct iio_buffer *remove_buffer)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	int ret;

 	if (insert_buffer == remove_buffer)
 		return 0;

-	mutex_lock(&indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	mutex_lock(&indio_dev->mlock);

 	if (insert_buffer && iio_buffer_is_active(insert_buffer))
@@ -1178,7 +1179,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,

 out_unlock:
 	mutex_unlock(&indio_dev->mlock);
-	mutex_unlock(&indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
drivers/iio/industrialio-core.c

@@ -1642,7 +1642,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
 	device_initialize(&indio_dev->dev);
 	iio_device_set_drvdata(indio_dev, (void *)indio_dev);
 	mutex_init(&indio_dev->mlock);
-	mutex_init(&indio_dev->info_exist_lock);
+	mutex_init(&iio_dev_opaque->info_exist_lock);
 	INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);

 	iio_dev_opaque->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
@@ -1779,7 +1779,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	struct iio_ioctl_handler *h;
 	int ret = -ENODEV;

-	mutex_lock(&indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);

 	/**
 	 * The NULL check here is required to prevent crashing when a device
@@ -1799,7 +1799,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		ret = -ENODEV;

 out_unlock:
-	mutex_unlock(&indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -1938,7 +1938,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
 {
 	cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);

-	mutex_lock(&indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);

 	iio_device_unregister_debugfs(indio_dev);

@@ -1949,7 +1949,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
 	iio_device_wakeup_eventset(indio_dev);
 	iio_buffer_wakeup_poll(indio_dev);

-	mutex_unlock(&indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	iio_buffers_free_sysfs_and_mask(indio_dev);
 }
drivers/iio/inkern.c

@@ -10,6 +10,7 @@
 #include <linux/of.h>

 #include <linux/iio/iio.h>
+#include <linux/iio/iio-opaque.h>
 #include "iio_core.h"
 #include <linux/iio/machine.h>
 #include <linux/iio/driver.h>
@@ -538,9 +539,10 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,

 int iio_read_channel_raw(struct iio_channel *chan, int *val)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (chan->indio_dev->info == NULL) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -548,7 +550,7 @@ int iio_read_channel_raw(struct iio_channel *chan, int *val)

 	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -556,9 +558,10 @@ EXPORT_SYMBOL_GPL(iio_read_channel_raw);

 int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (chan->indio_dev->info == NULL) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -566,7 +569,7 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val)

 	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -631,9 +634,10 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 	int *processed, unsigned int scale)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (chan->indio_dev->info == NULL) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -642,7 +646,7 @@ int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
 						    scale);
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -651,9 +655,10 @@ EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
 int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
 			       enum iio_chan_info_enum attribute)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (chan->indio_dev->info == NULL) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -661,7 +666,7 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,

 	ret = iio_channel_read(chan, val, val2, attribute);
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -676,9 +681,10 @@ EXPORT_SYMBOL_GPL(iio_read_channel_offset);
 int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
 				     unsigned int scale)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (chan->indio_dev->info == NULL) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -699,7 +705,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
 	}

 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -733,9 +739,10 @@ int iio_read_avail_channel_attribute(struct iio_channel *chan,
 				     const int **vals, int *type, int *length,
 				     enum iio_chan_info_enum attribute)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (!chan->indio_dev->info) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -743,7 +750,7 @@ int iio_read_avail_channel_attribute(struct iio_channel *chan,

 	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -815,10 +822,11 @@ static int iio_channel_read_max(struct iio_channel *chan,

 int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;
 	int type;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (!chan->indio_dev->info) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -826,7 +834,7 @@ int iio_read_max_channel_raw(struct iio_channel *chan, int *val)

 	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -834,10 +842,11 @@ EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

 int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret = 0;
 	/* Need to verify underlying driver has not gone away */

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (chan->indio_dev->info == NULL) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -845,7 +854,7 @@ int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)

 	*type = chan->channel->type;
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
@@ -861,9 +870,10 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
 int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
 				enum iio_chan_info_enum attribute)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;

-	mutex_lock(&chan->indio_dev->info_exist_lock);
+	mutex_lock(&iio_dev_opaque->info_exist_lock);
 	if (chan->indio_dev->info == NULL) {
 		ret = -ENODEV;
 		goto err_unlock;
@@ -871,7 +881,7 @@ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,

 	ret = iio_channel_write(chan, val, val2, attribute);
 err_unlock:
-	mutex_unlock(&chan->indio_dev->info_exist_lock);
+	mutex_unlock(&iio_dev_opaque->info_exist_lock);

 	return ret;
 }
include/linux/iio/iio-opaque.h

@@ -8,6 +8,7 @@
  * @indio_dev: public industrial I/O device information
  * @id: used to identify device internally
  * @driver_module: used to make it harder to undercut users
+ * @info_exist_lock: lock to prevent use during removal
  * @trig_readonly: mark the current trigger immutable
  * @event_interface: event chrdevs associated with interrupt lines
  * @attached_buffers: array of buffers statically attached by the driver
@@ -32,6 +33,7 @@ struct iio_dev_opaque {
 	struct iio_dev indio_dev;
 	int id;
 	struct module *driver_module;
+	struct mutex info_exist_lock;
 	bool trig_readonly;
 	struct iio_event_interface *event_interface;
 	struct iio_buffer **attached_buffers;
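For reference, struct iio_dev_opaque embeds the public struct iio_dev as its @indio_dev member, so core code recovers the wrapper from a public pointer with a container_of()-based helper. A rough sketch of that helper is given below as an assumption; the real definition lives in include/linux/iio/iio-opaque.h and is not shown in this diff.

/* Sketch only: how to_iio_dev_opaque() is expected to resolve the wrapper. */
#define to_iio_dev_opaque(indio_dev) \
	container_of((indio_dev), struct iio_dev_opaque, indio_dev)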
include/linux/iio/iio.h

@@ -510,7 +510,6 @@ struct iio_buffer_setup_ops {
  * @label: [DRIVER] unique name to identify which device this is
  * @info: [DRIVER] callbacks and constant info from driver
  * @clock_id: [INTERN] timestamping clock posix identifier
- * @info_exist_lock: [INTERN] lock to prevent use during removal
  * @setup_ops: [DRIVER] callbacks to call before and after buffer
  *             enable/disable
  * @chrdev: [INTERN] associated character device
@@ -542,7 +541,6 @@ struct iio_dev {
 	const char *label;
 	const struct iio_info *info;
 	clockid_t clock_id;
-	struct mutex info_exist_lock;
 	const struct iio_buffer_setup_ops *setup_ops;
 	struct cdev chrdev;
