Merge tag 'scmi-updates-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into next/drivers

SCMI cleanups for v4.18

This addresses all of the trivial review comments that were not
handled earlier, as the series was already queued up for v4.17 and
none of them were critical enough to go in as fixes.

They generally just improve code readability, fix kernel-docs, remove
unused/unnecessary code, follow standard function naming, and simplify
certain exit paths.

* tag 'scmi-updates-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  firmware: arm_scmi: simplify exit path by returning on error
  firmware: arm_scmi: improve exit paths and code readability
  firmware: arm_scmi: remove unnecessary bitmap_zero
  firmware: arm_scmi: drop unused `con_priv` structure member
  firmware: arm_scmi: rename scmi_xfer_{init,get,put}
  firmware: arm_scmi: rename get_transition_latency and add_opps_to_device
  firmware: arm_scmi: fix kernel-docs documentation
  firmware: arm_scmi: improve code readability using bitfield accessor macros

Signed-off-by: Olof Johansson <olof@lixom.net>
Commit 51fe3a373e by Olof Johansson, 2018-05-15 13:48:46 -07:00
10 changed files with 166 additions and 150 deletions


@ -117,7 +117,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
ret = handle->perf_ops->add_opps_to_device(handle, cpu_dev);
ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
if (ret) {
dev_warn(cpu_dev, "failed to add opps to the device\n");
return ret;
@ -164,7 +164,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
/* SCMI allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
latency = handle->perf_ops->get_transition_latency(handle, cpu_dev);
latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
if (!latency)
latency = CPUFREQ_ETERNAL;


@ -26,7 +26,7 @@ struct scmi_msg_resp_base_attributes {
* scmi_base_attributes_get() - gets the implementation details
* that are associated with the base protocol.
*
* @handle - SCMI entity handle
* @handle: SCMI entity handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
@ -37,7 +37,7 @@ static int scmi_base_attributes_get(const struct scmi_handle *handle)
struct scmi_msg_resp_base_attributes *attr_info;
struct scmi_revision_info *rev = handle->version;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
if (ret)
return ret;
@ -49,15 +49,16 @@ static int scmi_base_attributes_get(const struct scmi_handle *handle)
rev->num_agents = attr_info->num_agents;
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
/**
* scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
*
* @handle - SCMI entity handle
* @sub_vendor - specify true if sub-vendor ID is needed
* @handle: SCMI entity handle
* @sub_vendor: specify true if sub-vendor ID is needed
*
* Return: 0 on success, else appropriate SCMI error.
*/
@ -80,7 +81,7 @@ scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
size = ARRAY_SIZE(rev->vendor_id);
}
ret = scmi_one_xfer_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
if (ret)
return ret;
@ -88,7 +89,8 @@ scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
if (!ret)
memcpy(vendor_id, t->rx.buf, size);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -97,7 +99,7 @@ scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
* implementation 32-bit version. The format of the version number is
* vendor-specific
*
* @handle - SCMI entity handle
* @handle: SCMI entity handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
@ -109,7 +111,7 @@ scmi_base_implementation_version_get(const struct scmi_handle *handle)
struct scmi_xfer *t;
struct scmi_revision_info *rev = handle->version;
ret = scmi_one_xfer_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
ret = scmi_xfer_get_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
if (ret)
return ret;
@ -120,7 +122,8 @@ scmi_base_implementation_version_get(const struct scmi_handle *handle)
rev->impl_ver = le32_to_cpu(*impl_ver);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -128,8 +131,8 @@ scmi_base_implementation_version_get(const struct scmi_handle *handle)
* scmi_base_implementation_list_get() - gets the list of protocols it is
* OSPM is allowed to access
*
* @handle - SCMI entity handle
* @protocols_imp - pointer to hold the list of protocol identifiers
* @handle: SCMI entity handle
* @protocols_imp: pointer to hold the list of protocol identifiers
*
* Return: 0 on success, else appropriate SCMI error.
*/
@ -143,7 +146,7 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
u32 tot_num_ret = 0, loop_num_ret;
struct device *dev = handle->dev;
ret = scmi_one_xfer_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
ret = scmi_xfer_get_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
if (ret)
return ret;
@ -172,16 +175,17 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
tot_num_ret += loop_num_ret;
} while (loop_num_ret);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
/**
* scmi_base_discover_agent_get() - discover the name of an agent
*
* @handle - SCMI entity handle
* @id - Agent identifier
* @name - Agent identifier ASCII string
* @handle: SCMI entity handle
* @id: Agent identifier
* @name: Agent identifier ASCII string
*
* An agent id of 0 is reserved to identify the platform itself.
* Generally operating system is represented as "OSPM"
@ -194,7 +198,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, BASE_DISCOVER_AGENT,
ret = scmi_xfer_get_init(handle, BASE_DISCOVER_AGENT,
SCMI_PROTOCOL_BASE, sizeof(__le32),
SCMI_MAX_STR_SIZE, &t);
if (ret)
@ -206,7 +210,8 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
if (!ret)
memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
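
Every protocol call in the hunks above follows the same lifecycle with the renamed helpers. Below is a minimal sketch of that pattern, assuming a hypothetical EXAMPLE_VALUE_GET command with a one-word request and one-word response; only the helper names, SCMI_PROTOCOL_BASE and the buffer handling come from the code above.

/*
 * Hypothetical command illustrating the renamed transfer helpers:
 * reserve and initialise, fill tx, exchange, read rx, release.
 */
#include "common.h"

#define EXAMPLE_VALUE_GET	0x9	/* hypothetical command identifier */

static int scmi_example_value_get(const struct scmi_handle *handle,
				  u32 index, u32 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, EXAMPLE_VALUE_GET, SCMI_PROTOCOL_BASE,
				 sizeof(__le32), sizeof(__le32), &t);
	if (ret)
		return ret;

	/* Transmit payload: a single little-endian 32-bit argument */
	*(__le32 *)t->tx.buf = cpu_to_le32(index);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*value = le32_to_cpu(*(__le32 *)t->rx.buf);

	/* Release the slot on the success and the error path alike */
	scmi_xfer_put(handle, t);

	return ret;
}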


@ -125,13 +125,13 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
int id, retval;
struct scmi_device *scmi_dev;
id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
if (id < 0)
return NULL;
scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
if (!scmi_dev)
goto no_mem;
return NULL;
id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
if (id < 0)
goto free_mem;
scmi_dev->id = id;
scmi_dev->protocol_id = protocol;
@ -141,13 +141,15 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
retval = device_register(&scmi_dev->dev);
if (!retval)
return scmi_dev;
if (retval)
goto put_dev;
return scmi_dev;
put_dev:
put_device(&scmi_dev->dev);
kfree(scmi_dev);
no_mem:
ida_simple_remove(&scmi_bus_id, id);
free_mem:
kfree(scmi_dev);
return NULL;
}
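
The reordering in scmi_device_create above restores the usual acquire-in-order, release-in-reverse shape, so each error label undoes only what has already succeeded. A stripped-down sketch of that shape, with a hypothetical object, IDA and registration step:

#include <linux/idr.h>
#include <linux/slab.h>

/* Hypothetical object and registration step; only the unwind shape matters */
struct example_obj { int id; };
static DEFINE_IDA(example_ida);
static int example_register(struct example_obj *obj);

static struct example_obj *example_create(void)
{
	int id;
	struct example_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;			/* nothing acquired yet */

	id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto free_obj;			/* undo the allocation only */
	obj->id = id;

	if (example_register(obj))
		goto remove_id;			/* undo both, most recent first */

	return obj;

remove_id:
	ida_simple_remove(&example_ida, id);
free_obj:
	kfree(obj);
	return NULL;
}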
@ -171,9 +173,9 @@ int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn)
spin_lock(&protocol_lock);
ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1,
GFP_ATOMIC);
spin_unlock(&protocol_lock);
if (ret != protocol_id)
pr_err("unable to allocate SCMI idr slot, err %d\n", ret);
spin_unlock(&protocol_lock);
return ret;
}


@ -77,7 +77,7 @@ static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_clock_protocol_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
if (ret)
return ret;
@ -90,7 +90,7 @@ static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
ci->max_async_req = attr->max_async_req;
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -101,7 +101,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
ret = scmi_one_xfer_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
sizeof(clk_id), sizeof(*attr), &t);
if (ret)
return ret;
@ -115,7 +115,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
else
clk->name[0] = '\0';
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -132,7 +132,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
struct scmi_msg_clock_describe_rates *clk_desc;
struct scmi_msg_resp_clock_describe_rates *rlist;
ret = scmi_one_xfer_init(handle, CLOCK_DESCRIBE_RATES,
ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
if (ret)
return ret;
@ -186,7 +186,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
clk->list.num_rates = tot_rate_cnt;
err:
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -196,7 +196,7 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
sizeof(__le32), sizeof(u64), &t);
if (ret)
return ret;
@ -211,7 +211,7 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -222,7 +222,7 @@ static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
ret = scmi_one_xfer_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
@ -235,7 +235,7 @@ static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -246,7 +246,7 @@ scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
struct scmi_xfer *t;
struct scmi_clock_set_config *cfg;
ret = scmi_one_xfer_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
@ -257,7 +257,7 @@ scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}


@ -7,6 +7,7 @@
* Copyright (C) 2018 ARM Ltd.
*/
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
@ -14,10 +15,10 @@
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#define PROTOCOL_REV_MINOR_BITS 16
#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS)
#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK)
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
#define MAX_PROTOCOLS_IMP 16
#define MAX_OPPS 16
@ -50,8 +51,11 @@ struct scmi_msg_resp_prot_version {
* @id: The identifier of the command being sent
* @protocol_id: The identifier of the protocol used to send @id command
* @seq: The token to identify the message. when a message/command returns,
* the platform returns the whole message header unmodified including
* the token.
* the platform returns the whole message header unmodified including
* the token
* @status: Status of the transfer once it's complete
* @poll_completion: Indicate if the transfer needs to be polled for
* completion or interrupt mode is used
*/
struct scmi_msg_hdr {
u8 id;
@ -82,18 +86,16 @@ struct scmi_msg {
* buffer for the rx path as we use for the tx path.
* @done: completion event
*/
struct scmi_xfer {
void *con_priv;
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
struct completion done;
};
void scmi_one_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_one_xfer_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);


@ -29,16 +29,12 @@
#include "common.h"
#define MSG_ID_SHIFT 0
#define MSG_ID_MASK 0xff
#define MSG_TYPE_SHIFT 8
#define MSG_TYPE_MASK 0x3
#define MSG_PROTOCOL_ID_SHIFT 10
#define MSG_PROTOCOL_ID_MASK 0xff
#define MSG_TOKEN_ID_SHIFT 18
#define MSG_TOKEN_ID_MASK 0x3ff
#define MSG_XTRACT_TOKEN(header) \
(((header) >> MSG_TOKEN_ID_SHIFT) & MSG_TOKEN_ID_MASK)
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_TYPE_MASK GENMASK(9, 8)
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
enum scmi_error_codes {
SCMI_SUCCESS = 0, /* Success */
@ -55,7 +51,7 @@ enum scmi_error_codes {
SCMI_ERR_MAX
};
/* List of all SCMI devices active in system */
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
@ -72,7 +68,6 @@ static DEFINE_MUTEX(scmi_list_mutex);
struct scmi_xfers_info {
struct scmi_xfer *xfer_block;
unsigned long *xfer_alloc_table;
/* protect transfer allocation */
spinlock_t xfer_lock;
};
@ -98,6 +93,7 @@ struct scmi_desc {
* @payload: Transmit/Receive mailbox channel payload area
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @handle: Pointer to SCMI entity handle
*/
struct scmi_chan_info {
struct mbox_client cl;
@ -108,7 +104,7 @@ struct scmi_chan_info {
};
/**
* struct scmi_info - Structure representing a SCMI instance
* struct scmi_info - Structure representing a SCMI instance
*
* @dev: Device pointer
* @desc: SoC description for this instance
@ -117,9 +113,9 @@ struct scmi_chan_info {
* implementation version and (sub-)vendor identification.
* @minfo: Message info
* @tx_idr: IDR object to map protocol id to channel info pointer
* @protocols_imp: list of protocols implemented, currently maximum of
* @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @node: list head
* @node: List head
* @users: Number of users of this instance
*/
struct scmi_info {
@ -225,9 +221,7 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
/*
* Are we even expecting this?
*/
/* Are we even expecting this? */
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
return;
@ -252,12 +246,14 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
*
* @hdr: pointer to header containing all the information on message id,
* protocol id and sequence id.
*
* Return: 32-bit packed command header to be sent to the platform.
*/
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
return ((hdr->id & MSG_ID_MASK) << MSG_ID_SHIFT) |
((hdr->seq & MSG_TOKEN_ID_MASK) << MSG_TOKEN_ID_SHIFT) |
((hdr->protocol_id & MSG_PROTOCOL_ID_MASK) << MSG_PROTOCOL_ID_SHIFT);
return FIELD_PREP(MSG_ID_MASK, hdr->id) |
FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
/**
@ -286,9 +282,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
}
/**
* scmi_one_xfer_get() - Allocate one message
* scmi_xfer_get() - Allocate one message
*
* @handle: SCMI entity handle
* @handle: Pointer to SCMI entity handle
*
* Helper function which is used by various command functions that are
* exposed to clients of this driver for allocating a message traffic event.
@ -299,7 +295,7 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
*
* Return: 0 if all went fine, else corresponding error.
*/
static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle)
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
{
u16 xfer_id;
struct scmi_xfer *xfer;
@ -328,14 +324,14 @@ static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle)
}
/**
* scmi_one_xfer_put() - Release a message
* scmi_xfer_put() - Release a message
*
* @minfo: transfer info pointer
* @xfer: message that was reserved by scmi_one_xfer_get
* @handle: Pointer to SCMI entity handle
* @xfer: message that was reserved by scmi_xfer_get
*
* This holds a spinlock to maintain integrity of internal data structures.
*/
void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
unsigned long flags;
struct scmi_info *info = handle_to_scmi_info(handle);
@ -378,12 +374,12 @@ static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
/**
* scmi_do_xfer() - Do one transfer
*
* @info: Pointer to SCMI entity information
* @handle: Pointer to SCMI entity handle
* @xfer: Transfer to initiate and wait for response
*
* Return: -ETIMEDOUT in case of no response, if transmit error,
* return corresponding error, else if all goes well,
* return 0.
* return corresponding error, else if all goes well,
* return 0.
*/
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
@ -440,22 +436,22 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
}
/**
* scmi_one_xfer_init() - Allocate and initialise one message
* scmi_xfer_get_init() - Allocate and initialise one message
*
* @handle: SCMI entity handle
* @handle: Pointer to SCMI entity handle
* @msg_id: Message identifier
* @msg_prot_id: Protocol identifier for the message
* @prot_id: Protocol identifier for the message
* @tx_size: transmit message size
* @rx_size: receive message size
* @p: pointer to the allocated and initialised message
*
* This function allocates the message using @scmi_one_xfer_get and
* This function allocates the message using @scmi_xfer_get and
* initialise the header.
*
* Return: 0 if all went fine with @p pointing to message, else
* corresponding error.
*/
int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
int ret;
@ -468,7 +464,7 @@ int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
tx_size > info->desc->max_msg_size)
return -ERANGE;
xfer = scmi_one_xfer_get(handle);
xfer = scmi_xfer_get(handle);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
@ -482,13 +478,16 @@ int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
xfer->hdr.poll_completion = false;
*p = xfer;
return 0;
}
/**
* scmi_version_get() - command to get the revision of the SCMI entity
*
* @handle: Handle to SCMI entity information
* @handle: Pointer to SCMI entity handle
* @protocol: Protocol identifier for the message
* @version: Holds returned version of protocol.
*
* Updates the SCMI information in the internal data structure.
*
@ -501,7 +500,7 @@ int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
__le32 *rev_info;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, PROTOCOL_VERSION, protocol, 0,
ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
sizeof(*version), &t);
if (ret)
return ret;
@ -512,7 +511,7 @@ int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
*version = le32_to_cpu(*rev_info);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -540,12 +539,12 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
}
/**
* scmi_handle_get() - Get the SCMI handle for a device
* scmi_handle_get() - Get the SCMI handle for a device
*
* @dev: pointer to device for which we want SCMI handle
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by caller of SCMI protocol library.
* and is expected to be maintained by caller of SCMI protocol library.
* scmi_handle_put must be balanced with successful scmi_handle_get
*
* Return: pointer to handle if successful, NULL on error
@ -576,7 +575,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
* @handle: handle acquired by scmi_handle_get
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by caller of SCMI protocol library.
* and is expected to be maintained by caller of SCMI protocol library.
* scmi_handle_put must be balanced with successful scmi_handle_get
*
* Return: 0 is successfully released
@ -599,7 +598,7 @@ int scmi_handle_put(const struct scmi_handle *handle)
}
static const struct scmi_desc scmi_generic_desc = {
.max_rx_timeout_ms = 30, /* we may increase this if required */
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
.max_msg_size = 128,
};
@ -621,9 +620,9 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
struct scmi_xfers_info *info = &sinfo->minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= (MSG_TOKEN_ID_MASK + 1))) {
dev_err(dev, "Maximum message of %d exceeds supported %d\n",
desc->max_msg, MSG_TOKEN_ID_MASK + 1);
if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
desc->max_msg, MSG_TOKEN_MAX);
return -EINVAL;
}
@ -637,8 +636,6 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
if (!info->xfer_alloc_table)
return -ENOMEM;
bitmap_zero(info->xfer_alloc_table, desc->max_msg);
/* Pre-initialize the buffer pointer to pre-allocated buffers */
for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
@ -690,11 +687,12 @@ static int scmi_remove(struct platform_device *pdev)
list_del(&info->node);
mutex_unlock(&scmi_list_mutex);
if (!ret) {
/* Safe to free channels since no more users */
ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
idr_destroy(&info->tx_idr);
}
if (ret)
return ret;
/* Safe to free channels since no more users */
ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
idr_destroy(&info->tx_idr);
return ret;
}
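
Tying these changes together, here is a round-trip sketch of the header handling, assuming the MSG_*_MASK definitions and the <linux/bitfield.h> helpers from the hunks above; the id, protocol and token values are made up for illustration.

/*
 * Round-trip of the header packing shown earlier in this file: FIELD_PREP()
 * places each value into its GENMASK()-described field and FIELD_GET() (via
 * MSG_XTRACT_TOKEN) recovers it, with no hand-written shift counts.
 */
static void example_header_roundtrip(void)
{
	u32 hdr;

	hdr = FIELD_PREP(MSG_ID_MASK, 0x6) |		/* message id */
	      FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x13) |	/* protocol id */
	      FIELD_PREP(MSG_TOKEN_ID_MASK, 0x2a);	/* sequence token */

	WARN_ON(MSG_XTRACT_TOKEN(hdr) != 0x2a);
}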
@ -840,7 +838,8 @@ static int scmi_probe(struct platform_device *pdev)
if (of_property_read_u32(child, "reg", &prot_id))
continue;
prot_id &= MSG_PROTOCOL_ID_MASK;
if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
dev_err(dev, "Out of range protocol %d\n", prot_id);
if (!scmi_is_protocol_implemented(handle, prot_id)) {
dev_err(dev, "SCMI protocol %d not implemented\n",


@ -115,7 +115,7 @@ static int scmi_perf_attributes_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_perf_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
if (ret)
return ret;
@ -133,7 +133,7 @@ static int scmi_perf_attributes_get(const struct scmi_handle *handle,
pi->stats_size = le32_to_cpu(attr->stats_size);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -145,7 +145,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
ret = scmi_one_xfer_init(handle, PERF_DOMAIN_ATTRIBUTES,
ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
SCMI_PROTOCOL_PERF, sizeof(domain),
sizeof(*attr), &t);
if (ret)
@ -171,7 +171,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -194,7 +194,7 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
struct scmi_msg_perf_describe_levels *dom_info;
struct scmi_msg_resp_perf_describe_levels *level_info;
ret = scmi_one_xfer_init(handle, PERF_DESCRIBE_LEVELS,
ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
if (ret)
return ret;
@ -237,7 +237,7 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
} while (num_returned && num_remaining);
perf_dom->opp_count = tot_opp_cnt;
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
return ret;
@ -250,7 +250,7 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
struct scmi_xfer *t;
struct scmi_perf_set_limits *limits;
ret = scmi_one_xfer_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
sizeof(*limits), 0, &t);
if (ret)
return ret;
@ -262,7 +262,7 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -273,7 +273,7 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
struct scmi_xfer *t;
struct scmi_perf_get_limits *limits;
ret = scmi_one_xfer_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
sizeof(__le32), 0, &t);
if (ret)
return ret;
@ -288,7 +288,7 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
*min_perf = le32_to_cpu(limits->min_level);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -299,7 +299,7 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
struct scmi_xfer *t;
struct scmi_perf_set_level *lvl;
ret = scmi_one_xfer_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
sizeof(*lvl), 0, &t);
if (ret)
return ret;
@ -311,7 +311,7 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -321,7 +321,7 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
@ -333,7 +333,7 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
if (!ret)
*level = le32_to_cpu(*(__le32 *)t->rx.buf);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -349,8 +349,8 @@ static int scmi_dev_domain_id(struct device *dev)
return clkspec.args[0];
}
static int scmi_dvfs_add_opps_to_device(const struct scmi_handle *handle,
struct device *dev)
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
struct device *dev)
{
int idx, ret, domain;
unsigned long freq;
@ -383,7 +383,7 @@ static int scmi_dvfs_add_opps_to_device(const struct scmi_handle *handle,
return 0;
}
static int scmi_dvfs_get_transition_latency(const struct scmi_handle *handle,
static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
struct device *dev)
{
struct perf_dom_info *dom;
@ -432,8 +432,8 @@ static struct scmi_perf_ops perf_ops = {
.level_set = scmi_perf_level_set,
.level_get = scmi_perf_level_get,
.device_domain_id = scmi_dev_domain_id,
.get_transition_latency = scmi_dvfs_get_transition_latency,
.add_opps_to_device = scmi_dvfs_add_opps_to_device,
.transition_latency_get = scmi_dvfs_transition_latency_get,
.device_opps_add = scmi_dvfs_device_opps_add,
.freq_set = scmi_dvfs_freq_set,
.freq_get = scmi_dvfs_freq_get,
};


@ -63,7 +63,7 @@ static int scmi_power_attributes_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_power_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
if (ret)
return ret;
@ -78,7 +78,7 @@ static int scmi_power_attributes_get(const struct scmi_handle *handle,
pi->stats_size = le32_to_cpu(attr->stats_size);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -90,7 +90,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
struct scmi_xfer *t;
struct scmi_msg_resp_power_domain_attributes *attr;
ret = scmi_one_xfer_init(handle, POWER_DOMAIN_ATTRIBUTES,
ret = scmi_xfer_get_init(handle, POWER_DOMAIN_ATTRIBUTES,
SCMI_PROTOCOL_POWER, sizeof(domain),
sizeof(*attr), &t);
if (ret)
@ -109,7 +109,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -120,7 +120,7 @@ scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
struct scmi_xfer *t;
struct scmi_power_set_state *st;
ret = scmi_one_xfer_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
ret = scmi_xfer_get_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
sizeof(*st), 0, &t);
if (ret)
return ret;
@ -132,7 +132,7 @@ scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -142,7 +142,7 @@ scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
ret = scmi_xfer_get_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
@ -153,7 +153,7 @@ scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
if (!ret)
*state = le32_to_cpu(*(__le32 *)t->rx.buf);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}


@ -79,7 +79,7 @@ static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
if (ret)
return ret;
@ -95,7 +95,7 @@ static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
si->reg_size = le32_to_cpu(attr->reg_size);
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -108,7 +108,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_description *buf;
ret = scmi_one_xfer_init(handle, SENSOR_DESCRIPTION_GET,
ret = scmi_xfer_get_init(handle, SENSOR_DESCRIPTION_GET,
SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
if (ret)
return ret;
@ -150,7 +150,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
*/
} while (num_returned && num_remaining);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -162,7 +162,7 @@ scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
struct scmi_xfer *t;
struct scmi_msg_set_sensor_config *cfg;
ret = scmi_one_xfer_init(handle, SENSOR_CONFIG_SET,
ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@ -173,7 +173,7 @@ scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -185,7 +185,7 @@ static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
ret = scmi_one_xfer_init(handle, SENSOR_TRIP_POINT_SET,
ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
if (ret)
return ret;
@ -198,7 +198,7 @@ static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
@ -209,7 +209,7 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
ret = scmi_one_xfer_init(handle, SENSOR_READING_GET,
ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
sizeof(u64), &t);
if (ret)
@ -227,7 +227,7 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
}
scmi_one_xfer_put(handle, t);
scmi_xfer_put(handle, t);
return ret;
}


@ -85,8 +85,8 @@ struct scmi_clk_ops {
* @level_set: sets the performance level of a domain
* @level_get: gets the performance level of a domain
* @device_domain_id: gets the scmi domain id for a given device
* @get_transition_latency: gets the DVFS transition latency for a given device
* @add_opps_to_device: adds all the OPPs for a given device
* @transition_latency_get: gets the DVFS transition latency for a given device
* @device_opps_add: adds all the OPPs for a given device
* @freq_set: sets the frequency for a given device using sustained frequency
* to sustained performance level mapping
* @freq_get: gets the frequency for a given device using sustained frequency
@ -102,10 +102,10 @@ struct scmi_perf_ops {
int (*level_get)(const struct scmi_handle *handle, u32 domain,
u32 *level, bool poll);
int (*device_domain_id)(struct device *dev);
int (*get_transition_latency)(const struct scmi_handle *handle,
int (*transition_latency_get)(const struct scmi_handle *handle,
struct device *dev);
int (*add_opps_to_device)(const struct scmi_handle *handle,
struct device *dev);
int (*device_opps_add)(const struct scmi_handle *handle,
struct device *dev);
int (*freq_set)(const struct scmi_handle *handle, u32 domain,
unsigned long rate, bool poll);
int (*freq_get)(const struct scmi_handle *handle, u32 domain,
@ -189,6 +189,14 @@ struct scmi_sensor_ops {
* @perf_ops: pointer to set of performance protocol operations
* @clk_ops: pointer to set of clock protocol operations
* @sensor_ops: pointer to set of sensor protocol operations
* @perf_priv: pointer to private data structure specific to performance
* protocol(for internal use only)
* @clk_priv: pointer to private data structure specific to clock
* protocol(for internal use only)
* @power_priv: pointer to private data structure specific to power
* protocol(for internal use only)
* @sensor_priv: pointer to private data structure specific to sensors
* protocol(for internal use only)
*/
struct scmi_handle {
struct device *dev;
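
For completeness, a minimal consumer-side sketch of the renamed perf ops, mirroring the cpufreq hunk at the top of this commit; the wrapper function, its arguments and the error handling policy are hypothetical, only the handle layout and the two op names come from this header.

#include <linux/device.h>
#include <linux/scmi_protocol.h>

static int example_setup_dvfs(const struct scmi_handle *handle,
			      struct device *dev)
{
	int ret, latency;

	/* Formerly add_opps_to_device(): register this device's OPPs */
	ret = handle->perf_ops->device_opps_add(handle, dev);
	if (ret)
		return ret;

	/* Formerly get_transition_latency(): cost of a DVFS switch */
	latency = handle->perf_ops->transition_latency_get(handle, dev);
	dev_dbg(dev, "DVFS transition latency %d\n", latency);

	return 0;
}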