net: ipa: don't save the platform device
The IPA platform device is now only used as the structure containing
the IPA device structure.  Replace the platform device pointer with a
pointer to the device structure.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5245f4fd28 (parent 81d65f3413)
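The change is mechanical: code that previously reached the generic device through the saved platform device (&ipa->pdev->dev) now reads the saved device pointer (ipa->dev) directly, and ipa_probe() records that pointer once. Below is a minimal user-space sketch of the pattern; the structure and function names are stand-ins, not the kernel's own, and it assumes nothing beyond the C standard library.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct device { const char *name; };
struct platform_device { struct device dev; };

/* Before: the context saved the platform device and dereferenced it. */
struct ipa_before { struct platform_device *pdev; };

/* After: the context saves only the device pointer it actually uses. */
struct ipa_after { struct device *dev; };

static void report_before(const struct ipa_before *ipa)
{
	struct device *dev = &ipa->pdev->dev;	/* extra indirection on every use */

	printf("before: %s\n", dev->name);
}

static void report_after(const struct ipa_after *ipa)
{
	struct device *dev = ipa->dev;	/* direct use of the saved pointer */

	printf("after: %s\n", dev->name);
}

int main(void)
{
	struct platform_device pdev = { .dev = { .name = "ipa" } };
	struct ipa_before before = { .pdev = &pdev };
	struct ipa_after after = { .dev = &pdev.dev };	/* what probe now saves */

	report_before(&before);
	report_after(&after);

	return 0;
}

The kernel change below has the same shape: struct ipa drops its struct platform_device *pdev member in favor of struct device *dev, and every &ipa->pdev->dev access becomes ipa->dev (or drops an intermediate local entirely).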
@@ -21,7 +21,6 @@
 struct clk;
 struct icc_path;
 struct net_device;
-struct platform_device;
 
 struct ipa_power;
 struct ipa_smp2p;
@@ -31,7 +30,7 @@ struct ipa_interrupt;
  * struct ipa - IPA information
  * @gsi: Embedded GSI structure
  * @version: IPA hardware version
- * @pdev: Platform device
+ * @dev: IPA device pointer
  * @completion: Used to signal pipeline clear transfer complete
  * @nb: Notifier block used for remoteproc SSR
  * @notifier: Remoteproc SSR notifier
@@ -79,7 +78,7 @@ struct ipa_interrupt;
 struct ipa {
 	struct gsi gsi;
 	enum ipa_version version;
-	struct platform_device *pdev;
+	struct device *dev;
 	struct completion completion;
 	struct notifier_block nb;
 	void *notifier;

@@ -174,7 +174,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
 	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
 	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
 	const char *table = route ? "route" : "filter";
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	u32 size;
 
 	size = route ? ipa->route_count : ipa->filter_count + 1;
@@ -204,7 +204,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
 /* Validate the memory region that holds headers */
 static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	const struct ipa_mem *mem;
 	u32 offset_max;
 	u32 size_max;
@@ -256,7 +256,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
 						const char *name, u32 offset)
 {
 	struct ipa_cmd_register_write *payload;
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	u32 offset_max;
 	u32 bit_count;
 

@@ -233,8 +233,8 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 			const struct ipa_gsi_endpoint_data *data)
 {
 	const struct ipa_gsi_endpoint_data *other_data;
-	struct device *dev = &ipa->pdev->dev;
 	enum ipa_endpoint_name other_name;
+	struct device *dev = ipa->dev;
 
 	if (ipa_gsi_endpoint_data_empty(data))
 		return true;
@@ -388,7 +388,7 @@ static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
 			    const struct ipa_gsi_endpoint_data *data)
 {
 	const struct ipa_gsi_endpoint_data *dp = data;
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	enum ipa_endpoint_name name;
 	u32 max;
 
@@ -606,7 +606,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
 	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
 	trans = ipa_cmd_trans_alloc(ipa, count);
 	if (!trans) {
-		dev_err(&ipa->pdev->dev,
+		dev_err(ipa->dev,
 			"no transaction to reset modem exception endpoints\n");
 		return -EBUSY;
 	}
@@ -1498,8 +1498,7 @@ ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
 	if (endpoint_id == command_endpoint->endpoint_id) {
 		complete(&ipa->completion);
 	} else {
-		dev_err(&ipa->pdev->dev,
-			"unexpected tagged packet from endpoint %u\n",
+		dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
 			endpoint_id);
 	}
 
@@ -1536,6 +1535,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
 	void *data = page_address(page) + NET_SKB_PAD;
 	u32 unused = buffer_size - total_len;
 	struct ipa *ipa = endpoint->ipa;
+	struct device *dev = ipa->dev;
 	u32 resid = total_len;
 
 	while (resid) {
@@ -1544,7 +1544,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
 		u32 len;
 
 		if (resid < IPA_STATUS_SIZE) {
-			dev_err(&endpoint->ipa->pdev->dev,
+			dev_err(dev,
 				"short message (%u bytes < %zu byte status)\n",
 				resid, IPA_STATUS_SIZE);
 			break;
@@ -1666,8 +1666,8 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa)
  */
 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
 {
-	struct device *dev = &endpoint->ipa->pdev->dev;
 	struct ipa *ipa = endpoint->ipa;
+	struct device *dev = ipa->dev;
 	struct gsi *gsi = &ipa->gsi;
 	bool suspended = false;
 	dma_addr_t addr;
@@ -1769,7 +1769,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
 		gsi_channel_reset(&ipa->gsi, channel_id, true);
 
 	if (ret)
-		dev_err(&ipa->pdev->dev,
+		dev_err(ipa->dev,
 			"error %d resetting channel %u for endpoint %u\n",
 			ret, endpoint->channel_id, endpoint->endpoint_id);
 }
@@ -1817,7 +1817,7 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
 
 	ret = gsi_channel_start(gsi, endpoint->channel_id);
 	if (ret) {
-		dev_err(&ipa->pdev->dev,
+		dev_err(ipa->dev,
 			"error %d starting %cX channel %u for endpoint %u\n",
 			ret, endpoint->toward_ipa ? 'T' : 'R',
 			endpoint->channel_id, endpoint_id);
@@ -1854,14 +1854,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
 	/* Note that if stop fails, the channel's state is not well-defined */
 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
 	if (ret)
-		dev_err(&ipa->pdev->dev,
-			"error %d attempting to stop endpoint %u\n", ret,
-			endpoint_id);
+		dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
+			ret, endpoint_id);
 }
 
 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
 {
-	struct device *dev = &endpoint->ipa->pdev->dev;
+	struct device *dev = endpoint->ipa->dev;
 	struct gsi *gsi = &endpoint->ipa->gsi;
 	int ret;
 
@@ -1881,7 +1880,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
 
 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
 {
-	struct device *dev = &endpoint->ipa->pdev->dev;
+	struct device *dev = endpoint->ipa->dev;
 	struct gsi *gsi = &endpoint->ipa->gsi;
 	int ret;
 
@@ -1983,7 +1982,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
 
 int ipa_endpoint_config(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	const struct reg *reg;
 	u32 endpoint_id;
 	u32 hw_limit;

@@ -110,14 +110,13 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
 	struct ipa_interrupt *interrupt = dev_id;
 	struct ipa *ipa = interrupt->ipa;
 	u32 enabled = interrupt->enabled;
+	struct device *dev = ipa->dev;
 	const struct reg *reg;
-	struct device *dev;
 	u32 pending;
 	u32 offset;
 	u32 mask;
 	int ret;
 
-	dev = &ipa->pdev->dev;
 	ret = pm_runtime_get_sync(dev);
 	if (WARN_ON(ret < 0))
 		goto out_power_put;
@@ -240,8 +239,8 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
 int ipa_interrupt_config(struct ipa *ipa)
 {
 	struct ipa_interrupt *interrupt = ipa->interrupt;
-	struct device *dev = &ipa->pdev->dev;
 	unsigned int irq = interrupt->irq;
+	struct device *dev = ipa->dev;
 	const struct reg *reg;
 	int ret;
 
@@ -281,7 +280,7 @@ err_kfree:
 void ipa_interrupt_deconfig(struct ipa *ipa)
 {
 	struct ipa_interrupt *interrupt = ipa->interrupt;
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 
 	ipa->interrupt = NULL;
 

@@ -7,7 +7,6 @@
 #include <linux/types.h>
 #include <linux/atomic.h>
 #include <linux/bitfield.h>
-#include <linux/device.h>
 #include <linux/bug.h>
 #include <linux/io.h>
 #include <linux/firmware.h>
@@ -114,7 +113,7 @@ int ipa_setup(struct ipa *ipa)
 {
 	struct ipa_endpoint *exception_endpoint;
 	struct ipa_endpoint *command_endpoint;
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	int ret;
 
 	ret = gsi_setup(&ipa->gsi);
@@ -858,7 +857,7 @@ static int ipa_probe(struct platform_device *pdev)
 		goto err_power_exit;
 	}
 
-	ipa->pdev = pdev;
+	ipa->dev = dev;
 	dev_set_drvdata(dev, ipa);
 	ipa->interrupt = interrupt;
 	ipa->power = power;
@@ -953,12 +952,16 @@ err_interrupt_exit:
 
 static void ipa_remove(struct platform_device *pdev)
 {
-	struct ipa *ipa = dev_get_drvdata(&pdev->dev);
-	struct device *dev = &pdev->dev;
 	struct ipa_interrupt *interrupt;
 	struct ipa_power *power;
+	struct device *dev;
+	struct ipa *ipa;
 	int ret;
 
+	ipa = dev_get_drvdata(&pdev->dev);
+	dev = ipa->dev;
+	WARN_ON(dev != &pdev->dev);
+
 	power = ipa->power;
 	interrupt = ipa->interrupt;
 

@@ -88,7 +88,7 @@ int ipa_mem_setup(struct ipa *ipa)
 	 */
 	trans = ipa_cmd_trans_alloc(ipa, 4);
 	if (!trans) {
-		dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+		dev_err(ipa->dev, "no transaction for memory setup\n");
 		return -EBUSY;
 	}
 
@@ -218,8 +218,8 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
 
 static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
 {
-	struct device *dev = &ipa->pdev->dev;
 	enum ipa_mem_id mem_id = mem->id;
+	struct device *dev = ipa->dev;
 	u16 size_multiple;
 
 	/* Make sure the memory region is valid for this version of IPA */
@@ -255,7 +255,7 @@ static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
 static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
 {
 	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	enum ipa_mem_id mem_id;
 	u32 i;
 
@@ -291,7 +291,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
 /* Do all memory regions fit within the IPA local memory? */
 static bool ipa_mem_size_valid(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	u32 limit = ipa->mem_size;
 	u32 i;
 
@@ -318,7 +318,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
  */
 int ipa_mem_config(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	const struct ipa_mem *mem;
 	const struct reg *reg;
 	dma_addr_t addr;
@@ -394,7 +394,7 @@ err_dma_free:
 /* Inverse of ipa_mem_config() */
 void ipa_mem_deconfig(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 
 	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
 	ipa->zero_size = 0;
@@ -421,8 +421,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
 	 */
 	trans = ipa_cmd_trans_alloc(ipa, 3);
 	if (!trans) {
-		dev_err(&ipa->pdev->dev,
-			"no transaction to zero modem memory\n");
+		dev_err(ipa->dev, "no transaction to zero modem memory\n");
 		return -EBUSY;
 	}
 
@@ -453,7 +452,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
  */
 static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	struct iommu_domain *domain;
 	unsigned long iova;
 	phys_addr_t phys;
@@ -486,13 +485,12 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
 
 static void ipa_imem_exit(struct ipa *ipa)
 {
+	struct device *dev = ipa->dev;
 	struct iommu_domain *domain;
-	struct device *dev;
 
 	if (!ipa->imem_size)
 		return;
 
-	dev = &ipa->pdev->dev;
 	domain = iommu_get_domain_for_dev(dev);
 	if (domain) {
 		size_t size;
@@ -528,7 +526,7 @@ static void ipa_imem_exit(struct ipa *ipa)
  */
 static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	struct iommu_domain *domain;
 	unsigned long iova;
 	phys_addr_t phys;
@@ -595,7 +593,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
 
 static void ipa_smem_exit(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	struct iommu_domain *domain;
 
 	domain = iommu_get_domain_for_dev(dev);

@@ -58,7 +58,7 @@ static int ipa_open(struct net_device *netdev)
 	struct device *dev;
 	int ret;
 
-	dev = &ipa->pdev->dev;
+	dev = ipa->dev;
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
 		goto err_power_put;
@@ -94,7 +94,7 @@ static int ipa_stop(struct net_device *netdev)
 	struct device *dev;
 	int ret;
 
-	dev = &ipa->pdev->dev;
+	dev = ipa->dev;
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
 		goto out_power_put;
@@ -158,7 +158,7 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	 */
 	netif_stop_queue(netdev);
 
-	dev = &ipa->pdev->dev;
+	dev = ipa->dev;
 	ret = pm_runtime_get(dev);
 	if (ret < 1) {
 		/* If a resume won't happen, just drop the packet */
@@ -322,7 +322,7 @@ int ipa_modem_start(struct ipa *ipa)
 		goto out_set_state;
 	}
 
-	SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
+	SET_NETDEV_DEV(netdev, ipa->dev);
 	priv = netdev_priv(netdev);
 	priv->ipa = ipa;
 	priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
@@ -396,7 +396,7 @@ int ipa_modem_stop(struct ipa *ipa)
 /* Treat a "clean" modem stop the same as a crash */
 static void ipa_modem_crashed(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	int ret;
 
 	/* Prevent the modem from triggering a call to ipa_setup() */
@@ -443,7 +443,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
 {
 	struct ipa *ipa = container_of(nb, struct ipa, nb);
 	struct qcom_ssr_notify_data *notify_data = data;
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 
 	switch (action) {
 	case QCOM_SSR_BEFORE_POWERUP:
@@ -492,7 +492,7 @@ int ipa_modem_config(struct ipa *ipa)
 
 void ipa_modem_deconfig(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	int ret;
 
 	ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);

@@ -238,7 +238,7 @@ int ipa_power_setup(struct ipa *ipa)
 
 	ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
 
-	ret = device_init_wakeup(&ipa->pdev->dev, true);
+	ret = device_init_wakeup(ipa->dev, true);
 	if (ret)
 		ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
 
@@ -247,7 +247,7 @@ int ipa_power_setup(struct ipa *ipa)
 
 void ipa_power_teardown(struct ipa *ipa)
 {
-	(void)device_init_wakeup(&ipa->pdev->dev, false);
+	(void)device_init_wakeup(ipa->dev, false);
 	ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
 }
 

@@ -96,7 +96,7 @@ static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
 			IPA_QMI_INIT_COMPLETE_IND_SZ,
 			ipa_init_complete_ind_ei, &ind);
 	if (ret)
-		dev_err(&ipa->pdev->dev,
+		dev_err(ipa->dev,
 			"error %d sending init complete indication\n", ret);
 	else
 		ipa_qmi->indication_sent = true;
@@ -148,7 +148,7 @@ static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
 	ipa = container_of(ipa_qmi, struct ipa, qmi);
 	ret = ipa_modem_start(ipa);
 	if (ret)
-		dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+		dev_err(ipa->dev, "error %d starting modem\n", ret);
 }
 
 /* All QMI clients from the modem node are gone (modem shut down or crashed). */
@@ -199,7 +199,7 @@ static void ipa_server_indication_register(struct qmi_handle *qmi,
 		ipa_qmi->indication_requested = true;
 		ipa_qmi_ready(ipa_qmi); /* We might be ready now */
 	} else {
-		dev_err(&ipa->pdev->dev,
+		dev_err(ipa->dev,
 			"error %d sending register indication response\n", ret);
 	}
 }
@@ -228,7 +228,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
 		ipa_qmi->uc_ready = true;
 		ipa_qmi_ready(ipa_qmi); /* We might be ready now */
 	} else {
-		dev_err(&ipa->pdev->dev,
+		dev_err(ipa->dev,
 			"error %d sending init complete response\n", ret);
 	}
 }
@@ -417,7 +417,7 @@ static void ipa_client_init_driver_work(struct work_struct *work)
 	qmi = &ipa_qmi->client_handle;
 
 	ipa = container_of(ipa_qmi, struct ipa, qmi);
-	dev = &ipa->pdev->dev;
+	dev = ipa->dev;
 
 	ret = qmi_txn_init(qmi, &txn, NULL, NULL);
 	if (ret < 0) {

@@ -84,15 +84,13 @@ struct ipa_smp2p {
  */
 static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
 {
-	struct device *dev;
 	u32 value;
 	u32 mask;
 
 	if (smp2p->notified)
 		return;
 
-	dev = &smp2p->ipa->pdev->dev;
-	smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0;
+	smp2p->power_on = pm_runtime_get_if_active(smp2p->ipa->dev, true) > 0;
 
 	/* Signal whether the IPA power is enabled */
 	mask = BIT(smp2p->enabled_bit);
@@ -152,15 +150,16 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
 static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
 {
 	struct ipa_smp2p *smp2p = dev_id;
+	struct ipa *ipa = smp2p->ipa;
 	struct device *dev;
 	int ret;
 
 	/* Ignore any (spurious) interrupts received after the first */
-	if (smp2p->ipa->setup_complete)
+	if (ipa->setup_complete)
 		return IRQ_HANDLED;
 
 	/* Power needs to be active for setup */
-	dev = &smp2p->ipa->pdev->dev;
+	dev = ipa->dev;
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0) {
 		dev_err(dev, "error %d getting power for setup\n", ret);
@@ -168,7 +167,7 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
 	}
 
 	/* An error here won't cause driver shutdown, so warn if one occurs */
-	ret = ipa_setup(smp2p->ipa);
+	ret = ipa_setup(ipa);
 	WARN(ret != 0, "error %d from ipa_setup()\n", ret);
 
 out_power_put:
@@ -209,7 +208,7 @@ static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
 /* Drop the power reference if it was taken in ipa_smp2p_notify() */
 static void ipa_smp2p_power_release(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 
 	if (!ipa->smp2p->power_on)
 		return;

@@ -163,7 +163,7 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
 
 bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	u32 count;
 
 	if (!filtered) {
@@ -236,8 +236,7 @@ ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
 
 	trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
 	if (!trans) {
-		dev_err(&ipa->pdev->dev,
-			"no transaction for %s filter reset\n",
+		dev_err(ipa->dev, "no transaction for %s filter reset\n",
 			modem ? "modem" : "AP");
 		return -EBUSY;
 	}
@@ -298,8 +297,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
 
 	trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
 	if (!trans) {
-		dev_err(&ipa->pdev->dev,
-			"no transaction for %s route reset\n",
+		dev_err(ipa->dev, "no transaction for %s route reset\n",
 			modem ? "modem" : "AP");
 		return -EBUSY;
 	}
@@ -327,7 +325,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
 
 void ipa_table_reset(struct ipa *ipa, bool modem)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	const char *ee_name;
 	int ret;
 
@@ -356,7 +354,7 @@ int ipa_table_hash_flush(struct ipa *ipa)
 
 	trans = ipa_cmd_trans_alloc(ipa, 1);
 	if (!trans) {
-		dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+		dev_err(ipa->dev, "no transaction for hash flush\n");
 		return -EBUSY;
 	}
 
@@ -469,7 +467,7 @@ int ipa_table_setup(struct ipa *ipa)
 	 */
 	trans = ipa_cmd_trans_alloc(ipa, 8);
 	if (!trans) {
-		dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+		dev_err(ipa->dev, "no transaction for table setup\n");
 		return -EBUSY;
 	}
 
@@ -713,7 +711,7 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
  */
 int ipa_table_init(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	dma_addr_t addr;
 	__le64 le_addr;
 	__le64 *virt;
@@ -763,7 +761,7 @@ int ipa_table_init(struct ipa *ipa)
 void ipa_table_exit(struct ipa *ipa)
 {
 	u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 	size_t size;
 
 	size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);

@@ -127,7 +127,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
 static void ipa_uc_event_handler(struct ipa *ipa)
 {
 	struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 
 	if (shared->event == IPA_UC_EVENT_ERROR)
 		dev_err(dev, "microcontroller error event\n");
@@ -141,7 +141,7 @@ static void ipa_uc_event_handler(struct ipa *ipa)
 static void ipa_uc_response_hdlr(struct ipa *ipa)
 {
 	struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 
 	/* An INIT_COMPLETED response message is sent to the AP by the
 	 * microcontroller when it is operational. Other than this, the AP
@@ -191,7 +191,7 @@ void ipa_uc_config(struct ipa *ipa)
 /* Inverse of ipa_uc_config() */
 void ipa_uc_deconfig(struct ipa *ipa)
 {
-	struct device *dev = &ipa->pdev->dev;
+	struct device *dev = ipa->dev;
 
 	ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
 	ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
@@ -208,8 +208,8 @@ void ipa_uc_deconfig(struct ipa *ipa)
 /* Take a proxy power reference for the microcontroller */
 void ipa_uc_power(struct ipa *ipa)
 {
+	struct device *dev = ipa->dev;
 	static bool already;
-	struct device *dev;
 	int ret;
 
 	if (already)
@@ -217,7 +217,6 @@ void ipa_uc_power(struct ipa *ipa)
 	already = true; /* Only do this on first boot */
 
 	/* This power reference dropped in ipa_uc_response_hdlr() above */
-	dev = &ipa->pdev->dev;
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0) {
 		pm_runtime_put_noidle(dev);