/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
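
/*
 * Illustrative example (not from the original code): on a device whose
 * TFDs hold, say, max_tbs = 20 buffer descriptors, IWL_PCIE_MAX_FRAGS()
 * evaluates to 20 - 3 = 17 -- two TBs stay reserved for the TX command
 * and header and one for data that may live in the skb head, the rest
 * can carry frags.
 */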

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual:
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock:
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator hasn't processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
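
/*
 * Rough sketch of the allocator hand-off, as this header's author of
 * record did not spell it out here (see rx.c for the authoritative
 * flow): once a queue has accumulated roughly RX_CLAIM_REQ_ALLOC empty
 * RBDs it moves them onto @rbd_empty and bumps @req_pending; the
 * allocator work re-attaches pages, moves the RBDs to @rbd_allocated
 * and bumps @req_ready; the queue then claims them back in chunks of
 * RX_CLAIM_REQ_ALLOC.
 */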

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
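
/*
 * Illustrative arithmetic (assuming a queue size of 256, which must be
 * a power of two for the masking to work): iwl_queue_inc_wrap(trans, 255)
 * is 256 & 255 = 0, while iwl_queue_inc_wrap(trans, 10) is simply 11.
 */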

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the size works out to 20 now.
 * If we made it bigger, allocations would be bigger and copying slower,
 * so that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
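
/*
 * Worked numbers for the defines above: PN offset 12 + PN length 8 = 20
 * bytes, hence IWL_FIRST_TB_SIZE == 20; rounding 20 up to a multiple of
 * 64 gives IWL_FIRST_TB_SIZE_ALIGN == 64.
 */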

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
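
/*
 * Example of the HW/SW window mapping described above (illustrative):
 * for the command queue n_window is 32, so a HW ring index of 100 maps
 * to SW entry 100 & (32 - 1) = 4 -- exactly what iwl_pcie_get_cmd_index()
 * further below computes.
 */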

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
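
/*
 * Note (added for clarity, not in the original source): this helper and
 * IWL_TRANS_GET_PCIE_TRANS() above are inverses -- the PCIe-private data
 * lives in the trans_specific tail area of struct iwl_trans, so one
 * direction is a simple cast and the other a container_of().
 */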

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
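
/*
 * Reading note (not in the original source): in the legacy TFD format
 * the 16-bit hi_n_len field packs the high bits of the DMA address in
 * its low nibble and the buffer length in the remaining 12 bits, which
 * is why the helper above shifts right by 4 in the non-TFH case.
 */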

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527
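
/*
 * Descriptive note added for readability: the helper below simply counts
 * how many firmware sections follow @start before the first CPU1/CPU2 or
 * paging separator section (or the end of the image) is reached.
 */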
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its bit is
		 * unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
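
/*
 * Example of the IRQ names produced above (illustrative, assuming no
 * shared vector and four allocated vectors): vector 0 is named
 * "<DRV_NAME>: default queue", the last one "<DRV_NAME>: exception",
 * and the ones in between "<DRV_NAME>: queue 1", "<DRV_NAME>: queue 2".
 */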

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
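
/*
 * Explanatory note (added, not in the original source): when the used
 * region of the ring does not wrap (w >= r) an index is in use iff it
 * falls in [r, w); when it wraps, the free region is [w, r) and the test
 * is simply inverted.
 */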

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
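
/*
 * Illustrative use of the helpers above (hypothetical register value):
 * __iwl_trans_pcie_set_bits_mask(trans, reg, 0x3, 0x1) reads reg, clears
 * bits 0-1 and then sets bit 0 -- a plain read-modify-write, with any
 * required locking left to the caller.
 */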

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */