Merge remote-tracking branch 'wireless-next/master' into iwlwifi-next

commit fc8fed0eae
@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
raid10 Various RAID10 inspired algorithms chosen by additional params
- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
- RAID1E: Integrated Adjacent Stripe Mirroring
- RAID1E: Integrated Offset Stripe Mirroring
- and other similar RAID10 variants

Reference: Chapter 4 of
@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
synchronisation state for each region.

[raid10_copies <# copies>]
[raid10_format near]
[raid10_format <near|far|offset>]
These two options are used to alter the default layout of
a RAID10 configuration. The number of copies is can be
specified, but the default is 2. There are other variations
to how the copies are laid down - the default and only current
option is "near". Near copies are what most people think of
with respect to mirroring. If these options are left
unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
are given, then the layouts for 2, 3 and 4 devices are:
specified, but the default is 2. There are also three
variations to how the copies are laid down - the default
is "near". Near copies are what most people think of with
respect to mirroring. If these options are left unspecified,
or 'raid10_copies 2' and/or 'raid10_format near' are given,
then the layouts for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- ---------- --------------
A1 A1 A1 A1 A2 A1 A1 A2 A2
@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
3-device layout is what might be called a 'RAID1E - Integrated
Adjacent Stripe Mirroring'.

If 'raid10_copies 2' and 'raid10_format far', then the layouts
for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- -------------- --------------------
A1 A2 A1 A2 A3 A1 A2 A3 A4
A3 A4 A4 A5 A6 A5 A6 A7 A8
A5 A6 A7 A8 A9 A9 A10 A11 A12
.. .. .. .. .. .. .. .. ..
A2 A1 A3 A1 A2 A2 A1 A4 A3
A4 A3 A6 A4 A5 A6 A5 A8 A7
A6 A5 A9 A7 A8 A10 A9 A12 A11
.. .. .. .. .. .. .. .. ..

If 'raid10_copies 2' and 'raid10_format offset', then the
layouts for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- ------------ -----------------
A1 A2 A1 A2 A3 A1 A2 A3 A4
A2 A1 A3 A1 A2 A2 A1 A4 A3
A3 A4 A4 A5 A6 A5 A6 A7 A8
A4 A3 A6 A4 A5 A6 A5 A8 A7
A5 A6 A7 A8 A9 A9 A10 A11 A12
A6 A5 A9 A7 A8 A10 A9 A12 A11
.. .. .. .. .. .. .. .. ..
Here we see layouts closely akin to 'RAID1E - Integrated
Offset Stripe Mirroring'.
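The mapping between these format names and MD's internal raid10 layout word is
implemented later in this commit by raid10_format_to_md_layout() in
drivers/md/dm-raid.c: the low byte holds the number of near copies, the second
byte the number of far copies, bit 16 selects "offset", and bit 17 selects the
"use_far_sets" variant. The stand-alone C sketch below only illustrates that
encoding; format_to_layout() is a hypothetical userspace helper, not part of
the target or of this patch.

  #include <stdio.h>
  #include <string.h>

  /* Illustrative only: pack a dm-raid "near"/"far"/"offset" format name and
   * a copy count the same way raid10_format_to_md_layout() does.
   */
  static int format_to_layout(const char *format, unsigned copies)
  {
      unsigned n = 1, f = 1;

      if (!strcmp(format, "near"))
          n = copies;
      else
          f = copies;

      if (!strcmp(format, "offset"))
          return 0x30000 | (f << 8) | n;   /* far_offset + use_far_sets */
      if (!strcmp(format, "far"))
          return 0x20000 | (f << 8) | n;   /* use_far_sets, no offset */

      return (f << 8) | n;                 /* plain "near" layout */
  }

  int main(void)
  {
      printf("near,   2 copies -> 0x%x\n", format_to_layout("near", 2));
      printf("far,    2 copies -> 0x%x\n", format_to_layout("far", 2));
      printf("offset, 2 copies -> 0x%x\n", format_to_layout("offset", 2));
      return 0;
  }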
<#raid_devs>: The number of devices composing the array.
Each device consists of two entries. The first is the device
containing the metadata (if any); the second is the one containing the
@ -142,3 +170,5 @@ Version History
1.3.0 Added support for RAID 10
1.3.1 Allow device replacement/rebuild for RAID 10
1.3.2 Fix/improve redundancy checking for RAID10
1.4.0 Non-functional change. Removes arg from mapping function.
1.4.1 Add RAID10 "far" and "offset" algorithm support.
@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
Proto [2 bytes]
Raw protocol(IP, IPv6, etc) frame.

3.3 Multiqueue tuntap interface:

From version 3.8, Linux supports multiqueue tuntap, which can use multiple
file descriptors (queues) to parallelize packet sending and receiving. Device
allocation is the same as before; if the user wants to create multiple
queues, TUNSETIFF must be called many times with the same device name and the
IFF_MULTI_QUEUE flag.

char *dev should be the name of the device, queues is the number of queues to
be created, and fds is used to store and return the file descriptors (queues)
created to the caller. Each file descriptor serves as the interface of a
queue that can be accessed by userspace.

  #include <linux/if.h>
  #include <linux/if_tun.h>

  int tun_alloc_mq(char *dev, int queues, int *fds)
  {
      struct ifreq ifr;
      int fd, err, i;

      if (!dev)
          return -1;

      memset(&ifr, 0, sizeof(ifr));
      /* Flags: IFF_TUN         - TUN device (no Ethernet headers)
       *        IFF_TAP         - TAP device
       *
       *        IFF_NO_PI       - Do not provide packet information
       *        IFF_MULTI_QUEUE - Create a queue of multiqueue device
       */
      ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
      strcpy(ifr.ifr_name, dev);

      for (i = 0; i < queues; i++) {
          if ((fd = open("/dev/net/tun", O_RDWR)) < 0)
              goto err;
          err = ioctl(fd, TUNSETIFF, (void *)&ifr);
          if (err) {
              close(fd);
              goto err;
          }
          fds[i] = fd;
      }

      return 0;
  err:
      for (--i; i >= 0; i--)
          close(fds[i]);
      return err;
  }

A new ioctl(TUNSETQUEUE) was introduced to enable or disable a queue. When it
is called with the IFF_DETACH_QUEUE flag, the queue is disabled; when it is
called with the IFF_ATTACH_QUEUE flag, the queue is enabled. A queue is
enabled by default after it has been created through TUNSETIFF.

fd is the file descriptor (queue) that we want to enable or disable; when
enable is true we enable it, otherwise we disable it.

  #include <linux/if.h>
  #include <linux/if_tun.h>

  int tun_set_queue(int fd, int enable)
  {
      struct ifreq ifr;

      memset(&ifr, 0, sizeof(ifr));

      if (enable)
          ifr.ifr_flags = IFF_ATTACH_QUEUE;
      else
          ifr.ifr_flags = IFF_DETACH_QUEUE;

      return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
  }
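Taken together, the two helpers above can be exercised from a small test
program. The following is only an illustrative sketch, not part of the kernel
documentation: it assumes tun_alloc_mq() and tun_set_queue() are compiled into
the same file, the device name "tap0" is hypothetical, and a few additional
userspace headers (stdio.h, string.h, fcntl.h, unistd.h, sys/ioctl.h) are
needed for a stand-alone build.

  #include <stdio.h>
  #include <string.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/if.h>
  #include <linux/if_tun.h>

  /* Create a 4-queue tap device, detach one queue, then re-attach it. */
  int main(void)
  {
      char dev[IFNAMSIZ] = "tap0";
      int fds[4];

      if (tun_alloc_mq(dev, 4, fds) < 0) {
          perror("tun_alloc_mq");
          return 1;
      }

      /* Stop delivering packets to queue 2 for a while ... */
      if (tun_set_queue(fds[2], 0) < 0)
          perror("TUNSETQUEUE detach");

      /* ... and bring it back into the rotation. */
      if (tun_set_queue(fds[2], 1) < 0)
          perror("TUNSETQUEUE attach");

      return 0;
  }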
Universal TUN/TAP device driver Frequently Asked Question.

1. What platforms are supported by TUN/TAP driver ?
MAINTAINERS
@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)

-----------------------------------

3C505 NETWORK DRIVER
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/i825xx/3c505*

3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
L: netdev@vger.kernel.org
@ -2361,12 +2355,6 @@ W: http://www.arm.linux.org.uk/
S: Maintained
F: drivers/video/cyber2000fb.*

CYCLADES 2X SYNC CARD DRIVER
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
W: http://oops.ghostprotocols.net:81/blog
S: Maintained
F: drivers/net/wan/cycx*

CYCLADES ASYNC MUX DRIVER
W: http://www.cyclades.com/
S: Orphan
@ -3067,12 +3055,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: drivers/video/s1d13xxxfb.c
F: include/video/s1d13xxxfb.h

ETHEREXPRESS-16 NETWORK DRIVER
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/i825xx/eexpress.*

ETHERNET BRIDGE
M: Stephen Hemminger <stephen@networkplumber.org>
L: bridge@lists.linux-foundation.org
@ -8504,7 +8486,7 @@ F: drivers/usb/gadget/*uvc*.c
F: drivers/usb/gadget/webcam.c

USB WIRELESS RNDIS DRIVER (rndis_wlan)
M: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
M: Jussi Kivilinna <jussi.kivilinna@iki.fi>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/rndis_wlan.c
@ -113,7 +113,7 @@
STEPUP4((t)+16, fn)

_GLOBAL(powerpc_sha_transform)
PPC_STLU r1,-STACKFRAMESIZE(r1)
PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)

@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)

REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
addi r1,r1,STACKFRAMESIZE
addi r1,r1,INT_FRAME_SIZE
blr

@ -52,8 +52,6 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)

/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix, postfix) \
static __inline__ void fn(unsigned long mask, \

@ -266,7 +266,8 @@
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
#define FSCR_TAR (1<<8) /* Enable Target Adress Register */
#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))

@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
SYSCALL(ni_syscall) /* sys_kcmp */

@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>

#define __NR_syscalls 354
#define __NR_syscalls 355

#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls

@ -376,6 +376,7 @@
#define __NR_process_vm_readv 351
#define __NR_process_vm_writev 352
#define __NR_finit_module 353
#define __NR_kcmp 354

#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)

_GLOBAL(__setup_cpu_power8)
mflr r11
bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_FSCR
bl __init_TLB
mtlr r11
blr

_GLOBAL(__restore_cpu_power8)
mflr r11
bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
beqlr
@ -115,7 +116,7 @@ __init_LPCR:

__init_FSCR:
mfspr r3,SPRN_FSCR
ori r3,r3,FSCR_TAR
ori r3,r3,FSCR_TAR|FSCR_DSCR
mtspr SPRN_FSCR,r3
blr

@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
mflr r10 ; \
ld r12,PACAKBASE(r13) ; \
LOAD_HANDLER(r12, system_call_entry_direct) ; \
mtlr r12 ; \
mtctr r12 ; \
mfspr r12,SPRN_SRR1 ; \
/* Re-use of r13... No spare regs to do this */ \
li r13,MSR_RI ; \
mtmsrd r13,1 ; \
GET_PACA(r13) ; /* get r13 back */ \
blr ;
bctr ;
#else
/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT \

@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/hvcall.h>
#include <asm/hvcserver.h>
@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;

/* copy the Null-term char too */
strncpy(&next_partner_info->location_code[0],
strlcpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
strlen((char *)&pi_buff[2]) + 1);
sizeof(next_partner_info->location_code));

list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;
@ -104,7 +104,13 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
|
||||
if (i)
|
||||
bcma_err(core->bus, "PLL enable timeout\n");
|
||||
} else {
|
||||
bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
|
||||
/*
|
||||
* Mask the PLL but don't wait for it to be disabled. PLL may be
|
||||
* shared between cores and will be still up if there is another
|
||||
* core using it.
|
||||
*/
|
||||
bcma_mask32(core, BCMA_CLKCTLST, ~req);
|
||||
bcma_read32(core, BCMA_CLKCTLST);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
|
||||
|
@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_init(&pc_host->cfgspace_lock);
|
||||
|
||||
pc->host_controller = pc_host;
|
||||
pc_host->pci_controller.io_resource = &pc_host->io_resource;
|
||||
pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
|
||||
|
@ -120,6 +120,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Only first GMAC core on BCM4706 is connected and working */
|
||||
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
|
||||
core->core_unit > 0)
|
||||
continue;
|
||||
|
||||
core->dev.release = bcma_release_core_dev;
|
||||
core->dev.bus = &bcma_bus_type;
|
||||
dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);
|
||||
|
@ -74,8 +74,10 @@ static struct usb_device_id ath3k_table[] = {
|
||||
|
||||
/* Atheros AR3012 with sflash firmware*/
|
||||
{ USB_DEVICE(0x0CF3, 0x3004) },
|
||||
{ USB_DEVICE(0x0CF3, 0x3008) },
|
||||
{ USB_DEVICE(0x0CF3, 0x311D) },
|
||||
{ USB_DEVICE(0x13d3, 0x3375) },
|
||||
{ USB_DEVICE(0x04CA, 0x3004) },
|
||||
{ USB_DEVICE(0x04CA, 0x3005) },
|
||||
{ USB_DEVICE(0x04CA, 0x3006) },
|
||||
{ USB_DEVICE(0x04CA, 0x3008) },
|
||||
@ -106,8 +108,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
|
||||
|
||||
/* Atheros AR3012 with sflash firmware*/
|
||||
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
|
||||
|
@ -132,8 +132,10 @@ static struct usb_device_id blacklist_table[] = {
|
||||
|
||||
/* Atheros 3012 with sflash firmware */
|
||||
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
|
||||
|
@ -40,6 +40,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
|
||||
@ -52,8 +53,12 @@ static struct hwrng *current_rng;
|
||||
static LIST_HEAD(rng_list);
|
||||
static DEFINE_MUTEX(rng_mutex);
|
||||
static int data_avail;
|
||||
static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
|
||||
__cacheline_aligned;
|
||||
static u8 *rng_buffer;
|
||||
|
||||
static size_t rng_buffer_size(void)
|
||||
{
|
||||
return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
|
||||
}
|
||||
|
||||
static inline int hwrng_init(struct hwrng *rng)
|
||||
{
|
||||
@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
|
||||
|
||||
if (!data_avail) {
|
||||
bytes_read = rng_get_data(current_rng, rng_buffer,
|
||||
sizeof(rng_buffer),
|
||||
rng_buffer_size(),
|
||||
!(filp->f_flags & O_NONBLOCK));
|
||||
if (bytes_read < 0) {
|
||||
err = bytes_read;
|
||||
@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
|
||||
|
||||
mutex_lock(&rng_mutex);
|
||||
|
||||
/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
|
||||
err = -ENOMEM;
|
||||
if (!rng_buffer) {
|
||||
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
|
||||
if (!rng_buffer)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Must not register two RNGs with the same name. */
|
||||
err = -EEXIST;
|
||||
list_for_each_entry(tmp, &rng_list, list) {
|
||||
|
@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
|
||||
(task_active_pid_ns(current) != &init_pid_ns))
|
||||
return;
|
||||
|
||||
/* Can only change if privileged. */
|
||||
if (!capable(CAP_NET_ADMIN)) {
|
||||
err = EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mc_op = (enum proc_cn_mcast_op *)msg->data;
|
||||
switch (*mc_op) {
|
||||
case PROC_CN_MCAST_LISTEN:
|
||||
@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
|
||||
err = EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
cn_proc_ack(err, msg->seq, msg->ack);
|
||||
}
|
||||
|
||||
|
@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
|
||||
return data & (1 << bit) ? 1 : 0;
|
||||
}
|
||||
|
||||
static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
|
||||
static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
|
||||
{
|
||||
return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
|
||||
return ichx_priv.use_gpio & (1 << (nr / 32));
|
||||
}
|
||||
|
||||
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
|
||||
|
@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
|
||||
static void gpiod_free(struct gpio_desc *desc);
|
||||
static int gpiod_direction_input(struct gpio_desc *desc);
|
||||
static int gpiod_direction_output(struct gpio_desc *desc, int value);
|
||||
static int gpiod_get_direction(const struct gpio_desc *desc);
|
||||
static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
|
||||
static int gpiod_get_value_cansleep(struct gpio_desc *desc);
|
||||
static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
|
||||
static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
|
||||
static int gpiod_get_value(struct gpio_desc *desc);
|
||||
static int gpiod_get_value(const struct gpio_desc *desc);
|
||||
static void gpiod_set_value(struct gpio_desc *desc, int value);
|
||||
static int gpiod_cansleep(struct gpio_desc *desc);
|
||||
static int gpiod_to_irq(struct gpio_desc *desc);
|
||||
static int gpiod_cansleep(const struct gpio_desc *desc);
|
||||
static int gpiod_to_irq(const struct gpio_desc *desc);
|
||||
static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
|
||||
static int gpiod_export_link(struct device *dev, const char *name,
|
||||
struct gpio_desc *desc);
|
||||
@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* caller holds gpio_lock *OR* gpio is marked as requested */
|
||||
static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
|
||||
static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
|
||||
{
|
||||
return desc->chip;
|
||||
return desc ? desc->chip : NULL;
|
||||
}
|
||||
|
||||
/* caller holds gpio_lock *OR* gpio is marked as requested */
|
||||
struct gpio_chip *gpio_to_chip(unsigned gpio)
|
||||
{
|
||||
return gpiod_to_chip(gpio_to_desc(gpio));
|
||||
@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
|
||||
}
|
||||
|
||||
/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
|
||||
static int gpiod_get_direction(struct gpio_desc *desc)
|
||||
static int gpiod_get_direction(const struct gpio_desc *desc)
|
||||
{
|
||||
struct gpio_chip *chip;
|
||||
unsigned offset;
|
||||
@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
|
||||
if (status > 0) {
|
||||
/* GPIOF_DIR_IN, or other positive */
|
||||
status = 1;
|
||||
clear_bit(FLAG_IS_OUT, &desc->flags);
|
||||
/* FLAG_IS_OUT is just a cache of the result of get_direction(),
|
||||
* so it does not affect constness per se */
|
||||
clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
|
||||
}
|
||||
if (status == 0) {
|
||||
/* GPIOF_DIR_OUT */
|
||||
set_bit(FLAG_IS_OUT, &desc->flags);
|
||||
set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
|
||||
static ssize_t gpio_direction_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct gpio_desc *desc = dev_get_drvdata(dev);
|
||||
const struct gpio_desc *desc = dev_get_drvdata(dev);
|
||||
ssize_t status;
|
||||
|
||||
mutex_lock(&sysfs_lock);
|
||||
@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
|
||||
goto done;
|
||||
|
||||
desc = gpio_to_desc(gpio);
|
||||
/* reject invalid GPIOs */
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* No extra locking here; FLAG_SYSFS just signifies that the
|
||||
* request and export were done by on behalf of userspace, so
|
||||
@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
|
||||
if (status < 0)
|
||||
goto done;
|
||||
|
||||
status = -EINVAL;
|
||||
|
||||
desc = gpio_to_desc(gpio);
|
||||
/* reject bogus commands (gpio_unexport ignores them) */
|
||||
if (!desc)
|
||||
goto done;
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
status = -EINVAL;
|
||||
|
||||
/* No extra locking here; FLAG_SYSFS just signifies that the
|
||||
* request and export were done by on behalf of userspace, so
|
||||
@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
|
||||
{
|
||||
int status = -EINVAL;
|
||||
|
||||
if (!desc)
|
||||
goto done;
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&sysfs_lock);
|
||||
|
||||
@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
|
||||
|
||||
mutex_unlock(&sysfs_lock);
|
||||
|
||||
done:
|
||||
if (status)
|
||||
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
|
||||
status);
|
||||
@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
|
||||
struct device *dev = NULL;
|
||||
int status = -EINVAL;
|
||||
|
||||
if (!desc)
|
||||
goto done;
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&sysfs_lock);
|
||||
|
||||
@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
|
||||
unlock:
|
||||
mutex_unlock(&sysfs_lock);
|
||||
|
||||
done:
|
||||
if (status)
|
||||
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
|
||||
status);
|
||||
@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
|
||||
struct device *dev = NULL;
|
||||
|
||||
if (!desc) {
|
||||
status = -EINVAL;
|
||||
goto done;
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&sysfs_lock);
|
||||
@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
|
||||
device_unregister(dev);
|
||||
put_device(dev);
|
||||
}
|
||||
done:
|
||||
|
||||
if (status)
|
||||
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
|
||||
status);
|
||||
@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
|
||||
int status = -EPROBE_DEFER;
|
||||
unsigned long flags;
|
||||
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
if (!desc) {
|
||||
status = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
chip = desc->chip;
|
||||
if (chip == NULL)
|
||||
goto done;
|
||||
@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
|
||||
done:
|
||||
if (status)
|
||||
pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
|
||||
desc ? desc_to_gpio(desc) : -1,
|
||||
label ? : "?", status);
|
||||
desc_to_gpio(desc), label ? : "?", status);
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
return status;
|
||||
}
|
||||
@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
|
||||
int status = -EINVAL;
|
||||
int offset;
|
||||
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
if (!desc)
|
||||
goto fail;
|
||||
chip = desc->chip;
|
||||
if (!chip || !chip->get || !chip->direction_input)
|
||||
goto fail;
|
||||
@ -1655,13 +1670,9 @@ lose:
|
||||
return status;
|
||||
fail:
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
if (status) {
|
||||
int gpio = -1;
|
||||
if (desc)
|
||||
gpio = desc_to_gpio(desc);
|
||||
pr_debug("%s: gpio-%d status %d\n",
|
||||
__func__, gpio, status);
|
||||
}
|
||||
if (status)
|
||||
pr_debug("%s: gpio-%d status %d\n", __func__,
|
||||
desc_to_gpio(desc), status);
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
|
||||
int status = -EINVAL;
|
||||
int offset;
|
||||
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Open drain pin should not be driven to 1 */
|
||||
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
|
||||
return gpiod_direction_input(desc);
|
||||
@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
if (!desc)
|
||||
goto fail;
|
||||
chip = desc->chip;
|
||||
if (!chip || !chip->set || !chip->direction_output)
|
||||
goto fail;
|
||||
@ -1725,13 +1739,9 @@ lose:
|
||||
return status;
|
||||
fail:
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
if (status) {
|
||||
int gpio = -1;
|
||||
if (desc)
|
||||
gpio = desc_to_gpio(desc);
|
||||
pr_debug("%s: gpio-%d status %d\n",
|
||||
__func__, gpio, status);
|
||||
}
|
||||
if (status)
|
||||
pr_debug("%s: gpio-%d status %d\n", __func__,
|
||||
desc_to_gpio(desc), status);
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
|
||||
int status = -EINVAL;
|
||||
int offset;
|
||||
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
if (!desc)
|
||||
goto fail;
|
||||
chip = desc->chip;
|
||||
if (!chip || !chip->set || !chip->set_debounce)
|
||||
goto fail;
|
||||
@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
|
||||
|
||||
fail:
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
if (status) {
|
||||
int gpio = -1;
|
||||
if (desc)
|
||||
gpio = desc_to_gpio(desc);
|
||||
pr_debug("%s: gpio-%d status %d\n",
|
||||
__func__, gpio, status);
|
||||
}
|
||||
if (status)
|
||||
pr_debug("%s: gpio-%d status %d\n", __func__,
|
||||
desc_to_gpio(desc), status);
|
||||
|
||||
return status;
|
||||
}
|
||||
@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
|
||||
* It returns the zero or nonzero value provided by the associated
|
||||
* gpio_chip.get() method; or zero if no such method is provided.
|
||||
*/
|
||||
static int gpiod_get_value(struct gpio_desc *desc)
|
||||
static int gpiod_get_value(const struct gpio_desc *desc)
|
||||
{
|
||||
struct gpio_chip *chip;
|
||||
int value;
|
||||
int offset;
|
||||
|
||||
if (!desc)
|
||||
return 0;
|
||||
chip = desc->chip;
|
||||
offset = gpio_chip_hwgpio(desc);
|
||||
/* Should be using gpio_get_value_cansleep() */
|
||||
@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
|
||||
{
|
||||
struct gpio_chip *chip;
|
||||
|
||||
if (!desc)
|
||||
return;
|
||||
chip = desc->chip;
|
||||
/* Should be using gpio_set_value_cansleep() */
|
||||
WARN_ON(chip->can_sleep);
|
||||
@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
|
||||
* This is used directly or indirectly to implement gpio_cansleep(). It
|
||||
* returns nonzero if access reading or writing the GPIO value can sleep.
|
||||
*/
|
||||
static int gpiod_cansleep(struct gpio_desc *desc)
|
||||
static int gpiod_cansleep(const struct gpio_desc *desc)
|
||||
{
|
||||
if (!desc)
|
||||
return 0;
|
||||
/* only call this on GPIOs that are valid! */
|
||||
return desc->chip->can_sleep;
|
||||
}
|
||||
@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
|
||||
* It returns the number of the IRQ signaled by this (input) GPIO,
|
||||
* or a negative errno.
|
||||
*/
|
||||
static int gpiod_to_irq(struct gpio_desc *desc)
|
||||
static int gpiod_to_irq(const struct gpio_desc *desc)
|
||||
{
|
||||
struct gpio_chip *chip;
|
||||
int offset;
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
chip = desc->chip;
|
||||
offset = gpio_chip_hwgpio(desc);
|
||||
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
|
||||
@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
|
||||
* Common examples include ones connected to I2C or SPI chips.
|
||||
*/
|
||||
|
||||
static int gpiod_get_value_cansleep(struct gpio_desc *desc)
|
||||
static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
|
||||
{
|
||||
struct gpio_chip *chip;
|
||||
int value;
|
||||
int offset;
|
||||
|
||||
might_sleep_if(extra_checks);
|
||||
if (!desc)
|
||||
return 0;
|
||||
chip = desc->chip;
|
||||
offset = gpio_chip_hwgpio(desc);
|
||||
value = chip->get ? chip->get(chip, offset) : 0;
|
||||
@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
|
||||
struct gpio_chip *chip;
|
||||
|
||||
might_sleep_if(extra_checks);
|
||||
if (!desc)
|
||||
return;
|
||||
chip = desc->chip;
|
||||
trace_gpio_value(desc_to_gpio(desc), 0, value);
|
||||
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
|
||||
|
@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
|
||||
// Allocate URBs and buffers for interrupt endpoint
|
||||
urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!urb) {
|
||||
return -ENOMEM;
|
||||
goto err1;
|
||||
}
|
||||
intr->urb = urb;
|
||||
|
||||
buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
|
||||
if (!buf) {
|
||||
return -ENOMEM;
|
||||
goto err2;
|
||||
}
|
||||
|
||||
endpoint = &altsetting->endpoint[EP_INT-1];
|
||||
@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
|
||||
endpoint->desc.bInterval);
|
||||
|
||||
return 0;
|
||||
err2:
|
||||
usb_free_urb(intr->urb);
|
||||
intr->urb = NULL;
|
||||
err1:
|
||||
usb_free_urb(ctrl->urb);
|
||||
ctrl->urb = NULL;
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -154,17 +154,6 @@ config MD_RAID456
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config MULTICORE_RAID456
|
||||
bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
|
||||
depends on MD_RAID456
|
||||
depends on SMP
|
||||
depends on EXPERIMENTAL
|
||||
---help---
|
||||
Enable the raid456 module to dispatch per-stripe raid operations to a
|
||||
thread pool.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config MD_MULTIPATH
|
||||
tristate "Multipath I/O support"
|
||||
depends on BLK_DEV_MD
|
||||
|
@ -91,15 +91,44 @@ static struct raid_type {
|
||||
{"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
|
||||
};
|
||||
|
||||
static char *raid10_md_layout_to_format(int layout)
|
||||
{
|
||||
/*
|
||||
* Bit 16 and 17 stand for "offset" and "use_far_sets"
|
||||
* Refer to MD's raid10.c for details
|
||||
*/
|
||||
if ((layout & 0x10000) && (layout & 0x20000))
|
||||
return "offset";
|
||||
|
||||
if ((layout & 0xFF) > 1)
|
||||
return "near";
|
||||
|
||||
return "far";
|
||||
}
|
||||
|
||||
static unsigned raid10_md_layout_to_copies(int layout)
|
||||
{
|
||||
return layout & 0xFF;
|
||||
if ((layout & 0xFF) > 1)
|
||||
return layout & 0xFF;
|
||||
return (layout >> 8) & 0xFF;
|
||||
}
|
||||
|
||||
static int raid10_format_to_md_layout(char *format, unsigned copies)
|
||||
{
|
||||
/* 1 "far" copy, and 'copies' "near" copies */
|
||||
return (1 << 8) | (copies & 0xFF);
|
||||
unsigned n = 1, f = 1;
|
||||
|
||||
if (!strcmp("near", format))
|
||||
n = copies;
|
||||
else
|
||||
f = copies;
|
||||
|
||||
if (!strcmp("offset", format))
|
||||
return 0x30000 | (f << 8) | n;
|
||||
|
||||
if (!strcmp("far", format))
|
||||
return 0x20000 | (f << 8) | n;
|
||||
|
||||
return (f << 8) | n;
|
||||
}
|
||||
|
||||
static struct raid_type *get_raid_type(char *name)
|
||||
@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
|
||||
{
|
||||
unsigned i, rebuild_cnt = 0;
|
||||
unsigned rebuilds_per_group, copies, d;
|
||||
unsigned group_size, last_group_start;
|
||||
|
||||
for (i = 0; i < rs->md.raid_disks; i++)
|
||||
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
|
||||
@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
|
||||
* as long as the failed devices occur in different mirror
|
||||
* groups (i.e. different stripes).
|
||||
*
|
||||
* Right now, we only allow for "near" copies. When other
|
||||
* formats are added, we will have to check those too.
|
||||
*
|
||||
* When checking "near" format, make sure no adjacent devices
|
||||
* have failed beyond what can be handled. In addition to the
|
||||
* simple case where the number of devices is a multiple of the
|
||||
@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
|
||||
* A A B B C
|
||||
* C D D E E
|
||||
*/
|
||||
for (i = 0; i < rs->md.raid_disks * copies; i++) {
|
||||
if (!(i % copies))
|
||||
if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
|
||||
for (i = 0; i < rs->md.raid_disks * copies; i++) {
|
||||
if (!(i % copies))
|
||||
rebuilds_per_group = 0;
|
||||
d = i % rs->md.raid_disks;
|
||||
if ((!rs->dev[d].rdev.sb_page ||
|
||||
!test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
|
||||
(++rebuilds_per_group >= copies))
|
||||
goto too_many;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* When checking "far" and "offset" formats, we need to ensure
|
||||
* that the device that holds its copy is not also dead or
|
||||
* being rebuilt. (Note that "far" and "offset" formats only
|
||||
* support two copies right now. These formats also only ever
|
||||
* use the 'use_far_sets' variant.)
|
||||
*
|
||||
* This check is somewhat complicated by the need to account
|
||||
* for arrays that are not a multiple of (far) copies. This
|
||||
* results in the need to treat the last (potentially larger)
|
||||
* set differently.
|
||||
*/
|
||||
group_size = (rs->md.raid_disks / copies);
|
||||
last_group_start = (rs->md.raid_disks / group_size) - 1;
|
||||
last_group_start *= group_size;
|
||||
for (i = 0; i < rs->md.raid_disks; i++) {
|
||||
if (!(i % copies) && !(i > last_group_start))
|
||||
rebuilds_per_group = 0;
|
||||
d = i % rs->md.raid_disks;
|
||||
if ((!rs->dev[d].rdev.sb_page ||
|
||||
!test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
|
||||
if ((!rs->dev[i].rdev.sb_page ||
|
||||
!test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
|
||||
(++rebuilds_per_group >= copies))
|
||||
goto too_many;
|
||||
goto too_many;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@ -433,7 +487,7 @@ too_many:
|
||||
*
|
||||
* RAID10-only options:
|
||||
* [raid10_copies <# copies>] Number of copies. (Default: 2)
|
||||
* [raid10_format <near>] Layout algorithm. (Default: near)
|
||||
* [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
|
||||
*/
|
||||
static int parse_raid_params(struct raid_set *rs, char **argv,
|
||||
unsigned num_raid_params)
|
||||
@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
|
||||
rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
|
||||
return -EINVAL;
|
||||
}
|
||||
if (strcmp("near", argv[i])) {
|
||||
if (strcmp("near", argv[i]) &&
|
||||
strcmp("far", argv[i]) &&
|
||||
strcmp("offset", argv[i])) {
|
||||
rs->ti->error = "Invalid 'raid10_format' value given";
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the format is not "near", we only support
|
||||
* two copies at the moment.
|
||||
*/
|
||||
if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
|
||||
rs->ti->error = "Too many copies for given RAID10 format.";
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* (Len * #mirrors) / #devices */
|
||||
sectors_per_dev = rs->ti->len * raid10_copies;
|
||||
sector_div(sectors_per_dev, rs->md.raid_disks);
|
||||
@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
|
||||
/*
|
||||
* Reshaping is not currently allowed
|
||||
*/
|
||||
if ((le32_to_cpu(sb->level) != mddev->level) ||
|
||||
(le32_to_cpu(sb->layout) != mddev->layout) ||
|
||||
(le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
|
||||
DMERR("Reshaping arrays not yet supported.");
|
||||
if (le32_to_cpu(sb->level) != mddev->level) {
|
||||
DMERR("Reshaping arrays not yet supported. (RAID level change)");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (le32_to_cpu(sb->layout) != mddev->layout) {
|
||||
DMERR("Reshaping arrays not yet supported. (RAID layout change)");
|
||||
DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
|
||||
DMERR(" Old layout: %s w/ %d copies",
|
||||
raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
|
||||
raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
|
||||
DMERR(" New layout: %s w/ %d copies",
|
||||
raid10_md_layout_to_format(mddev->layout),
|
||||
raid10_md_layout_to_copies(mddev->layout));
|
||||
return -EINVAL;
|
||||
}
|
||||
if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
|
||||
DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* We can only change the number of devices in RAID1 right now */
|
||||
if ((rs->raid_type->level != 1) &&
|
||||
(le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
|
||||
DMERR("Reshaping arrays not yet supported.");
|
||||
DMERR("Reshaping arrays not yet supported. (device count change)");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1329,7 +1407,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
|
||||
raid10_md_layout_to_copies(rs->md.layout));
|
||||
|
||||
if (rs->print_flags & DMPF_RAID10_FORMAT)
|
||||
DMEMIT(" raid10_format near");
|
||||
DMEMIT(" raid10_format %s",
|
||||
raid10_md_layout_to_format(rs->md.layout));
|
||||
|
||||
DMEMIT(" %d", rs->md.raid_disks);
|
||||
for (i = 0; i < rs->md.raid_disks; i++) {
|
||||
@ -1418,6 +1497,10 @@ static struct target_type raid_target = {
|
||||
|
||||
static int __init dm_raid_init(void)
|
||||
{
|
||||
DMINFO("Loading target version %u.%u.%u",
|
||||
raid_target.version[0],
|
||||
raid_target.version[1],
|
||||
raid_target.version[2]);
|
||||
return dm_register_target(&raid_target);
|
||||
}
|
||||
|
||||
|
@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
|
||||
bio_io_error(bio);
|
||||
return;
|
||||
}
|
||||
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
|
||||
bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
|
||||
return;
|
||||
}
|
||||
smp_rmb(); /* Ensure implications of 'active' are visible */
|
||||
rcu_read_lock();
|
||||
if (mddev->suspended) {
|
||||
@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
|
||||
} else if (!sectors)
|
||||
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
|
||||
rdev->data_offset;
|
||||
if (!my_mddev->pers->resize)
|
||||
/* Cannot change size for RAID0 or Linear etc */
|
||||
return -EINVAL;
|
||||
}
|
||||
if (sectors < my_mddev->dev_sectors)
|
||||
return -EINVAL; /* component must fit device */
|
||||
@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
|
||||
mddev->ro = 0;
|
||||
sysfs_notify_dirent_safe(mddev->sysfs_state);
|
||||
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
|
||||
md_wakeup_thread(mddev->thread);
|
||||
/* mddev_unlock will wake thread */
|
||||
/* If a device failed while we were read-only, we
|
||||
* need to make sure the metadata is updated now.
|
||||
*/
|
||||
if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
|
||||
mddev_unlock(mddev);
|
||||
wait_event(mddev->sb_wait,
|
||||
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
|
||||
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
|
||||
mddev_lock(mddev);
|
||||
}
|
||||
} else {
|
||||
err = -EROFS;
|
||||
goto abort_unlock;
|
||||
|
@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
|
||||
rdev1->new_raid_disk = j;
|
||||
}
|
||||
|
||||
if (j < 0 || j >= mddev->raid_disks) {
|
||||
if (j < 0) {
|
||||
printk(KERN_ERR
|
||||
"md/raid0:%s: remove inactive devices before converting to RAID0\n",
|
||||
mdname(mddev));
|
||||
goto abort;
|
||||
}
|
||||
if (j >= mddev->raid_disks) {
|
||||
printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
|
||||
"aborting!\n", mdname(mddev), j);
|
||||
goto abort;
|
||||
@ -289,7 +295,7 @@ abort:
|
||||
kfree(conf->strip_zone);
|
||||
kfree(conf->devlist);
|
||||
kfree(conf);
|
||||
*private_conf = NULL;
|
||||
*private_conf = ERR_PTR(err);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
|
||||
"%s does not support generic reshape\n", __func__);
|
||||
|
||||
rdev_for_each(rdev, mddev)
|
||||
array_sectors += rdev->sectors;
|
||||
array_sectors += (rdev->sectors &
|
||||
~(sector_t)(mddev->chunk_sectors-1));
|
||||
|
||||
return array_sectors;
|
||||
}
|
||||
|
@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
|
||||
bio_list_merge(&conf->pending_bio_list, &plug->pending);
|
||||
conf->pending_count += plug->pending_cnt;
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
wake_up(&conf->wait_barrier);
|
||||
md_wakeup_thread(mddev->thread);
|
||||
kfree(plug);
|
||||
return;
|
||||
@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
|
||||
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
|
||||
const unsigned long do_discard = (bio->bi_rw
|
||||
& (REQ_DISCARD | REQ_SECURE));
|
||||
const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
|
||||
struct md_rdev *blocked_rdev;
|
||||
struct blk_plug_cb *cb;
|
||||
struct raid1_plug_cb *plug = NULL;
|
||||
@ -1301,7 +1303,8 @@ read_again:
|
||||
conf->mirrors[i].rdev->data_offset);
|
||||
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
|
||||
mbio->bi_end_io = raid1_end_write_request;
|
||||
mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
|
||||
mbio->bi_rw =
|
||||
WRITE | do_flush_fua | do_sync | do_discard | do_same;
|
||||
mbio->bi_private = r1_bio;
|
||||
|
||||
atomic_inc(&r1_bio->remaining);
|
||||
@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
|
||||
if (IS_ERR(conf))
|
||||
return PTR_ERR(conf);
|
||||
|
||||
if (mddev->queue)
|
||||
blk_queue_max_write_same_sectors(mddev->queue,
|
||||
mddev->chunk_sectors);
|
||||
rdev_for_each(rdev, mddev) {
|
||||
if (!mddev->gendisk)
|
||||
continue;
|
||||
|
@ -38,21 +38,36 @@
|
||||
* near_copies (stored in low byte of layout)
|
||||
* far_copies (stored in second byte of layout)
|
||||
* far_offset (stored in bit 16 of layout )
|
||||
* use_far_sets (stored in bit 17 of layout )
|
||||
*
|
||||
* The data to be stored is divided into chunks using chunksize.
|
||||
* Each device is divided into far_copies sections.
|
||||
* In each section, chunks are laid out in a style similar to raid0, but
|
||||
* near_copies copies of each chunk is stored (each on a different drive).
|
||||
* The starting device for each section is offset near_copies from the starting
|
||||
* device of the previous section.
|
||||
* Thus they are (near_copies*far_copies) of each chunk, and each is on a different
|
||||
* drive.
|
||||
* near_copies and far_copies must be at least one, and their product is at most
|
||||
* raid_disks.
|
||||
* The data to be stored is divided into chunks using chunksize. Each device
|
||||
* is divided into far_copies sections. In each section, chunks are laid out
|
||||
* in a style similar to raid0, but near_copies copies of each chunk is stored
|
||||
* (each on a different drive). The starting device for each section is offset
|
||||
* near_copies from the starting device of the previous section. Thus there
|
||||
* are (near_copies * far_copies) of each chunk, and each is on a different
|
||||
* drive. near_copies and far_copies must be at least one, and their product
|
||||
* is at most raid_disks.
|
||||
*
|
||||
* If far_offset is true, then the far_copies are handled a bit differently.
|
||||
* The copies are still in different stripes, but instead of be very far apart
|
||||
* on disk, there are adjacent stripes.
|
||||
* The copies are still in different stripes, but instead of being very far
|
||||
* apart on disk, there are adjacent stripes.
|
||||
*
|
||||
* The far and offset algorithms are handled slightly differently if
|
||||
* 'use_far_sets' is true. In this case, the array's devices are grouped into
|
||||
* sets that are (near_copies * far_copies) in size. The far copied stripes
|
||||
* are still shifted by 'near_copies' devices, but this shifting stays confined
|
||||
* to the set rather than the entire array. This is done to improve the number
|
||||
* of device combinations that can fail without causing the array to fail.
|
||||
* Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
|
||||
* on a device):
|
||||
* A B C D A B C D E
|
||||
* ... ...
|
||||
* D A B C E A B C D
|
||||
* Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
|
||||
* [A B] [C D] [A B] [C D E]
|
||||
* |...| |...| |...| | ... |
|
||||
* [B A] [D C] [B A] [E C D]
|
||||
*/
|
||||
|
||||
/*
|
||||
@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
|
||||
sector_t stripe;
|
||||
int dev;
|
||||
int slot = 0;
|
||||
int last_far_set_start, last_far_set_size;
|
||||
|
||||
last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
|
||||
last_far_set_start *= geo->far_set_size;
|
||||
|
||||
last_far_set_size = geo->far_set_size;
|
||||
last_far_set_size += (geo->raid_disks % geo->far_set_size);
|
||||
|
||||
/* now calculate first sector/dev */
|
||||
chunk = r10bio->sector >> geo->chunk_shift;
|
||||
@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
|
||||
/* and calculate all the others */
|
||||
for (n = 0; n < geo->near_copies; n++) {
|
||||
int d = dev;
|
||||
int set;
|
||||
sector_t s = sector;
|
||||
r10bio->devs[slot].addr = sector;
|
||||
r10bio->devs[slot].devnum = d;
|
||||
r10bio->devs[slot].addr = s;
|
||||
slot++;
|
||||
|
||||
for (f = 1; f < geo->far_copies; f++) {
|
||||
set = d / geo->far_set_size;
|
||||
d += geo->near_copies;
|
||||
if (d >= geo->raid_disks)
|
||||
d -= geo->raid_disks;
|
||||
|
||||
if ((geo->raid_disks % geo->far_set_size) &&
|
||||
(d > last_far_set_start)) {
|
||||
d -= last_far_set_start;
|
||||
d %= last_far_set_size;
|
||||
d += last_far_set_start;
|
||||
} else {
|
||||
d %= geo->far_set_size;
|
||||
d += geo->far_set_size * set;
|
||||
}
|
||||
s += geo->stride;
|
||||
r10bio->devs[slot].devnum = d;
|
||||
r10bio->devs[slot].addr = s;
|
||||
@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
|
||||
* or recovery, so reshape isn't happening
|
||||
*/
|
||||
struct geom *geo = &conf->geo;
|
||||
int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
|
||||
int far_set_size = geo->far_set_size;
|
||||
int last_far_set_start;
|
||||
|
||||
if (geo->raid_disks % geo->far_set_size) {
|
||||
last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
|
||||
last_far_set_start *= geo->far_set_size;
|
||||
|
||||
if (dev >= last_far_set_start) {
|
||||
far_set_size = geo->far_set_size;
|
||||
far_set_size += (geo->raid_disks % geo->far_set_size);
|
||||
far_set_start = last_far_set_start;
|
||||
}
|
||||
}
|
||||
|
||||
offset = sector & geo->chunk_mask;
|
||||
if (geo->far_offset) {
|
||||
@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
|
||||
chunk = sector >> geo->chunk_shift;
|
||||
fc = sector_div(chunk, geo->far_copies);
|
||||
dev -= fc * geo->near_copies;
|
||||
if (dev < 0)
|
||||
dev += geo->raid_disks;
|
||||
if (dev < far_set_start)
|
||||
dev += far_set_size;
|
||||
} else {
|
||||
while (sector >= geo->stride) {
|
||||
sector -= geo->stride;
|
||||
if (dev < geo->near_copies)
|
||||
dev += geo->raid_disks - geo->near_copies;
|
||||
if (dev < (geo->near_copies + far_set_start))
|
||||
dev += far_set_size - geo->near_copies;
|
||||
else
|
||||
dev -= geo->near_copies;
|
||||
}
|
||||
@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
|
||||
bio_list_merge(&conf->pending_bio_list, &plug->pending);
|
||||
conf->pending_count += plug->pending_cnt;
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
wake_up(&conf->wait_barrier);
|
||||
md_wakeup_thread(mddev->thread);
|
||||
kfree(plug);
|
||||
return;
|
||||
@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
|
||||
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
|
||||
const unsigned long do_discard = (bio->bi_rw
|
||||
& (REQ_DISCARD | REQ_SECURE));
|
||||
const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
|
||||
unsigned long flags;
|
||||
struct md_rdev *blocked_rdev;
|
||||
struct blk_plug_cb *cb;
|
||||
@ -1460,7 +1508,8 @@ retry_write:
|
||||
rdev));
|
||||
mbio->bi_bdev = rdev->bdev;
|
||||
mbio->bi_end_io = raid10_end_write_request;
|
||||
mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
|
||||
mbio->bi_rw =
|
||||
WRITE | do_sync | do_fua | do_discard | do_same;
|
||||
mbio->bi_private = r10_bio;
|
||||
|
||||
atomic_inc(&r10_bio->remaining);
|
||||
@ -1502,7 +1551,8 @@ retry_write:
|
||||
r10_bio, rdev));
|
||||
mbio->bi_bdev = rdev->bdev;
|
||||
mbio->bi_end_io = raid10_end_write_request;
|
||||
mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
|
||||
mbio->bi_rw =
|
||||
WRITE | do_sync | do_fua | do_discard | do_same;
|
||||
mbio->bi_private = r10_bio;
|
||||
|
||||
atomic_inc(&r10_bio->remaining);
|
||||
@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
|
||||
disks = mddev->raid_disks + mddev->delta_disks;
|
||||
break;
|
||||
}
|
||||
if (layout >> 17)
|
||||
if (layout >> 18)
|
||||
return -1;
|
||||
if (chunk < (PAGE_SIZE >> 9) ||
|
||||
!is_power_of_2(chunk))
|
||||
@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
|
||||
geo->near_copies = nc;
|
||||
geo->far_copies = fc;
|
||||
geo->far_offset = fo;
|
||||
geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
|
||||
geo->chunk_mask = chunk - 1;
|
||||
geo->chunk_shift = ffz(~chunk);
|
||||
return nc*fc;
|
||||
@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
|
||||
if (mddev->queue) {
|
||||
blk_queue_max_discard_sectors(mddev->queue,
|
||||
mddev->chunk_sectors);
|
||||
blk_queue_max_write_same_sectors(mddev->queue,
|
||||
mddev->chunk_sectors);
|
||||
blk_queue_io_min(mddev->queue, chunk_size);
|
||||
if (conf->geo.raid_disks % conf->geo.near_copies)
|
||||
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
|
||||
|
@ -33,6 +33,11 @@ struct r10conf {
|
||||
* far_offset, in which case it is
|
||||
* 1 stripe.
|
||||
*/
|
||||
int far_set_size; /* The number of devices in a set,
|
||||
* where a 'set' are devices that
|
||||
* contain far/offset copies of
|
||||
* each other.
|
||||
*/
|
||||
int chunk_shift; /* shift from chunks to sectors */
|
||||
sector_t chunk_mask;
|
||||
} prev, geo;
|
||||
|
@ -1403,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
|
||||
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
|
||||
}
|
||||
|
||||
static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
|
||||
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
|
||||
{
|
||||
int overlap_clear = 0, i, disks = sh->disks;
|
||||
struct dma_async_tx_descriptor *tx = NULL;
|
||||
@ -1468,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MULTICORE_RAID456
|
||||
static void async_run_ops(void *param, async_cookie_t cookie)
|
||||
{
|
||||
struct stripe_head *sh = param;
|
||||
unsigned long ops_request = sh->ops.request;
|
||||
|
||||
clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
|
||||
wake_up(&sh->ops.wait_for_ops);
|
||||
|
||||
__raid_run_ops(sh, ops_request);
|
||||
release_stripe(sh);
|
||||
}
|
||||
|
||||
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
|
||||
{
|
||||
/* since handle_stripe can be called outside of raid5d context
|
||||
* we need to ensure sh->ops.request is de-staged before another
|
||||
* request arrives
|
||||
*/
|
||||
wait_event(sh->ops.wait_for_ops,
|
||||
!test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
|
||||
sh->ops.request = ops_request;
|
||||
|
||||
atomic_inc(&sh->count);
|
||||
async_schedule(async_run_ops, sh);
|
||||
}
|
||||
#else
|
||||
#define raid_run_ops __raid_run_ops
|
||||
#endif
|
||||
|
||||
static int grow_one_stripe(struct r5conf *conf)
|
||||
{
|
||||
struct stripe_head *sh;
|
||||
@ -1506,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
|
||||
return 0;
|
||||
|
||||
sh->raid_conf = conf;
|
||||
#ifdef CONFIG_MULTICORE_RAID456
|
||||
init_waitqueue_head(&sh->ops.wait_for_ops);
|
||||
#endif
|
||||
|
||||
spin_lock_init(&sh->stripe_lock);
|
||||
|
||||
@ -1627,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
|
||||
break;
|
||||
|
||||
nsh->raid_conf = conf;
|
||||
#ifdef CONFIG_MULTICORE_RAID456
|
||||
init_waitqueue_head(&nsh->ops.wait_for_ops);
|
||||
#endif
|
||||
spin_lock_init(&nsh->stripe_lock);
|
||||
|
||||
list_add(&nsh->lru, &newstripes);
|
||||
|
@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
|
||||
|
||||
/* If this is the first slave, then we need to set the master's hardware
|
||||
* address to be the same as the slave's. */
|
||||
if (bond->dev_addr_from_first)
|
||||
if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
|
||||
bond_set_dev_addr(bond->dev, slave_dev);
|
||||
|
||||
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
|
||||
|
@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
|
||||
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
|
||||
ring->start);
|
||||
} else {
|
||||
/* Omit CRC. */
|
||||
len -= ETH_FCS_LEN;
|
||||
|
||||
new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
|
||||
if (new_skb) {
|
||||
skb_put(new_skb, len);
|
||||
skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
|
||||
new_skb->data,
|
||||
len);
|
||||
skb_checksum_none_assert(skb);
|
||||
new_skb->protocol =
|
||||
eth_type_trans(new_skb, bgmac->net_dev);
|
||||
netif_receive_skb(new_skb);
|
||||
|
@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
|
||||
tsum = ~csum_fold(csum_add((__force __wsum) csum,
|
||||
csum_partial(t_header, -fix, 0)));
|
||||
|
||||
return bswab16(csum);
|
||||
return bswab16(tsum);
|
||||
}
|
||||
|
||||
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
|
||||
|
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
}

cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;

if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */

@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
|
||||
MDIO_PMA_DEVAD,
|
||||
MDIO_PMA_REG_8481_LED1_MASK,
|
||||
0x0);
|
||||
if (phy->type ==
|
||||
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
|
||||
/* Disable MI_INT interrupt before setting LED4
|
||||
* source to constant off.
|
||||
*/
|
||||
if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
|
||||
params->port*4) &
|
||||
NIG_MASK_MI_INT) {
|
||||
params->link_flags |=
|
||||
LINK_FLAGS_INT_DISABLED;
|
||||
|
||||
bnx2x_bits_dis(
|
||||
bp,
|
||||
NIG_REG_MASK_INTERRUPT_PORT0 +
|
||||
params->port*4,
|
||||
NIG_MASK_MI_INT);
|
||||
}
|
||||
bnx2x_cl45_write(bp, phy,
|
||||
MDIO_PMA_DEVAD,
|
||||
MDIO_PMA_REG_8481_SIGNAL_MASK,
|
||||
0x0);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case LED_MODE_ON:
|
||||
@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
|
||||
MDIO_PMA_DEVAD,
|
||||
MDIO_PMA_REG_8481_LED1_MASK,
|
||||
0x20);
|
||||
if (phy->type ==
|
||||
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
|
||||
/* Disable MI_INT interrupt before setting LED4
|
||||
* source to constant on.
|
||||
*/
|
||||
if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
|
||||
params->port*4) &
|
||||
NIG_MASK_MI_INT) {
|
||||
params->link_flags |=
|
||||
LINK_FLAGS_INT_DISABLED;
|
||||
|
||||
bnx2x_bits_dis(
|
||||
bp,
|
||||
NIG_REG_MASK_INTERRUPT_PORT0 +
|
||||
params->port*4,
|
||||
NIG_MASK_MI_INT);
|
||||
}
|
||||
bnx2x_cl45_write(bp, phy,
|
||||
MDIO_PMA_DEVAD,
|
||||
MDIO_PMA_REG_8481_SIGNAL_MASK,
|
||||
0x20);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
|
||||
MDIO_PMA_DEVAD,
|
||||
MDIO_PMA_REG_8481_LINK_SIGNAL,
|
||||
val);
|
||||
if (phy->type ==
|
||||
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
|
||||
/* Restore LED4 source to external link,
|
||||
* and re-enable interrupts.
|
||||
*/
|
||||
bnx2x_cl45_write(bp, phy,
|
||||
MDIO_PMA_DEVAD,
|
||||
MDIO_PMA_REG_8481_SIGNAL_MASK,
|
||||
0x40);
|
||||
if (params->link_flags &
|
||||
LINK_FLAGS_INT_DISABLED) {
|
||||
bnx2x_link_int_enable(params);
|
||||
params->link_flags &=
|
||||
~LINK_FLAGS_INT_DISABLED;
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
|
||||
phy->media_type = ETH_PHY_KR;
|
||||
phy->flags |= FLAGS_WC_DUAL_MODE;
|
||||
phy->supported &= (SUPPORTED_20000baseKR2_Full |
|
||||
SUPPORTED_10000baseT_Full |
|
||||
SUPPORTED_1000baseT_Full |
|
||||
SUPPORTED_Autoneg |
|
||||
SUPPORTED_FIBRE |
|
||||
SUPPORTED_Pause |
|
||||
@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
|
||||
struct bnx2x_phy *phy = ¶ms->phy[INT_PHY];
|
||||
bnx2x_set_aer_mmd(params, phy);
|
||||
if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
|
||||
(phy->speed_cap_mask & SPEED_20000))
|
||||
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
|
||||
bnx2x_check_kr2_wa(params, vars, phy);
|
||||
bnx2x_check_over_curr(params, vars);
|
||||
if (vars->rx_tx_asic_rst)
|
||||
|
@ -307,7 +307,8 @@ struct link_params {
|
||||
struct bnx2x *bp;
|
||||
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
|
||||
req_flow_ctrl is set to AUTO */
|
||||
u16 rsrv1;
|
||||
u16 link_flags;
|
||||
#define LINK_FLAGS_INT_DISABLED (1<<0)
|
||||
u32 lfa_base;
|
||||
};
|
||||
|
||||
|
@ -349,6 +349,7 @@ struct be_adapter {
|
||||
struct pci_dev *pdev;
|
||||
struct net_device *netdev;
|
||||
|
||||
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
|
||||
u8 __iomem *db; /* Door Bell */
|
||||
|
||||
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
|
||||
|
@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
|
||||
static u16 be_POST_stage_get(struct be_adapter *adapter)
|
||||
{
|
||||
u32 sem;
|
||||
u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
|
||||
SLIPORT_SEMAPHORE_OFFSET_BE;
|
||||
|
||||
pci_read_config_dword(adapter->pdev, reg, &sem);
|
||||
*stage = sem & POST_STAGE_MASK;
|
||||
|
||||
if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
|
||||
return -1;
|
||||
if (BEx_chip(adapter))
|
||||
sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
|
||||
else
|
||||
return 0;
|
||||
pci_read_config_dword(adapter->pdev,
|
||||
SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
|
||||
|
||||
return sem & POST_STAGE_MASK;
|
||||
}
|
||||
|
||||
int lancer_wait_ready(struct be_adapter *adapter)
|
||||
@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
|
||||
}
|
||||
|
||||
do {
|
||||
status = be_POST_stage_get(adapter, &stage);
|
||||
if (status) {
|
||||
dev_err(dev, "POST error; stage=0x%x\n", stage);
|
||||
return -1;
|
||||
} else if (stage != POST_STAGE_ARMFW_RDY) {
|
||||
if (msleep_interruptible(2000)) {
|
||||
dev_err(dev, "Waiting for POST aborted\n");
|
||||
return -EINTR;
|
||||
}
|
||||
timeout += 2;
|
||||
} else {
|
||||
stage = be_POST_stage_get(adapter);
|
||||
if (stage == POST_STAGE_ARMFW_RDY)
|
||||
return 0;
|
||||
|
||||
dev_info(dev, "Waiting for POST, %ds elapsed\n",
|
||||
timeout);
|
||||
if (msleep_interruptible(2000)) {
|
||||
dev_err(dev, "Waiting for POST aborted\n");
|
||||
return -EINTR;
|
||||
}
|
||||
timeout += 2;
|
||||
} while (timeout < 60);
|
||||
|
||||
dev_err(dev, "POST timeout; stage=0x%x\n", stage);
|
||||
|
@@ -32,8 +32,8 @@
#define MPU_EP_CONTROL 0

/********** MPU semphore: used for SH & BE *************/
#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
#define POST_STAGE_MASK 0x0000FFFF
#define POST_ERR_MASK 0x1
#define POST_ERR_SHIFT 31

@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
|
||||
|
||||
static void be_unmap_pci_bars(struct be_adapter *adapter)
|
||||
{
|
||||
if (adapter->csr)
|
||||
pci_iounmap(adapter->pdev, adapter->csr);
|
||||
if (adapter->db)
|
||||
pci_iounmap(adapter->pdev, adapter->db);
|
||||
}
|
||||
@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
|
||||
adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
|
||||
SLI_INTF_IF_TYPE_SHIFT;
|
||||
|
||||
if (BEx_chip(adapter) && be_physfn(adapter)) {
|
||||
adapter->csr = pci_iomap(adapter->pdev, 2, 0);
|
||||
if (adapter->csr == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
|
||||
if (addr == NULL)
|
||||
goto pci_map_err;
|
||||
@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
|
||||
pci_restore_state(pdev);
|
||||
|
||||
/* Check if card is ok and fw is ready */
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Waiting for FW to be ready after EEH reset\n");
|
||||
status = be_fw_wait_ready(adapter);
|
||||
if (status)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
struct bufdesc *bdp;
|
||||
void *bufaddr;
|
||||
unsigned short status;
|
||||
unsigned long flags;
|
||||
unsigned int index;
|
||||
|
||||
if (!fep->link) {
|
||||
/* Link is down or autonegotiation is in progress. */
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&fep->hw_lock, flags);
|
||||
/* Fill in a Tx ring entry */
|
||||
bdp = fep->cur_tx;
|
||||
|
||||
@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
* This should not happen, since ndev->tbusy should be set.
|
||||
*/
|
||||
printk("%s: tx queue full!.\n", ndev->name);
|
||||
spin_unlock_irqrestore(&fep->hw_lock, flags);
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
* 4-byte boundaries. Use bounce buffers to copy data
|
||||
* and get it aligned. Ugh.
|
||||
*/
|
||||
if (fep->bufdesc_ex)
|
||||
index = (struct bufdesc_ex *)bdp -
|
||||
(struct bufdesc_ex *)fep->tx_bd_base;
|
||||
else
|
||||
index = bdp - fep->tx_bd_base;
|
||||
|
||||
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
|
||||
unsigned int index;
|
||||
if (fep->bufdesc_ex)
|
||||
index = (struct bufdesc_ex *)bdp -
|
||||
(struct bufdesc_ex *)fep->tx_bd_base;
|
||||
else
|
||||
index = bdp - fep->tx_bd_base;
|
||||
memcpy(fep->tx_bounce[index], skb->data, skb->len);
|
||||
bufaddr = fep->tx_bounce[index];
|
||||
}
|
||||
@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
swap_buffer(bufaddr, skb->len);
|
||||
|
||||
/* Save skb pointer */
|
||||
fep->tx_skbuff[fep->skb_cur] = skb;
|
||||
|
||||
ndev->stats.tx_bytes += skb->len;
|
||||
fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
|
||||
fep->tx_skbuff[index] = skb;
|
||||
|
||||
/* Push the data cache so the CPM does not get stale memory
|
||||
* data.
|
||||
@ -331,25 +326,21 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
ebdp->cbd_esc = BD_ENET_TX_INT;
|
||||
}
|
||||
}
|
||||
/* Trigger transmission start */
|
||||
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
|
||||
|
||||
/* If this was the last BD in the ring, start at the beginning again. */
|
||||
if (status & BD_ENET_TX_WRAP)
|
||||
bdp = fep->tx_bd_base;
|
||||
else
|
||||
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
|
||||
|
||||
if (bdp == fep->dirty_tx) {
|
||||
fep->tx_full = 1;
|
||||
netif_stop_queue(ndev);
|
||||
}
|
||||
|
||||
fep->cur_tx = bdp;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
if (fep->cur_tx == fep->dirty_tx)
|
||||
netif_stop_queue(ndev);
|
||||
|
||||
spin_unlock_irqrestore(&fep->hw_lock, flags);
|
||||
/* Trigger transmission start */
|
||||
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
|
||||
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
|
||||
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
|
||||
|
||||
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
|
||||
fep->cur_rx = fep->rx_bd_base;
|
||||
|
||||
/* Reset SKB transmit buffers. */
|
||||
fep->skb_cur = fep->skb_dirty = 0;
|
||||
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
|
||||
if (fep->tx_skbuff[i]) {
|
||||
dev_kfree_skb_any(fep->tx_skbuff[i]);
|
||||
@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
|
||||
struct bufdesc *bdp;
|
||||
unsigned short status;
|
||||
struct sk_buff *skb;
|
||||
int index = 0;
|
||||
|
||||
fep = netdev_priv(ndev);
|
||||
spin_lock(&fep->hw_lock);
|
||||
bdp = fep->dirty_tx;
|
||||
|
||||
/* get next bdp of dirty_tx */
|
||||
if (bdp->cbd_sc & BD_ENET_TX_WRAP)
|
||||
bdp = fep->tx_bd_base;
|
||||
else
|
||||
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
|
||||
|
||||
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
|
||||
if (bdp == fep->cur_tx && fep->tx_full == 0)
|
||||
|
||||
/* current queue is empty */
|
||||
if (bdp == fep->cur_tx)
|
||||
break;
|
||||
|
||||
if (fep->bufdesc_ex)
|
||||
index = (struct bufdesc_ex *)bdp -
|
||||
(struct bufdesc_ex *)fep->tx_bd_base;
|
||||
else
|
||||
index = bdp - fep->tx_bd_base;
|
||||
|
||||
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
|
||||
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
|
||||
bdp->cbd_bufaddr = 0;
|
||||
|
||||
skb = fep->tx_skbuff[fep->skb_dirty];
|
||||
skb = fep->tx_skbuff[index];
|
||||
|
||||
/* Check for errors. */
|
||||
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
|
||||
BD_ENET_TX_RL | BD_ENET_TX_UN |
|
||||
@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
|
||||
|
||||
/* Free the sk buffer associated with this last transmit */
|
||||
dev_kfree_skb_any(skb);
|
||||
fep->tx_skbuff[fep->skb_dirty] = NULL;
|
||||
fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
|
||||
fep->tx_skbuff[index] = NULL;
|
||||
|
||||
fep->dirty_tx = bdp;
|
||||
|
||||
/* Update pointer to next buffer descriptor to be transmitted */
|
||||
if (status & BD_ENET_TX_WRAP)
|
||||
@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
|
||||
|
||||
/* Since we have freed up a buffer, the ring is no longer full
|
||||
*/
|
||||
if (fep->tx_full) {
|
||||
fep->tx_full = 0;
|
||||
if (fep->dirty_tx != fep->cur_tx) {
|
||||
if (netif_queue_stopped(ndev))
|
||||
netif_wake_queue(ndev);
|
||||
}
|
||||
}
|
||||
fep->dirty_tx = bdp;
|
||||
spin_unlock(&fep->hw_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
|
||||
int_events = readl(fep->hwp + FEC_IEVENT);
|
||||
writel(int_events, fep->hwp + FEC_IEVENT);
|
||||
|
||||
if (int_events & FEC_ENET_RXF) {
|
||||
if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
/* Disable the RX interrupt */
|
||||
@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
|
||||
}
|
||||
}
|
||||
|
||||
/* Transmit OK, or non-fatal error. Update the buffer
|
||||
* descriptors. FEC handles all errors, we just discover
|
||||
* them as part of the transmit process.
|
||||
*/
|
||||
if (int_events & FEC_ENET_TXF) {
|
||||
ret = IRQ_HANDLED;
|
||||
fec_enet_tx(ndev);
|
||||
}
|
||||
|
||||
if (int_events & FEC_ENET_MII) {
|
||||
ret = IRQ_HANDLED;
|
||||
complete(&fep->mdio_done);
|
||||
@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
|
||||
int pkts = fec_enet_rx(ndev, budget);
|
||||
struct fec_enet_private *fep = netdev_priv(ndev);
|
||||
|
||||
fec_enet_tx(ndev);
|
||||
|
||||
if (pkts < budget) {
|
||||
napi_complete(napi);
|
||||
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
|
||||
@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
|
||||
|
||||
/* ...and the same for transmit */
|
||||
bdp = fep->tx_bd_base;
|
||||
fep->cur_tx = bdp;
|
||||
for (i = 0; i < TX_RING_SIZE; i++) {
|
||||
|
||||
/* Initialize the BD for every fragment in the page. */
|
||||
@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
|
||||
/* Set the last buffer to wrap */
|
||||
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
|
||||
bdp->cbd_sc |= BD_SC_WRAP;
|
||||
fep->dirty_tx = bdp;
|
||||
|
||||
fec_restart(ndev, 0);
|
||||
|
||||
|
@ -97,6 +97,13 @@ struct bufdesc {
|
||||
unsigned short cbd_sc; /* Control and status info */
|
||||
unsigned long cbd_bufaddr; /* Buffer address */
|
||||
};
|
||||
#else
|
||||
struct bufdesc {
|
||||
unsigned short cbd_sc; /* Control and status info */
|
||||
unsigned short cbd_datlen; /* Data length */
|
||||
unsigned long cbd_bufaddr; /* Buffer address */
|
||||
};
|
||||
#endif
|
||||
|
||||
struct bufdesc_ex {
|
||||
struct bufdesc desc;
|
||||
@ -107,14 +114,6 @@ struct bufdesc_ex {
|
||||
unsigned short res0[4];
|
||||
};
|
||||
|
||||
#else
|
||||
struct bufdesc {
|
||||
unsigned short cbd_sc; /* Control and status info */
|
||||
unsigned short cbd_datlen; /* Data length */
|
||||
unsigned long cbd_bufaddr; /* Buffer address */
|
||||
};
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The following definitions courtesy of commproc.h, which where
|
||||
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
|
||||
@ -214,8 +213,6 @@ struct fec_enet_private {
|
||||
unsigned char *tx_bounce[TX_RING_SIZE];
|
||||
struct sk_buff *tx_skbuff[TX_RING_SIZE];
|
||||
struct sk_buff *rx_skbuff[RX_RING_SIZE];
|
||||
ushort skb_cur;
|
||||
ushort skb_dirty;
|
||||
|
||||
/* CPM dual port RAM relative addresses */
|
||||
dma_addr_t bd_dma;
|
||||
@ -227,7 +224,6 @@ struct fec_enet_private {
|
||||
/* The ring entries to be free()ed */
|
||||
struct bufdesc *dirty_tx;
|
||||
|
||||
uint tx_full;
|
||||
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
|
||||
spinlock_t hw_lock;
|
||||
|
||||
|
@ -781,6 +781,59 @@ release:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
* e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
* @hw: pointer to the HW structure
* @link: link up bool flag
*
* When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
* preventing further DMA write requests. Workaround the issue by disabling
* the de-assertion of the clock request when in 1Gbps mode.
**/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
|
||||
{
|
||||
u32 fextnvm6 = er32(FEXTNVM6);
|
||||
s32 ret_val = 0;
|
||||
|
||||
if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
|
||||
u16 kmrn_reg;
|
||||
|
||||
ret_val = hw->phy.ops.acquire(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
ret_val =
|
||||
e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
|
||||
&kmrn_reg);
|
||||
if (ret_val)
|
||||
goto release;
|
||||
|
||||
ret_val =
|
||||
e1000e_write_kmrn_reg_locked(hw,
|
||||
E1000_KMRNCTRLSTA_K1_CONFIG,
|
||||
kmrn_reg &
|
||||
~E1000_KMRNCTRLSTA_K1_ENABLE);
|
||||
if (ret_val)
|
||||
goto release;
|
||||
|
||||
usleep_range(10, 20);
|
||||
|
||||
ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
|
||||
|
||||
ret_val =
|
||||
e1000e_write_kmrn_reg_locked(hw,
|
||||
E1000_KMRNCTRLSTA_K1_CONFIG,
|
||||
kmrn_reg);
|
||||
release:
|
||||
hw->phy.ops.release(hw);
|
||||
} else {
|
||||
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
|
||||
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
|
||||
* @hw: pointer to the HW structure
|
||||
@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/* Work-around I218 hang issue */
|
||||
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
|
||||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
|
||||
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/* Clear link partner's EEE ability */
|
||||
hw->dev_spec.ich8lan.eee_lp_ability = 0;
|
||||
|
||||
@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
|
||||
|
||||
phy_ctrl = er32(PHY_CTRL);
|
||||
phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
|
||||
|
||||
if (hw->phy.type == e1000_phy_i217) {
|
||||
u16 phy_reg;
|
||||
u16 phy_reg, device_id = hw->adapter->pdev->device;
|
||||
|
||||
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
|
||||
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
|
||||
u32 fextnvm6 = er32(FEXTNVM6);
|
||||
|
||||
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire(hw);
|
||||
if (ret_val)
|
||||
|
@ -92,6 +92,8 @@
|
||||
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
|
||||
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
|
||||
|
||||
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
|
||||
|
||||
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
|
||||
|
||||
#define E1000_ICH_RAR_ENTRIES 7
|
||||
|
@ -42,6 +42,7 @@
|
||||
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
|
||||
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
|
||||
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
|
||||
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
|
||||
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
|
||||
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
|
||||
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
|
||||
|
@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
|
||||
switch (hw->phy.type) {
|
||||
case e1000_phy_i210:
|
||||
case e1000_phy_m88:
|
||||
if (hw->phy.id == I347AT4_E_PHY_ID ||
|
||||
hw->phy.id == M88E1112_E_PHY_ID)
|
||||
switch (hw->phy.id) {
|
||||
case I347AT4_E_PHY_ID:
|
||||
case M88E1112_E_PHY_ID:
|
||||
case I210_I_PHY_ID:
|
||||
ret_val = igb_copper_link_setup_m88_gen2(hw);
|
||||
else
|
||||
break;
|
||||
default:
|
||||
ret_val = igb_copper_link_setup_m88(hw);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case e1000_phy_igp_3:
|
||||
ret_val = igb_copper_link_setup_igp(hw);
|
||||
|
@ -447,7 +447,7 @@ struct igb_adapter {
|
||||
#endif
|
||||
struct i2c_algo_bit_data i2c_algo;
|
||||
struct i2c_adapter i2c_adap;
|
||||
struct igb_i2c_client_list *i2c_clients;
|
||||
struct i2c_client *i2c_client;
|
||||
};
|
||||
|
||||
#define IGB_FLAG_HAS_MSI (1 << 0)
|
||||
|
@ -39,6 +39,10 @@
|
||||
#include <linux/pci.h>
|
||||
|
||||
#ifdef CONFIG_IGB_HWMON
|
||||
struct i2c_board_info i350_sensor_info = {
|
||||
I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
|
||||
};
|
||||
|
||||
/* hwmon callback functions */
|
||||
static ssize_t igb_hwmon_show_location(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
|
||||
unsigned int i;
|
||||
int n_attrs;
|
||||
int rc = 0;
|
||||
struct i2c_client *client = NULL;
|
||||
|
||||
/* If this method isn't defined we don't support thermals */
|
||||
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
|
||||
@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
|
||||
if (rc)
|
||||
goto exit;
|
||||
|
||||
/* init i2c_client */
|
||||
client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
|
||||
if (client == NULL) {
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Failed to create new i2c device..\n");
|
||||
goto exit;
|
||||
}
|
||||
adapter->i2c_client = client;
|
||||
|
||||
/* Allocation space for max attributes
|
||||
* max num sensors * values (loc, temp, max, caution)
|
||||
*/
|
||||
|
@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
|
||||
return;
|
||||
}
|
||||
|
||||
static const struct i2c_board_info i350_sensor_info = {
|
||||
I2C_BOARD_INFO("i350bb", 0Xf8),
|
||||
};
|
||||
|
||||
/* igb_init_i2c - Init I2C interface
|
||||
* @adapter: pointer to adapter structure
|
||||
*
|
||||
@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
|
||||
/* If we spanned a buffer we have a huge mess so test for it */
|
||||
BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
|
||||
|
||||
/* Guarantee this function can be used by verifying buffer sizes */
|
||||
BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
|
||||
NET_IP_ALIGN +
|
||||
IGB_TS_HDR_LEN +
|
||||
ETH_FRAME_LEN +
|
||||
ETH_FCS_LEN));
|
||||
|
||||
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
|
||||
page = rx_buffer->page;
|
||||
prefetchw(page);
|
||||
@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
|
||||
}
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(i2c_clients_lock);
|
||||
|
||||
/* igb_get_i2c_client - returns matching client
|
||||
* in adapters's client list.
|
||||
* @adapter: adapter struct
|
||||
* @dev_addr: device address of i2c needed.
|
||||
*/
|
||||
static struct i2c_client *
|
||||
igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
|
||||
{
|
||||
ulong flags;
|
||||
struct igb_i2c_client_list *client_list;
|
||||
struct i2c_client *client = NULL;
|
||||
struct i2c_board_info client_info = {
|
||||
I2C_BOARD_INFO("igb", 0x00),
|
||||
};
|
||||
|
||||
spin_lock_irqsave(&i2c_clients_lock, flags);
|
||||
client_list = adapter->i2c_clients;
|
||||
|
||||
/* See if we already have an i2c_client */
|
||||
while (client_list) {
|
||||
if (client_list->client->addr == (dev_addr >> 1)) {
|
||||
client = client_list->client;
|
||||
goto exit;
|
||||
} else {
|
||||
client_list = client_list->next;
|
||||
}
|
||||
}
|
||||
|
||||
/* no client_list found, create a new one */
|
||||
client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
|
||||
if (client_list == NULL)
|
||||
goto exit;
|
||||
|
||||
/* dev_addr passed to us is left-shifted by 1 bit
|
||||
* i2c_new_device call expects it to be flush to the right.
|
||||
*/
|
||||
client_info.addr = dev_addr >> 1;
|
||||
client_info.platform_data = adapter;
|
||||
client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
|
||||
if (client_list->client == NULL) {
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Failed to create new i2c device..\n");
|
||||
goto err_no_client;
|
||||
}
|
||||
|
||||
/* insert new client at head of list */
|
||||
client_list->next = adapter->i2c_clients;
|
||||
adapter->i2c_clients = client_list;
|
||||
|
||||
client = client_list->client;
|
||||
goto exit;
|
||||
|
||||
err_no_client:
|
||||
kfree(client_list);
|
||||
exit:
|
||||
spin_unlock_irqrestore(&i2c_clients_lock, flags);
|
||||
return client;
|
||||
}
|
||||
|
||||
/* igb_read_i2c_byte - Reads 8 bit word over I2C
|
||||
* @hw: pointer to hardware structure
|
||||
* @byte_offset: byte offset to read
|
||||
@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
|
||||
u8 dev_addr, u8 *data)
|
||||
{
|
||||
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
|
||||
struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
|
||||
struct i2c_client *this_client = adapter->i2c_client;
|
||||
s32 status;
|
||||
u16 swfw_mask = 0;
|
||||
|
||||
@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
|
||||
u8 dev_addr, u8 data)
|
||||
{
|
||||
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
|
||||
struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
|
||||
struct i2c_client *this_client = adapter->i2c_client;
|
||||
s32 status;
|
||||
u16 swfw_mask = E1000_SWFW_PHY0_SM;
|
||||
|
||||
|
@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
|
||||
|
||||
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
|
||||
|
||||
rtl_tx_performance_tweak(pdev,
|
||||
(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN) {
|
||||
rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
|
||||
PCI_EXP_DEVCTL_NOSNOOP_EN);
|
||||
}
|
||||
}
|
||||
|
||||
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
|
||||
@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
|
||||
|
||||
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
|
||||
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
|
||||
rtl_disable_clock_request(pdev);
|
||||
|
||||
@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
|
||||
|
||||
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
|
||||
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
|
||||
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
|
||||
}
|
||||
@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
|
||||
|
||||
RTL_W8(MaxTxPacketSize, TxPacketMax);
|
||||
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
|
||||
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
|
||||
}
|
||||
@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
|
||||
|
||||
RTL_W8(MaxTxPacketSize, TxPacketMax);
|
||||
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
|
||||
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
|
||||
}
|
||||
@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
|
||||
|
||||
rtl_csi_access_enable_1(tp);
|
||||
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
|
||||
RTL_W8(MaxTxPacketSize, TxPacketMax);
|
||||
|
||||
@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
|
||||
|
||||
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
|
||||
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
|
||||
RTL_W8(MaxTxPacketSize, TxPacketMax);
|
||||
|
||||
@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
|
||||
|
||||
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
|
||||
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
|
||||
|
||||
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
|
||||
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
|
||||
|
@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
|
||||
tx_queue->txd.entries);
|
||||
}
|
||||
|
||||
efx_device_detach_sync(efx);
|
||||
efx_stop_all(efx);
|
||||
efx_stop_interrupts(efx, true);
|
||||
|
||||
@ -832,6 +833,7 @@ out:
|
||||
|
||||
efx_start_interrupts(efx, true);
|
||||
efx_start_all(efx);
|
||||
netif_device_attach(efx->net_dev);
|
||||
return rc;
|
||||
|
||||
rollback:
|
||||
@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
|
||||
/* Flush efx_mac_work(), refill_workqueue, monitor_work */
|
||||
efx_flush_all(efx);
|
||||
|
||||
/* Stop the kernel transmit interface late, so the watchdog
|
||||
* timer isn't ticking over the flush */
|
||||
/* Stop the kernel transmit interface. This is only valid if
|
||||
* the device is stopped or detached; otherwise the watchdog
|
||||
* may fire immediately.
|
||||
*/
|
||||
WARN_ON(netif_running(efx->net_dev) &&
|
||||
netif_device_present(efx->net_dev));
|
||||
netif_tx_disable(efx->net_dev);
|
||||
|
||||
efx_stop_datapath(efx);
|
||||
@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
|
||||
if (new_mtu > EFX_MAX_MTU)
|
||||
return -EINVAL;
|
||||
|
||||
efx_stop_all(efx);
|
||||
|
||||
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
|
||||
|
||||
efx_device_detach_sync(efx);
|
||||
efx_stop_all(efx);
|
||||
|
||||
mutex_lock(&efx->mac_lock);
|
||||
net_dev->mtu = new_mtu;
|
||||
efx->type->reconfigure_mac(efx);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
|
||||
efx_start_all(efx);
|
||||
netif_device_attach(efx->net_dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
|
||||
* TX scheduler is stopped when we're done and before
|
||||
* netif_device_present() becomes false.
|
||||
*/
|
||||
netif_tx_lock(dev);
|
||||
netif_tx_lock_bh(dev);
|
||||
netif_device_detach(dev);
|
||||
netif_tx_unlock(dev);
|
||||
netif_tx_unlock_bh(dev);
|
||||
}
|
||||
|
||||
#endif /* EFX_EFX_H */
|
||||
|
@@ -210,6 +210,7 @@ struct efx_tx_queue {
* Will be %NULL if the buffer slot is currently free.
* @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
* Will be %NULL if the buffer slot is currently free.
* @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
* @len: Buffer length, in bytes.
* @flags: Flags for buffer and packet state.
*/
@ -219,7 +220,8 @@ struct efx_rx_buffer {
|
||||
struct sk_buff *skb;
|
||||
struct page *page;
|
||||
} u;
|
||||
unsigned int len;
|
||||
u16 page_offset;
|
||||
u16 len;
|
||||
u16 flags;
|
||||
};
|
||||
#define EFX_RX_BUF_PAGE 0x0001
|
||||
|
@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
|
||||
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
|
||||
struct efx_rx_buffer *buf)
|
||||
{
|
||||
/* Offset is always within one page, so we don't need to consider
|
||||
* the page order.
|
||||
*/
|
||||
return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
|
||||
efx->type->rx_buffer_hash_size;
|
||||
return buf->page_offset + efx->type->rx_buffer_hash_size;
|
||||
}
|
||||
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
|
||||
{
|
||||
@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
|
||||
struct efx_nic *efx = rx_queue->efx;
|
||||
struct efx_rx_buffer *rx_buf;
|
||||
struct page *page;
|
||||
unsigned int page_offset;
|
||||
struct efx_rx_page_state *state;
|
||||
dma_addr_t dma_addr;
|
||||
unsigned index, count;
|
||||
@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
|
||||
state->dma_addr = dma_addr;
|
||||
|
||||
dma_addr += sizeof(struct efx_rx_page_state);
|
||||
page_offset = sizeof(struct efx_rx_page_state);
|
||||
|
||||
split:
|
||||
index = rx_queue->added_count & rx_queue->ptr_mask;
|
||||
rx_buf = efx_rx_buffer(rx_queue, index);
|
||||
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
|
||||
rx_buf->u.page = page;
|
||||
rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
|
||||
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
|
||||
rx_buf->flags = EFX_RX_BUF_PAGE;
|
||||
++rx_queue->added_count;
|
||||
@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
|
||||
/* Use the second half of the page */
|
||||
get_page(page);
|
||||
dma_addr += (PAGE_SIZE >> 1);
|
||||
page_offset += (PAGE_SIZE >> 1);
|
||||
++count;
|
||||
goto split;
|
||||
}
|
||||
@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
|
||||
}
|
||||
|
||||
static void efx_unmap_rx_buffer(struct efx_nic *efx,
|
||||
struct efx_rx_buffer *rx_buf)
|
||||
struct efx_rx_buffer *rx_buf,
|
||||
unsigned int used_len)
|
||||
{
|
||||
if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
|
||||
struct efx_rx_page_state *state;
|
||||
@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
|
||||
state->dma_addr,
|
||||
efx_rx_buf_size(efx),
|
||||
DMA_FROM_DEVICE);
|
||||
} else if (used_len) {
|
||||
dma_sync_single_for_cpu(&efx->pci_dev->dev,
|
||||
rx_buf->dma_addr, used_len,
|
||||
DMA_FROM_DEVICE);
|
||||
}
|
||||
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
|
||||
dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
|
||||
@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
|
||||
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
|
||||
struct efx_rx_buffer *rx_buf)
|
||||
{
|
||||
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
|
||||
efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
|
||||
efx_free_rx_buffer(rx_queue->efx, rx_buf);
|
||||
}
|
||||
|
||||
@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Release card resources - assumes all RX buffers consumed in-order
|
||||
* per RX queue
|
||||
/* Release and/or sync DMA mapping - assumes all RX buffers
|
||||
* consumed in-order per RX queue
|
||||
*/
|
||||
efx_unmap_rx_buffer(efx, rx_buf);
|
||||
efx_unmap_rx_buffer(efx, rx_buf, len);
|
||||
|
||||
/* Prefetch nice and early so data will (hopefully) be in cache by
|
||||
* the time we look at it.
|
||||
|
@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
|
||||
|
||||
writel(vlan, &priv->host_port_regs->port_vlan);
|
||||
|
||||
for (i = 0; i < 2; i++)
|
||||
for (i = 0; i < priv->data.slaves; i++)
|
||||
slave_write(priv->slaves + i, vlan, reg);
|
||||
|
||||
cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
|
||||
|
@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
|
||||
.phy_id = PHY_ID_KSZ9021,
|
||||
.phy_id_mask = 0x000ffffe,
|
||||
.name = "Micrel KSZ9021 Gigabit PHY",
|
||||
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
|
||||
| SUPPORTED_Asym_Pause),
|
||||
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
|
||||
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
|
||||
.config_init = kszphy_config_init,
|
||||
.config_aneg = genphy_config_aneg,
|
||||
|
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");

void phy_device_free(struct phy_device *phydev)
{
kfree(phydev);
put_device(&phydev->dev);
}
EXPORT_SYMBOL(phy_device_free);

static void phy_device_release(struct device *dev)
{
phy_device_free(to_phy_device(dev));
kfree(to_phy_device(dev));
}

static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
there's no driver _already_ loaded. */
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));

device_initialize(&dev->dev);

return dev;
}
EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
/* Run all of the fixups for this PHY */
phy_scan_fixups(phydev);

err = device_register(&phydev->dev);
err = device_add(&phydev->dev);
if (err) {
pr_err("phy %d failed to register\n", phydev->addr);
pr_err("PHY %d failed to add\n", phydev->addr);
goto out;
}

@@ -156,6 +156,24 @@ config USB_NET_AX8817X
This driver creates an interface named "ethX", where X depends on
what other networking devices you have in use.

config USB_NET_AX88179_178A
tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
depends on USB_USBNET
select CRC32
select PHYLIB
default y
help
This option adds support for ASIX AX88179 based USB 3.0/2.0
to Gigabit Ethernet adapters.

This driver should work with at least the following devices:
* ASIX AX88179
* ASIX AX88178A
* Sitecom LN-032

This driver creates an interface named "ethX", where X depends on
what other networking devices you have in use.

config USB_NET_CDCETHER
tristate "CDC Ethernet support (smart devices such as cable modems)"
depends on USB_USBNET

@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o
|
||||
obj-$(CONFIG_USB_HSO) += hso.o
|
||||
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
|
||||
asix-y := asix_devices.o asix_common.o ax88172a.o
|
||||
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
|
||||
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
|
||||
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
|
||||
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
|
||||
|
@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
|
||||
.tx_fixup = asix_tx_fixup,
|
||||
};
|
||||
|
||||
/*
|
||||
* USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
|
||||
* no-name packaging.
|
||||
* USB device strings are:
|
||||
* 1: Manufacturer: USBLINK
|
||||
* 2: Product: HG20F9 USB2.0
|
||||
* 3: Serial: 000003
|
||||
* Appears to be compatible with Asix 88772B.
|
||||
*/
|
||||
static const struct driver_info hg20f9_info = {
|
||||
.description = "HG20F9 USB 2.0 Ethernet",
|
||||
.bind = ax88772_bind,
|
||||
.unbind = ax88772_unbind,
|
||||
.status = asix_status,
|
||||
.link_reset = ax88772_link_reset,
|
||||
.reset = ax88772_reset,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
|
||||
FLAG_MULTI_PACKET,
|
||||
.rx_fixup = asix_rx_fixup_common,
|
||||
.tx_fixup = asix_tx_fixup,
|
||||
.data = FLAG_EEPROM_MAC,
|
||||
};
|
||||
|
||||
extern const struct driver_info ax88172a_info;
|
||||
|
||||
static const struct usb_device_id products [] = {
|
||||
@ -1063,6 +1086,14 @@ static const struct usb_device_id products [] = {
|
||||
/* ASIX 88172a demo board */
|
||||
USB_DEVICE(0x0b95, 0x172a),
|
||||
.driver_info = (unsigned long) &ax88172a_info,
|
||||
}, {
|
||||
/*
|
||||
* USBLINK HG20F9 "USB 2.0 LAN"
|
||||
* Appears to have gazumped Linksys's manufacturer ID but
|
||||
* doesn't (yet) conflict with any known Linksys product.
|
||||
*/
|
||||
USB_DEVICE(0x066b, 0x20f9),
|
||||
.driver_info = (unsigned long) &hg20f9_info,
|
||||
},
|
||||
{ }, // END
|
||||
};
|
||||
|
drivers/net/usb/ax88179_178a.c (new file, 1448 lines; diff suppressed because it is too large)
@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
|
||||
.driver_info = (unsigned long) &wwan_info,
|
||||
},
|
||||
|
||||
/* tag Huawei devices as wwan */
|
||||
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
|
||||
USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_NCM,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
},
|
||||
|
||||
/* Huawei NCM devices disguised as vendor specific */
|
||||
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
|
@@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
tunnel_ip_select_ident(skb, old_iph, &rt->dst);

nf_reset(skb);

vxlan_set_owner(dev, skb);

/* See iptunnel_xmit() */

@ -1523,7 +1523,8 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah);
|
||||
/* EEPROM access functions */
|
||||
int ath5k_eeprom_init(struct ath5k_hw *ah);
|
||||
void ath5k_eeprom_detach(struct ath5k_hw *ah);
|
||||
|
||||
int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
|
||||
struct ieee80211_channel *channel);
|
||||
|
||||
/* Protocol Control Unit Functions */
|
||||
/* Helpers */
|
||||
|
@ -1779,7 +1779,8 @@ ath5k_eeprom_detach(struct ath5k_hw *ah)
|
||||
}
|
||||
|
||||
int
|
||||
ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
|
||||
ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
|
||||
struct ieee80211_channel *channel)
|
||||
{
|
||||
switch (channel->hw_value) {
|
||||
case AR5K_MODE_11A:
|
||||
@ -1789,6 +1790,7 @@ ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
|
||||
case AR5K_MODE_11B:
|
||||
return AR5K_EEPROM_MODE_11B;
|
||||
default:
|
||||
return -1;
|
||||
ATH5K_WARN(ah, "channel is not A/B/G!");
|
||||
return AR5K_EEPROM_MODE_11A;
|
||||
}
|
||||
}
|
||||
|
@ -493,6 +493,3 @@ struct ath5k_eeprom_info {
|
||||
/* Antenna raw switch tables */
|
||||
u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
|
||||
};
|
||||
|
||||
int
|
||||
ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel);
|
||||
|
@ -1612,11 +1612,7 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
|
||||
|
||||
ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
|
||||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
|
||||
if (WARN_ON(ee_mode < 0)) {
|
||||
ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
|
||||
return;
|
||||
}
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(ah, ah->ah_current_channel);
|
||||
|
||||
/* completed NF calibration, test threshold */
|
||||
nf = ath5k_hw_read_measured_noise_floor(ah);
|
||||
@ -2317,12 +2313,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
|
||||
|
||||
def_ant = ah->ah_def_ant;
|
||||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(channel);
|
||||
if (ee_mode < 0) {
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return;
|
||||
}
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
|
||||
|
||||
switch (ant_mode) {
|
||||
case AR5K_ANTMODE_DEFAULT:
|
||||
@ -3622,12 +3613,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(channel);
|
||||
if (ee_mode < 0) {
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
|
||||
|
||||
/* Initialize TX power table */
|
||||
switch (ah->ah_radio) {
|
||||
|
@ -984,9 +984,7 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
|
||||
if (ah->ah_version == AR5K_AR5210)
|
||||
return;
|
||||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(channel);
|
||||
if (WARN_ON(ee_mode < 0))
|
||||
return;
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
|
||||
|
||||
/* Adjust power delta for channel 14 */
|
||||
if (channel->center_freq == 2484)
|
||||
|
@@ -30,6 +30,15 @@ config ATH6KL_DEBUG
---help---
Enables debug support

config ATH6KL_TRACING
bool "Atheros ath6kl tracing support"
depends on ATH6KL
depends on EVENT_TRACING
---help---
Select this to make ath6kl use the tracing infrastructure.

If unsure, say Y to make it easier to debug problems.

config ATH6KL_REGDOMAIN
bool "Atheros ath6kl regdomain support"
depends on ATH6KL

@ -35,10 +35,15 @@ ath6kl_core-y += txrx.o
|
||||
ath6kl_core-y += wmi.o
|
||||
ath6kl_core-y += core.o
|
||||
ath6kl_core-y += recovery.o
|
||||
|
||||
ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
|
||||
ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
|
||||
|
||||
obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
|
||||
ath6kl_sdio-y += sdio.o
|
||||
|
||||
obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
|
||||
ath6kl_usb-y += usb.o
|
||||
|
||||
# for tracing framework to find trace.h
|
||||
CFLAGS_trace.o := -I$(src)
|
||||
|
@ -402,7 +402,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
|
||||
if (type == NL80211_IFTYPE_STATION ||
|
||||
type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
|
||||
for (i = 0; i < ar->vif_max; i++) {
|
||||
if ((ar->avail_idx_map >> i) & BIT(0)) {
|
||||
if ((ar->avail_idx_map) & BIT(i)) {
|
||||
*if_idx = i;
|
||||
return true;
|
||||
}
|
||||
@ -412,7 +412,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
|
||||
if (type == NL80211_IFTYPE_P2P_CLIENT ||
|
||||
type == NL80211_IFTYPE_P2P_GO) {
|
||||
for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
|
||||
if ((ar->avail_idx_map >> i) & BIT(0)) {
|
||||
if ((ar->avail_idx_map) & BIT(i)) {
|
||||
*if_idx = i;
|
||||
return true;
|
||||
}
|
||||
@ -1535,7 +1535,9 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
|
||||
|
||||
ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
|
||||
|
||||
rtnl_lock();
|
||||
ath6kl_cfg80211_vif_cleanup(vif);
|
||||
rtnl_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2990,13 +2992,15 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
|
||||
{
|
||||
struct ath6kl *ar = ath6kl_priv(dev);
|
||||
struct ath6kl_vif *vif = netdev_priv(dev);
|
||||
int err;
|
||||
|
||||
if (vif->nw_type != AP_NETWORK)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Use this only for authorizing/unauthorizing a station */
|
||||
if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
|
||||
return -EOPNOTSUPP;
|
||||
err = cfg80211_check_station_change(wiphy, params,
|
||||
CFG80211_STA_AP_MLME_CLIENT);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
|
||||
return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
|
||||
@ -3659,7 +3663,6 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
|
||||
vif->sme_state = SME_DISCONNECTED;
|
||||
set_bit(WLAN_ENABLED, &vif->flags);
|
||||
ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
|
||||
set_bit(NETDEV_REGISTERED, &vif->flags);
|
||||
|
||||
if (type == NL80211_IFTYPE_ADHOC)
|
||||
ar->ibss_if_active = true;
|
||||
|
@ -560,7 +560,6 @@ enum ath6kl_vif_state {
|
||||
WMM_ENABLED,
|
||||
NETQ_STOPPED,
|
||||
DTIM_EXPIRED,
|
||||
NETDEV_REGISTERED,
|
||||
CLEAR_BSSFILTER_ON_BEACON,
|
||||
DTIM_PERIOD_AVAIL,
|
||||
WLAN_ENABLED,
|
||||
@ -936,8 +935,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
|
||||
u8 win_sz);
|
||||
void ath6kl_wakeup_event(void *dev);
|
||||
|
||||
void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
|
||||
bool wait_fot_compltn, bool cold_reset);
|
||||
void ath6kl_init_control_info(struct ath6kl_vif *vif);
|
||||
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
|
||||
void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
|
||||
|
@ -56,6 +56,60 @@ int ath6kl_printk(const char *level, const char *fmt, ...)
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_printk);
|
||||
|
||||
int ath6kl_info(const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf = {
|
||||
.fmt = fmt,
|
||||
};
|
||||
va_list args;
|
||||
int ret;
|
||||
|
||||
va_start(args, fmt);
|
||||
vaf.va = &args;
|
||||
ret = ath6kl_printk(KERN_INFO, "%pV", &vaf);
|
||||
trace_ath6kl_log_info(&vaf);
|
||||
va_end(args);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_info);
|
||||
|
||||
int ath6kl_err(const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf = {
|
||||
.fmt = fmt,
|
||||
};
|
||||
va_list args;
|
||||
int ret;
|
||||
|
||||
va_start(args, fmt);
|
||||
vaf.va = &args;
|
||||
ret = ath6kl_printk(KERN_ERR, "%pV", &vaf);
|
||||
trace_ath6kl_log_err(&vaf);
|
||||
va_end(args);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_err);
|
||||
|
||||
int ath6kl_warn(const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf = {
|
||||
.fmt = fmt,
|
||||
};
|
||||
va_list args;
|
||||
int ret;
|
||||
|
||||
va_start(args, fmt);
|
||||
vaf.va = &args;
|
||||
ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf);
|
||||
trace_ath6kl_log_warn(&vaf);
|
||||
va_end(args);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_warn);
|
||||
|
||||
#ifdef CONFIG_ATH6KL_DEBUG
|
||||
|
||||
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
|
||||
@ -63,15 +117,15 @@ void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
if (!(debug_mask & mask))
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
|
||||
if (debug_mask & mask)
|
||||
ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
|
||||
|
||||
trace_ath6kl_log_dbg(mask, &vaf);
|
||||
|
||||
va_end(args);
|
||||
}
|
||||
@ -87,6 +141,10 @@ void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
|
||||
|
||||
print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
|
||||
}
|
||||
|
||||
/* tracing code doesn't like null strings :/ */
|
||||
trace_ath6kl_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
|
||||
buf, len);
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_dbg_dump);
|
||||
|
||||
@ -1752,8 +1810,10 @@ int ath6kl_debug_init_fs(struct ath6kl *ar)
|
||||
debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
|
||||
&fops_tgt_stats);
|
||||
|
||||
debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
|
||||
&fops_credit_dist_stats);
|
||||
if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO)
|
||||
debugfs_create_file("credit_dist_stats", S_IRUSR,
|
||||
ar->debugfs_phy, ar,
|
||||
&fops_credit_dist_stats);
|
||||
|
||||
debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
|
||||
ar->debugfs_phy, ar, &fops_endpoint_stats);
|
||||
|
@ -19,6 +19,7 @@
|
||||
#define DEBUG_H
|
||||
|
||||
#include "hif.h"
|
||||
#include "trace.h"
|
||||
|
||||
enum ATH6K_DEBUG_MASK {
|
||||
ATH6KL_DBG_CREDIT = BIT(0),
|
||||
@ -51,13 +52,9 @@ enum ATH6K_DEBUG_MASK {
|
||||
extern unsigned int debug_mask;
|
||||
extern __printf(2, 3)
|
||||
int ath6kl_printk(const char *level, const char *fmt, ...);
|
||||
|
||||
#define ath6kl_info(fmt, ...) \
|
||||
ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
|
||||
#define ath6kl_err(fmt, ...) \
|
||||
ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
|
||||
#define ath6kl_warn(fmt, ...) \
|
||||
ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
|
||||
extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
|
||||
extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
|
||||
extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
|
||||
|
||||
enum ath6kl_war {
|
||||
ATH6KL_WAR_INVALID_RATE,
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "target.h"
|
||||
#include "hif-ops.h"
|
||||
#include "debug.h"
|
||||
#include "trace.h"
|
||||
|
||||
#define MAILBOX_FOR_BLOCK_SIZE 1
|
||||
|
||||
@ -436,6 +437,8 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
|
||||
|
||||
ath6kl_dump_registers(dev, &dev->irq_proc_reg,
|
||||
&dev->irq_en_reg);
|
||||
trace_ath6kl_sdio_irq(&dev->irq_en_reg,
|
||||
sizeof(dev->irq_en_reg));
|
||||
|
||||
/* Update only those registers that are enabled */
|
||||
host_int_status = dev->irq_proc_reg.host_int_status &
|
||||
|
@ -19,6 +19,8 @@
|
||||
#include "hif.h"
|
||||
#include "debug.h"
|
||||
#include "hif-ops.h"
|
||||
#include "trace.h"
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
|
||||
@ -537,6 +539,8 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
|
||||
packet->buf, padded_len,
|
||||
HIF_WR_ASYNC_BLOCK_INC, packet);
|
||||
|
||||
trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -757,7 +761,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
|
||||
{
|
||||
struct htc_target *target = endpoint->target;
|
||||
struct hif_scatter_req *scat_req = NULL;
|
||||
int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
|
||||
int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
|
||||
struct htc_packet *packet;
|
||||
int status;
|
||||
u32 txb_mask;
|
||||
u8 ac = WMM_NUM_AC;
|
||||
@ -832,6 +837,13 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
|
||||
ath6kl_dbg(ATH6KL_DBG_HTC,
|
||||
"htc tx scatter bytes %d entries %d\n",
|
||||
scat_req->len, scat_req->scat_entries);
|
||||
|
||||
for (i = 0; i < scat_req->scat_entries; i++) {
|
||||
packet = scat_req->scat_list[i].packet;
|
||||
trace_ath6kl_htc_tx(packet->status, packet->endpoint,
|
||||
packet->buf, packet->act_len);
|
||||
}
|
||||
|
||||
ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
|
||||
|
||||
if (status)
|
||||
@ -1903,6 +1915,7 @@ static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
|
||||
ath6kl_dbg(ATH6KL_DBG_HTC,
|
||||
"htc rx complete ep %d packet 0x%p\n",
|
||||
endpoint->eid, packet);
|
||||
|
||||
endpoint->ep_cb.rx(endpoint->target, packet);
|
||||
}
|
||||
|
||||
@ -2011,6 +2024,9 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
|
||||
list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
|
||||
ep = &target->endpoint[packet->endpoint];
|
||||
|
||||
trace_ath6kl_htc_rx(packet->status, packet->endpoint,
|
||||
packet->buf, packet->act_len);
|
||||
|
||||
/* process header for each of the recv packet */
|
||||
status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
|
||||
n_lk_ahd);
|
||||
@ -2291,6 +2307,9 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
|
||||
if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
|
||||
goto fail_ctrl_rx;
|
||||
|
||||
trace_ath6kl_htc_rx(packet->status, packet->endpoint,
|
||||
packet->buf, packet->act_len);
|
||||
|
||||
/* process receive header */
|
||||
packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
|
||||
|
||||
|
@ -988,8 +988,6 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,

	htc_hdr = (struct htc_frame_hdr *) netdata;

	ep = &target->endpoint[htc_hdr->eid];

	if (htc_hdr->eid >= ENDPOINT_MAX) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: invalid EndpointID=%d\n",
@ -997,6 +995,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
		status = -EINVAL;
		goto free_skb;
	}
	ep = &target->endpoint[htc_hdr->eid];

	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

@ -1168,8 +1167,8 @@ static int htc_wait_recv_ctrl_message(struct htc_target *target)
	}

	if (count <= 0) {
		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
		return -ECOMM;
		ath6kl_warn("htc pipe control receive timeout!\n");
		return -ETIMEDOUT;
	}

	return 0;
@ -1582,16 +1581,16 @@ static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
		return status;

	if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
			   target->pipe.ctrl_response_len);
		ath6kl_warn("invalid htc pipe ready msg len: %d\n",
			    target->pipe.ctrl_response_len);
		return -ECOMM;
	}

	ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;

	if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
			   ready_msg->ver2_0_info.msg_id);
		ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
			    ready_msg->ver2_0_info.msg_id);
		return -ECOMM;
	}

@ -201,8 +201,8 @@ struct sk_buff *ath6kl_buf_alloc(int size)
	u16 reserved;

	/* Add chacheline space at front and back of buffer */
	reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
		   sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES;
	reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
			   sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4);
	skb = dev_alloc_skb(size + reserved);

	if (skb)
@ -1549,10 +1549,89 @@ static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
static const struct fw_capa_str_map {
|
||||
int id;
|
||||
const char *name;
|
||||
} fw_capa_map[] = {
|
||||
{ ATH6KL_FW_CAPABILITY_HOST_P2P, "host-p2p" },
|
||||
{ ATH6KL_FW_CAPABILITY_SCHED_SCAN, "sched-scan" },
|
||||
{ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, "sta-p2pdev-duplex" },
|
||||
{ ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, "inactivity-timeout" },
|
||||
{ ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, "rsn-cap-override" },
|
||||
{ ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, "wow-mc-filter" },
|
||||
{ ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, "bmiss-enhance" },
|
||||
{ ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, "sscan-match-list" },
|
||||
{ ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, "rssi-scan-thold" },
|
||||
{ ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, "custom-mac-addr" },
|
||||
{ ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, "tx-err-notify" },
|
||||
{ ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" },
|
||||
{ ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" },
|
||||
{ ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" },
|
||||
};
|
||||
|
||||
static const char *ath6kl_init_get_fw_capa_name(unsigned int id)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(fw_capa_map); i++) {
|
||||
if (fw_capa_map[i].id == id)
|
||||
return fw_capa_map[i].name;
|
||||
}
|
||||
|
||||
return "<unknown>";
|
||||
}
|
||||
|
||||
static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len)
|
||||
{
|
||||
u8 *data = (u8 *) ar->fw_capabilities;
|
||||
size_t trunc_len, len = 0;
|
||||
int i, index, bit;
|
||||
char *trunc = "...";
|
||||
|
||||
for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
|
||||
index = i / 8;
|
||||
bit = i % 8;
|
||||
|
||||
if (index >= sizeof(ar->fw_capabilities) * 4)
|
||||
break;
|
||||
|
||||
if (buf_len - len < 4) {
|
||||
ath6kl_warn("firmware capability buffer too small!\n");
|
||||
|
||||
/* add "..." to the end of string */
|
||||
trunc_len = strlen(trunc) + 1;
|
||||
strncpy(buf + buf_len - trunc_len, trunc, trunc_len);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (data[index] & (1 << bit)) {
|
||||
len += scnprintf(buf + len, buf_len - len, "%s,",
|
||||
ath6kl_init_get_fw_capa_name(i));
|
||||
}
|
||||
}
|
||||
|
||||
/* overwrite the last comma */
|
||||
if (len > 0)
|
||||
len--;
|
||||
|
||||
buf[len] = '\0';
|
||||
}
|
||||
|
||||
static int ath6kl_init_hw_reset(struct ath6kl *ar)
|
||||
{
|
||||
ath6kl_dbg(ATH6KL_DBG_BOOT, "cold resetting the device");
|
||||
|
||||
return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS,
|
||||
cpu_to_le32(RESET_CONTROL_COLD_RST));
|
||||
}
|
||||
|
||||
static int __ath6kl_init_hw_start(struct ath6kl *ar)
|
||||
{
|
||||
long timeleft;
|
||||
int ret, i;
|
||||
char buf[200];
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
|
||||
|
||||
@ -1569,24 +1648,35 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
|
||||
goto err_power_off;
|
||||
|
||||
/* Do we need to finish the BMI phase */
|
||||
/* FIXME: return error from ath6kl_bmi_done() */
|
||||
if (ath6kl_bmi_done(ar)) {
|
||||
ret = -EIO;
|
||||
ret = ath6kl_bmi_done(ar);
|
||||
if (ret)
|
||||
goto err_power_off;
|
||||
}
|
||||
|
||||
/*
|
||||
* The reason we have to wait for the target here is that the
|
||||
* driver layer has to init BMI in order to set the host block
|
||||
* size.
|
||||
*/
|
||||
if (ath6kl_htc_wait_target(ar->htc_target)) {
|
||||
ret = -EIO;
|
||||
ret = ath6kl_htc_wait_target(ar->htc_target);
|
||||
|
||||
if (ret == -ETIMEDOUT) {
|
||||
/*
|
||||
* Most likely USB target is in odd state after reboot and
|
||||
* needs a reset. A cold reset makes the whole device
|
||||
* disappear from USB bus and initialisation starts from
|
||||
* beginning.
|
||||
*/
|
||||
ath6kl_warn("htc wait target timed out, resetting device\n");
|
||||
ath6kl_init_hw_reset(ar);
|
||||
goto err_power_off;
|
||||
} else if (ret) {
|
||||
ath6kl_err("htc wait target failed: %d\n", ret);
|
||||
goto err_power_off;
|
||||
}
|
||||
|
||||
if (ath6kl_init_service_ep(ar)) {
|
||||
ret = -EIO;
|
||||
ret = ath6kl_init_service_ep(ar);
|
||||
if (ret) {
|
||||
ath6kl_err("Endpoint service initilisation failed: %d\n", ret);
|
||||
goto err_cleanup_scatter;
|
||||
}
|
||||
|
||||
@ -1617,6 +1707,8 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
|
||||
ar->wiphy->fw_version,
|
||||
ar->fw_api,
|
||||
test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
|
||||
ath6kl_init_get_fwcaps(ar, buf, sizeof(buf));
|
||||
ath6kl_info("firmware supports: %s\n", buf);
|
||||
}
|
||||
|
||||
if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
|
||||
@ -1765,9 +1857,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
	 * Try to reset the device if we can. The driver may have been
	 * configure NOT to reset the target during a debug session.
	 */
	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "attempting to reset target on instance destroy\n");
	ath6kl_reset_device(ar, ar->target_type, true, true);
	ath6kl_init_hw_reset(ar);

	up(&ar->sem);
}
@ -345,39 +345,6 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* FIXME: move to a better place, target.h? */
|
||||
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
|
||||
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
|
||||
|
||||
void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
|
||||
bool wait_fot_compltn, bool cold_reset)
|
||||
{
|
||||
int status = 0;
|
||||
u32 address;
|
||||
__le32 data;
|
||||
|
||||
if (target_type != TARGET_TYPE_AR6003 &&
|
||||
target_type != TARGET_TYPE_AR6004)
|
||||
return;
|
||||
|
||||
data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
|
||||
cpu_to_le32(RESET_CONTROL_MBOX_RST);
|
||||
|
||||
switch (target_type) {
|
||||
case TARGET_TYPE_AR6003:
|
||||
address = AR6003_RESET_CONTROL_ADDRESS;
|
||||
break;
|
||||
case TARGET_TYPE_AR6004:
|
||||
address = AR6004_RESET_CONTROL_ADDRESS;
|
||||
break;
|
||||
}
|
||||
|
||||
status = ath6kl_diag_write32(ar, address, data);
|
||||
|
||||
if (status)
|
||||
ath6kl_err("failed to reset target\n");
|
||||
}
|
||||
|
||||
static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
|
||||
{
|
||||
u8 index;
|
||||
@ -1327,9 +1294,11 @@ void init_netdev(struct net_device *dev)
|
||||
dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
|
||||
|
||||
dev->needed_headroom = ETH_HLEN;
|
||||
dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
|
||||
sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
|
||||
+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
|
||||
dev->needed_headroom += roundup(sizeof(struct ath6kl_llc_snap_hdr) +
|
||||
sizeof(struct wmi_data_hdr) +
|
||||
HTC_HDR_LENGTH +
|
||||
WMI_MAX_TX_META_SZ +
|
||||
ATH6KL_HTC_ALIGN_BYTES, 4);
|
||||
|
||||
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
|
||||
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "target.h"
|
||||
#include "debug.h"
|
||||
#include "cfg80211.h"
|
||||
#include "trace.h"
|
||||
|
||||
struct ath6kl_sdio {
|
||||
struct sdio_func *func;
|
||||
@ -179,6 +180,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
|
||||
request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
|
||||
ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
|
||||
|
||||
trace_ath6kl_sdio(addr, request, buf, len);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -309,6 +312,13 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
|
||||
sdio_claim_host(ar_sdio->func);
|
||||
|
||||
mmc_set_data_timeout(&data, ar_sdio->func->card);
|
||||
|
||||
trace_ath6kl_sdio_scat(scat_req->addr,
|
||||
scat_req->req,
|
||||
scat_req->len,
|
||||
scat_req->scat_entries,
|
||||
scat_req->scat_list);
|
||||
|
||||
/* synchronous call to process request */
|
||||
mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
|
||||
|
||||
@ -1123,10 +1133,12 @@ static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
|
||||
|
||||
ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
|
||||
HIF_WR_SYNC_BYTE_INC);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
ath6kl_err("unable to send the bmi data to the device\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
|
||||
|
@ -25,7 +25,7 @@
#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0

#define RESET_CONTROL_ADDRESS 0x00000000
#define RESET_CONTROL_ADDRESS 0x00004000
#define RESET_CONTROL_COLD_RST 0x00000100
#define RESET_CONTROL_MBOX_RST 0x00000004

drivers/net/wireless/ath/ath6kl/trace.c (new file, 23 lines)
@ -0,0 +1,23 @@
/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio);
EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio_scat);
drivers/net/wireless/ath/ath6kl/trace.h (new file, 332 lines)
@ -0,0 +1,332 @@
|
||||
#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
|
||||
#include <net/cfg80211.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/tracepoint.h>
|
||||
#include "wmi.h"
|
||||
#include "hif.h"
|
||||
|
||||
#if !defined(_ATH6KL_TRACE_H)
|
||||
static inline unsigned int ath6kl_get_wmi_id(void *buf, size_t buf_len)
|
||||
{
|
||||
struct wmi_cmd_hdr *hdr = buf;
|
||||
|
||||
if (buf_len < sizeof(*hdr))
|
||||
return 0;
|
||||
|
||||
return le16_to_cpu(hdr->cmd_id);
|
||||
}
|
||||
#endif /* __ATH6KL_TRACE_H */
|
||||
|
||||
#define _ATH6KL_TRACE_H
|
||||
|
||||
/* create empty functions when tracing is disabled */
|
||||
#if !defined(CONFIG_ATH6KL_TRACING)
|
||||
#undef TRACE_EVENT
|
||||
#define TRACE_EVENT(name, proto, ...) \
|
||||
static inline void trace_ ## name(proto) {}
|
||||
#undef DECLARE_EVENT_CLASS
|
||||
#define DECLARE_EVENT_CLASS(...)
|
||||
#undef DEFINE_EVENT
|
||||
#define DEFINE_EVENT(evt_class, name, proto, ...) \
|
||||
static inline void trace_ ## name(proto) {}
|
||||
#endif /* !CONFIG_ATH6KL_TRACING || __CHECKER__ */
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM ath6kl
|
||||
|
||||
TRACE_EVENT(ath6kl_wmi_cmd,
|
||||
TP_PROTO(void *buf, size_t buf_len),
|
||||
|
||||
TP_ARGS(buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, id)
|
||||
__field(size_t, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->id = ath6kl_get_wmi_id(buf, buf_len);
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"id %d len %d",
|
||||
__entry->id, __entry->buf_len
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_wmi_event,
|
||||
TP_PROTO(void *buf, size_t buf_len),
|
||||
|
||||
TP_ARGS(buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, id)
|
||||
__field(size_t, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->id = ath6kl_get_wmi_id(buf, buf_len);
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"id %d len %d",
|
||||
__entry->id, __entry->buf_len
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_sdio,
|
||||
TP_PROTO(unsigned int addr, int flags,
|
||||
void *buf, size_t buf_len),
|
||||
|
||||
TP_ARGS(addr, flags, buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, tx)
|
||||
__field(unsigned int, addr)
|
||||
__field(int, flags)
|
||||
__field(size_t, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->addr = addr;
|
||||
__entry->flags = flags;
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
|
||||
if (flags & HIF_WRITE)
|
||||
__entry->tx = 1;
|
||||
else
|
||||
__entry->tx = 0;
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"%s addr 0x%x flags 0x%x len %d\n",
|
||||
__entry->tx ? "tx" : "rx",
|
||||
__entry->addr,
|
||||
__entry->flags,
|
||||
__entry->buf_len
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_sdio_scat,
|
||||
TP_PROTO(unsigned int addr, int flags, unsigned int total_len,
|
||||
unsigned int entries, struct hif_scatter_item *list),
|
||||
|
||||
TP_ARGS(addr, flags, total_len, entries, list),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, tx)
|
||||
__field(unsigned int, addr)
|
||||
__field(int, flags)
|
||||
__field(unsigned int, entries)
|
||||
__field(size_t, total_len)
|
||||
__dynamic_array(unsigned int, len_array, entries)
|
||||
__dynamic_array(u8, data, total_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
unsigned int *len_array;
|
||||
int i, offset = 0;
|
||||
size_t len;
|
||||
|
||||
__entry->addr = addr;
|
||||
__entry->flags = flags;
|
||||
__entry->entries = entries;
|
||||
__entry->total_len = total_len;
|
||||
|
||||
if (flags & HIF_WRITE)
|
||||
__entry->tx = 1;
|
||||
else
|
||||
__entry->tx = 0;
|
||||
|
||||
len_array = __get_dynamic_array(len_array);
|
||||
|
||||
for (i = 0; i < entries; i++) {
|
||||
len = list[i].len;
|
||||
|
||||
memcpy((u8 *) __get_dynamic_array(data) + offset,
|
||||
list[i].buf, len);
|
||||
|
||||
len_array[i] = len;
|
||||
offset += len;
|
||||
}
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"%s addr 0x%x flags 0x%x entries %d total_len %d\n",
|
||||
__entry->tx ? "tx" : "rx",
|
||||
__entry->addr,
|
||||
__entry->flags,
|
||||
__entry->entries,
|
||||
__entry->total_len
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_sdio_irq,
|
||||
TP_PROTO(void *buf, size_t buf_len),
|
||||
|
||||
TP_ARGS(buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(size_t, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"irq len %d\n", __entry->buf_len
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_htc_rx,
|
||||
TP_PROTO(int status, int endpoint, void *buf,
|
||||
size_t buf_len),
|
||||
|
||||
TP_ARGS(status, endpoint, buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, status)
|
||||
__field(int, endpoint)
|
||||
__field(size_t, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->status = status;
|
||||
__entry->endpoint = endpoint;
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"status %d endpoint %d len %d\n",
|
||||
__entry->status,
|
||||
__entry->endpoint,
|
||||
__entry->buf_len
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_htc_tx,
|
||||
TP_PROTO(int status, int endpoint, void *buf,
|
||||
size_t buf_len),
|
||||
|
||||
TP_ARGS(status, endpoint, buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, status)
|
||||
__field(int, endpoint)
|
||||
__field(size_t, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->status = status;
|
||||
__entry->endpoint = endpoint;
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"status %d endpoint %d len %d\n",
|
||||
__entry->status,
|
||||
__entry->endpoint,
|
||||
__entry->buf_len
|
||||
)
|
||||
);
|
||||
|
||||
#define ATH6KL_MSG_MAX 200
|
||||
|
||||
DECLARE_EVENT_CLASS(ath6kl_log_event,
|
||||
TP_PROTO(struct va_format *vaf),
|
||||
TP_ARGS(vaf),
|
||||
TP_STRUCT__entry(
|
||||
__dynamic_array(char, msg, ATH6KL_MSG_MAX)
|
||||
),
|
||||
TP_fast_assign(
|
||||
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
|
||||
ATH6KL_MSG_MAX,
|
||||
vaf->fmt,
|
||||
*vaf->va) >= ATH6KL_MSG_MAX);
|
||||
),
|
||||
TP_printk("%s", __get_str(msg))
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ath6kl_log_event, ath6kl_log_err,
|
||||
TP_PROTO(struct va_format *vaf),
|
||||
TP_ARGS(vaf)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ath6kl_log_event, ath6kl_log_warn,
|
||||
TP_PROTO(struct va_format *vaf),
|
||||
TP_ARGS(vaf)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ath6kl_log_event, ath6kl_log_info,
|
||||
TP_PROTO(struct va_format *vaf),
|
||||
TP_ARGS(vaf)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_log_dbg,
|
||||
TP_PROTO(unsigned int level, struct va_format *vaf),
|
||||
TP_ARGS(level, vaf),
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, level)
|
||||
__dynamic_array(char, msg, ATH6KL_MSG_MAX)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->level = level;
|
||||
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
|
||||
ATH6KL_MSG_MAX,
|
||||
vaf->fmt,
|
||||
*vaf->va) >= ATH6KL_MSG_MAX);
|
||||
),
|
||||
TP_printk("%s", __get_str(msg))
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath6kl_log_dbg_dump,
|
||||
TP_PROTO(const char *msg, const char *prefix,
|
||||
const void *buf, size_t buf_len),
|
||||
|
||||
TP_ARGS(msg, prefix, buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__string(msg, msg)
|
||||
__string(prefix, prefix)
|
||||
__field(size_t, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(msg, msg);
|
||||
__assign_str(prefix, prefix);
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"%s/%s\n", __get_str(prefix), __get_str(msg)
|
||||
)
|
||||
);
|
||||
|
||||
#endif /* _ ATH6KL_TRACE_H || TRACE_HEADER_MULTI_READ*/
|
||||
|
||||
/* we don't want to use include/trace/events */
|
||||
#undef TRACE_INCLUDE_PATH
|
||||
#define TRACE_INCLUDE_PATH .
|
||||
#undef TRACE_INCLUDE_FILE
|
||||
#define TRACE_INCLUDE_FILE trace
|
||||
|
||||
/* This part must be outside protection */
|
||||
#include <trace/define_trace.h>
|
@ -20,6 +20,7 @@
#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"

/*
 * tid - tid_mux0..tid_mux3
@ -288,6 +289,8 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	trace_ath6kl_wmi_cmd(skb->data, skb->len);

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
@ -1324,7 +1327,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
	if (status || packet->act_len < HTC_HDR_LENGTH) {
		dev_kfree_skb(skb);
		return;
	}

@ -856,11 +856,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
	int ret;

	if (size > 0) {
		buf = kmalloc(size, GFP_KERNEL);
		buf = kmemdup(data, size, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;

		memcpy(buf, data, size);
	}

	/* note: if successful returns number of bytes transfered */
@ -872,8 +870,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
|
||||
size, 1000);
|
||||
|
||||
if (ret < 0) {
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
|
||||
__func__, ret);
|
||||
ath6kl_warn("Failed to submit usb control message: %d\n", ret);
|
||||
kfree(buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
kfree(buf);
|
||||
@ -903,8 +902,9 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
|
||||
size, 2 * HZ);
|
||||
|
||||
if (ret < 0) {
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
|
||||
__func__, ret);
|
||||
ath6kl_warn("Failed to read usb control message: %d\n", ret);
|
||||
kfree(buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
memcpy((u8 *) data, buf, size);
|
||||
@ -961,8 +961,10 @@ static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
|
||||
ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
|
||||
ar_usb->diag_resp_buffer, &resp_len);
|
||||
|
||||
if (ret)
|
||||
if (ret) {
|
||||
ath6kl_warn("diag read32 failed: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
|
||||
ar_usb->diag_resp_buffer;
|
||||
@ -976,6 +978,7 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
|
||||
{
|
||||
struct ath6kl_usb *ar_usb = ar->hif_priv;
|
||||
struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
|
||||
|
||||
@ -984,12 +987,17 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
|
||||
cmd->address = cpu_to_le32(address);
|
||||
cmd->value = data;
|
||||
|
||||
return ath6kl_usb_ctrl_msg_exchange(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
|
||||
(u8 *) cmd,
|
||||
sizeof(*cmd),
|
||||
0, NULL, NULL);
|
||||
ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
|
||||
(u8 *) cmd,
|
||||
sizeof(*cmd),
|
||||
0, NULL, NULL);
|
||||
if (ret) {
|
||||
ath6kl_warn("diag_write32 failed: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
|
||||
@ -1001,7 +1009,7 @@ static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
|
||||
ret = ath6kl_usb_submit_ctrl_in(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
|
||||
0, 0, buf, len);
|
||||
if (ret != 0) {
|
||||
if (ret) {
|
||||
ath6kl_err("Unable to read the bmi data from the device: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
@ -1019,7 +1027,7 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
|
||||
ret = ath6kl_usb_submit_ctrl_out(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
|
||||
0, 0, buf, len);
|
||||
if (ret != 0) {
|
||||
if (ret) {
|
||||
ath6kl_err("unable to send the bmi data to the device: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include "core.h"
|
||||
#include "debug.h"
|
||||
#include "testmode.h"
|
||||
#include "trace.h"
|
||||
#include "../regd.h"
|
||||
#include "../regd_common.h"
|
||||
|
||||
@ -2028,6 +2029,9 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
|
||||
if (!sband)
|
||||
continue;
|
||||
|
||||
if (WARN_ON(band >= ATH6KL_NUM_BANDS))
|
||||
break;
|
||||
|
||||
ratemask = rates[band];
|
||||
supp_rates = sc->supp_rates[band].rates;
|
||||
num_rates = 0;
|
||||
@ -4086,6 +4090,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
trace_ath6kl_wmi_event(skb->data, skb->len);
|
||||
|
||||
return ath6kl_wmi_proc_events(wmi, skb);
|
||||
}
|
||||
|
||||
|
@ -3606,6 +3606,12 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
|
||||
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
|
||||
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
|
||||
|
||||
if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
|
||||
value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
|
||||
REG_RMW_FIELD(ah, switch_chain_reg[0],
|
||||
AR_SWITCH_TABLE_ALL, value);
|
||||
}
|
||||
|
||||
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
|
||||
if ((ah->rxchainmask & BIT(chain)) ||
|
||||
(ah->txchainmask & BIT(chain))) {
|
||||
@ -3772,6 +3778,17 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
|
||||
AR_PHY_EXT_ATTEN_CTL_2,
|
||||
};
|
||||
|
||||
if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
|
||||
value = ar9003_hw_atten_chain_get(ah, 1, chan);
|
||||
REG_RMW_FIELD(ah, ext_atten_reg[0],
|
||||
AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
|
||||
|
||||
value = ar9003_hw_atten_chain_get_margin(ah, 1, chan);
|
||||
REG_RMW_FIELD(ah, ext_atten_reg[0],
|
||||
AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
|
||||
value);
|
||||
}
|
||||
|
||||
/* Test value. if 0 then attenuation is unused. Don't load anything. */
|
||||
for (i = 0; i < 3; i++) {
|
||||
if (ah->txchainmask & BIT(i)) {
|
||||
|
@ -37,28 +37,28 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
|
||||
/* Addr allmodes */
|
||||
{0x00018c00, 0x18253ede},
|
||||
{0x00018c04, 0x000801d8},
|
||||
{0x00018c08, 0x0003580c},
|
||||
{0x00018c08, 0x0003780c},
|
||||
};
|
||||
|
||||
static const u32 ar9462_2p0_baseband_postamble[][5] = {
|
||||
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
|
||||
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
|
||||
{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
|
||||
{0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
|
||||
{0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
|
||||
{0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
|
||||
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
|
||||
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
|
||||
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
|
||||
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
|
||||
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
|
||||
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
|
||||
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
|
||||
{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
|
||||
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
|
||||
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
|
||||
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
|
||||
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
|
||||
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
|
||||
{0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282},
|
||||
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
|
||||
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
|
||||
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
|
||||
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
|
||||
@ -82,9 +82,9 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
|
||||
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
|
||||
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
|
||||
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
|
||||
{0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
|
||||
{0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
|
||||
{0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
|
||||
{0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
|
||||
{0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
|
||||
{0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
|
||||
{0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
|
||||
@ -363,14 +363,14 @@ static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
|
||||
/* Addr allmodes */
|
||||
{0x00018c00, 0x18213ede},
|
||||
{0x00018c04, 0x000801d8},
|
||||
{0x00018c08, 0x0003580c},
|
||||
{0x00018c08, 0x0003780c},
|
||||
};
|
||||
|
||||
static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
|
||||
/* Addr allmodes */
|
||||
{0x00018c00, 0x18212ede},
|
||||
{0x00018c04, 0x000801d8},
|
||||
{0x00018c08, 0x0003580c},
|
||||
{0x00018c08, 0x0003780c},
|
||||
};
|
||||
|
||||
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
|
||||
@ -775,7 +775,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
|
||||
{0x00009fc0, 0x803e4788},
|
||||
{0x00009fc4, 0x0001efb5},
|
||||
{0x00009fcc, 0x40000014},
|
||||
{0x00009fd0, 0x01193b93},
|
||||
{0x00009fd0, 0x0a193b93},
|
||||
{0x0000a20c, 0x00000000},
|
||||
{0x0000a220, 0x00000000},
|
||||
{0x0000a224, 0x00000000},
|
||||
@ -850,7 +850,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
|
||||
{0x0000a7cc, 0x00000000},
|
||||
{0x0000a7d0, 0x00000000},
|
||||
{0x0000a7d4, 0x00000004},
|
||||
{0x0000a7dc, 0x00000001},
|
||||
{0x0000a7dc, 0x00000000},
|
||||
{0x0000a7f0, 0x80000000},
|
||||
{0x0000a8d0, 0x004b6a8e},
|
||||
{0x0000a8d4, 0x00000820},
|
||||
@ -886,7 +886,7 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
|
||||
{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
|
||||
{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
|
||||
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
|
||||
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
|
||||
{0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
|
||||
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
|
||||
{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
|
||||
@ -906,20 +906,20 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
|
||||
{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
|
||||
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
|
||||
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
|
||||
{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
|
||||
{0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
|
||||
{0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
|
||||
{0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
|
||||
{0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
|
||||
{0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
|
||||
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
|
||||
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
|
||||
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
|
||||
{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
|
||||
{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
|
||||
{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
|
||||
{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
|
||||
{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
|
||||
{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
|
||||
{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
|
||||
{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
|
||||
{0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
|
||||
{0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
|
||||
{0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
|
||||
{0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
|
||||
{0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
|
||||
{0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
|
||||
{0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
|
||||
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
@ -1053,7 +1053,6 @@ static const u32 ar9462_2p0_mac_core[][2] = {
|
||||
{0x00008044, 0x00000000},
|
||||
{0x00008048, 0x00000000},
|
||||
{0x0000804c, 0xffffffff},
|
||||
{0x00008050, 0xffffffff},
|
||||
{0x00008054, 0x00000000},
|
||||
{0x00008058, 0x00000000},
|
||||
{0x0000805c, 0x000fc78f},
|
||||
@ -1117,9 +1116,9 @@ static const u32 ar9462_2p0_mac_core[][2] = {
|
||||
{0x000081f8, 0x00000000},
|
||||
{0x000081fc, 0x00000000},
|
||||
{0x00008240, 0x00100000},
|
||||
{0x00008244, 0x0010f424},
|
||||
{0x00008244, 0x0010f400},
|
||||
{0x00008248, 0x00000800},
|
||||
{0x0000824c, 0x0001e848},
|
||||
{0x0000824c, 0x0001e800},
|
||||
{0x00008250, 0x00000000},
|
||||
{0x00008254, 0x00000000},
|
||||
{0x00008258, 0x00000000},
|
||||
|
@ -369,7 +369,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
||||
struct ieee80211_channel *c = chan->chan;
|
||||
struct ath9k_hw_cal_data *caldata = ah->caldata;
|
||||
|
||||
chan->channelFlags &= (~CHANNEL_CW_INT);
|
||||
if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
|
||||
ath_dbg(common, CALIBRATE,
|
||||
"NF did not complete in calibration window\n");
|
||||
@ -384,7 +383,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
||||
ath_dbg(common, CALIBRATE,
|
||||
"noise floor failed detected; detected %d, threshold %d\n",
|
||||
nf, nfThresh);
|
||||
chan->channelFlags |= CHANNEL_CW_INT;
|
||||
}
|
||||
|
||||
if (!caldata) {
|
||||
@ -410,7 +408,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
|
||||
int i, j;
|
||||
|
||||
ah->caldata->channel = chan->channel;
|
||||
ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
|
||||
ah->caldata->channelFlags = chan->channelFlags;
|
||||
ah->caldata->chanmode = chan->chanmode;
|
||||
h = ah->caldata->nfCalHist;
|
||||
default_nf = ath9k_hw_get_default_nf(ah, chan);
|
||||
|
@ -27,7 +27,7 @@
|
||||
#define WME_MAX_BA WME_BA_BMP_SIZE
|
||||
#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
|
||||
|
||||
#define ATH_RSSI_DUMMY_MARKER 0x127
|
||||
#define ATH_RSSI_DUMMY_MARKER 127
|
||||
#define ATH_RSSI_LPF_LEN 10
|
||||
#define RSSI_LPF_THRESHOLD -20
|
||||
#define ATH_RSSI_EP_MULTIPLIER (1<<7)
|
||||
@ -40,7 +40,7 @@
|
||||
x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
|
||||
} while (0)
|
||||
#define ATH_EP_RND(x, mul) \
|
||||
((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
|
||||
(((x) + ((mul)/2)) / (mul))
|
||||
|
||||
int ath9k_cmn_padpos(__le16 frame_control);
|
||||
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
|
||||
|
@ -537,6 +537,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
|
||||
PR("AMPDUs Completed:", a_completed);
|
||||
PR("AMPDUs Retried: ", a_retries);
|
||||
PR("AMPDUs XRetried: ", a_xretries);
|
||||
PR("TXERR Filtered: ", txerr_filtered);
|
||||
PR("FIFO Underrun: ", fifo_underrun);
|
||||
PR("TXOP Exceeded: ", xtxop);
|
||||
PR("TXTIMER Expiry: ", timer_exp);
|
||||
@ -756,6 +757,8 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
|
||||
TX_STAT_INC(qnum, completed);
|
||||
}
|
||||
|
||||
if (ts->ts_status & ATH9K_TXERR_FILT)
|
||||
TX_STAT_INC(qnum, txerr_filtered);
|
||||
if (ts->ts_status & ATH9K_TXERR_FIFO)
|
||||
TX_STAT_INC(qnum, fifo_underrun);
|
||||
if (ts->ts_status & ATH9K_TXERR_XTXOP)
|
||||
@ -1909,6 +1912,7 @@ static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
|
||||
AMKSTR(d_tx_desc_cfg_err),
|
||||
AMKSTR(d_tx_data_underrun),
|
||||
AMKSTR(d_tx_delim_underrun),
|
||||
"d_rx_crc_err",
|
||||
"d_rx_decrypt_crc_err",
|
||||
"d_rx_phy_err",
|
||||
"d_rx_mic_err",
|
||||
@ -1989,6 +1993,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
|
||||
AWDATA(data_underrun);
|
||||
AWDATA(delim_underrun);
|
||||
|
||||
AWDATA_RX(crc_err);
|
||||
AWDATA_RX(decrypt_crc_err);
|
||||
AWDATA_RX(phy_err);
|
||||
AWDATA_RX(mic_err);
|
||||
|
@ -142,6 +142,7 @@ struct ath_interrupt_stats {
|
||||
* @a_completed: Total AMPDUs completed
|
||||
* @a_retries: No. of AMPDUs retried (SW)
|
||||
* @a_xretries: No. of AMPDUs dropped due to xretries
|
||||
* @txerr_filtered: No. of frames with TXERR_FILT flag set.
|
||||
* @fifo_underrun: FIFO underrun occurrences
|
||||
Valid only for:
|
||||
- non-aggregate condition.
|
||||
@ -168,6 +169,7 @@ struct ath_tx_stats {
|
||||
u32 a_completed;
|
||||
u32 a_retries;
|
||||
u32 a_xretries;
|
||||
u32 txerr_filtered;
|
||||
u32 fifo_underrun;
|
||||
u32 xtxop;
|
||||
u32 timer_exp;
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/slab.h>
|
||||
#include <net/mac80211.h>
|
||||
|
@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
|
||||
|
||||
last_rssi = priv->rx.last_rssi;
|
||||
|
||||
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
|
||||
rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
|
||||
ATH_RSSI_EP_MULTIPLIER);
|
||||
if (ieee80211_is_beacon(hdr->frame_control) &&
|
||||
!is_zero_ether_addr(common->curbssid) &&
|
||||
ether_addr_equal(hdr->addr3, common->curbssid)) {
|
||||
s8 rssi = rxbuf->rxstatus.rs_rssi;
|
||||
|
||||
if (rxbuf->rxstatus.rs_rssi < 0)
|
||||
rxbuf->rxstatus.rs_rssi = 0;
|
||||
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
|
||||
rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
|
||||
|
||||
if (ieee80211_is_beacon(fc))
|
||||
priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
|
||||
if (rssi < 0)
|
||||
rssi = 0;
|
||||
|
||||
priv->ah->stats.avgbrssi = rssi;
|
||||
}
|
||||
|
||||
rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
|
||||
rx_status->band = hw->conf.channel->band;
|
||||
|
@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
|
||||
reset_type = ATH9K_RESET_POWER_ON;
|
||||
else
|
||||
reset_type = ATH9K_RESET_COLD;
|
||||
}
|
||||
} else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
|
||||
(REG_READ(ah, AR_CR) & AR_CR_RXE))
|
||||
reset_type = ATH9K_RESET_COLD;
|
||||
|
||||
if (!ath9k_hw_set_reset_reg(ah, reset_type))
|
||||
return false;
|
||||
@ -1667,6 +1669,104 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
|
||||
}
|
||||
EXPORT_SYMBOL(ath9k_hw_check_alive);
|
||||
|
||||
static void ath9k_hw_init_mfp(struct ath_hw *ah)
|
||||
{
|
||||
/* Setup MFP options for CCMP */
|
||||
if (AR_SREV_9280_20_OR_LATER(ah)) {
|
||||
/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
|
||||
* frames when constructing CCMP AAD. */
|
||||
REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
|
||||
0xc7ff);
|
||||
ah->sw_mgmt_crypto = false;
|
||||
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
|
||||
/* Disable hardware crypto for management frames */
|
||||
REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
|
||||
AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
|
||||
REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
|
||||
AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
|
||||
ah->sw_mgmt_crypto = true;
|
||||
} else {
|
||||
ah->sw_mgmt_crypto = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void ath9k_hw_reset_opmode(struct ath_hw *ah,
|
||||
u32 macStaId1, u32 saveDefAntenna)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
|
||||
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
|
||||
REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
|
||||
| macStaId1
|
||||
| AR_STA_ID1_RTS_USE_DEF
|
||||
| (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
|
||||
| ah->sta_id1_defaults);
|
||||
ath_hw_setbssidmask(common);
|
||||
REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
|
||||
ath9k_hw_write_associd(ah);
|
||||
REG_WRITE(ah, AR_ISR, ~0);
|
||||
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
|
||||
|
||||
REGWRITE_BUFFER_FLUSH(ah);
|
||||
|
||||
ath9k_hw_set_operating_mode(ah, ah->opmode);
|
||||
}
|
||||
|
||||
static void ath9k_hw_init_queues(struct ath_hw *ah)
|
||||
{
|
||||
int i;
|
||||
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
|
||||
for (i = 0; i < AR_NUM_DCU; i++)
|
||||
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
|
||||
|
||||
REGWRITE_BUFFER_FLUSH(ah);
|
||||
|
||||
ah->intr_txqs = 0;
|
||||
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
|
||||
ath9k_hw_resettxqueue(ah, i);
|
||||
}
|
||||
|
||||
/*
|
||||
* For big endian systems turn on swapping for descriptors
|
||||
*/
|
||||
static void ath9k_hw_init_desc(struct ath_hw *ah)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
|
||||
if (AR_SREV_9100(ah)) {
|
||||
u32 mask;
|
||||
mask = REG_READ(ah, AR_CFG);
|
||||
if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
|
||||
ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
|
||||
mask);
|
||||
} else {
|
||||
mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
|
||||
REG_WRITE(ah, AR_CFG, mask);
|
||||
ath_dbg(common, RESET, "Setting CFG 0x%x\n",
|
||||
REG_READ(ah, AR_CFG));
|
||||
}
|
||||
} else {
|
||||
if (common->bus_ops->ath_bus_type == ATH_USB) {
|
||||
/* Configure AR9271 target WLAN */
|
||||
if (AR_SREV_9271(ah))
|
||||
REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
|
||||
else
|
||||
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
|
||||
}
|
||||
#ifdef __BIG_ENDIAN
|
||||
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
|
||||
AR_SREV_9550(ah))
|
||||
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
|
||||
else
|
||||
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Fast channel change:
|
||||
* (Change synthesizer based on channel freq without resetting chip)
|
||||
@ -1744,7 +1844,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
u32 saveDefAntenna;
|
||||
u32 macStaId1;
|
||||
u64 tsf = 0;
|
||||
int i, r;
|
||||
int r;
|
||||
bool start_mci_reset = false;
|
||||
bool save_fullsleep = ah->chip_fullsleep;
|
||||
|
||||
@ -1761,10 +1861,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
ath9k_hw_getnf(ah, ah->curchan);
|
||||
|
||||
ah->caldata = caldata;
|
||||
if (caldata &&
|
||||
(chan->channel != caldata->channel ||
|
||||
(chan->channelFlags & ~CHANNEL_CW_INT) !=
|
||||
(caldata->channelFlags & ~CHANNEL_CW_INT))) {
|
||||
if (caldata && (chan->channel != caldata->channel ||
|
||||
chan->channelFlags != caldata->channelFlags)) {
|
||||
/* Operating channel changed, reset channel calibration data */
|
||||
memset(caldata, 0, sizeof(*caldata));
|
||||
ath9k_init_nfcal_hist_buffer(ah, chan);
|
||||
@ -1851,22 +1949,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
ath9k_hw_settsf64(ah, tsf);
|
||||
}
|
||||
|
||||
/* Setup MFP options for CCMP */
|
||||
if (AR_SREV_9280_20_OR_LATER(ah)) {
|
||||
/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
|
||||
* frames when constructing CCMP AAD. */
|
||||
REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
|
||||
0xc7ff);
|
||||
ah->sw_mgmt_crypto = false;
|
||||
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
|
||||
/* Disable hardware crypto for management frames */
|
||||
REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
|
||||
AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
|
||||
REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
|
||||
AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
|
||||
ah->sw_mgmt_crypto = true;
|
||||
} else
|
||||
ah->sw_mgmt_crypto = true;
|
||||
ath9k_hw_init_mfp(ah);
|
||||
|
||||
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
|
||||
ath9k_hw_set_delta_slope(ah, chan);
|
||||
@ -1874,24 +1957,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
ath9k_hw_spur_mitigate_freq(ah, chan);
|
||||
ah->eep_ops->set_board_values(ah, chan);
|
||||
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
|
||||
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
|
||||
REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
|
||||
| macStaId1
|
||||
| AR_STA_ID1_RTS_USE_DEF
|
||||
| (ah->config.
|
||||
ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
|
||||
| ah->sta_id1_defaults);
|
||||
ath_hw_setbssidmask(common);
|
||||
REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
|
||||
ath9k_hw_write_associd(ah);
|
||||
REG_WRITE(ah, AR_ISR, ~0);
|
||||
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
|
||||
|
||||
REGWRITE_BUFFER_FLUSH(ah);
|
||||
|
||||
ath9k_hw_set_operating_mode(ah, ah->opmode);
|
||||
ath9k_hw_reset_opmode(ah, macStaId1, saveDefAntenna);
|
||||
|
||||
r = ath9k_hw_rf_set_freq(ah, chan);
|
||||
if (r)
|
||||
@ -1899,17 +1965,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
|
||||
ath9k_hw_set_clockrate(ah);
|
||||
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
|
||||
for (i = 0; i < AR_NUM_DCU; i++)
|
||||
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
|
||||
|
||||
REGWRITE_BUFFER_FLUSH(ah);
|
||||
|
||||
ah->intr_txqs = 0;
|
||||
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
|
||||
ath9k_hw_resettxqueue(ah, i);
|
||||
|
||||
ath9k_hw_init_queues(ah);
|
||||
ath9k_hw_init_interrupt_masks(ah, ah->opmode);
|
||||
ath9k_hw_ani_cache_ini_regs(ah);
|
||||
ath9k_hw_init_qos(ah);
|
||||
@ -1964,38 +2020,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
|
||||
REGWRITE_BUFFER_FLUSH(ah);
|
||||
|
||||
/*
|
||||
* For big endian systems turn on swapping for descriptors
|
||||
*/
|
||||
if (AR_SREV_9100(ah)) {
|
||||
u32 mask;
|
||||
mask = REG_READ(ah, AR_CFG);
|
||||
if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
|
||||
ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
|
||||
mask);
|
||||
} else {
|
||||
mask =
|
||||
INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
|
||||
REG_WRITE(ah, AR_CFG, mask);
|
||||
ath_dbg(common, RESET, "Setting CFG 0x%x\n",
|
||||
REG_READ(ah, AR_CFG));
|
||||
}
|
||||
} else {
|
||||
if (common->bus_ops->ath_bus_type == ATH_USB) {
|
||||
/* Configure AR9271 target WLAN */
|
||||
if (AR_SREV_9271(ah))
|
||||
REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
|
||||
else
|
||||
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
|
||||
}
|
||||
#ifdef __BIG_ENDIAN
|
||||
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
|
||||
AR_SREV_9550(ah))
|
||||
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
|
||||
else
|
||||
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
|
||||
#endif
|
||||
}
|
||||
ath9k_hw_init_desc(ah);
|
||||
|
||||
if (ath9k_hw_btcoex_is_enabled(ah))
|
||||
ath9k_hw_btcoex_enable(ah);
|
||||
@ -2008,7 +2033,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
|
||||
if (AR_SREV_9300_20_OR_LATER(ah)) {
|
||||
ar9003_hw_bb_watchdog_config(ah);
|
||||
|
||||
ar9003_hw_disable_phy_restart(ah);
|
||||
}
|
||||
|
||||
|
@ -363,7 +363,6 @@ enum ath9k_int {
|
||||
ATH9K_INT_NOCARD = 0xffffffff
|
||||
};
|
||||
|
||||
#define CHANNEL_CW_INT 0x00002
|
||||
#define CHANNEL_CCK 0x00020
|
||||
#define CHANNEL_OFDM 0x00040
|
||||
#define CHANNEL_2GHZ 0x00080
|
||||
|
@ -387,8 +387,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
|
||||
u8 tid;
|
||||
|
||||
if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
|
||||
txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
|
||||
(!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
|
||||
txinfo->flags & IEEE80211_TX_CTL_INJECTED)
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
@ -981,30 +980,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
|
||||
|
||||
SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
|
||||
txc->s.ampdu_settings, factor);
|
||||
|
||||
for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
|
||||
txrate = &info->control.rates[i];
|
||||
if (txrate->idx >= 0) {
|
||||
txc->s.ri[i] =
|
||||
CARL9170_TX_SUPER_RI_AMPDU;
|
||||
|
||||
if (WARN_ON(!(txrate->flags &
|
||||
IEEE80211_TX_RC_MCS))) {
|
||||
/*
|
||||
* Not sure if it's even possible
|
||||
* to aggregate non-ht rates with
|
||||
* this HW.
|
||||
*/
|
||||
goto err_out;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
txrate->idx = 0;
|
||||
txrate->count = ar->hw->max_rate_tries;
|
||||
}
|
||||
|
||||
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1012,11 +987,31 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
|
||||
* taken from mac_control. For all fallback rate, the firmware
|
||||
* updates the mac_control flags from the rate info field.
|
||||
*/
|
||||
for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
|
||||
for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
|
||||
__le32 phy_set;
|
||||
txrate = &info->control.rates[i];
|
||||
if (txrate->idx < 0)
|
||||
break;
|
||||
|
||||
phy_set = carl9170_tx_physet(ar, info, txrate);
|
||||
if (i == 0) {
|
||||
/* first rate - part of the hw's frame header */
|
||||
txc->f.phy_control = phy_set;
|
||||
|
||||
if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
|
||||
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
|
||||
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
|
||||
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
|
||||
else if (carl9170_tx_cts_check(ar, txrate))
|
||||
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
|
||||
|
||||
} else {
|
||||
/* fallback rates are stored in the firmware's
|
||||
* retry rate set array.
|
||||
*/
|
||||
txc->s.rr[i - 1] = phy_set;
|
||||
}
|
||||
|
||||
SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
|
||||
txrate->count);
|
||||
|
||||
@ -1027,21 +1022,13 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
|
||||
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
|
||||
CARL9170_TX_SUPER_RI_ERP_PROT_S);
|
||||
|
||||
txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
|
||||
if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
|
||||
txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
|
||||
}
|
||||
|
||||
txrate = &info->control.rates[0];
|
||||
SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
|
||||
|
||||
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
|
||||
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
|
||||
else if (carl9170_tx_cts_check(ar, txrate))
|
||||
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
|
||||
|
||||
txc->s.len = cpu_to_le16(skb->len);
|
||||
txc->f.length = cpu_to_le16(len + FCS_LEN);
|
||||
txc->f.mac_control = mac_tmp;
|
||||
txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);
|
||||
|
||||
arinfo = (void *)info->rate_driver_data;
|
||||
arinfo->timeout = jiffies;
|
||||
@ -1381,9 +1368,9 @@ static void carl9170_tx(struct ar9170 *ar)
|
||||
}
|
||||
|
||||
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
|
||||
struct ieee80211_sta *sta, struct sk_buff *skb)
|
||||
struct ieee80211_sta *sta, struct sk_buff *skb,
|
||||
struct ieee80211_tx_info *txinfo)
|
||||
{
|
||||
struct _carl9170_tx_superframe *super = (void *) skb->data;
|
||||
struct carl9170_sta_info *sta_info;
|
||||
struct carl9170_sta_tid *agg;
|
||||
struct sk_buff *iter;
|
||||
@ -1450,7 +1437,7 @@ err_unlock:
|
||||
|
||||
err_unlock_rcu:
|
||||
rcu_read_unlock();
|
||||
super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
|
||||
txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
|
||||
carl9170_tx_status(ar, skb, false);
|
||||
ar->tx_dropped++;
|
||||
return false;
|
||||
@ -1492,7 +1479,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
|
||||
* sta == NULL checks are redundant in this
|
||||
* special case.
|
||||
*/
|
||||
run = carl9170_tx_ampdu_queue(ar, sta, skb);
|
||||
run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
|
||||
if (run)
|
||||
carl9170_tx_ampdu(ar);
|
||||
|
||||
|
@ -9,5 +9,7 @@ wil6210-objs += wmi.o
wil6210-objs += interrupt.o
wil6210-objs += txrx.o

subdir-ccflags-y += -Werror
ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
	subdir-ccflags-y += -Werror
endif
subdir-ccflags-y += -D__CHECK_ENDIAN__

@ -14,16 +14,6 @@
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/wireless.h>
|
||||
#include <linux/ieee80211.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/version.h>
|
||||
#include <net/cfg80211.h>
|
||||
|
||||
#include "wil6210.h"
|
||||
#include "wmi.h"
|
||||
|
||||
@ -292,7 +282,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
|
||||
|
||||
/* WMI_CONNECT_CMD */
|
||||
memset(&conn, 0, sizeof(conn));
|
||||
switch (bss->capability & 0x03) {
|
||||
switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
|
||||
case WLAN_CAPABILITY_DMG_TYPE_AP:
|
||||
conn.network_type = WMI_NETTYPE_INFRA;
|
||||
break;
|
||||
@ -437,17 +427,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = wmi_set_channel(wil, channel->hw_value);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* MAC address - pre-requisite for other commands */
|
||||
wmi_set_mac_address(wil, ndev->dev_addr);
|
||||
|
||||
/* IE's */
|
||||
/* bcon 'head IE's are not relevant for 60g band */
|
||||
wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
|
||||
bcon->beacon_ies);
|
||||
/*
|
||||
* FW do not form regular beacon, so bcon IE's are not set
|
||||
* For the DMG bcon, when it will be supported, bcon IE's will
|
||||
* be reused; add something like:
|
||||
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
|
||||
* bcon->beacon_ies);
|
||||
*/
|
||||
wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
|
||||
bcon->proberesp_ies);
|
||||
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
|
||||
@ -455,7 +446,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
|
||||
|
||||
wil->secure_pcp = info->privacy;
|
||||
|
||||
rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
|
||||
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
|
||||
channel->hw_value);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -472,11 +464,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
|
||||
{
|
||||
int rc = 0;
|
||||
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
|
||||
struct wireless_dev *wdev = ndev->ieee80211_ptr;
|
||||
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
|
||||
|
||||
/* To stop beaconing, set BI to 0 */
|
||||
rc = wmi_set_bcon(wil, 0, wmi_nettype);
|
||||
rc = wmi_pcp_stop(wil);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -1,20 +0,0 @@
#ifndef WIL_DBG_HEXDUMP_H_
#define WIL_DBG_HEXDUMP_H_

#include <linux/printk.h>
#include <linux/dynamic_debug.h>

#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
				 groupsize, buf, len, ascii) \
	dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
			 groupsize, buf, len, ascii)

#else /* defined(CONFIG_DYNAMIC_DEBUG) */
#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
				 groupsize, buf, len, ascii) \
	print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
		       groupsize, buf, len, ascii)
#endif /* defined(CONFIG_DYNAMIC_DEBUG) */

#endif /* WIL_DBG_HEXDUMP_H_ */