Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Two entries were added at the same time to the IFLA policy table, while parallel bug fixes to decnet routing dst handling overlapped with the dst gc removal in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3d09198243
@@ -3811,6 +3811,13 @@
 			expediting.  Set to zero to disable automatic
 			expediting.

+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
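Most of the architecture hunks that follow replace the bare `vma->vm_start` test with `vm_start_gap(vma)`, and parisc additionally checks `vm_end_gap(prev)`. As a rough sketch of the helpers this series adds to include/linux/mm.h (shape reconstructed from the hunks, not quoted verbatim from the commit), they fold the boot-tunable guard gap into the apparent extent of stack VMAs:

/*
 * Sketch (not verbatim) of the stack-guard-gap helpers the hunks below
 * switch to; stack_guard_gap is the boot parameter documented above,
 * defaulting to 256 pages.
 */
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* guard against underflow */
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)	/* guard against overflow */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}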
@@ -22,7 +22,8 @@ Required properties :
 - #clock-cells : must contain 1
 - #reset-cells : must contain 1

-For the PRCM CCUs on H3/A64, one more clock is needed:
+For the PRCM CCUs on H3/A64, two more clocks are needed:
+- "pll-periph": the SoC's peripheral PLL from the main CCU
 - "iosc": the SoC's internal frequency oscillator

 Example for generic CCU:
@@ -39,8 +40,8 @@ Example for PRCM CCU:
 r_ccu: clock@01f01400 {
 	compatible = "allwinner,sun50i-a64-r-ccu";
 	reg = <0x01f01400 0x100>;
-	clocks = <&osc24M>, <&osc32k>, <&iosc>;
-	clock-names = "hosc", "losc", "iosc";
+	clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
+	clock-names = "hosc", "losc", "iosc", "pll-periph";
 	#clock-cells = <1>;
 	#reset-cells = <1>;
 };
@@ -37,7 +37,7 @@ Required properties:
     "brcm,bcm6328-switch"
     "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"

-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.

 Examples:
@@ -27,6 +27,7 @@ Optional properties:
   of the device. On many systems this is wired high so the device goes
   out of reset at power-on, but if it is under program control, this
   optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies

 Examples:
Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote

 # *DOCUMENTATION*
@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,

 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -220,7 +220,7 @@

 	mmc1_pins: pinmux_mmc1_pins {
 		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7)	/* spi0_cs1.gpio0_6 */
+			AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7)	/* uart0_rtsn.gpio1_9 */
 		>;
 	};
@@ -280,10 +280,6 @@
 			AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7)	/* nKbdReset - gpmc_ad13.gpio1_13 */
 			AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7)	/* nDispReset - gpmc_ad14.gpio1_14 */
 			AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7)	/* USB1_enPower - gpmc_a1.gpio1_17 */
-			/* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */
-			AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7)	/* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */
-			AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7)	/* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */
-			AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7)	/* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */
 			/* PDI Bus - Battery system */
 			AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7)	/* nBattReset gpmc_a0.gpio1_16 */
 			AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7)	/* BattPDIData gpmc_ad15.gpio1_15 */
@@ -384,7 +380,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins>;
 	bus-width = <4>;
-	cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+	cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&vmmcsd_fixed>;
 };
@@ -598,10 +598,11 @@
 		};

 		r_ccu: clock@1f01400 {
-			compatible = "allwinner,sun50i-a64-r-ccu";
+			compatible = "allwinner,sun8i-h3-r-ccu";
 			reg = <0x01f01400 0x100>;
-			clocks = <&osc24M>, <&osc32k>, <&iosc>;
-			clock-names = "hosc", "losc", "iosc";
+			clocks = <&osc24M>, <&osc32k>, <&iosc>,
+				 <&ccu 9>;
+			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 		};
@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,

 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -447,8 +447,9 @@
 		r_ccu: clock@1f01400 {
 			compatible = "allwinner,sun50i-a64-r-ccu";
 			reg = <0x01f01400 0x100>;
-			clocks = <&osc24M>, <&osc32k>, <&iosc>;
-			clock-names = "hosc", "losc", "iosc";
+			clocks = <&osc24M>, <&osc32k>, <&iosc>,
+				 <&ccu 11>;
+			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 		};
@@ -40,7 +40,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */

-#include "sunxi-h3-h5.dtsi"
+#include <arm/sunxi-h3-h5.dtsi>

 / {
 	cpus {
@@ -1 +0,0 @@
-../../../../arm/boot/dts/sunxi-h3-h5.dtsi
@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@
 		-DADDR_BITS=$(ADDR_BITS) \
 		-DADDR_CELLS=$(itb_addr_cells)

-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)

-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)

-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)

-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)

-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)

 quiet_cmd_itb-image = ITB $@
@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
 #define LAST_PKMAP 1024
+#endif
+
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;

 #define flush_insn_slot(p)						\
 do {									\
-	flush_icache_range((unsigned long)p->addr,			\
+	if (p->addr)							\
+		flush_icache_range((unsigned long)p->addr,		\
 			   (unsigned long)p->addr +			\
 			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));	\
 } while (0)
@@ -19,6 +19,10 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
 extern int temp_tlb_entry;

 /*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,

 #define VMALLOC_START	MAP_BASE

-#define PKMAP_BASE	(0xfe000000UL)
+#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)

 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
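A quick worked example of the new pkmap placement, with illustrative numbers that assume 4 KiB pages (not taken from the patch itself):

/*
 * LAST_PKMAP = 1024  ->  window = 1024 << 12 = 4 MiB
 * PKMAP_END  = FIXADDR_START rounded down to a 4 MiB boundary
 * PKMAP_BASE = PKMAP_END - 4 MiB
 *
 * With CONFIG_PHYS_ADDR_T_64BIT (LAST_PKMAP = 512 from the highmem.h
 * hunk above), both the alignment and the window shrink to 2 MiB.
 * Either way the pkmap area now floats just below the fixmap instead
 * of sitting at a hard-coded 0xfe000000.
 */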
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif
@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)

 #endif

-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-	if (ip >= (unsigned long)_stext &&
-	    ip <= (unsigned long)_etext)
-		return 1;
-	return 0;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE

 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
 	 * If ip is in kernel space, no long call, otherwise, long call is
 	 * needed.
 	 */
-	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;

-	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
-	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
 						INSN_NOP : insn_la_mcount[1]);
 #endif
 }
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
 	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
 	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 	 * entries configured through the tracing/set_graph_function interface.
 	 */

-	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
 	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

 	/* Only trace if the calling function expects to */
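The replacement helper lives in kernel/extable.c. A rough sketch of its shape in this era (reconstructed, not quoted from the commit) shows the practical difference from the removed private copy: init-section text is also accepted while the system is still booting, which is exactly when ftrace may patch early call sites.

/* Sketch (not verbatim) of core_kernel_text() from kernel/extable.c. */
int core_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext)
		return 1;

	/* unlike the removed in_kernel_space(), init text also counts
	 * while the kernel is still booting */
	if (system_state == SYSTEM_BOOTING &&
	    init_kernel_text(addr))
		return 1;
	return 0;
}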
@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		break;
 	case CPU_P5600:
 	case CPU_P6600:
-	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
 		base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 			raw_event.range = P;
 #endif
 		break;
+	case CPU_I6400:
+		/* 8-bit event numbers */
+		base_id = config & 0xff;
+		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+		break;
 	case CPU_1004K:
 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,

 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
 	/*
 	 * Fixed mappings:
 	 */
-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);

 #ifdef CONFIG_HIGHMEM
 	/*
 	 * Permanent kmaps:
 	 */
 	vaddr = PKMAP_BASE;
-	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
 	pud = pud_offset(pgd, vaddr);
@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		else
 			addr = PAGE_ALIGN(addr);

-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}

@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
@@ -104,7 +104,7 @@
 	"1:	"PPC_TLNEI"	%4,0\n"			\
 	_EMIT_BUG_ENTRY					\
 	: : "i" (__FILE__), "i" (__LINE__),		\
-	  "i" (BUGFLAG_TAINT(TAINT_WARN)),		\
+	  "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 	  "i" (sizeof(struct bug_entry)),		\
 	  "r" (__ret_warn_on));				\
 } \
@@ -94,11 +94,13 @@ struct xive_q {
  * store at 0 and some ESBs support doing a trigger via a
  * separate trigger page.
  */
-#define XIVE_ESB_GET		0x800
-#define XIVE_ESB_SET_PQ_00	0xc00
-#define XIVE_ESB_SET_PQ_01	0xd00
-#define XIVE_ESB_SET_PQ_10	0xe00
-#define XIVE_ESB_SET_PQ_11	0xf00
+#define XIVE_ESB_STORE_EOI	0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI	0x000 /* Load */
+#define XIVE_ESB_GET		0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00	0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01	0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10	0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11	0xf00 /* Load */

 #define XIVE_ESB_VAL_P		0x2
 #define XIVE_ESB_VAL_Q		0x1
@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		__x_writeq(0, __x_eoi_page(xd));
+		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		opal_int_eoi(hw_irq);
 	} else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 		 * properly.
 		 */
 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
-			__x_readq(__x_eoi_page(xd));
+			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 		else {
 			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	/*
@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }

 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 	if (WARN_ON(!gpdev))
 		return NULL;

-	if (WARN_ON(!gpdev->dev.of_node))
+	/* Not all PCI devices have device-tree nodes */
+	if (!gpdev->dev.of_node)
 		return NULL;

 	/* Get assoicated PCI device */
@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		out_be64(xd->eoi_mmio, 0);
+		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		/*
 		 * The FW told us to call it. This happens for some
@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}

@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}
@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,

 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,

 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi

 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,

 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
@@ -29,6 +29,7 @@ struct pt_regs;
 	} while (0)

 extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_bug(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
 	return ud == INSN_UD0 || ud == INSN_UD2;
 }

-static int fixup_bug(struct pt_regs *regs, int trapnr)
+int fixup_bug(struct pt_regs *regs, int trapnr)
 {
 	if (trapnr != X86_TRAP_UD)
 		return 0;
@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
 	if (fixup_exception(regs, trapnr))
 		return;

+	if (fixup_bug(regs, trapnr))
+		return;
+
 fail:
 	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
 		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -161,16 +161,16 @@ static int page_size_mask;

 static void __init probe_page_size_mask(void)
 {
-#if !defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
 	 * use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
+	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
 		page_size_mask |= 1 << PG_LEVEL_2M;
-#endif
+	else
+		direct_gbpages = 0;

 	/* Enable PSE if available */
 	if (boot_cpu_has(X86_FEATURE_PSE))
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)
@@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 }

 /**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj: the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
  *
  * Description:
- *     blk_release_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request().  It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task it to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
+ *     blk_release_queue is the counterpart of blk_init_queue(). It should be
+ *     called when a request queue is being released; typically when a block
+ *     device is being de-registered. Its primary task it to free the queue
+ *     itself.
  *
- * Note:
+ * Notes:
  *     The low level driver must have finished any outstanding requests first
  *     via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ *     Although blk_release_queue() may be called with preemption disabled,
+ *     __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
 {
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(work, typeof(*q), release_work);

 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
 		blk_stat_remove_callback(q, q->poll_cb);
@@ -834,6 +835,15 @@
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }

+static void blk_release_queue(struct kobject *kobj)
+{
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
+
+	INIT_WORK(&q->release_work, __blk_release_queue);
+	schedule_work(&q->release_work);
+}
+
 static const struct sysfs_ops queue_sysfs_ops = {
 	.show	= queue_attr_show,
 	.store	= queue_attr_store,
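The shape of this fix generalizes: a kobject release callback can run in atomic context, so anything that might sleep is pushed off to a workqueue. A minimal sketch of the pattern with a hypothetical `foo` object (all names here are illustrative, not from the patch):

/* Sketch: defer a sleeping release out of an atomic-context callback. */
struct foo {
	struct kobject kobj;
	struct work_struct release_work;
};

static void foo_release_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, release_work);

	/* safe to sleep here: take mutexes, flush, free */
	kfree(f);
}

static void foo_kobj_release(struct kobject *kobj)
{
	struct foo *f = container_of(kobj, struct foo, kobj);

	INIT_WORK(&f->release_work, foo_release_work);
	schedule_work(&f->release_work);	/* callable in atomic context */
}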
@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
 config COMMON_CLK_GXBB
 	bool
 	depends on COMMON_CLK_AMLOGIC
+	select RESET_CONTROLLER
 	help
 	  Support for the clock controller on AmLogic S905 devices, aka gxbb.
 	  Say Y if you want peripherals and CPU frequency scaling to work.
@@ -156,6 +156,7 @@ config SUN8I_R_CCU
 	bool "Support for Allwinner SoCs' PRCM CCUs"
 	select SUNXI_CCU_DIV
 	select SUNXI_CCU_GATE
+	select SUNXI_CCU_MP
 	default MACH_SUN8I || (ARCH_SUNXI && ARM64)

 endif
@@ -31,7 +31,9 @@
 #define CLK_PLL_VIDEO0_2X	8
 #define CLK_PLL_VE		9
 #define CLK_PLL_DDR0		10
-#define CLK_PLL_PERIPH0		11
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X	12
 #define CLK_PLL_PERIPH1		13
 #define CLK_PLL_PERIPH1_2X	14
@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk,	"ahb-ss",	"ahb",
 static SUNXI_CCU_GATE(ahb_dma_clk,	"ahb-dma",	"ahb",
 		      0x060, BIT(6), 0);
 static SUNXI_CCU_GATE(ahb_bist_clk,	"ahb-bist",	"ahb",
-		      0x060, BIT(6), 0);
+		      0x060, BIT(7), 0);
 static SUNXI_CCU_GATE(ahb_mmc0_clk,	"ahb-mmc0",	"ahb",
 		      0x060, BIT(8), 0);
 static SUNXI_CCU_GATE(ahb_mmc1_clk,	"ahb-mmc1",	"ahb",
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
 				 0x12c, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
-				 0x12c, 0, 4, 24, 3, BIT(31),
+				 0x130, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);

 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
@@ -29,7 +29,9 @@
 #define CLK_PLL_VIDEO		6
 #define CLK_PLL_VE		7
 #define CLK_PLL_DDR		8
-#define CLK_PLL_PERIPH0		9
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X	10
 #define CLK_PLL_GPU		11
 #define CLK_PLL_PERIPH1		12
@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
 	[RST_BUS_EMAC]		=  { 0x2c0, BIT(17) },
 	[RST_BUS_HSTIMER]	=  { 0x2c0, BIT(19) },
 	[RST_BUS_SPI0]		=  { 0x2c0, BIT(20) },
-	[RST_BUS_OTG]		=  { 0x2c0, BIT(23) },
+	[RST_BUS_OTG]		=  { 0x2c0, BIT(24) },
 	[RST_BUS_EHCI0]		=  { 0x2c0, BIT(26) },
 	[RST_BUS_OHCI0]		=  { 0x2c0, BIT(29) },
@@ -47,7 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,		0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,	0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,	0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,		0400, DMI_PRODUCT_UUID);
-DEFINE_DMI_ATTR_WITH_SHOW(product_family,	0400, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,	0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,		0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,		0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,	0444, DMI_BOARD_VERSION);
@@ -192,7 +192,7 @@ static void __init dmi_id_init_attr_table(void)
 	ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
 	ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
 	ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
-	ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+	ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
 	ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
 	ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
 	ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
@@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,

 	buf = dmi_early_remap(dmi_base, orig_dmi_len);
 	if (buf == NULL)
-		return -1;
+		return -ENOMEM;

 	dmi_decode_table(buf, decode, NULL);

@@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
 	const char *d = (const char *) dm;
 	const char *p;

-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= string)
 		return;

 	p = dmi_string(dm, d[string]);
@@ -191,13 +191,14 @@
 static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 		int index)
 {
-	const u8 *d = (u8 *) dm + index;
+	const u8 *d;
 	char *s;
 	int is_ff = 1, is_00 = 1, i;

-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= index + 16)
 		return;

+	d = (u8 *) dm + index;
 	for (i = 0; i < 16 && (is_ff || is_00); i++) {
 		if (d[i] != 0x00)
 			is_00 = 0;
@@ -228,16 +229,17 @@
 static void __init dmi_save_type(const struct dmi_header *dm, int slot,
 		int index)
 {
-	const u8 *d = (u8 *) dm + index;
+	const u8 *d;
 	char *s;

-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= index)
 		return;

 	s = dmi_alloc(4);
 	if (!s)
 		return;

+	d = (u8 *) dm + index;
 	sprintf(s, "%u", *d & 0x7F);
 	dmi_ident[slot] = s;
 }
@@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)

 static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
 {
-	int i, count = *(u8 *)(dm + 1);
+	int i, count;
 	struct dmi_device *dev;

+	if (dm->length < 0x05)
+		return;
+
+	count = *(u8 *)(dm + 1);
 	for (i = 1; i <= count; i++) {
 		const char *devname = dmi_string(dm, i);

@@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
 	const char *name;
 	const u8 *d = (u8 *)dm;

+	if (dm->length < 0x0B)
+		return;
+
 	/* Skip disabled device */
 	if ((d[0x5] & 0x80) == 0)
 		return;
@@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
 	const char *d = (const char *)dm;
 	static int nr;

-	if (dm->type != DMI_ENTRY_MEM_DEVICE)
+	if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
 		return;
 	if (nr >= dmi_memdev_nr) {
 		pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -649,6 +658,21 @@ void __init dmi_scan_machine(void)
 		if (p == NULL)
 			goto error;

+		/*
+		 * Same logic as above, look for a 64-bit entry point
+		 * first, and if not found, fall back to 32-bit entry point.
+		 */
+		memcpy_fromio(buf, p, 16);
+		for (q = p + 16; q < p + 0x10000; q += 16) {
+			memcpy_fromio(buf + 16, q, 16);
+			if (!dmi_smbios3_present(buf)) {
+				dmi_available = 1;
+				dmi_early_unmap(p, 0x10000);
+				goto out;
+			}
+			memcpy(buf, buf + 16, 16);
+		}
+
 		/*
 		 * Iterate over all possible DMI header addresses q.
 		 * Maintain the 32 bytes around q in buf.  On the
@@ -659,7 +683,7 @@ void __init dmi_scan_machine(void)
 		memset(buf, 0, 16);
 		for (q = p; q < p + 0x10000; q += 16) {
 			memcpy_fromio(buf + 16, q, 16);
-			if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
+			if (!dmi_present(buf)) {
 				dmi_available = 1;
 				dmi_early_unmap(p, 0x10000);
 				goto out;
@@ -993,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date);
 *	@decode: Callback function
 *	@private_data: Private data to be passed to the callback function
 *
- *	Returns -1 when the DMI table can't be reached, 0 on success.
+ *	Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ *	or a different negative error code if DMI walking fails.
 */
int dmi_walk(void (*decode)(const struct dmi_header *, void *),
	     void *private_data)
@@ -1001,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
 	u8 *buf;

 	if (!dmi_available)
-		return -1;
+		return -ENXIO;

 	buf = dmi_remap(dmi_base, dmi_len);
 	if (buf == NULL)
-		return -1;
+		return -ENOMEM;

 	dmi_decode_table(buf, decode, private_data);
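With the return values now proper errnos, a caller can distinguish "no DMI on this machine" from a transient mapping failure. A hypothetical usage sketch (count_entry and count_dmi_entries are made-up names, not from the patch):

static void count_entry(const struct dmi_header *dh, void *priv)
{
	(*(int *)priv)++;		/* invoked once per SMBIOS structure */
}

static int count_dmi_entries(void)
{
	int count = 0;
	int ret = dmi_walk(count_entry, &count);

	if (ret)	/* -ENXIO: no DMI; -ENOMEM: remap failed */
		return ret;
	return count;
}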
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					    (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);

 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
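The same two-line replacement recurs in the dce_v11_0, dce_v6_0, dce_v8_0 and radeon hunks below. The arithmetic motivation, with illustrative numbers not taken from the patch: on a 32-bit build `1000000UL` is only 32 bits wide, so the old product wraps for any crtc_htotal above UINT_MAX / 1000000 = 4294 pixels. Promoting to u64 and dividing with div_u64() keeps the full product and avoids a native 64-bit division on 32-bit hosts:

/* Sketch of the overflow, illustrative timing values. */
u32 crtc_htotal = 4500;			/* plausible large timing */
u64 product = (u64)crtc_htotal * 1000000;	/* 4.5e9: needs 33 bits */
u32 line_time = min((u32)div_u64(product, 297000 /* kHz clock */),
		    (u32)65535);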
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					    (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);

 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 	fixed20_12 a, b, c;

 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					    (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					    (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);

 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1,6 +1,7 @@
 config DRM_DW_HDMI
 	tristate
 	select DRM_KMS_HELPER
+	select REGMAP_MMIO

 config DRM_DW_HDMI_AHB_AUDIO
 	tristate "Synopsys Designware AHB Audio interface"
@@ -36,10 +36,6 @@
 #define VGT_VERSION_MAJOR 1
 #define VGT_VERSION_MINOR 0

-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
-	INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
 /*
  * notifications from guest to vgpu device model
  */
@@ -55,8 +51,8 @@ enum vgt_g2v_type {

 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */
-	uint16_t version_major;
-	uint16_t version_minor;
+	u16 version_major;
+	u16 version_minor;
 	u32 vgt_id;		/* ID of vGT instance */
 	u32 rsv1[12];		/* pad to offset 0x40 */
 	/*
@@ -60,8 +60,8 @@
  */
 void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
-	uint64_t magic;
-	uint32_t version;
+	u64 magic;
+	u16 version_major;

 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 	if (magic != VGT_MAGIC)
 		return;

-	version = INTEL_VGT_IF_VERSION_ENCODE(
-		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
-		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
-	if (version != INTEL_VGT_IF_VERSION) {
+	version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+	if (version_major < VGT_VERSION_MAJOR) {
 		DRM_INFO("VGT interface version mismatch!\n");
 		return;
 	}
@@ -4598,7 +4598,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)

 static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
-		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
+		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h)
 {
 	struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4607,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		to_intel_crtc(crtc_state->base.crtc);
 	int need_scaling;

-	need_scaling = drm_rotation_90_or_270(rotation) ?
-		(src_h != dst_w || src_w != dst_h):
-		(src_w != dst_w || src_h != dst_h);
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
+	need_scaling = src_w != dst_w || src_h != dst_h;

 	/*
 	 * if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4674,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		&state->scaler_state.scaler_id,
 		state->pipe_src_w, state->pipe_src_h,
 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4700,7 +4703,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	ret = skl_update_scaler(crtc_state, force_detach,
 				drm_plane_index(&intel_plane->base),
 				&plane_state->scaler_id,
-				plane_state->base.rotation,
 				drm_rect_width(&plane_state->base.src) >> 16,
 				drm_rect_height(&plane_state->base.src) >> 16,
 				drm_rect_width(&plane_state->base.dst),
@@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,

 	/* n.b., src is 16.16 fixed point, dst is whole integer */
 	if (plane->id == PLANE_CURSOR) {
+		/*
+		 * Cursors only support 0/180 degree rotation,
+		 * hence no need to account for rotation here.
+		 */
 		src_w = pstate->base.src_w;
 		src_h = pstate->base.src_h;
 		dst_w = pstate->base.crtc_w;
 		dst_h = pstate->base.crtc_h;
 	} else {
+		/*
+		 * Src coordinates are already rotated by 270 degrees for
+		 * the 90/270 degree plane rotation cases (to match the
+		 * GTT mapping), hence no need to account for rotation here.
+		 */
 		src_w = drm_rect_width(&pstate->base.src);
 		src_h = drm_rect_height(&pstate->base.src);
 		dst_w = drm_rect_width(&pstate->base.dst);
 		dst_h = drm_rect_height(&pstate->base.dst);
 	}

-	if (drm_rotation_90_or_270(pstate->base.rotation))
-		swap(dst_w, dst_h);
-
 	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
 	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);

@@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	if (y && format != DRM_FORMAT_NV12)
 		return 0;

+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	height = drm_rect_height(&intel_pstate->base.src) >> 16;

-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(width, height);
-
 	/* for planar format */
 	if (format == DRM_FORMAT_NV12) {
 		if (y)	/* y-plane data rate */
@@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
 		return 8;

+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
 	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;

-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(src_w, src_h);
-
 	/* Halve UV plane width and height for NV12 */
 	if (fb->format->format == DRM_FORMAT_NV12 && !y) {
 		src_w /= 2;
@@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		width = intel_pstate->base.crtc_w;
 		height = intel_pstate->base.crtc_h;
 	} else {
+		/*
+		 * Src coordinates are already rotated by 270 degrees for
+		 * the 90/270 degree plane rotation cases (to match the
+		 * GTT mapping), hence no need to account for rotation here.
+		 */
 		width = drm_rect_width(&intel_pstate->base.src) >> 16;
 		height = drm_rect_height(&intel_pstate->base.src) >> 16;
 	}

-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(width, height);
-
 	cpp = fb->format->cpp[0];
 	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,


 	if (IS_G200_SE(mdev)) {
-		if (mdev->unique_rev_id >= 0x02) {
+		if (mdev->unique_rev_id >= 0x04) {
+			WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+			WREG8(MGAREG_CRTCEXT_DATA, 0);
+		} else if (mdev->unique_rev_id >= 0x02) {
 			u8 hi_pri_lvl;
 			u32 bpp;
 			u32 mb;
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
 				> (30100 * 1024))
 				return MODE_BANDWIDTH;
+		} else {
+			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+				> (55000 * 1024))
+				return MODE_BANDWIDTH;
 		}
 	} else if (mdev->type == G200_WB) {
 		if (mode->hdisplay > 1280)
@@ -35,6 +35,13 @@
 #include "mxsfb_drv.h"
 #include "mxsfb_regs.h"

+#define MXS_SET_ADDR		0x4
+#define MXS_CLR_ADDR		0x8
+#define MODULE_CLKGATE		BIT(30)
+#define MODULE_SFTRST		BIT(31)
+/* 1 second delay should be plenty of time for block reset */
+#define RESET_TIMEOUT		1000000
+
 static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
 {
 	return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
 		clk_disable_unprepare(mxsfb->clk_disp_axi);
 }

+/*
+ * Clear the bit and poll it cleared.  This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+	u32 reg;
+
+	writel(mask, addr + MXS_CLR_ADDR);
+	return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+	int ret;
+
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (ret)
+		return ret;
+
+	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (ret)
+		return ret;
+
+	return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
 static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 {
 	struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 	 */
 	mxsfb_enable_axi_clk(mxsfb);

+	/* Mandatory eLCDIF reset as per the Reference Manual */
+	err = mxsfb_reset_block(mxsfb->base);
+	if (err)
+		return;
+
 	/* Clear the FIFOs */
 	writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
@@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 	u32 tmp, wm_mask;

 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					    (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);

 		/* watermark for high clocks */
 		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
@@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;

 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					    (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 		dram_channels = evergreen_get_number_of_dram_channels(rdev);
@@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 	}

 	/* TODO: is this still necessary on NI+ ? */
-	if ((cmd == 0 || cmd == 1 || cmd == 0x3) &&
+	if ((cmd == 0 || cmd == 0x3) &&
 	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 			  start, end);
@@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;

 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					    (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
@@ -451,18 +451,6 @@ fail:


 #ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *
-tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
-{
-	struct tegra_drm_context *context;
-
-	mutex_lock(&file->lock);
-	context = idr_find(&file->contexts, id);
-	mutex_unlock(&file->lock);
-
-	return context;
-}
-
 static int tegra_gem_create(struct drm_device *drm, void *data,
 			    struct drm_file *file)
 {
@@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
 	if (err < 0)
 		return err;

-	err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
+	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
 	if (err < 0) {
 		client->ops->close_channel(context);
 		return err;
@@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,

 	mutex_lock(&fpriv->lock);

-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -EINVAL;
 		goto unlock;
@@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,

 	mutex_lock(&fpriv->lock);

-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
@@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,

 	mutex_lock(&fpriv->lock);

-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
@@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,

 	mutex_lock(&fpriv->lock);

-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
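One reading of the idr_alloc() change (my inference from the hunk, not stated in the diff): with a minimum id of 0 the first channel context gets handle 0, which is easy to confuse with "no context"; starting allocation at 1 keeps every valid handle non-zero. Sketch of the call convention:

/* idr_alloc(idr, ptr, start, end, gfp): end == 0 means no upper bound.
 * Starting at 1 keeps id 0 free as an "invalid" value. */
err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
if (err < 0)
	return err;	/* on success, err is the allocated id (>= 1) */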
@@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)

 	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
 	if (IS_ERR(host->rst)) {
-		err = PTR_ERR(host->clk);
+		err = PTR_ERR(host->rst);
 		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
 		return err;
 	}
@@ -319,6 +319,9 @@
 #define USB_VENDOR_ID_DELCOM		0x0fc5
 #define USB_DEVICE_ID_DELCOM_VISUAL_IND	0xb080

+#define USB_VENDOR_ID_DELL		0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE	0x301a
+
 #define USB_VENDOR_ID_DELORME		0x1163
 #define USB_DEVICE_ID_DELORME_EARTHMATE	0x0100
 #define USB_DEVICE_ID_DELORME_EM_LT20	0x0200
@@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,

 	if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
 		magicmouse_emit_buttons(msc, clicks & 3);
-		input_mt_report_pointer_emulation(input, true);
 		input_report_rel(input, REL_X, x);
 		input_report_rel(input, REL_Y, y);
 	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
 		__clear_bit(BTN_RIGHT, input->keybit);
 		__clear_bit(BTN_MIDDLE, input->keybit);
 		__set_bit(BTN_MOUSE, input->keybit);
+		__set_bit(BTN_TOOL_FINGER, input->keybit);
+		__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+		__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+		__set_bit(BTN_TOOL_QUADTAP, input->keybit);
+		__set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+		__set_bit(BTN_TOUCH, input->keybit);
+		__set_bit(INPUT_PROP_POINTER, input->propbit);
 		__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
 	}

-	__set_bit(BTN_TOOL_FINGER, input->keybit);
-	__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
-	__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
-	__set_bit(BTN_TOOL_QUADTAP, input->keybit);
-	__set_bit(BTN_TOOL_QUINTTAP, input->keybit);
-	__set_bit(BTN_TOUCH, input->keybit);
-	__set_bit(INPUT_PROP_POINTER, input->propbit);
-
 	__set_bit(EV_ABS, input->evbit);
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,

/* unmap the data buffer */
if (dma_size != 0)
dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
dma_unmap_single(dev, dma_addr, dma_size, dma_direction);

if (unlikely(!time_left)) {
dev_err(dev, "completion wait timed out\n");
@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICFBSCR, TCYC06);

dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
priv->msg->len, priv->dma_direction);
sg_dma_len(&priv->sg), priv->dma_direction);

priv->dma_direction = DMA_NONE;
}
@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
int count;
unsigned int count, tmp;

for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
if (!meson_sar_adc_get_fifo_count(indio_dev))
break;

regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0);
regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
}
}
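The meson_saradc hunk above matters because regmap_read() takes a pointer as its value out-parameter; the old code passed the literal 0, i.e. a null pointer, for a read whose result was only being discarded. A hedged sketch of the drain-and-discard pattern now used:

/* Sketch: drain one FIFO word through regmap while discarding it.
 * regmap_read(map, reg, &val) is the real API; "discard" is a scratch
 * variable, never a null pointer. */
unsigned int discard;
regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &discard);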
@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
adc->dev = dev;

iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores)
return -EINVAL;

adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
if (!adc->base)
return -ENOMEM;

init_completion(&adc->completion);
spin_lock_init(&adc->lock);
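The mxs-lradc hunk above hinges on a kernel convention: devm_ioremap() reports failure with NULL, not an ERR_PTR(), so the old IS_ERR()/PTR_ERR() pair could never fire and a failed mapping sailed through. A hedged sketch of the two conventions side by side:

/* Sketch: NULL-returning vs ERR_PTR-returning ioremap variants.
 * Variant A, as fixed in the hunk above: */
adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
if (!adc->base)			/* devm_ioremap() fails with NULL */
	return -ENOMEM;
/* Variant B, the ERR_PTR-returning helper, where IS_ERR() is correct: */
base = devm_ioremap_resource(dev, iores);
if (IS_ERR(base))
	return PTR_ERR(base);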
@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
@ -14,6 +14,7 @@

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>
@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
static const struct inv_mpu6050_reg_map reg_set_6500 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
.accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
.user_ctrl = INV_MPU6050_REG_USER_CTRL,
.fifo_en = INV_MPU6050_REG_FIFO_EN,
.gyro_config = INV_MPU6050_REG_GYRO_CONFIG,

@ -210,6 +211,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
}
EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);

/**
* inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
*
* MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
* MPU6500 and above have a dedicated register for accelerometer
*/
static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
enum inv_mpu6050_filter_e val)
{
int result;

result = regmap_write(st->map, st->reg->lpf, val);
if (result)
return result;

switch (st->chip_type) {
case INV_MPU6050:
case INV_MPU6000:
case INV_MPU9150:
/* old chips, nothing to do */
result = 0;
break;
default:
/* set accel lpf */
result = regmap_write(st->map, st->reg->accel_lpf, val);
break;
}

return result;
}

/**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
*

@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
return result;

d = INV_MPU6050_FILTER_20HZ;
result = regmap_write(st->map, st->reg->lpf, d);
result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
if (result)
return result;

@ -537,6 +568,8 @@ error_write_raw:
* would be aliasing. This function basically searches for the
* correct low pass parameters based on the fifo rate, e.g.,
* sampling frequency.
*
* lpf is set automatically when setting sampling rate to avoid any aliases.
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{

@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++;
data = d[i];
result = regmap_write(st->map, st->reg->lpf, data);
result = inv_mpu6050_set_lpf_regs(st, data);
if (result)
return result;
st->chip_config.lpf = data;

@ -28,6 +28,7 @@
* struct inv_mpu6050_reg_map - Notable registers.
* @sample_rate_div: Divider applied to gyro output rate.
* @lpf: Configures internal low pass filter.
* @accel_lpf: Configures accelerometer low pass filter.
* @user_ctrl: Enables/resets the FIFO.
* @fifo_en: Determines which data will appear in FIFO.
* @gyro_config: gyro config register.

@ -47,6 +48,7 @@
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
u8 lpf;
u8 accel_lpf;
u8 user_ctrl;
u8 fifo_en;
u8 gyro_config;

@ -188,6 +190,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_FIFO_THRESHOLD 500

/* mpu6500 registers */
#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
#define INV_MPU6500_REG_ACCEL_OFFSET 0x77

/* delay time in milliseconds */
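With the new inv_mpu6050_set_lpf_regs() helper, both the init path and the sample-rate path program the filter through one chip-aware entry point instead of writing st->reg->lpf directly. A hedged usage sketch, with the caching step taken from the hunks above:

/* Sketch: program a 20 Hz LPF on any supported chip; the helper also
 * writes the dedicated MPU6500 accelerometer register when the chip
 * type requires it. */
result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
if (result)
	return result;
st->chip_config.lpf = INV_MPU6050_FILTER_20HZ;	/* remember the setting */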
@ -448,12 +448,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
return ret;

rt = (struct rt6_info *)dst;
if (ipv6_addr_any(&fl6.saddr)) {
ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
&fl6.daddr, 0, &fl6.saddr);
if (ret)
goto put;

if (ipv6_addr_any(&src_in->sin6_addr)) {
src_in->sin6_family = AF_INET6;
src_in->sin6_addr = fl6.saddr;
}

@ -470,9 +465,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,

*pdst = dst;
return 0;
put:
dst_release(dst);
return ret;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
@ -56,6 +56,10 @@
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)

#define BNXT_RE_UD_QP_HW_STALL 0x400000

#define BNXT_RE_RQ_WQE_THRESHOLD 32

struct bnxt_re_work {
struct work_struct work;
unsigned long event;
@ -61,6 +61,48 @@
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

static int __from_ib_access_flags(int iflags)
{
int qflags = 0;

if (iflags & IB_ACCESS_LOCAL_WRITE)
qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
if (iflags & IB_ACCESS_REMOTE_READ)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
if (iflags & IB_ACCESS_REMOTE_WRITE)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
if (iflags & IB_ACCESS_REMOTE_ATOMIC)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
if (iflags & IB_ACCESS_MW_BIND)
qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
if (iflags & IB_ZERO_BASED)
qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
if (iflags & IB_ACCESS_ON_DEMAND)
qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
return qflags;
};

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
enum ib_access_flags iflags = 0;

if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
iflags |= IB_ACCESS_LOCAL_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
iflags |= IB_ACCESS_REMOTE_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
iflags |= IB_ACCESS_REMOTE_READ;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
iflags |= IB_ACCESS_REMOTE_ATOMIC;
if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
iflags |= IB_ACCESS_MW_BIND;
if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
iflags |= IB_ZERO_BASED;
if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
iflags |= IB_ACCESS_ON_DEMAND;
return iflags;
};

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
struct bnxt_qplib_sge *sg_list, int num)
{
@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_total_mcast_qp_attach = 0;
ib_attr->max_ah = dev_attr->max_ah;

ib_attr->max_fmr = dev_attr->max_fmr;
ib_attr->max_map_per_fmr = 1; /* ? */
ib_attr->max_fmr = 0;
ib_attr->max_map_per_fmr = 0;

ib_attr->max_srq = dev_attr->max_srq;
ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
wqe->bind.zero_based = false;
wqe->bind.parent_l_key = ib_mr->lkey;
wqe->bind.va = (u64)(unsigned long)fence->va;
wqe->bind.length = fence->size;
wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

/* Save the initial rkey in fence structure for now;
* wqe->bind.r_key will be set at (re)bind time.
*/
fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
qplib_qp);
struct ib_pd *ib_pd = qp->ib_qp.pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_fence_data *fence = &pd->fence;
struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
struct bnxt_qplib_swqe wqe;
int rc;

memcpy(&wqe, fence_wqe, sizeof(wqe));
wqe.bind.r_key = fence->bind_rkey;
fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

dev_dbg(rdev_to_dev(qp->rdev),
"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
wqe.bind.r_key, qp->qplib_qp.id, pd);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
if (rc) {
dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
return rc;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);

return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
struct bnxt_re_fence_data *fence = &pd->fence;
struct bnxt_re_dev *rdev = pd->rdev;
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;

if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
}
if (mr) {
if (mr->ib_mr.rkey)
bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
true);
if (mr->ib_mr.lkey)
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
kfree(mr);
fence->mr = NULL;
}
if (fence->dma_addr) {
dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
fence->dma_addr = 0;
}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
struct bnxt_re_fence_data *fence = &pd->fence;
struct bnxt_re_dev *rdev = pd->rdev;
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = NULL;
dma_addr_t dma_addr = 0;
struct ib_mw *mw;
u64 pbl_tbl;
int rc;

dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
rc = -EIO;
fence->dma_addr = 0;
goto fail;
}
fence->dma_addr = dma_addr;

/* Allocate a MR */
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
rc = -ENOMEM;
goto fail;
}
fence->mr = mr;
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
goto fail;
}

/* Register MR */
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
pbl_tbl = dma_addr;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
BNXT_RE_FENCE_PBL_SIZE, false);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
goto fail;
}
mr->ib_mr.rkey = mr->qplib_mr.rkey;

/* Create a fence MW only for kernel consumers */
mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
if (!mw) {
dev_err(rdev_to_dev(rdev),
"Failed to create fence-MW for PD: %p\n", pd);
rc = -EINVAL;
goto fail;
}
fence->mw = mw;

bnxt_re_create_fence_wqe(pd);
return 0;

fail:
bnxt_re_destroy_fence_mr(pd);
return rc;
}
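One detail worth calling out in the fence code above: every bind consumes a fresh rkey, produced with ib_inc_rkey(), so a peer holding a stale fence rkey cannot alias the next binding; that core helper only bumps the 8-bit key portion of the rkey and leaves the memory-window index bits intact. A hedged sketch of the rotation as done in bnxt_re_bind_fence_mw():

/* Sketch: rotate the rkey across re-binds of the fence MW.
 * ib_inc_rkey() is the real core helper; wqe/fence mirror the
 * function above. */
wqe.bind.r_key = fence->bind_rkey;		  /* key for this bind */
fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); /* key for the next */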
/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{

@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
struct bnxt_re_dev *rdev = pd->rdev;
int rc;

bnxt_re_destroy_fence_mr(pd);
if (ib_pd->uobject && pd->dpi.dbr) {
struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
struct bnxt_re_ucontext *ucntx;

@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
}
}

if (!udata)
if (bnxt_re_create_fence_mr(pd))
dev_warn(rdev_to_dev(rdev),
"Failed to create Fence-MR\n");
return &pd->ib_pd;
dbfail:
(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,

@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;

qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;

qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;

qp->qplib_qp.mtu = qp1_qp->mtu;

@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
IB_SIGNAL_ALL_WR) ? true : false);

entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);

qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);

qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_init_attr->cap.max_recv_wr;

qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;

@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

if (qp_init_attr->qp_type == IB_QPT_GSI) {
/* Allocate 1 more than what's provided */
entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
qp_init_attr->cap.max_send_wr;
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;

@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
}

} else {
/* Allocate 128 + 1 more than what's provided */
entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
BNXT_QPLIB_RESERVED_QP_WRS + 1);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes +
BNXT_QPLIB_RESERVED_QP_WRS + 1);
qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

/*
* Reserving one slot for Phantom WQE. Application can
* post one extra entry in this case. But allowing this to avoid
* unexpected Queue full condition
*/

qp->qplib_qp.sq.q_full_delta -= 1;

qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
if (udata) {

@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,

qp->ib_qp.qp_num = qp->qplib_qp.id;
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);

if (udata) {
struct bnxt_re_qp_resp resp;

@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
}

static int __from_ib_access_flags(int iflags)
{
int qflags = 0;

if (iflags & IB_ACCESS_LOCAL_WRITE)
qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
if (iflags & IB_ACCESS_REMOTE_READ)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
if (iflags & IB_ACCESS_REMOTE_WRITE)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
if (iflags & IB_ACCESS_REMOTE_ATOMIC)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
if (iflags & IB_ACCESS_MW_BIND)
qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
if (iflags & IB_ZERO_BASED)
qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
if (iflags & IB_ACCESS_ON_DEMAND)
qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
return qflags;
};

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
enum ib_access_flags iflags = 0;

if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
iflags |= IB_ACCESS_LOCAL_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
iflags |= IB_ACCESS_REMOTE_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
iflags |= IB_ACCESS_REMOTE_READ;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
iflags |= IB_ACCESS_REMOTE_ATOMIC;
if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
iflags |= IB_ACCESS_MW_BIND;
if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
iflags |= IB_ZERO_BASED;
if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
iflags |= IB_ACCESS_ON_DEMAND;
return iflags;
};

static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)

@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
qp_attr->cap.max_send_wr;
/*
* Reserving one slot for Phantom WQE. Some application can
* post one extra entry in this case. Allowing this to avoid
* unexpected Queue full condition
*/
qp->qplib_qp.sq.q_full_delta -= 1;
qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
if (qp->qplib_qp.rq.max_wqe) {
entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
} else {
/* SRQ was used prior, just ignore the RQ caps */

@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
return payload_sz;
}

static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
if ((qp->ib_qp.qp_type == IB_QPT_UD ||
qp->ib_qp.qp_type == IB_QPT_GSI ||
qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
int qp_attr_mask;
struct ib_qp_attr qp_attr;

qp_attr_mask = IB_QP_STATE;
qp_attr.qp_state = IB_QPS_RTS;
bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
qp->qplib_qp.wqe_cnt = 0;
}
}

static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
struct ib_send_wr *wr)

@ -1928,6 +2137,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}

@ -2024,6 +2234,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);

return rc;

@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
unsigned long flags;
u32 count = 0;

spin_lock_irqsave(&qp->rq_lock, flags);
while (wr) {
/* House keeping */
memset(&wqe, 0, sizeof(wqe));

@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
*bad_wr = wr;
break;
}

/* Ring DB if the RQEs posted reaches a threshold value */
if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
bnxt_qplib_post_recv_db(&qp->qplib_qp);
count = 0;
}

wr = wr->next;
}
bnxt_qplib_post_recv_db(&qp->qplib_qp);

if (count)
bnxt_qplib_post_recv_db(&qp->qplib_qp);

spin_unlock_irqrestore(&qp->rq_lock, flags);

return rc;
}
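bnxt_re_post_recv() above also switches to batched doorbells: the queue is rung once per BNXT_RE_RQ_WQE_THRESHOLD (32) receive WQEs and once more for any partial tail, which amortizes the MMIO write over large postings. A hedged, generic sketch of the shape:

/* Sketch of doorbell batching; post_one() and ring_db() are
 * hypothetical stand-ins for the per-WQE post and
 * bnxt_qplib_post_recv_db(). */
u32 count = 0;
while (wr) {
	post_one(wr);
	if (++count >= 32) {	/* full batch: flush and restart count */
		ring_db();
		count = 0;
	}
	wr = wr->next;
}
if (count)			/* flush the partial tail batch */
	ring_db();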
@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
unsigned long flags;
int rc = 0;

spin_lock_irqsave(&qp->sq_lock, flags);

rc = bnxt_re_bind_fence_mw(lib_qp);
if (!rc) {
lib_qp->sq.phantom_wqe_cnt++;
dev_dbg(&lib_qp->sq.hwq.pdev->dev,
"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
lib_qp->id, lib_qp->sq.hwq.prod,
HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
lib_qp->sq.phantom_wqe_cnt);
}

spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}

int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_qp *qp;
struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget;
struct bnxt_qplib_q *sq;
struct bnxt_qplib_qp *lib_qp;
u32 tbl_idx;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
unsigned long flags;

@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
}
cqe = &cq->cql[0];
while (budget) {
ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget);
lib_qp = NULL;
ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
if (lib_qp) {
sq = &lib_qp->sq;
if (sq->send_phantom) {
qp = container_of(lib_qp,
struct bnxt_re_qp, qplib_qp);
if (send_phantom_wqe(qp) == -ENOMEM)
dev_err(rdev_to_dev(cq->rdev),
"Phantom failed! Scheduled to send again\n");
else
sq->send_phantom = false;
}
}

if (!ncqe)
break;

@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
struct bnxt_re_dev *rdev = mr->rdev;
int rc;

rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
return rc;
}

if (mr->npages && mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl);

@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
mr->npages = 0;
mr->pages = NULL;
}
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);

if (!IS_ERR_OR_NULL(mr->ib_umem))
ib_umem_release(mr->ib_umem);

@ -2914,97 +3182,52 @@ fail:
return ERR_PTR(rc);
}

/* Fast Memory Regions */
struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr)
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_fmr *fmr;
struct bnxt_re_mw *mw;
int rc;

if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
return ERR_PTR(-ENOMEM);
}
fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
if (!fmr)
mw = kzalloc(sizeof(*mw), GFP_KERNEL);
if (!mw)
return ERR_PTR(-ENOMEM);
mw->rdev = rdev;
mw->qplib_mw.pd = &pd->qplib_pd;

fmr->rdev = rdev;
fmr->qplib_fmr.pd = &pd->qplib_pd;
fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
if (rc)
mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
if (rc) {
dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
goto fail;
}
mw->ib_mw.rkey = mw->qplib_mw.rkey;

fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
atomic_inc(&rdev->mw_count);
return &mw->ib_mw;

atomic_inc(&rdev->mr_count);
return &fmr->ib_fmr;
fail:
kfree(fmr);
kfree(mw);
return ERR_PTR(rc);
}

int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
u64 iova)
int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
ib_fmr);
struct bnxt_re_dev *rdev = fmr->rdev;
struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
struct bnxt_re_dev *rdev = mw->rdev;
int rc;

fmr->qplib_fmr.va = iova;
fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;

rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
list_len, true);
if (rc)
dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
fmr->ib_fmr.lkey);
return rc;
}

int bnxt_re_unmap_fmr(struct list_head *fmr_list)
{
struct bnxt_re_dev *rdev;
struct bnxt_re_fmr *fmr;
struct ib_fmr *ib_fmr;
int rc = 0;

/* Validate each FMRs inside the fmr_list */
list_for_each_entry(ib_fmr, fmr_list, list) {
fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
rdev = fmr->rdev;

if (rdev) {
rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
&fmr->qplib_fmr, true);
if (rc)
break;
}
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
if (rc) {
dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
return rc;
}
return rc;
}

int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
{
struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
ib_fmr);
struct bnxt_re_dev *rdev = fmr->rdev;
int rc;

rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
if (rc)
dev_err(rdev_to_dev(rdev), "Failed to free FMR");

kfree(fmr);
atomic_dec(&rdev->mr_count);
kfree(mw);
atomic_dec(&rdev->mw_count);
return rc;
}
@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
u32 refcnt;
};

#define BNXT_RE_FENCE_BYTES 64
struct bnxt_re_fence_data {
u32 size;
u8 va[BNXT_RE_FENCE_BYTES];
dma_addr_t dma_addr;
struct bnxt_re_mr *mr;
struct ib_mw *mw;
struct bnxt_qplib_swqe bind_wqe;
u32 bind_rkey;
};

struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
struct bnxt_qplib_dpi dpi;
struct bnxt_re_fence_data fence;
};

struct bnxt_re_ah {

@ -62,6 +74,7 @@ struct bnxt_re_qp {
struct bnxt_re_dev *rdev;
struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */
spinlock_t rq_lock; /* protect rq */
struct bnxt_qplib_qp qplib_qp;
struct ib_umem *sumem;
struct ib_umem *rumem;

@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
u64 iova);
int bnxt_re_unmap_fmr(struct list_head *fmr_list);
int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->dereg_mr = bnxt_re_dereg_mr;
ibdev->alloc_mr = bnxt_re_alloc_mr;
ibdev->map_mr_sg = bnxt_re_map_mr_sg;
ibdev->alloc_fmr = bnxt_re_alloc_fmr;
ibdev->map_phys_fmr = bnxt_re_map_phys_fmr;
ibdev->unmap_fmr = bnxt_re_unmap_fmr;
ibdev->dealloc_fmr = bnxt_re_dealloc_fmr;

ibdev->reg_user_mr = bnxt_re_reg_user_mr;
ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_qp1 req;
struct creq_create_qp1_resp *resp;
struct creq_create_qp1_resp resp;
struct bnxt_qplib_pbl *pbl;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;

@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)

req.pd_id = cpu_to_le32(qp->pd->id);

resp = (struct creq_create_qp1_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
rc = -EINVAL;
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc)
goto fail;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
rc = -ETIMEDOUT;
goto fail;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
rc = -EINVAL;
goto fail;
}
qp->id = le32_to_cpu(resp->xid);

qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false;
rq->flush_in_progress = false;
@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
struct cmdq_create_qp req;
struct creq_create_qp_resp *resp;
struct creq_create_qp_resp resp;
struct bnxt_qplib_pbl *pbl;
struct sq_psn_search **psn_search_ptr;
unsigned long int psn_search, poff = 0;

@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
req.pd_id = cpu_to_le32(qp->pd->id);

resp = (struct creq_create_qp_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
rc = -EINVAL;
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc)
goto fail;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
rc = -ETIMEDOUT;
goto fail;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
rc = -EINVAL;
goto fail;
}
qp->id = le32_to_cpu(resp->xid);

qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false;
rq->flush_in_progress = false;
@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_modify_qp req;
struct creq_modify_qp_resp *resp;
struct creq_modify_qp_resp resp;
u16 cmd_flags = 0, pkey;
u32 temp32[4];
u32 bmask;
int rc;

RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)

req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

resp = (struct creq_modify_qp_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc)
return rc;
qp->cur_qp_state = qp->state;
return 0;
}
@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_query_qp req;
struct creq_query_qp_resp *resp;
struct creq_query_qp_resp resp;
struct bnxt_qplib_rcfw_sbuf *sbuf;
struct creq_query_qp_resp_sb *sb;
u16 cmd_flags = 0;
u32 temp32[4];
int i;
int i, rc = 0;

RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf)
return -ENOMEM;
sb = sbuf->sb;

req.qp_cid = cpu_to_le32(qp->id);
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
resp = (struct creq_query_qp_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void **)&sb, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
(void *)sbuf, 0);
if (rc)
goto bail;
/* Extract the context from the side buffer */
qp->state = sb->en_sqd_async_notify_state &
CREQ_QUERY_QP_RESP_SB_STATE_MASK;

@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
return 0;
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_qp req;
struct creq_destroy_qp_resp *resp;
struct creq_destroy_qp_resp resp;
unsigned long flags;
u16 cmd_flags = 0;
int rc;

RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

req.qp_cid = cpu_to_le32(qp->id);
resp = (struct creq_destroy_qp_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc)
return rc;

/* Must walk the associated CQs to nullify the QP ptr */
spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}

if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
HWQ_CMP(sq->hwq.cons, &sq->hwq)) {
if (bnxt_qplib_queue_full(sq)) {
dev_err(&sq->hwq.pdev->dev,
"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
sq->q_full_delta);
rc = -ENOMEM;
goto done;
}

@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}

sq->hwq.prod++;

qp->wqe_cnt++;

done:
return rc;
}

@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
rc = -EINVAL;
@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_cq req;
struct creq_create_cq_resp *resp;
struct creq_create_cq_resp resp;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
int rc;

@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT);

resp = (struct creq_create_cq_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
rc = -ETIMEDOUT;
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc)
goto fail;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
rc = -EINVAL;
goto fail;
}
cq->id = le32_to_cpu(resp->xid);

cq->id = le32_to_cpu(resp.xid);
cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq);

@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_cq req;
struct creq_destroy_cq_resp *resp;
struct creq_destroy_cq_resp resp;
u16 cmd_flags = 0;
int rc;

RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

req.cq_cid = cpu_to_le32(cq->id);
resp = (struct creq_destroy_cq_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc)
return rc;
bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
return 0;
}
@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
return rc;
}

/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
* CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
*/
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_swq *swq;
u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
struct cq_req *peek_req_hwcqe;
struct bnxt_qplib_qp *peek_qp;
struct bnxt_qplib_q *peek_sq;
int i, rc = 0;

/* Normal mode */
/* Check for the psn_search marking before completing */
swq = &sq->swq[sw_sq_cons];
if (swq->psn_search &&
le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
/* Unmark */
swq->psn_search->flags_next_psn = cpu_to_le32
(le32_to_cpu(swq->psn_search->flags_next_psn)
& ~0x80000000);
dev_dbg(&cq->hwq.pdev->dev,
"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
sq->condition = true;
sq->send_phantom = true;

/* TODO: Only ARM if the previous SQE is ARMALL */
bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);

rc = -EAGAIN;
goto out;
}
if (sq->condition) {
/* Peek at the completions */
peek_raw_cq_cons = cq->hwq.cons;
peek_sw_cq_cons = cq_cons;
i = cq->hwq.max_elements;
while (i--) {
peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
[CQE_IDX(peek_sw_cq_cons)];
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
/* If the next hwcqe is a REQ */
if ((peek_hwcqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK) ==
CQ_BASE_CQE_TYPE_REQ) {
peek_req_hwcqe = (struct cq_req *)
peek_hwcqe;
peek_qp = (struct bnxt_qplib_qp *)
((unsigned long)
le64_to_cpu
(peek_req_hwcqe->qp_handle));
peek_sq = &peek_qp->sq;
peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
peek_req_hwcqe->sq_cons_idx) - 1
, &sq->hwq);
/* If the hwcqe's sq's wr_id matches */
if (peek_sq == sq &&
sq->swq[peek_sq_cons_idx].wr_id ==
BNXT_QPLIB_FENCE_WRID) {
/*
* Unbreak only if the phantom
* comes back
*/
dev_dbg(&cq->hwq.pdev->dev,
"FP:Got Phantom CQE");
sq->condition = false;
sq->single = true;
rc = 0;
goto out;
}
}
/* Valid but not the phantom, so keep looping */
} else {
/* Not valid yet, just exit and wait */
rc = -EINVAL;
goto out;
}
peek_sw_cq_cons++;
peek_raw_cq_cons++;
}
dev_err(&cq->hwq.pdev->dev,
"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
rc = -EINVAL;
}
out:
return rc;
}

static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
struct bnxt_qplib_cqe **pcqe, int *budget)
struct bnxt_qplib_cqe **pcqe, int *budget,
u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
struct bnxt_qplib_cqe *cqe;
u32 sw_cons, cqe_cons;
u32 sw_sq_cons, cqe_sq_cons;
struct bnxt_qplib_swq *swq;
int rc = 0;

qp = (struct bnxt_qplib_qp *)((unsigned long)

@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;

cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
if (cqe_cons > sq->hwq.max_elements) {
cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
if (cqe_sq_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process req reported ");
dev_err(&cq->hwq.pdev->dev,
"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
cqe_cons, sq->hwq.max_elements);
cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
/* If we were in the middle of flushing the SQ, continue */

@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,

/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_cons
* to the cqe_sq_cons
*/
cqe = *pcqe;
while (*budget) {
sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
if (sw_cons == cqe_cons)
sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
if (sw_sq_cons == cqe_sq_cons)
/* Done */
break;

swq = &sq->swq[sw_sq_cons];
memset(cqe, 0, sizeof(*cqe));
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
cqe->qp_handle = (u64)(unsigned long)qp;
cqe->src_qp = qp->id;
cqe->wr_id = sq->swq[sw_cons].wr_id;
cqe->type = sq->swq[sw_cons].type;
cqe->wr_id = swq->wr_id;
if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
goto skip;
cqe->type = swq->type;

/* For the last CQE, check for status. For errors, regardless
* of the request being signaled or not, it must complete with
* the hwcqe error status
*/
if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons &&
if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
hwcqe->status != CQ_REQ_STATUS_OK) {
cqe->status = hwcqe->status;
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Processed Req ");
dev_err(&cq->hwq.pdev->dev,
"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
sw_cons, cqe->wr_id, cqe->status);
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
sq->flush_in_progress = true;
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
sq->condition = false;
sq->single = false;
} else {
if (sq->swq[sw_cons].flags &
SQ_SEND_FLAGS_SIGNAL_COMP) {
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
/* Before we complete, do WA 9060 */
if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
cqe_sq_cons)) {
*lib_qp = qp;
goto out;
}
cqe->status = CQ_REQ_STATUS_OK;
cqe++;
(*budget)--;
}
}
skip:
sq->hwq.cons++;
if (sq->single)
break;
}
out:
*pcqe = cqe;
if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) {
if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
/* Out of budget */
rc = -EAGAIN;
goto done;
}
/*
* Back to normal completion mode only after it has completed all of
* the WC for this CQE
*/
sq->single = false;
if (!sq->flush_in_progress)
goto done;
flush:

@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
}

int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes)
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
struct cq_base *hw_cqe, **hw_cqe_ptr;
unsigned long flags;

@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
case CQ_BASE_CQE_TYPE_REQ:
rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe,
&cqe, &budget);
&cqe, &budget,
sw_cons, lib_qp);
break;
case CQ_BASE_CQE_TYPE_RES_RC:
rc = bnxt_qplib_cq_process_res_rc(cq,
@ -88,6 +88,7 @@ struct bnxt_qplib_swq {

struct bnxt_qplib_swqe {
/* General */
#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
u64 wr_id;
u8 reqs_type;
u8 type;

@ -216,9 +217,16 @@ struct bnxt_qplib_q {
struct scatterlist *sglist;
u32 nmap;
u32 max_wqe;
u16 q_full_delta;
u16 max_sge;
u32 psn;
bool flush_in_progress;
bool condition;
bool single;
bool send_phantom;
u32 phantom_wqe_cnt;
u32 phantom_cqe_cnt;
u32 next_cq_cons;
};

struct bnxt_qplib_qp {

@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
u64 wqe_cnt;
u32 min_rnr_timer;
u32 max_rd_atomic;
u32 max_dest_rd_atomic;

@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
!((raw_cons) & (cp_bit)))

static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
{
return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
&qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
&qplib_q->hwq);
}
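The new bnxt_qplib_queue_full() treats the ring as full while prod is still q_full_delta slots short of catching cons, which is exactly the headroom reserved for the phantom WQE above. A hedged, self-contained sketch of the arithmetic:

/* Standalone sketch of the same full check (names hypothetical).
 * With depth = 8, cons = 0 and delta = 2, full is reported at
 * prod = 6: (6 + 2) % 8 == 0 == cons, i.e. two slots stay reserved. */
static bool queue_full(unsigned int prod, unsigned int cons,
		       unsigned int depth, unsigned int delta)
{
	return ((prod + delta) % depth) == (cons % depth);
}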
struct bnxt_qplib_cqe {
u8 status;
u8 type;

@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num);
int num, struct bnxt_qplib_qp **qp);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
@ -39,72 +39,55 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cookie &= RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		dev_warn(&rcfw->pdev->dev,
			 "QPLIB: CMD bit %d for cookie 0x%x is not set?",
			 cbit, cookie);

	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	if (!rc) {
		dev_warn(&rcfw->pdev->dev,
			 "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
			 RCFW_CMD_WAIT_TIME_MS, cookie);
	}

	return rc;
	return rc ? 0 : -ETIMEDOUT;
};

int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = -1;
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cookie &= RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1m sec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count;
	return count ? 0 : -ETIMEDOUT;
};

void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				   struct cmdq_base *req, void **crsbe,
				   u8 is_block)
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
	struct bnxt_qplib_crsqe *crsqe = NULL;
	struct bnxt_qplib_crsbe **crsb_ptr;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	u8 retry_cnt = 0xFF;
	dma_addr_t dma_addr;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	int pg, idx;
	u8 *preq;

retry:
	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@ -112,63 +95,50 @@ retry:
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return NULL;
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return NULL;
		return -EINVAL;
	}

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size > cmdq->max_elements -
	    ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
	     (cmdq->max_elements - 1))) {
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);

		if (!retry_cnt--)
			return NULL;
		goto retry;
		return -EAGAIN;
	}

	retry_cnt = 0xFF;

	cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW MAX outstanding cmd reached!");
		atomic_dec(&rcfw->seq_num);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);

		if (!retry_cnt--)
			return NULL;
		goto retry;
		return -EBUSY;
	}
	/* Reserve a resp buffer slot if requested */
	if (req->resp_size && crsbe) {
		spin_lock(&crsb->lock);
		sw_prod = HWQ_CMP(crsb->prod, crsb);
		crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
		*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
					  [get_crsb_idx(sw_prod)];
		bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
		req->resp_addr = cpu_to_le64(dma_addr);
		crsb->prod++;
		spin_unlock(&crsb->lock);
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
				  BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@ -190,23 +160,24 @@ retry:
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
		/* The very first doorbell write is required to set this flag
		 * which prompts the FW to reset its internal pointers
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= FIRMWARE_FIRST_FLAG;
		rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
	}
	sw_prod = HWQ_CMP(crsq->prod, crsq);
	crsqe = &crsq->crsq[sw_prod];
	memset(crsqe, 0, sizeof(*crsqe));
	crsq->prod++;
	crsqe->req_size = req->cmd_size;

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@ -214,9 +185,56 @@ retry:
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* Return the CREQ response pointer */
	return crsqe ? &crsqe->qp_event : NULL;
	return 0;
}

int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}
/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	u16 cbit, cookie, blocked = 0;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	u32 sw_cons;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
	default:
		/* Command Response */
		spin_lock_irqsave(&cmdq->lock, flags);
		sw_cons = HWQ_CMP(crsq->cons, crsq);
		crsqe = &crsq->crsq[sw_cons];
		crsq->cons++;
		memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));

		cookie = le16_to_cpu(crsqe->qp_event.cookie);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);

		cmdq->cons += crsqe->req_size;
		spin_unlock_irqrestore(&cmdq->lock, flags);
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		break;
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}
@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until empty */
	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (1) {
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			if (!bnxt_qplib_process_qp_event
			    (rcfw, (struct creq_qp_event *)creqe))
				rcfw->creq_qp_event_processed++;
			else {
				dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB: type = 0x%x not handled",
					 type);
			}
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct creq_deinitialize_fw_resp *resp;
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	resp = (struct creq_deinitialize_fw_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp)
		return -EINVAL;

	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
		return -ETIMEDOUT;

	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
		return -EFAULT;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct creq_initialize_fw_resp *resp;
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);

@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	resp = (struct creq_initialize_fw_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW: INITIALIZE_FW send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW: INITIALIZE_FW timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW: INITIALIZE_FW failed");
		return -EINVAL;
	}
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
	kfree(rcfw->crsq.crsq);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);

	rcfw->pdev = NULL;
}

@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
		goto fail;
	}

	rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
	rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
				  sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
	if (!rcfw->crsq.crsq)
	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
				      &rcfw->crsb.max_elements,
				      BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CRSB allocation failed");
		goto fail;
	}
	return 0;

fail:
@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
	int rc;

	/* General */
	atomic_set(&rcfw->seq_num, 0);
	rcfw->seq_num = 0;
	rcfw->flags = FIRMWARE_FIRST_FLAG;
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
				  sizeof(unsigned long));
@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CRSQ */
	rcfw->crsq.prod = 0;
	rcfw->crsq.cons = 0;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}

struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
				       &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}

@ -73,6 +73,7 @@
#define RCFW_MAX_OUTSTANDING_CMD	BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE		0x7FFF
#define RCFW_CMD_IS_BLOCKING		0x8000
#define RCFW_BLOCKED_CMD_WAIT_COUNT	0x4E20

/* Cmdq contains a fix number of a 16-Byte slots */
struct bnxt_qplib_cmdqe {
@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
	u8			data[1024];
};

/* CRSQ SB */
#define BNXT_QPLIB_CRSBE_MAX_CNT	4
#define BNXT_QPLIB_CRSBE_UNITS		sizeof(struct bnxt_qplib_crsbe)
#define BNXT_QPLIB_CRSBE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)

#define MAX_CRSB_IDX			(BNXT_QPLIB_CRSBE_MAX_CNT - 1)
#define MAX_CRSB_IDX_PER_PG		(BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)

static inline u32 get_crsb_pg(u32 val)
{
	return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
}

static inline u32 get_crsb_idx(u32 val)
{
	return val & MAX_CRSB_IDX_PER_PG;
}

static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
					    u32 prod, dma_addr_t *dma_addr)
{
	*dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
	*dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
		     BNXT_QPLIB_CRSBE_UNITS;
}

/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT	(64 * 1024)
@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
#define CREQ_DB(db, raw_cons, cp_bit)				\
	writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)

#define CREQ_ENTRY_POLL_BUDGET		0x100

/* HWQ */
struct bnxt_qplib_crsqe {
	struct creq_qp_event	qp_event;

struct bnxt_qplib_crsq {
	struct creq_qp_event	*resp;
	u32			req_size;
};

struct bnxt_qplib_crsq {
	struct bnxt_qplib_crsqe	*crsq;
	u32			prod;
	u32			cons;
	u32			max_elements;
struct bnxt_qplib_rcfw_sbuf {
	void *sb;
	dma_addr_t dma_addr;
	u32 size;
};

/* RCFW Communication Channels */
@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
	wait_queue_head_t	waitq;
	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
					       struct creq_func_event *);
	atomic_t		seq_num;
	u32			seq_num;

	/* Bar region info */
	void __iomem		*cmdq_bar_reg_iomem;
@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {

	/* Actual Cmd and Resp Queues */
	struct bnxt_qplib_hwq	cmdq;
	struct bnxt_qplib_crsq	crsq;
	struct bnxt_qplib_hwq	crsb;
	struct bnxt_qplib_crsq	*crsqe_tbl;
};

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   (struct bnxt_qplib_rcfw *,
				    struct creq_func_event *));

int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				   struct cmdq_base *req, void **crsbe,
				   u8 is_block);
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size);
void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf);
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req, struct creq_base *resp,
				 void *sbuf, u8 is_block);

int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,

@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;

#define HWQ_CMP(idx, hwq)	((idx) & ((hwq)->max_elements - 1))

#define HWQ_FREE_SLOTS(hwq)	(hwq->max_elements - \
				((HWQ_CMP(hwq->prod, hwq)\
				- HWQ_CMP(hwq->cons, hwq))\
				& (hwq->max_elements - 1)))
enum bnxt_qplib_hwq_type {
	HWQ_TYPE_CTX,
	HWQ_TYPE_QUEUE,

@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
			    struct bnxt_qplib_dev_attr *attr)
{
	struct cmdq_query_func req;
	struct creq_query_func_resp *resp;
	struct creq_query_func_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_func_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp;
	u8 *tqm_alloc;
	int i;
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	resp = (struct creq_query_func_resp *)
		bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
					     0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
			"QPLIB: SP: QUERY_FUNC alloc side buffer failed");
		return -ENOMEM;
	}

	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;

	/* Extract the context from the side buffer */
	attr->max_qp = le32_to_cpu(sb->max_qp);
	attr->max_qp_rd_atom =
@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
	/*
	 * 128 WQEs needs to be reserved for the HW (8916). Prevent
	 * reporting the max number
	 */
	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
	attr->max_qp_sges = sb->max_sge;
	attr->max_cq = le32_to_cpu(sb->max_cq);
	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}
	return 0;

bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

/* SGID */
@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
	/* Remove GID from the SGID table */
	if (update) {
		struct cmdq_delete_gid req;
		struct creq_delete_gid_resp *resp;
		struct creq_delete_gid_resp resp;
		u16 cmd_flags = 0;
		int rc;

		RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
		if (sgid_tbl->hw_id[index] == 0xFFFF) {
@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			return -EINVAL;
		}
		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
		resp = (struct creq_delete_gid_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL,
						     0);
		if (!resp) {
			dev_err(&res->pdev->dev,
				"QPLIB: SP: DELETE_GID send failed");
			return -EINVAL;
		}
		if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
						   le16_to_cpu(req.cookie))) {
			/* Cmd timed out */
			dev_err(&res->pdev->dev,
				"QPLIB: SP: DELETE_GID timed out");
			return -ETIMEDOUT;
		}
		if (resp->status ||
		    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
			dev_err(&res->pdev->dev,
				"QPLIB: SP: DELETE_GID failed ");
			dev_err(&res->pdev->dev,
				"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
				resp->status, le16_to_cpu(req.cookie),
				le16_to_cpu(resp->cookie));
			return -EINVAL;
		}
		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						  (void *)&resp, NULL, 0);
		if (rc)
			return rc;
	}
	memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
	       sizeof(bnxt_qplib_gid_zero));
@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int i, free_idx, rc = 0;
	int i, free_idx;

	if (!sgid_tbl) {
		dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
	}
	if (update) {
		struct cmdq_add_gid req;
		struct creq_add_gid_resp *resp;
		struct creq_add_gid_resp resp;
		u16 cmd_flags = 0;
		u32 temp32[4];
		u16 temp16[3];
		int rc;

		RCFW_CMD_PREP(req, ADD_GID, cmd_flags);

@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
		req.src_mac[1] = cpu_to_be16(temp16[1]);
		req.src_mac[2] = cpu_to_be16(temp16[2]);

		resp = (struct creq_add_gid_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
		if (!resp) {
			dev_err(&res->pdev->dev,
				"QPLIB: SP: ADD_GID send failed");
			return -EINVAL;
		}
		if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
						   le16_to_cpu(req.cookie))) {
			/* Cmd timed out */
			dev_err(&res->pdev->dev,
				"QPIB: SP: ADD_GID timed out");
			return -ETIMEDOUT;
		}
		if (resp->status ||
		    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
			dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
			dev_err(&res->pdev->dev,
				"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
				resp->status, le16_to_cpu(req.cookie),
				le16_to_cpu(resp->cookie));
			return -EINVAL;
		}
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						  (void *)&resp, NULL, 0);
		if (rc)
			return rc;
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
	}
	/* Add GID to the sgid_tbl */
	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,

	*index = free_idx;
	/* unlock */
	return rc;
	return 0;
}

/* pkeys */
@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_ah req;
	struct creq_create_ah_resp *resp;
	struct creq_create_ah_resp resp;
	u16 cmd_flags = 0;
	u32 temp32[4];
	u16 temp16[3];
	int rc;

	RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);

@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
	req.dest_mac[1] = cpu_to_le16(temp16[1]);
	req.dest_mac[2] = cpu_to_le16(temp16[2]);

	resp = (struct creq_create_ah_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 1);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	ah->id = le32_to_cpu(resp->xid);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 1);
	if (rc)
		return rc;

	ah->id = le32_to_cpu(resp.xid);
	return 0;
}

@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_ah req;
	struct creq_destroy_ah_resp *resp;
	struct creq_destroy_ah_resp resp;
	u16 cmd_flags = 0;
	int rc;

	/* Clean up the AH table in the device */
	RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);

	req.ah_cid = cpu_to_le32(ah->id);

	resp = (struct creq_destroy_ah_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 1);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 1);
	if (rc)
		return rc;
	return 0;
}

@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deallocate_key req;
	struct creq_deallocate_key_resp *resp;
	struct creq_deallocate_key_resp resp;
	u16 cmd_flags = 0;
	int rc;

	if (mrw->lkey == 0xFFFFFFFF) {
		dev_info(&res->pdev->dev,
@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
	else
		req.key = cpu_to_le32(mrw->lkey);

	resp = (struct creq_deallocate_key_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
		dev_err(&res->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	/* Free the qplib's MRW memory */
	if (mrw->hwq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_allocate_mrw req;
	struct creq_allocate_mrw_resp *resp;
	struct creq_allocate_mrw_resp resp;
	u16 cmd_flags = 0;
	unsigned long tmp;
	int rc;

	RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);

@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
	tmp = (unsigned long)mrw;
	req.mrw_handle = cpu_to_le64(tmp);

	resp = (struct creq_allocate_mrw_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		mrw->rkey = le32_to_cpu(resp->xid);
		mrw->rkey = le32_to_cpu(resp.xid);
	else
		mrw->lkey = le32_to_cpu(resp->xid);
		mrw->lkey = le32_to_cpu(resp.xid);
	return 0;
}

@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deregister_mr req;
	struct creq_deregister_mr_resp *resp;
	struct creq_deregister_mr_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);

	req.lkey = cpu_to_le32(mrw->lkey);
	resp = (struct creq_deregister_mr_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, block);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
		return -EINVAL;
	}
	if (block)
		rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
						    le16_to_cpu(req.cookie));
	else
		rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
						   le16_to_cpu(req.cookie));
	if (!rc) {
		/* Cmd timed out */
		dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, block);
	if (rc)
		return rc;

	/* Free the qplib's MR memory */
	if (mrw->hwq.max_elements) {
@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_register_mr req;
	struct creq_register_mr_resp *resp;
	struct creq_register_mr_resp resp;
	u16 cmd_flags = 0, level;
	int pg_ptrs, pages, i, rc;
	dma_addr_t **pbl_ptr;
@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
	req.key = cpu_to_le32(mr->lkey);
	req.mr_size = cpu_to_le64(mr->total_size);

	resp = (struct creq_register_mr_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, block);
	if (!resp) {
		dev_err(&res->pdev->dev, "SP: REG_MR send failed");
		rc = -EINVAL;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, block);
	if (rc)
		goto fail;
	}
	if (block)
		rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
						    le16_to_cpu(req.cookie));
	else
		rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
						   le16_to_cpu(req.cookie));
	if (!rc) {
		/* Cmd timed out */
		dev_err(&res->pdev->dev, "SP: REG_MR timed out");
		rc = -ETIMEDOUT;
		goto fail;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
		dev_err(&res->pdev->dev,
			"QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		rc = -EINVAL;
		goto fail;
	}

	return 0;

fail:
@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_map_tc_to_cos req;
	struct creq_map_tc_to_cos_resp *resp;
	struct creq_map_tc_to_cos_resp resp;
	u16 cmd_flags = 0;
	int tleft;
	int rc = 0;

	RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
	req.cos0 = cpu_to_le16(cids[0]);
	req.cos1 = cpu_to_le16(cids[1]);

	resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
	if (!resp) {
		dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
		return -EINVAL;
	}

	tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
	if (!tleft) {
		dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
		return -ETIMEDOUT;
	}

	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
		dev_err(&res->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	return 0;
}

@ -40,6 +40,8 @@
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__

#define BNXT_QPLIB_RESERVED_QP_WRS	128

struct bnxt_qplib_dev_attr {
	char				fw_ver[32];
	u16				max_sgid;

@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->qpids) {
	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page;
		goto err_free_status_page_and_wr_log;
	}

	rdev->status_page->db_off = 0;

	return 0;
err_free_status_page:
err_free_status_page_and_wr_log:
	if (c4iw_wr_log && rdev->wr_log)
		kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	destroy_workqueue(rdev->free_workq);
	kfree(rdev->wr_log);
	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_ocqp_pool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

@ -3691,8 +3691,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str	= get_dev_fw_str;
	dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
	dev->ib_dev.free_rdma_netdev	= mlx5_ib_free_rdma_netdev;
	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
		dev->ib_dev.free_rdma_netdev	= mlx5_ib_free_rdma_netdev;
	}
	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;

@ -58,7 +58,10 @@
#define QEDR_MSG_QP   "  QP"
#define QEDR_MSG_GSI  " GSI"

#define QEDR_CQ_MAGIC_NUMBER   (0x11223344)
#define QEDR_CQ_MAGIC_NUMBER	(0x11223344)

#define FW_PAGE_SIZE	(RDMA_RING_PAGE_SIZE)
#define FW_PAGE_SHIFT	(12)

struct qedr_dev;

@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,

static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info)
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	u32 fw_pg_cnt, fw_pg_per_umem_pg;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	u64 pg_addr;
	int entry;
	u32 addr;

	if (!pbl_info->num_pbes)
		return;
@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,

	shift = umem->page_shift;

	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		pg_addr = sg_dma_address(sg);
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
					      (pg_cnt << shift));
			addr = upper_32_bits(sg_dma_address(sg) +
					     (pg_cnt << shift));
			pbe->hi = cpu_to_le32(addr);
			pbe_cnt++;
			total_num_pbes++;
			pbe++;
			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
				pbe->lo = cpu_to_le32(pg_addr);
				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

			if (total_num_pbes == pbl_info->num_pbes)
				return;
				pg_addr += BIT(pg_shift);
				pbe_cnt++;
				total_num_pbes++;
				pbe++;

			/* If the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct regpair *)pbl_tbl->va;
				pbe_cnt = 0;
				if (total_num_pbes == pbl_info->num_pbes)
					return;

				/* If the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
				    (pbl_info->pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct regpair *)pbl_tbl->va;
					pbe_cnt = 0;
				}

				fw_pg_cnt++;
			}
		}
	}
@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync)
{
	int page_cnt;
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
		return PTR_ERR(q->umem);
	}

	page_cnt = ib_umem_page_count(q->umem);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
	fw_pages = ib_umem_page_count(q->umem) <<
	    (q->umem->page_shift - FW_PAGE_SHIFT);

	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
		goto err0;
	}

	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
			   FW_PAGE_SHIFT);

	return 0;

@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
		goto err1;

	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
			   &mr->info.pbl_info);
			   &mr->info.pbl_info, mr->umem->page_shift);

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
		case IB_WC_REG_MR:
			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
			break;
		case IB_WC_RDMA_READ:
		case IB_WC_SEND:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		default:
			break;
		}

@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,

	sge = ibwr->sg_list;
	for (i = 0; i < num_sge; i++, sge++) {
		if (qp->is_user && copy_from_user(p, (__user void *)
				    (uintptr_t)sge->addr, sge->length))
			return -EFAULT;

		else if (!qp->is_user)
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);
		memcpy(p, (void *)(uintptr_t)sge->addr,
		       sge->length);

		p += sge->length;
	}

@ -863,7 +863,6 @@ dev_stop:
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	napi_enable(&priv->napi);
	ipoib_ib_dev_stop(dev);
	return -1;
}

@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev)

	ipoib_transport_dev_cleanup(dev);

	netif_napi_del(&priv->napi);

	ipoib_cm_dev_cleanup(dev);

	kfree(priv->rx_ring);
@ -1649,6 +1651,7 @@ out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	netif_napi_del(&priv->napi);
	return -ENOMEM;
}

@ -2237,6 +2240,7 @@ event_failed:

device_init_failed:
	free_netdev(priv->dev);
	kfree(priv);

alloc_mem_failed:
	return ERR_PTR(result);
@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)

static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
	struct list_head *dev_list = client_data;

	if (!dev_list)
@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
		flush_workqueue(priv->wq);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
		if (device->free_rdma_netdev)
			device->free_rdma_netdev(priv->dev);
		else
			free_netdev(priv->dev);

		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
			kfree(cpriv);

		kfree(priv);
	}

}
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user