xhci: Platform updates, 64-bit DMA, and trace events for 3.12.

Merge tag 'for-usb-next-2013-08-15' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next

Sarah writes:

Hi Greg,

This pull request includes one new feature for the xhci-plat driver (device tree support). Felipe was fine with the patch last I checked, but hadn't provided an official Acked-by line.

This pull request also includes 13 patches from my FOSS Outreach Program for Women (OPW) intern, Xenia. She fixed a bug in the xHCI driver so that the driver can allocate 64-bit consistent DMA, converted the driver to use dynamic debugging, and added a number of new trace events for the xHCI driver. The Python plugin for trace-cmd should be up on GitHub shortly, although the trace events are usable without it.

I'm very happy with the progress that Xenia has made, and I look forward to her future contributions to the Linux kernel.

Sarah Sharp

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABAgAGBQJSDPhXAAoJEBMGWMLi1Gc5gyIP/iH1gkiwWT4LapgmwKyG3kH5
YS+NKBC/E1a2kXlvP6yb5SzZd9eYcQ0oatGB1Tg/ek+HctDEmDr2XBBhRuR2XszW
6mkGbP4NjnRrW8s+5IgF3bbk5GGHY50ogegHIYiTHCo+lHvX+zCFSv/fNtHli3FU
HhyR/cxOJUouFmPQHgnjc3yFGthYO9sfq0YTDKQ00dYna2q44zm+TTwJSvg8XjW5
oTFyJeER0yQQ4b9gKUz4JJDmYUmr2ZiAmXIZD9C8fouD5PGdFjBquH9lCY5rRjPi
UZB/HKtSTntT3K1yWCdiT/wqAGcY/MF9FVg44LtQom7c7ozwPUnQV1Y/7kPU2KrM
etKBqR5z1x4Sob8c1YnSiARGUAzwKxxXjlcZA3WWJJahI/tR70PSslesoxi+phQs
mPTYOX+f5UIF3ieiREJWf6tvL5hWVYVv4y9WqzF6gUydVjpxHgRZ0oikiRFUPBtz
DEjhlsNjDE56M88aVVJCUiPDQFZpJZccykqZr6uNt/T9KQyS0NvH5eKY/U3AUJXC
N/8EB/eFTbMIgDWBQRusV2+08oMVZrFXXGvDin/qmWP8eRsaJHCILymWrwq1dkTW
Cd7F8CgT1uDbu5Kyxq/p4lVWwgpUTkN99aW2cqgfY7IhuWtatn1SdL7SD+0OBHne
jFDdl5LQuKE4otg4+KKv
=n45I
-----END PGP SIGNATURE-----
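For readers skimming the diff below: the core of the trace-event work is one tracepoint class that records a formatted message, plus a small helper that sends the same string both to the dynamic-debug log and to a caller-chosen tracepoint. The sketch below condenses the new xhci-trace.h and xhci-dbg.c hunks from this pull request into one place; it is illustrative only, and the full definitions (including the extra event classes for contexts and command completions) are in the diff itself.

/*
 * Condensed sketch of the mechanism added by this series (see the
 * xhci-trace.h and xhci-dbg.c hunks below); not a complete file.
 */
#include <linux/tracepoint.h>
#include "xhci.h"

#define XHCI_MSG_MAX	500

DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
	TP_fast_assign(
		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);

/* One DEFINE_EVENT per debug category, e.g. trace_xhci_dbg_init(). */
DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

/*
 * Helper added to xhci-dbg.c: emit the message via xhci_dbg() (dynamic
 * debug) and via whichever tracepoint the call site passes in.
 */
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);
	trace(&vaf);
	va_end(args);
}

/* Typical converted call site:
 *	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
 */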
commit 224563b6ce

Documentation/devicetree/bindings/usb/usb-xhci.txt (new file, 14 lines)
@@ -0,0 +1,14 @@
+USB xHCI controllers
+
+Required properties:
+  - compatible: should be "xhci-platform".
+  - reg: should contain address and length of the standard XHCI
+    register set for the device.
+  - interrupts: one XHCI interrupt should be described here.
+
+Example:
+	usb@f0931000 {
+		compatible = "xhci-platform";
+		reg = <0xf0931000 0x8c8>;
+		interrupts = <0x0 0x4e 0x0>;
+	};
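The "xhci-platform" compatible string documented above is consumed by the platform driver later in this series; the xhci-plat.c hunk near the end of this diff adds the corresponding OF match table. A condensed view for reference (illustrative, not a standalone file):

#include <linux/module.h>
#include <linux/of.h>

/* Driver-side consumer of the "xhci-platform" compatible string
 * (mirrors the xhci-plat.c hunk later in this diff). */
#ifdef CONFIG_OF
static const struct of_device_id usb_xhci_of_match[] = {
	{ .compatible = "xhci-platform" },
	{ },
};
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif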
drivers/usb/host/Kconfig
@@ -29,15 +29,6 @@ if USB_XHCI_HCD
 config USB_XHCI_PLATFORM
 	tristate
 
-config USB_XHCI_HCD_DEBUGGING
-	bool "Debugging for the xHCI host controller"
-	---help---
-	  Say 'Y' to turn on debugging for the xHCI host controller driver.
-	  This will spew debugging output, even in interrupt context.
-	  This should only be used for debugging xHCI driver bugs.
-
-	  If unsure, say N.
-
 endif # USB_XHCI_HCD
 
 config USB_EHCI_HCD
drivers/usb/host/Makefile
@@ -4,6 +4,9 @@
 
 ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
 
+# tell define_trace.h where to find the xhci trace header
+CFLAGS_xhci-trace.o := -I$(src)
+
 isp1760-y := isp1760-hcd.o isp1760-if.o
 
 fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o
@@ -13,6 +16,7 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
 
 xhci-hcd-y := xhci.o xhci-mem.o
 xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-y += xhci-trace.o
 xhci-hcd-$(CONFIG_PCI) += xhci-pci.o
 
 ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
drivers/usb/host/xhci-dbg.c
@@ -580,3 +580,17 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
 	xhci_dbg_slot_ctx(xhci, ctx);
 	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
+
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+			const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	xhci_dbg(xhci, "%pV\n", &vaf);
+	trace(&vaf);
+	va_end(args);
+}
drivers/usb/host/xhci-hub.c
@@ -24,6 +24,7 @@
 #include <asm/unaligned.h>
 
 #include "xhci.h"
+#include "xhci-trace.h"
 
 #define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
 #define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
@@ -535,8 +536,10 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
 		xhci->port_status_u0 |= 1 << wIndex;
 		if (xhci->port_status_u0 == all_ports_seen_u0) {
 			del_timer_sync(&xhci->comp_mode_recovery_timer);
-			xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
-			xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"All USB3 ports have entered U0 already!");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Compliance Mode Recovery Timer Deleted.");
 		}
 	}
 }
drivers/usb/host/xhci-mem.c
@@ -26,6 +26,7 @@
 #include <linux/dmapool.h>
 
 #include "xhci.h"
+#include "xhci-trace.h"
 
 /*
  * Allocates a generic ring segment from the ring pool, sets the dma address,
@@ -347,7 +348,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		return -ENOMEM;
 
 	xhci_link_rings(xhci, ring, first, last, num_segs);
-	xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+			"ring expansion succeed, now has %d segments",
 			ring->num_segs);
 
 	return 0;
@@ -481,17 +483,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
 	return ep->ring;
 }
 
-/* Only use this when you know stream_info is valid */
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static struct xhci_ring *dma_to_stream_ring(
-		struct xhci_stream_info *stream_info,
-		u64 address)
-{
-	return radix_tree_lookup(&stream_info->trb_address_map,
-			address >> TRB_SEGMENT_SHIFT);
-}
-#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
 struct xhci_ring *xhci_stream_id_to_ring(
 		struct xhci_virt_device *dev,
 		unsigned int ep_index,
@@ -509,58 +500,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
 	return ep->stream_info->stream_rings[stream_id];
 }
 
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static int xhci_test_radix_tree(struct xhci_hcd *xhci,
-		unsigned int num_streams,
-		struct xhci_stream_info *stream_info)
-{
-	u32 cur_stream;
-	struct xhci_ring *cur_ring;
-	u64 addr;
-
-	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
-		struct xhci_ring *mapped_ring;
-		int trb_size = sizeof(union xhci_trb);
-
-		cur_ring = stream_info->stream_rings[cur_stream];
-		for (addr = cur_ring->first_seg->dma;
-				addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
-				addr += trb_size) {
-			mapped_ring = dma_to_stream_ring(stream_info, addr);
-			if (cur_ring != mapped_ring) {
-				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
-						"didn't map to stream ID %u; "
-						"mapped to ring %p\n",
-						(unsigned long long) addr,
-						cur_stream,
-						mapped_ring);
-				return -EINVAL;
-			}
-		}
-		/* One TRB after the end of the ring segment shouldn't return a
-		 * pointer to the current ring (although it may be a part of a
-		 * different ring).
-		 */
-		mapped_ring = dma_to_stream_ring(stream_info, addr);
-		if (mapped_ring != cur_ring) {
-			/* One TRB before should also fail */
-			addr = cur_ring->first_seg->dma - trb_size;
-			mapped_ring = dma_to_stream_ring(stream_info, addr);
-		}
-		if (mapped_ring == cur_ring) {
-			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
-					"mapped to valid stream ID %u; "
-					"mapped ring = %p\n",
-					(unsigned long long) addr,
-					cur_stream,
-					mapped_ring);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
 /*
  * Change an endpoint's internal structure so it supports stream IDs.  The
  * number of requested streams includes stream 0, which cannot be used by device
@@ -687,13 +626,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 * was any other way, the host controller would assume the ring is
 	 * "empty" and wait forever for data to be queued to that stream ID).
 	 */
-#if XHCI_DEBUG
-	/* Do a little test on the radix tree to make sure it returns the
-	 * correct values.
-	 */
-	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
-		goto cleanup_rings;
-#endif
 
 	return stream_info;
 
@@ -731,7 +663,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
 	 */
 	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
-	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+			"Setting number of stream ctx array entries to %u",
 			1 << (max_primary_streams + 1));
 	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
 	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
@@ -1613,7 +1546,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
 
-	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocating %d scratchpad buffers", num_sp);
 
 	if (!num_sp)
 		return 0;
@@ -1770,11 +1704,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 		dma_free_coherent(&pdev->dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg(xhci, "Freed ERST\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg(xhci, "Freed event ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
 
 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -1782,7 +1716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
-	xhci_dbg(xhci, "Freed command ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
 	list_for_each_entry_safe(cur_cd, next_cd,
 			&xhci->cancel_cmd_list, cancel_cmd_list) {
 		list_del(&cur_cd->cancel_cmd_list);
@@ -1795,22 +1729,24 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (xhci->segment_pool)
 		dma_pool_destroy(xhci->segment_pool);
 	xhci->segment_pool = NULL;
-	xhci_dbg(xhci, "Freed segment pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
 
 	if (xhci->device_pool)
 		dma_pool_destroy(xhci->device_pool);
 	xhci->device_pool = NULL;
-	xhci_dbg(xhci, "Freed device context pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
 
 	if (xhci->small_streams_pool)
 		dma_pool_destroy(xhci->small_streams_pool);
 	xhci->small_streams_pool = NULL;
-	xhci_dbg(xhci, "Freed small stream array pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed small stream array pool");
 
 	if (xhci->medium_streams_pool)
 		dma_pool_destroy(xhci->medium_streams_pool);
 	xhci->medium_streams_pool = NULL;
-	xhci_dbg(xhci, "Freed medium stream array pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed medium stream array pool");
 
 	if (xhci->dcbaa)
 		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
@@ -2036,8 +1972,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 	 * there might be more events to service.
 	 */
 	temp &= ~ERST_EHB;
-	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
-			"preserving EHB bit\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write event ring dequeue pointer, "
+			"preserving EHB bit");
 	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
 			&xhci->ir_set->erst_dequeue);
 }
@@ -2060,8 +1997,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	temp = xhci_readl(xhci, addr + 2);
 	port_offset = XHCI_EXT_PORT_OFF(temp);
 	port_count = XHCI_EXT_PORT_COUNT(temp);
-	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
-			"count = %u, revision = 0x%x\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Ext Cap %p, port offset = %u, "
+			"count = %u, revision = 0x%x",
 			addr, port_offset, port_count, major_revision);
 	/* Port count includes the current port offset */
 	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
@@ -2075,15 +2013,18 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	/* Check the host's USB2 LPM capability */
 	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
 			(temp & XHCI_L1C)) {
-		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 0.96: support USB2 software lpm");
 		xhci->sw_lpm_support = 1;
 	}
 
 	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
-		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 1.0: support USB2 software lpm");
 		xhci->sw_lpm_support = 1;
 		if (temp & XHCI_HLC) {
-			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"xHCI 1.0: support USB2 hardware lpm");
 			xhci->hw_lpm_support = 1;
 		}
 	}
@@ -2207,18 +2148,21 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 		xhci_warn(xhci, "No ports on the roothubs?\n");
 		return -ENODEV;
 	}
-	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
 			xhci->num_usb2_ports, xhci->num_usb3_ports);
 
 	/* Place limits on the number of roothub ports so that the hub
 	 * descriptors aren't longer than the USB core will allocate.
 	 */
 	if (xhci->num_usb3_ports > 15) {
-		xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 3.0 roothub ports to 15.");
 		xhci->num_usb3_ports = 15;
 	}
 	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
-		xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 2.0 roothub ports to %u.",
 				USB_MAXCHILDREN);
 		xhci->num_usb2_ports = USB_MAXCHILDREN;
 	}
@@ -2243,8 +2187,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 			xhci->usb2_ports[port_index] =
 				&xhci->op_regs->port_status_base +
 				NUM_PORT_REGS*i;
-			xhci_dbg(xhci, "USB 2.0 port at index %u, "
-					"addr = %p\n", i,
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"USB 2.0 port at index %u, "
+					"addr = %p", i,
 					xhci->usb2_ports[port_index]);
 			port_index++;
 			if (port_index == xhci->num_usb2_ports)
@@ -2263,8 +2208,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 				xhci->usb3_ports[port_index] =
 					&xhci->op_regs->port_status_base +
 					NUM_PORT_REGS*i;
-				xhci_dbg(xhci, "USB 3.0 port at index %u, "
-						"addr = %p\n", i,
+				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+						"USB 3.0 port at index %u, "
+						"addr = %p", i,
 						xhci->usb3_ports[port_index]);
 				port_index++;
 				if (port_index == xhci->num_usb3_ports)
@@ -2288,32 +2234,35 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	INIT_LIST_HEAD(&xhci->cancel_cmd_list);
 
 	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
-	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size register = 0x%x", page_size);
 	for (i = 0; i < 16; i++) {
 		if ((0x1 & page_size) != 0)
 			break;
 		page_size = page_size >> 1;
 	}
 	if (i < 16)
-		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size of %iK", (1 << (i+12)) / 1024);
 	else
 		xhci_warn(xhci, "WARN: no supported page size\n");
 	/* Use 4K pages, since that's common and the minimum the HC supports */
 	xhci->page_shift = 12;
 	xhci->page_size = 1 << xhci->page_shift;
-	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"HCD page size set to %iK", xhci->page_size / 1024);
 
 	/*
 	 * Program the Number of Device Slots Enabled field in the CONFIG
 	 * register with the max value of slots the HC can handle.
 	 */
 	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
-	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
-			(unsigned int) val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// xHC can handle at most %d device slots.", val);
 	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
 	val |= (val2 & ~HCS_SLOTS_MASK);
-	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
-			(unsigned int) val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting Max device slots reg = 0x%x.", val);
 	xhci_writel(xhci, val, &xhci->op_regs->config_reg);
 
 	/*
@@ -2326,7 +2275,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
 	xhci->dcbaa->dma = dma;
-	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Device context base array address = 0x%llx (DMA), %p (virt)",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
 	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
@@ -2365,8 +2315,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
-	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
-	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocated command ring at %p", xhci->cmd_ring);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
@@ -2374,7 +2325,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
 		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting command ring address to 0x%x", val);
 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 
@@ -2390,8 +2342,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
 	val &= DBOFF_MASK;
-	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
-			" from cap regs base addr\n", val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Doorbell array is located at offset 0x%x"
+			" from cap regs base addr", val);
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
@@ -2402,7 +2355,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg(xhci, "// Allocating event ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
 						flags);
 	if (!xhci->event_ring)
@@ -2415,13 +2368,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			GFP_KERNEL);
 	if (!xhci->erst.entries)
 		goto fail;
-	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Allocated event ring segment table at 0x%llx",
 			(unsigned long long)dma);
 
 	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
 	xhci->erst.num_entries = ERST_NUM_SEGS;
 	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
 			xhci->erst.num_entries,
 			xhci->erst.entries,
 			(unsigned long long)xhci->erst.erst_dma_addr);
@@ -2439,13 +2394,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
 	val &= ERST_SIZE_MASK;
 	val |= ERST_NUM_SEGS;
-	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
 			val);
 	xhci_writel(xhci, val, &xhci->ir_set->erst_size);
 
-	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST entries to point to event ring.");
 	/* set the segment table base address */
-	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST base address for ir_set 0 = 0x%llx",
 			(unsigned long long)xhci->erst.erst_dma_addr);
 	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 	val_64 &= ERST_PTR_MASK;
@@ -2454,7 +2412,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
-	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set 0.");
 	xhci_print_ir_set(xhci, 0);
 
 	/*
drivers/usb/host/xhci-pci.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 
 #include "xhci.h"
+#include "xhci-trace.h"
 
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
@@ -64,16 +65,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
 				pdev->revision == 0x0) {
 			xhci->quirks |= XHCI_RESET_EP_QUIRK;
-			xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
-					" endpoint cmd after reset endpoint\n");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Fresco Logic xHC needs configure"
+				" endpoint cmd after reset endpoint");
 		}
 		/* Fresco Logic confirms: all revisions of this chip do not
 		 * support MSI, even though some of them claim to in their PCI
 		 * capabilities.
 		 */
 		xhci->quirks |= XHCI_BROKEN_MSI;
-		xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
-				"has broken MSI implementation\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Fresco Logic revision %u "
+				"has broken MSI implementation",
 				pdev->revision);
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	}
@@ -110,7 +113,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
 			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
-		xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Resetting on resume");
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_VIA)
drivers/usb/host/xhci-plat.c
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
 
 #include "xhci.h"
 
@@ -104,6 +106,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENODEV;
 
+	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	else
+		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
 	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
 	if (!hcd)
 		return -ENOMEM;
@@ -211,12 +222,21 @@ static const struct dev_pm_ops xhci_plat_pm_ops = {
 #define DEV_PM_OPS	NULL
 #endif /* CONFIG_PM */
 
+#ifdef CONFIG_OF
+static const struct of_device_id usb_xhci_of_match[] = {
+	{ .compatible = "xhci-platform" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+#endif
+
 static struct platform_driver usb_xhci_driver = {
 	.probe	= xhci_plat_probe,
 	.remove	= xhci_plat_remove,
 	.driver	= {
 		.name = "xhci-hcd",
 		.pm = DEV_PM_OPS,
+		.of_match_table = of_match_ptr(usb_xhci_of_match),
 	},
 };
 MODULE_ALIAS("platform:xhci-hcd");
drivers/usb/host/xhci-ring.c
@@ -67,6 +67,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include "xhci.h"
+#include "xhci-trace.h"
 
 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
@@ -555,7 +556,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		return;
 	}
 	state->new_cycle_state = 0;
-	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding segment containing stopped TRB.");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
@@ -565,12 +567,14 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
-	xhci_dbg(xhci, "Finding endpoint context\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding endpoint context");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
 
 	state->new_deq_ptr = cur_td->last_trb;
-	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding segment containing last TRB in TD.");
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 			state->new_deq_ptr,
 			&state->new_cycle_state);
@@ -597,13 +601,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	if (ep_ring->first_seg == ep_ring->first_seg->next &&
 			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
 		state->new_cycle_state ^= 0x1;
-	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Cycle state = 0x%x", state->new_cycle_state);
 
 	/* Don't update the ring cycle state for the producer (us). */
-	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue segment = %p (virtual)",
 			state->new_deq_seg);
 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
-	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue pointer = 0x%llx (DMA)",
 			(unsigned long long) addr);
 }
 
@@ -631,9 +638,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			if (flip_cycle)
 				cur_trb->generic.field[3] ^=
 					cpu_to_le32(TRB_CYCLE);
-			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
-			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
-					"in seg %p (0x%llx dma)\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Cancel (unchain) link TRB");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Address = %p (0x%llx dma); "
+					"in seg %p (0x%llx dma)",
 					cur_trb,
 					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 					cur_seg,
@@ -651,7 +660,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 					cpu_to_le32(TRB_CYCLE);
 			cur_trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
-			xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"TRB to noop at offset 0x%llx",
 					(unsigned long long)
 					xhci_trb_virt_to_dma(cur_seg, cur_trb));
 		}
@@ -672,8 +682,9 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 
-	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+			"new deq ptr = %p (0x%llx dma), new cycle = %u",
 			deq_state->new_deq_seg,
 			(unsigned long long)deq_state->new_deq_seg->dma,
 			deq_state->new_deq_ptr,
@@ -793,7 +804,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 */
 	list_for_each(entry, &ep->cancelled_td_list) {
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
-		xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Removing canceled TD starting at 0x%llx (dma).",
 				(unsigned long long)xhci_trb_virt_to_dma(
 					cur_td->start_seg, cur_td->first_trb));
 		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
@@ -913,14 +925,16 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 
 	ep->stop_cmds_pending--;
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
-		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
-				"xHCI as DYING, exiting.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Stop EP timer ran, but another timer marked "
+				"xHCI as DYING, exiting.");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
-		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
-				"exiting.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Stop EP timer ran, but no command pending, "
+				"exiting.");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
@@ -962,8 +976,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 			ring = temp_ep->ring;
 			if (!ring)
 				continue;
-			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
-					"ep index %u\n", i, j);
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Killing URBs for slot ID %u, "
+					"ep index %u", i, j);
 			while (!list_empty(&ring->td_list)) {
 				cur_td = list_first_entry(&ring->td_list,
 						struct xhci_td,
@@ -986,9 +1001,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 		}
 	}
 	spin_unlock_irqrestore(&xhci->lock, flags);
-	xhci_dbg(xhci, "Calling usb_hc_died()\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Calling usb_hc_died()");
 	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
-	xhci_dbg(xhci, "xHCI host controller is dead.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"xHCI host controller is dead.");
 }
 
 
@@ -1092,7 +1109,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 			ep_state &= EP_STATE_MASK;
 			slot_state = le32_to_cpu(slot_ctx->dev_state);
 			slot_state = GET_SLOT_STATE(slot_state);
-			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Slot state = %u, EP state = %u",
 					slot_state, ep_state);
 			break;
 		case COMP_EBADSLT:
@@ -1112,7 +1130,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		 * cancelling URBs, which might not be an error...
 		 */
 	} else {
-		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Successful Set TR Deq Ptr cmd, deq = @%08llx",
 			le64_to_cpu(ep_ctx->deq));
 		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
 					 dev->eps[ep_index].queued_deq_ptr) ==
@@ -1150,7 +1169,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
-	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+		 "Ignoring reset ep completion code of %u",
 		 GET_COMP_CODE(le32_to_cpu(event->status)));
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
@@ -1158,7 +1178,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	 * because the HW can't handle two commands being queued in a row.
 	 */
 	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
-		xhci_dbg(xhci, "Queueing configure endpoint command\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Queueing configure endpoint command");
 		xhci_queue_configure_endpoint(xhci,
 				xhci->devs[slot_id]->in_ctx->dma, slot_id,
 				false);
@@ -1377,6 +1398,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		return;
 	}
 
+	trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic,
+					(struct xhci_generic_trb *) event);
+
 	if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
 		(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
 		/* If the return value is 0, we think the trb pointed by
@@ -1444,8 +1468,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
 			if (!(ep_state & EP_HALTED))
 				goto bandwidth_change;
-			xhci_dbg(xhci, "Completed config ep cmd - "
-					"last ep index = %d, state = %d\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+					"Completed config ep cmd - "
+					"last ep index = %d, state = %d",
 					ep_index, ep_state);
 			/* Clear internal halted state and restart ring(s) */
 			xhci->devs[slot_id]->eps[ep_index].ep_state &=
@@ -1454,7 +1479,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			break;
 		}
 bandwidth_change:
-		xhci_dbg(xhci, "Completed config ep cmd\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Completed config ep cmd");
 		xhci->devs[slot_id]->cmd_status =
 			GET_COMP_CODE(le32_to_cpu(event->status));
 		complete(&xhci->devs[slot_id]->cmd_completion);
@@ -1497,7 +1523,8 @@ bandwidth_change:
 			xhci->error_bitmask |= 1 << 6;
 			break;
 		}
-		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"NEC firmware version %2x.%02x",
 			NEC_FW_MAJOR(le32_to_cpu(event->status)),
 			NEC_FW_MINOR(le32_to_cpu(event->status)));
 		break;
@@ -2877,8 +2904,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			return -ENOMEM;
 		}
 
-		xhci_dbg(xhci, "ERROR no room on ep ring, "
-				"try ring expansion\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+				"ERROR no room on ep ring, try ring expansion");
 		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
 		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
 					mem_flags)) {
drivers/usb/host/xhci-trace.c (new file, 15 lines)
@@ -0,0 +1,15 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "xhci-trace.h"
151
drivers/usb/host/xhci-trace.h
Normal file
151
drivers/usb/host/xhci-trace.h
Normal file
@ -0,0 +1,151 @@
|
|||||||
|
/*
|
||||||
|
* xHCI host controller driver
|
||||||
|
*
|
||||||
|
* Copyright (C) 2013 Xenia Ragiadakou
|
||||||
|
*
|
||||||
|
* Author: Xenia Ragiadakou
|
||||||
|
* Email : burzalodowa@gmail.com
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License version 2 as
|
||||||
|
 * published by the Free Software Foundation.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xhci-hcd

#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __XHCI_TRACE_H

#include <linux/tracepoint.h>
#include "xhci.h"

#define XHCI_MSG_MAX	500

DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
	TP_fast_assign(
		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DECLARE_EVENT_CLASS(xhci_log_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num),
	TP_STRUCT__entry(
		__field(int, ctx_64)
		__field(unsigned, ctx_type)
		__field(dma_addr_t, ctx_dma)
		__field(u8 *, ctx_va)
		__field(unsigned, ctx_ep_num)
		__field(int, slot_id)
		__dynamic_array(u32, ctx_data,
			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
	),
	TP_fast_assign(
		struct usb_device *udev;

		udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
		__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
		__entry->ctx_type = ctx->type;
		__entry->ctx_dma = ctx->dma;
		__entry->ctx_va = ctx->bytes;
		__entry->slot_id = udev->slot_id;
		__entry->ctx_ep_num = ep_num;
		memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
	),
	TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
		__entry->ctx_64, __entry->ctx_type,
		(unsigned long long) __entry->ctx_dma, __entry->ctx_va
	)
);

DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num)
);

DECLARE_EVENT_CLASS(xhci_log_event,
	TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
	TP_ARGS(trb_va, ev),
	TP_STRUCT__entry(
		__field(void *, va)
		__field(u64, dma)
		__field(u32, status)
		__field(u32, flags)
		__dynamic_array(__le32, trb, 4)
	),
	TP_fast_assign(
		__entry->va = trb_va;
		__entry->dma = le64_to_cpu(((u64)ev->field[1]) << 32 |
						ev->field[0]);
		__entry->status = le32_to_cpu(ev->field[2]);
		__entry->flags = le32_to_cpu(ev->field[3]);
		memcpy(__get_dynamic_array(trb), trb_va,
			sizeof(struct xhci_generic_trb));
	),
	TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x",
		(unsigned long long) __entry->dma, __entry->va,
		__entry->status, __entry->flags
	)
);

DEFINE_EVENT(xhci_log_event, xhci_cmd_completion,
	TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
	TP_ARGS(trb_va, ev)
);

#endif /* __XHCI_TRACE_H */

/* this part must be outside header guard */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE xhci-trace

#include <trace/define_trace.h>
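Aside: the xhci_log_msg events above are fired through a small helper, xhci_dbg_trace(), whose declaration is added to xhci.h later in this series and whose converted call sites fill the xhci.c diff below. The helper's body lives in xhci-dbg.c and is not shown in this pull request, so the following is only a plausible sketch of how one formatted message can be handed to both dev_dbg() and the tracepoint chosen by the caller; treat the exact wording as an assumption, not the merged implementation.

/* Sketch only: not taken from this diff; body assumed, see note above. */
#include <linux/kernel.h>
#include "xhci.h"

void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* One call feeds both sinks: dynamic debug and the trace event. */
	xhci_dbg(xhci, "%pV\n", &vaf);
	trace(&vaf);
	va_end(args);
}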
@@ -29,6 +29,7 @@
 #include <linux/dmi.h>

 #include "xhci.h"
+#include "xhci-trace.h"

 #define DRIVER_AUTHOR "Sarah Sharp"
 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -100,7 +101,7 @@ void xhci_quiesce(struct xhci_hcd *xhci)
 int xhci_halt(struct xhci_hcd *xhci)
 {
 	int ret;
-	xhci_dbg(xhci, "// Halt the HC\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
 	xhci_quiesce(xhci);

 	ret = xhci_handshake(xhci, &xhci->op_regs->status,
@@ -124,7 +125,7 @@ static int xhci_start(struct xhci_hcd *xhci)

 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
-	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
 			temp);
 	xhci_writel(xhci, temp, &xhci->op_regs->command);

@@ -162,7 +163,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 		return 0;
 	}

-	xhci_dbg(xhci, "// Reset the HC\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RESET;
 	xhci_writel(xhci, command, &xhci->op_regs->command);
@@ -172,7 +173,8 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (ret)
 		return ret;

-	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wait for controller to be ready for doorbell rings");
 	/*
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
@@ -214,14 +216,16 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)

 	ret = pci_enable_msi(pdev);
 	if (ret) {
-		xhci_dbg(xhci, "failed to allocate MSI entry\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"failed to allocate MSI entry");
 		return ret;
 	}

 	ret = request_irq(pdev->irq, xhci_msi_irq,
 				0, "xhci_hcd", xhci_to_hcd(xhci));
 	if (ret) {
-		xhci_dbg(xhci, "disable MSI interrupt\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"disable MSI interrupt");
 		pci_disable_msi(pdev);
 	}

@@ -284,7 +288,8 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)

 	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
 	if (ret) {
-		xhci_dbg(xhci, "Failed to enable MSI-X\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Failed to enable MSI-X");
 		goto free_entries;
 	}

@@ -300,7 +305,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 	return ret;

 disable_msix:
-	xhci_dbg(xhci, "disable MSI-X interrupt\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
 	xhci_free_irq(xhci);
 	pci_disable_msix(pdev);
 free_entries:
@@ -417,9 +422,11 @@ static void compliance_mode_recovery(unsigned long arg)
 			 * Compliance Mode Detected. Letting USB Core
 			 * handle the Warm Reset
 			 */
-			xhci_dbg(xhci, "Compliance mode detected->port %d\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+					"Compliance mode detected->port %d",
 					i + 1);
-			xhci_dbg(xhci, "Attempting compliance mode recovery\n");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+					"Attempting compliance mode recovery");
 			hcd = xhci->shared_hcd;

 			if (hcd->state == HC_STATE_SUSPENDED)
@@ -457,7 +464,8 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
 	set_timer_slack(&xhci->comp_mode_recovery_timer,
 			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
 	add_timer(&xhci->comp_mode_recovery_timer);
-	xhci_dbg(xhci, "Compliance mode recovery timer initialized\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Compliance mode recovery timer initialized");
 }

 /*
@@ -505,16 +513,18 @@ int xhci_init(struct usb_hcd *hcd)
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int retval = 0;

-	xhci_dbg(xhci, "xhci_init\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
 	spin_lock_init(&xhci->lock);
 	if (xhci->hci_version == 0x95 && link_quirk) {
-		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Not clearing Link TRB chain bits.");
 		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
 	} else {
-		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI doesn't need link TRB QUIRK");
 	}
 	retval = xhci_mem_init(xhci, GFP_KERNEL);
-	xhci_dbg(xhci, "Finished xhci_init\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

 	/* Initializing Compliance Mode Recovery Data If Needed */
 	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
@@ -528,57 +538,6 @@ int xhci_init(struct usb_hcd *hcd)
 /*-------------------------------------------------------------------------*/


-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static void xhci_event_ring_work(unsigned long arg)
-{
-	unsigned long flags;
-	int temp;
-	u64 temp_64;
-	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
-	int i, j;
-
-	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
-
-	spin_lock_irqsave(&xhci->lock, flags);
-	temp = xhci_readl(xhci, &xhci->op_regs->status);
-	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
-	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
-			(xhci->xhc_state & XHCI_STATE_HALTED)) {
-		xhci_dbg(xhci, "HW died, polling stopped.\n");
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		return;
-	}
-
-	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
-	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
-	xhci->error_bitmask = 0;
-	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
-	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp_64 &= ~ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
-	xhci_dbg(xhci, "Command ring:\n");
-	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
-	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-	xhci_dbg_cmd_ptrs(xhci);
-	for (i = 0; i < MAX_HC_SLOTS; ++i) {
-		if (!xhci->devs[i])
-			continue;
-		for (j = 0; j < 31; ++j) {
-			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
-		}
-	}
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
-	if (!xhci->zombie)
-		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
-	else
-		xhci_dbg(xhci, "Quit polling the event ring.\n");
-}
-#endif
-
 static int xhci_run_finished(struct xhci_hcd *xhci)
 {
 	if (xhci_start(xhci)) {
@@ -591,7 +550,8 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
 	if (xhci->quirks & XHCI_NEC_HOST)
 		xhci_ring_cmd_db(xhci);

-	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Finished xhci_run for USB3 roothub");
 	return 0;
 }

@@ -622,23 +582,12 @@ int xhci_run(struct usb_hcd *hcd)
 	if (!usb_hcd_is_primary_hcd(hcd))
 		return xhci_run_finished(xhci);

-	xhci_dbg(xhci, "xhci_run\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

 	ret = xhci_try_enable_msi(hcd);
 	if (ret)
 		return ret;

-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-	init_timer(&xhci->event_ring_timer);
-	xhci->event_ring_timer.data = (unsigned long) xhci;
-	xhci->event_ring_timer.function = xhci_event_ring_work;
-	/* Poll the event ring */
-	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
-	xhci->zombie = 0;
-	xhci_dbg(xhci, "Setting event ring polling timer\n");
-	add_timer(&xhci->event_ring_timer);
-#endif
-
 	xhci_dbg(xhci, "Command ring memory map follows:\n");
 	xhci_debug_ring(xhci, xhci->cmd_ring);
 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -651,9 +600,11 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

-	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set the interrupt modulation register");
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	temp &= ~ER_IRQ_INTERVAL_MASK;
 	temp |= (u32) 160;
@@ -662,12 +613,13 @@ int xhci_run(struct usb_hcd *hcd)
 	/* Set the HCD state before we enable the irqs */
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_EIE);
-	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
-			temp);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Enable interrupts, cmd = 0x%x.", temp);
 	xhci_writel(xhci, temp, &xhci->op_regs->command);

 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
@@ -677,7 +629,8 @@ int xhci_run(struct usb_hcd *hcd)
 		xhci_queue_vendor_command(xhci, 0, 0, 0,
 				TRB_TYPE(TRB_NEC_GET_FW));

-	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Finished xhci_run for USB2 roothub");
 	return 0;
 }

@@ -725,24 +678,20 @@ void xhci_stop(struct usb_hcd *hcd)

 	xhci_cleanup_msix(xhci);

-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-	/* Tell the event ring poll function not to reschedule */
-	xhci->zombie = 1;
-	del_timer_sync(&xhci->event_ring_timer);
-#endif
-
 	/* Deleting Compliance Mode Recovery Timer */
 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 			(!(xhci_all_ports_seen_u0(xhci)))) {
 		del_timer_sync(&xhci->comp_mode_recovery_timer);
-		xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"%s: compliance mode recovery timer deleted",
 				__func__);
 	}

 	if (xhci->quirks & XHCI_AMD_PLL_FIX)
 		usb_amd_dev_put();

-	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Disabling event ring interrupts");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
@@ -750,10 +699,11 @@ void xhci_stop(struct usb_hcd *hcd)
 			&xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);

-	xhci_dbg(xhci, "cleaning up memory\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
 	xhci_mem_cleanup(xhci);
-	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
-			xhci_readl(xhci, &xhci->op_regs->status));
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"xhci_stop completed - status = %x",
+			xhci_readl(xhci, &xhci->op_regs->status));
 }

 /*
@@ -778,8 +728,9 @@ void xhci_shutdown(struct usb_hcd *hcd)

 	xhci_cleanup_msix(xhci);

-	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
-			xhci_readl(xhci, &xhci->op_regs->status));
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"xhci_shutdown completed - status = %x",
+			xhci_readl(xhci, &xhci->op_regs->status));
 }

 #ifdef CONFIG_PM
@@ -820,7 +771,8 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
 			xhci->cmd_ring->dequeue) &
 		 (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting command ring address to 0x%llx",
 			(long unsigned long) val_64);
 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 }
@@ -933,7 +885,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 			(!(xhci_all_ports_seen_u0(xhci)))) {
 		del_timer_sync(&xhci->comp_mode_recovery_timer);
-		xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"%s: compliance mode recovery timer deleted",
 				__func__);
 	}

@@ -998,7 +951,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 			!(xhci_all_ports_seen_u0(xhci))) {
 		del_timer_sync(&xhci->comp_mode_recovery_timer);
-		xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Compliance Mode Recovery Timer deleted!");
 	}

 	/* Let the USB core know _both_ roothubs lost power. */
@@ -1011,12 +965,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		spin_unlock_irq(&xhci->lock);
 		xhci_cleanup_msix(xhci);

-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-		/* Tell the event ring poll function not to reschedule */
-		xhci->zombie = 1;
-		del_timer_sync(&xhci->event_ring_timer);
-#endif
-
 		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 		temp = xhci_readl(xhci, &xhci->op_regs->status);
 		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -1170,27 +1118,25 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 	struct xhci_virt_device	*virt_dev;

 	if (!hcd || (check_ep && !ep) || !udev) {
-		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
-				func);
+		pr_debug("xHCI %s called with invalid args\n", func);
 		return -EINVAL;
 	}
 	if (!udev->parent) {
-		printk(KERN_DEBUG "xHCI %s called for root hub\n",
-				func);
+		pr_debug("xHCI %s called for root hub\n", func);
 		return 0;
 	}

 	xhci = hcd_to_xhci(hcd);
 	if (check_virt_dev) {
 		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
-			printk(KERN_DEBUG "xHCI %s called with unaddressed "
-					"device\n", func);
+			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
+					func);
 			return -EINVAL;
 		}

 		virt_dev = xhci->devs[udev->slot_id];
 		if (virt_dev->udev != udev) {
-			printk(KERN_DEBUG "xHCI %s called with udev and "
+			xhci_dbg(xhci, "xHCI %s called with udev and "
 					"virt_dev does not match\n", func);
 			return -EINVAL;
 		}
@@ -1228,12 +1174,16 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
 	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
 	if (hw_max_packet_size != max_packet_size) {
-		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
-		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Max Packet Size for ep 0 changed.");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Max packet size in usb_device = %d",
 				max_packet_size);
-		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Max packet size in xHCI HW = %d",
 				hw_max_packet_size);
-		xhci_dbg(xhci, "Issuing evaluate context command.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Issuing evaluate context command.");

 		/* Set up the input context flags for the command */
 		/* FIXME: This won't work if a non-default control endpoint
@@ -1498,7 +1448,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
-		xhci_dbg(xhci, "HW died, freeing TD.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"HW died, freeing TD.");
 		urb_priv = urb->hcpriv;
 		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
 			td = urb_priv->td[i];
@@ -1516,8 +1467,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	}
 	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
 			(xhci->xhc_state & XHCI_STATE_HALTED)) {
-		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
-				"non-responsive xHCI host.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Ep 0x%x: URB %p to be canceled on "
+				"non-responsive xHCI host.",
 				urb->ep->desc.bEndpointAddress, urb);
 		/* Let the stop endpoint command watchdog timer (which set this
 		 * state) finish cleaning up the endpoint TD lists.  We must
@@ -1538,8 +1490,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	urb_priv = urb->hcpriv;
 	i = urb_priv->td_cnt;
 	if (i < urb_priv->length)
-		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
-				"starting at offset 0x%llx\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Cancel URB %p, dev %s, ep 0x%x, "
+				"starting at offset 0x%llx",
 				urb, urb->dev->devpath,
 				urb->ep->desc.bEndpointAddress,
 				(unsigned long long) xhci_trb_virt_to_dma(
@@ -1851,7 +1804,8 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
 		ret = -ENODEV;
 		break;
 	case COMP_SUCCESS:
-		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Successful Endpoint Configure command");
 		ret = 0;
 		break;
 	default:
@@ -1897,7 +1851,8 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 		ret = -EINVAL;
 		break;
 	case COMP_SUCCESS:
-		dev_dbg(&udev->dev, "Successful evaluate context command\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Successful evaluate context command");
 		ret = 0;
 		break;
 	default:
@@ -1963,14 +1918,16 @@ static int xhci_reserve_host_resources(struct xhci_hcd *xhci,

 	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
 	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
-		xhci_dbg(xhci, "Not enough ep ctxs: "
-				"%u active, need to add %u, limit is %u.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Not enough ep ctxs: "
+				"%u active, need to add %u, limit is %u.",
 				xhci->num_active_eps, added_eps,
 				xhci->limit_active_eps);
 		return -ENOMEM;
 	}
 	xhci->num_active_eps += added_eps;
-	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Adding %u ep ctxs, %u now active.", added_eps,
 			xhci->num_active_eps);
 	return 0;
 }
@@ -1988,7 +1945,8 @@ static void xhci_free_host_resources(struct xhci_hcd *xhci,

 	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
 	xhci->num_active_eps -= num_failed_eps;
-	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Removing %u failed ep ctxs, %u now active.",
 			num_failed_eps,
 			xhci->num_active_eps);
 }
@@ -2007,7 +1965,8 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
 	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
 	xhci->num_active_eps -= num_dropped_eps;
 	if (num_dropped_eps)
-		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Removing %u dropped ep ctxs, %u now active.",
 				num_dropped_eps,
 				xhci->num_active_eps);
 }
@@ -2168,18 +2127,21 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
 	 * that the HS bus has enough bandwidth if we are activing a new TT.
 	 */
 	if (virt_dev->tt_info) {
-		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Recalculating BW for rootport %u",
 				virt_dev->real_port);
 		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
 			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
 					"newly activated TT.\n");
 			return -ENOMEM;
 		}
-		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Recalculating BW for TT slot %u port %u",
 				virt_dev->tt_info->slot_id,
 				virt_dev->tt_info->ttport);
 	} else {
-		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Recalculating BW for rootport %u",
 				virt_dev->real_port);
 	}

@@ -2287,8 +2249,9 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
 			xhci->rh_bw[port_index].num_active_tts;
 	}

-	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
-		"Available: %u " "percent\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+		"Final bandwidth: %u, Limit: %u, Reserved: %u, "
+		"Available: %u " "percent",
 		bw_used, max_bandwidth, bw_reserved,
 		(max_bandwidth - bw_used - bw_reserved) * 100 /
 		max_bandwidth);
@@ -2658,7 +2621,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
 			xhci_free_host_resources(xhci, ctrl_ctx);
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"FIXME allocate a new ring segment");
 		return -ENOMEM;
 	}
 	xhci_ring_cmd_db(xhci);
@@ -2871,7 +2835,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	struct xhci_dequeue_state deq_state;
 	struct xhci_virt_ep *ep;

-	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+			"Cleaning up stalled endpoint ring");
 	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
 	/* We need to move the HW's dequeue pointer past this TD,
 	 * or it will attempt to resend it on the next doorbell ring.
@@ -2884,7 +2849,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	 * issue a configure endpoint command later.
 	 */
 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
-		xhci_dbg(xhci, "Queueing new dequeue state\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+				"Queueing new dequeue state");
 		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
 				ep_index, ep->stopped_stream, &deq_state);
 	} else {
@@ -2893,8 +2859,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 		 * XXX: No idea how this hardware will react when stream rings
 		 * are enabled.
 		 */
-		xhci_dbg(xhci, "Setting up input context for "
-				"configure endpoint command\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Setting up input context for "
+				"configure endpoint command");
 		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
 				ep_index, &deq_state);
 	}
@@ -2926,16 +2893,19 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 	ep_index = xhci_get_endpoint_index(&ep->desc);
 	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
 	if (!virt_ep->stopped_td) {
-		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
-				ep->desc.bEndpointAddress);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+				"Endpoint 0x%x not halted, refusing to reset.",
+				ep->desc.bEndpointAddress);
 		return;
 	}
 	if (usb_endpoint_xfer_control(&ep->desc)) {
-		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+				"Control endpoint stall already handled.");
 		return;
 	}

-	xhci_dbg(xhci, "Queueing reset endpoint command\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+			"Queueing reset endpoint command");
 	spin_lock_irqsave(&xhci->lock, flags);
 	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
 	/*
@@ -3373,8 +3343,9 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
 	}
 	xhci->num_active_eps -= num_dropped_eps;
 	if (num_dropped_eps)
-		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
-				"%u now active.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Dropped %u ep ctxs, flags = 0x%x, "
+				"%u now active.",
 				num_dropped_eps, drop_flags,
 				xhci->num_active_eps);
 }
@@ -3508,10 +3479,10 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 	switch (ret) {
 	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
 	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
-		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
+		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
 				slot_id,
 				xhci_get_slot_state(xhci, virt_dev->out_ctx));
-		xhci_info(xhci, "Not freeing device rings.\n");
+		xhci_dbg(xhci, "Not freeing device rings.\n");
 		/* Don't treat this as an error. May change my mind later. */
 		ret = 0;
 		goto command_cleanup;
@@ -3636,13 +3607,15 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
 {
 	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
-		xhci_dbg(xhci, "Not enough ep ctxs: "
-				"%u active, need to add 1, limit is %u.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Not enough ep ctxs: "
+				"%u active, need to add 1, limit is %u.",
 				xhci->num_active_eps, xhci->limit_active_eps);
 		return -ENOMEM;
 	}
 	xhci->num_active_eps += 1;
-	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Adding 1 ep ctx, %u now active.",
 			xhci->num_active_eps);
 	return 0;
 }
@@ -3742,7 +3715,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	union xhci_trb *cmd_trb;

 	if (!udev->slot_id) {
-		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+				"Bad Slot ID %d", udev->slot_id);
 		return -EINVAL;
 	}

@@ -3781,6 +3755,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)

 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+				slot_ctx->dev_info >> 27);

 	spin_lock_irqsave(&xhci->lock, flags);
 	cmd_trb = xhci->cmd_ring->dequeue;
@@ -3788,7 +3764,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 			udev->slot_id);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+				"FIXME: allocate a command ring segment");
 		return ret;
 	}
 	xhci_ring_cmd_db(xhci);
@@ -3828,13 +3805,15 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		ret = -ENODEV;
 		break;
 	case COMP_SUCCESS:
-		xhci_dbg(xhci, "Successful Address Device command\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+				"Successful Address Device command");
 		break;
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
 				"code 0x%x.\n", virt_dev->cmd_status);
 		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
 		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
+		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
 		ret = -EINVAL;
 		break;
 	}
@@ -3842,16 +3821,21 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		return ret;
 	}
 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
-	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
-			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
-			(unsigned long long)
-			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
-	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+			"Op regs DCBAA ptr = %#016llx", temp_64);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+			"Slot ID %d dcbaa entry @%p = %#016llx",
+			udev->slot_id,
+			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+			(unsigned long long)
+			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+			"Output Context DMA address = %#08llx",
 			(unsigned long long)virt_dev->out_ctx->dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+				slot_ctx->dev_info >> 27);
 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
 	/*
@@ -3859,6 +3843,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	 * address given back to us by the HC.
 	 */
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
+				slot_ctx->dev_info >> 27);
 	/* Use kernel assigned address for devices; store xHC assigned
 	 * address locally. */
 	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
@@ -3867,7 +3853,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	ctrl_ctx->add_flags = 0;
 	ctrl_ctx->drop_flags = 0;

-	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+			"Internal device address = %d", virt_dev->address);

 	return 0;
 }
@@ -3933,7 +3920,8 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
 	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
 	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);

-	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+			"Set up evaluate context for LPM MEL change.");
 	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
 	xhci_dbg_ctx(xhci, command->in_ctx, 0);

@@ -4837,7 +4825,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 	struct xhci_hcd		*xhci;
 	struct device		*dev = hcd->self.controller;
 	int			retval;
-	u32			temp;

 	/* Accept arbitrarily long scatter-gather lists */
 	hcd->self.sg_tablesize = ~0;
@@ -4869,14 +4856,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 		/* xHCI private pointer was set in xhci_pci_probe for the second
 		 * registered roothub.
 		 */
-		xhci = hcd_to_xhci(hcd);
-		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
-		if (HCC_64BIT_ADDR(temp)) {
-			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
-			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
-		} else {
-			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
-		}
 		return 0;
 	}

@@ -4915,12 +4894,12 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 		goto error;
 	xhci_dbg(xhci, "Reset complete\n");

-	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
-	if (HCC_64BIT_ADDR(temp)) {
+	/* Set dma_mask and coherent_dma_mask to 64-bits,
+	 * if xHC supports 64-bit addressing */
+	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
 		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
-		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
-	} else {
-		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
 	}

 	xhci_dbg(xhci, "Calling HCD init\n");
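Aside: the hunk above is the 64-bit DMA fix from the pull request. It requests a 64-bit streaming mask and, only when that succeeds, a matching 64-bit coherent mask, so consistent allocations can also come from above 4 GB. The sketch below shows that general pattern with an explicit 32-bit fallback; the helper name setup_dma_masks() and its bool parameter are invented for illustration and are not part of the driver.

/* Illustrative only: generic "try 64-bit, fall back to 32-bit" DMA setup. */
#include <linux/dma-mapping.h>

static int setup_dma_masks(struct device *dev, bool has_64bit_addressing)
{
	/* Keep the coherent mask in sync with the streaming mask. */
	if (has_64bit_addressing && !dma_set_mask(dev, DMA_BIT_MASK(64)))
		return dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -ENODEV;
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}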
@@ -4945,12 +4924,12 @@ static int __init xhci_hcd_init(void)

 	retval = xhci_register_pci();
 	if (retval < 0) {
-		printk(KERN_DEBUG "Problem registering PCI driver.");
+		pr_debug("Problem registering PCI driver.\n");
 		return retval;
 	}
 	retval = xhci_register_plat();
 	if (retval < 0) {
-		printk(KERN_DEBUG "Problem registering platform driver.");
+		pr_debug("Problem registering platform driver.\n");
 		goto unreg_pci;
 	}
 	/*
@@ -1490,11 +1490,6 @@ struct xhci_hcd {
 	struct dma_pool	*small_streams_pool;
 	struct dma_pool	*medium_streams_pool;

-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-	/* Poll the rings - for debugging */
-	struct timer_list	event_ring_timer;
-	int			zombie;
-#endif
 	/* Host controller watchdog timer structures */
 	unsigned int		xhc_state;

@@ -1579,16 +1574,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
 	return xhci->main_hcd;
 }

-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-#define XHCI_DEBUG	1
-#else
-#define XHCI_DEBUG	0
-#endif
-
 #define xhci_dbg(xhci, fmt, args...) \
-	do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
-#define xhci_info(xhci, fmt, args...) \
-	do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+	dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
 #define xhci_err(xhci, fmt, args...) \
 	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
 #define xhci_warn(xhci, fmt, args...) \
@@ -1660,6 +1647,8 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
 void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_virt_ep *ep);
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+			const char *fmt, ...);

 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);