Merge branch 'for-usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci
* 'for-usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci:
  Intel xhci: Limit number of active endpoints to 64.
  Intel xhci: Ignore spurious successful event.
  Intel xhci: Support EHCI/xHCI port switching.
  Intel xhci: Add PCI id for Panther Point xHCI host.
  xhci: STFU: Be quieter during URB submission and completion.
  xhci: STFU: Don't print event ring dequeue pointer.
  xhci: STFU: Remove function tracing.
  xhci: Don't submit commands when the host is dead.
  xhci: Clear stopped_td when Stop Endpoint command completes.
commit 87367a0b71
@@ -348,11 +348,50 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 	return rc;
 }
 
+static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+		pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == 0x1E26;
+}
+
+static void ehci_enable_xhci_companion(void)
+{
+	struct pci_dev *companion = NULL;
+
+	/* The xHCI and EHCI controllers are not on the same PCI slot */
+	for_each_pci_dev(companion) {
+		if (!usb_is_intel_switchable_xhci(companion))
+			continue;
+		usb_enable_xhci_ports(companion);
+		return;
+	}
+}
+
 static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
+	/* The BIOS on systems with the Intel Panther Point chipset may or may
+	 * not support xHCI natively. That means that during system resume, it
+	 * may switch the ports back to EHCI so that users can use their
+	 * keyboard to select a kernel from GRUB after resume from hibernate.
+	 *
+	 * The BIOS is supposed to remember whether the OS had xHCI ports
+	 * enabled before resume, and switch the ports back to xHCI when the
+	 * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+	 * writers.
+	 *
+	 * Unconditionally switch the ports back to xHCI after a system resume.
+	 * We can't tell whether the EHCI or xHCI controller will be resumed
+	 * first, so we have to do the port switchover in both drivers. Writing
+	 * a '1' to the port switchover registers should have no effect if the
+	 * port was already switched over.
+	 */
+	if (usb_is_intel_switchable_ehci(pdev))
+		ehci_enable_xhci_companion();
+
 	// maybe restore FLADJ
 
 	if (time_before(jiffies, ehci->next_statechange))
@@ -69,6 +69,9 @@
 #define NB_PIF0_PWRDOWN_0	0x01100012
 #define NB_PIF0_PWRDOWN_1	0x01100013
 
+#define USB_INTEL_XUSB2PR	0xD0
+#define USB_INTEL_USB3_PSSEN	0xD8
+
 static struct amd_chipset_info {
 	struct pci_dev	*nb_dev;
 	struct pci_dev	*smbus_dev;
@@ -673,6 +676,64 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
 	return -ETIMEDOUT;
 }
 
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+		pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+}
+EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports. These ports can be switched between either
+ * controller. Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start. This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over. The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately. We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+	u32 ports_available;
+
+	ports_available = 0xffffffff;
+	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+	 * Register, to turn on SuperSpeed terminations for all
+	 * available ports.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			cpu_to_le32(ports_available));
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+			"under xHCI: 0x%x\n", ports_available);
+
+	ports_available = 0xffffffff;
+	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+	 * switch the USB 2.0 power and data lines over to the xHCI
+	 * host.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			cpu_to_le32(ports_available));
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+			"to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
 /**
  * PCI Quirks for xHCI.
  *
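The dev_dbg() read-backs above suggest a write-then-read pattern: all ones are written, and the value read back reports which ports the chipset actually switched (the hunk's own comment notes that not every port is switchable). A minimal standalone sketch of that masking arithmetic follows; it is plain C for illustration only, not part of the commit, and the read-back value is hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int requested = 0xffffffff;	/* ask for every port */
	unsigned int readback = 0x0000000f;	/* hypothetical: 4 shared ports latched */

	/* Bits that survive the read-back are the ports now routed to
	 * (or terminated by) the xHCI host controller. */
	printf("requested 0x%08x, switched 0x%08x\n", requested, readback);
	return 0;
}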
@@ -732,6 +793,8 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
 		writel(XHCI_LEGACY_DISABLE_SMI,
 			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
+	if (usb_is_intel_switchable_xhci(pdev))
+		usb_enable_xhci_ports(pdev);
 hc_init:
 	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
 
@@ -8,6 +8,8 @@ int usb_amd_find_chipset_info(void);
 void usb_amd_dev_put(void);
 void usb_amd_quirk_pll_disable(void);
 void usb_amd_quirk_pll_enable(void);
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
 #else
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
@@ -118,6 +118,12 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	/* AMD PLL quirk */
 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
 		xhci->quirks |= XHCI_AMD_PLL_FIX;
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+		xhci->limit_active_eps = 64;
+	}
 
 	/* Make sure the HC is halted. */
 	retval = xhci_halt(xhci);
@@ -242,8 +248,28 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 	int retval = 0;
 
+	/* The BIOS on systems with the Intel Panther Point chipset may or may
+	 * not support xHCI natively. That means that during system resume, it
+	 * may switch the ports back to EHCI so that users can use their
+	 * keyboard to select a kernel from GRUB after resume from hibernate.
+	 *
+	 * The BIOS is supposed to remember whether the OS had xHCI ports
+	 * enabled before resume, and switch the ports back to xHCI when the
+	 * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+	 * writers.
+	 *
+	 * Unconditionally switch the ports back to xHCI after a system resume.
+	 * We can't tell whether the EHCI or xHCI controller will be resumed
+	 * first, so we have to do the port switchover in both drivers. Writing
+	 * a '1' to the port switchover registers should have no effect if the
+	 * port was already switched over.
+	 */
+	if (usb_is_intel_switchable_xhci(pdev))
+		usb_enable_xhci_ports(pdev);
+
 	retval = xhci_resume(xhci, hibernated);
 	return retval;
 }
@@ -167,12 +167,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		next = ring->dequeue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
-	if (ring == xhci->event_ring)
-		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
-	else if (ring == xhci->cmd_ring)
-		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
-	else
-		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -248,12 +242,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		next = ring->enqueue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
-	if (ring == xhci->event_ring)
-		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
-	else if (ring == xhci->cmd_ring)
-		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
-	else
-		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -636,13 +624,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 			}
 		}
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
-		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
 
 		spin_unlock(&xhci->lock);
 		usb_hcd_giveback_urb(hcd, urb, status);
 		xhci_urb_free_priv(xhci, urb_priv);
 		spin_lock(&xhci->lock);
-		xhci_dbg(xhci, "%s URB given back\n", adjective);
 	}
 }
 
@@ -692,6 +678,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
+		ep->stopped_td = NULL;
+		ep->stopped_trb = NULL;
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
@@ -1093,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		complete(&xhci->addr_dev);
 		break;
 	case TRB_TYPE(TRB_DISABLE_SLOT):
-		if (xhci->devs[slot_id])
+		if (xhci->devs[slot_id]) {
+			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+				/* Delete default control endpoint resources */
+				xhci_free_device_endpoint_resources(xhci,
+						xhci->devs[slot_id], true);
 			xhci_free_virt_device(xhci, slot_id);
+		}
 		break;
 	case TRB_TYPE(TRB_CONFIG_EP):
 		virt_dev = xhci->devs[slot_id];
@@ -1630,7 +1623,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 					"without IOC set??\n");
 			*status = -ESHUTDOWN;
 		} else {
-			xhci_dbg(xhci, "Successful control transfer!\n");
 			*status = 0;
 		}
 		break;
@@ -1727,7 +1719,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
 		frame->status = 0;
-		xhci_dbg(xhci, "Successful isoc transfer!\n");
 		break;
 	case COMP_SHORT_TX:
 		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
@@ -1837,12 +1828,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		else
 			*status = 0;
 	} else {
-		if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
-			xhci_dbg(xhci, "Successful bulk "
-					"transfer!\n");
-		else
-			xhci_dbg(xhci, "Successful interrupt "
-					"transfer!\n");
 		*status = 0;
 	}
 	break;
@@ -1856,11 +1841,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		/* Others already handled above */
 		break;
 	}
-	xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
-			"%d bytes untransferred\n",
-			td->urb->ep->desc.bEndpointAddress,
-			td->urb->transfer_buffer_length,
-			TRB_LEN(le32_to_cpu(event->transfer_len)));
+	if (trb_comp_code == COMP_SHORT_TX)
+		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
+				"%d bytes untransferred\n",
+				td->urb->ep->desc.bEndpointAddress,
+				td->urb->transfer_buffer_length,
+				TRB_LEN(le32_to_cpu(event->transfer_len)));
 	/* Fast path - was this the last TRB in the TD for this URB? */
 	if (event_trb == td->last_trb) {
 		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
@@ -1954,7 +1940,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 
 	/* Endpoint ID is 1 based, our index is zero based */
 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
-	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep = &xdev->eps[ep_index];
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2081,6 +2066,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (!event_seg) {
 			if (!ep->skip ||
 			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+				/* Some host controllers give a spurious
+				 * successful event after a short transfer.
+				 * Ignore it.
+				 */
+				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+						ep_ring->last_td_was_short) {
+					ep_ring->last_td_was_short = false;
+					ret = 0;
+					goto cleanup;
+				}
 				/* HC is busted, give up! */
 				xhci_err(xhci,
 					"ERROR Transfer event TRB DMA ptr not "
@@ -2091,6 +2086,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			ret = skip_isoc_td(xhci, td, event, ep, &status);
 			goto cleanup;
 		}
+		if (trb_comp_code == COMP_SHORT_TX)
+			ep_ring->last_td_was_short = true;
+		else
+			ep_ring->last_td_was_short = false;
 
 		if (ep->skip) {
 			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
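The two hunks above form a pair: ep_ring->last_td_was_short remembers whether the previous TD on the ring completed short, and an event whose TRB pointer matches nothing queued is discarded only when that flag is set. A toy model of the handshake, in standalone C for illustration (not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool last_td_was_short;

/* Called when a transfer event matches no queued TRB. */
static int handle_unmatched_event(void)
{
	if (last_td_was_short) {
		last_td_was_short = false;
		return 0;	/* quirk: drop the spurious success event */
	}
	return -1;		/* genuine controller error */
}

int main(void)
{
	last_td_was_short = true;	/* a short transfer just completed */
	printf("unmatched event -> %d\n", handle_unmatched_event());	/* 0 */
	printf("unmatched event -> %d\n", handle_unmatched_event());	/* -1 */
	return 0;
}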
@@ -2149,9 +2148,15 @@ cleanup:
 			xhci_urb_free_priv(xhci, urb_priv);
 
 			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
-			xhci_dbg(xhci, "Giveback URB %p, len = %d, "
-					"status = %d\n",
-					urb, urb->actual_length, status);
+			if ((urb->actual_length != urb->transfer_buffer_length &&
+						(urb->transfer_flags &
+							URB_SHORT_NOT_OK)) ||
+					status != 0)
+				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+						"expected = %x, status = %d\n",
+						urb, urb->actual_length,
+						urb->transfer_buffer_length,
+						status);
 			spin_unlock(&xhci->lock);
 			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
 			spin_lock(&xhci->lock);
@@ -2180,7 +2185,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	int update_ptrs = 1;
 	int ret;
 
-	xhci_dbg(xhci, "In %s\n", __func__);
 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
 		xhci->error_bitmask |= 1 << 1;
 		return 0;
@@ -2193,7 +2197,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 		xhci->error_bitmask |= 1 << 2;
 		return 0;
 	}
-	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
 
 	/*
 	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2203,20 +2206,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	/* FIXME: Handle more event types. */
 	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
 	case TRB_TYPE(TRB_COMPLETION):
-		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
 		handle_cmd_completion(xhci, &event->event_cmd);
-		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
 		break;
 	case TRB_TYPE(TRB_PORT_STATUS):
-		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
 		handle_port_status(xhci, event);
-		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
 		update_ptrs = 0;
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
 		ret = handle_tx_event(xhci, &event->trans_event);
-		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
 		if (ret < 0)
 			xhci->error_bitmask |= 1 << 9;
 		else
@@ -2273,16 +2270,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 		spin_unlock(&xhci->lock);
 		return IRQ_NONE;
 	}
-	xhci_dbg(xhci, "op reg status = %08x\n", status);
-	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
-	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
-			(unsigned long long)
-			xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
-			lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			(unsigned int) le32_to_cpu(trb->link.intr_target),
-			(unsigned int) le32_to_cpu(trb->link.control));
-
 	if (status & STS_FATAL) {
 		xhci_warn(xhci, "WARNING: Host System Error\n");
 		xhci_halt(xhci);
@@ -2397,7 +2384,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
-	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
 	switch (ep_state) {
 	case EP_STATE_DISABLED:
 		/*
@@ -2434,7 +2420,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		struct xhci_ring *ring = ep_ring;
 		union xhci_trb *next;
 
-		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
 		next = ring->enqueue;
 
 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
@@ -1314,8 +1314,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
-	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1401,6 +1403,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return ret;
 	}
 	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
 	last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1578,6 +1582,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 	return ret;
 }
 
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	/* Ignore the slot flag (bit 0), and the default control endpoint flag
+	 * (bit 1). The default control endpoint is added during the Address
+	 * Device command and is never removed until the slot is disabled.
+	 */
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	/* Use hweight32 to count the number of ones in the add flags, or
+	 * number of endpoints added. Don't count endpoints that are changed
+	 * (both added and dropped).
+	 */
+	return hweight32(valid_add_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	return hweight32(valid_drop_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes. We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ * - the first configure endpoint command drops more endpoints than it adds
+ * - a second configure endpoint command that adds more endpoints is queued
+ * - the first configure endpoint command fails, so the config is unchanged
+ * - the second command may succeed, even though there isn't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 added_eps;
+
+	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+		xhci_dbg(xhci, "Not enough ep ctxs: "
+				"%u active, need to add %u, limit is %u.\n",
+				xhci->num_active_eps, added_eps,
+				xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += added_eps;
+	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+			xhci->num_active_eps);
+	return 0;
+}
+
+/*
+ * The configure endpoint was failed by the xHC for some other reason, so we
+ * need to revert the resources that failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_failed_eps;
+
+	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_failed_eps;
+	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+			num_failed_eps,
+			xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_dropped_eps;
+
+	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+				num_dropped_eps,
+				xhci->num_active_eps);
+}
+
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
  */
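The shift-and-popcount arithmetic in the two counting helpers above is easiest to see with concrete flag values. A standalone illustration in plain C (the flag values are hypothetical, and a local popcount stands in for the kernel's hweight32()):

#include <stdio.h>

/* Stand-in for the kernel's hweight32(): count the set bits. */
static unsigned int popcount32(unsigned int x)
{
	unsigned int n = 0;

	for (; x; x &= x - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned int add_flags = 0x1c;	/* contexts 2, 3, 4 marked for add */
	unsigned int drop_flags = 0x18;	/* contexts 3, 4 marked for drop */

	/* Discard the slot flag (bit 0) and default control ep (bit 1). */
	unsigned int valid_add = add_flags >> 2;	/* 0x7 */
	unsigned int valid_drop = drop_flags >> 2;	/* 0x6 */

	/* Endpoints both added and dropped are "changed", not new:
	 * 3 added - 2 changed = 1 genuinely new endpoint. */
	printf("new endpoints: %u\n",
		popcount32(valid_add) - popcount32(valid_add & valid_drop));
	printf("dropped endpoints: %u\n",
		popcount32(valid_drop) - popcount32(valid_add & valid_drop));
	return 0;
}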
@@ -1598,6 +1709,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	virt_dev = xhci->devs[udev->slot_id];
 	if (command) {
 		in_ctx = command->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
+
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
 		command->command_trb = xhci->cmd_ring->enqueue;
@@ -1613,6 +1733,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		in_ctx = virt_dev->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
 		cmd_completion = &virt_dev->cmd_completion;
 		cmd_status = &virt_dev->cmd_status;
 	}
@@ -1627,6 +1755,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	if (ret < 0) {
 		if (command)
 			list_del(&command->cmd_list);
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, in_ctx);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		return -ENOMEM;
@@ -1649,8 +1779,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	}
 
 	if (!ctx_change)
-		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
-	return xhci_evaluate_context_result(xhci, udev, cmd_status);
+		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+	else
+		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* If the command failed, remove the reserved resources.
+		 * Otherwise, clean up the estimate to include dropped eps.
+		 */
+		if (ret)
+			xhci_free_host_resources(xhci, in_ctx);
+		else
+			xhci_finish_resource_reservation(xhci, in_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	return ret;
 }
 
 /* Called after one or more calls to xhci_add_endpoint() or
@@ -1676,6 +1820,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
@@ -2265,6 +2411,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
 	return 0;
 }
 
+/*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command. The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+	int i;
+	unsigned int num_dropped_eps = 0;
+	unsigned int drop_flags = 0;
+
+	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+		if (virt_dev->eps[i].ring) {
+			drop_flags |= 1 << i;
+			num_dropped_eps++;
+		}
+	}
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
+				"%u now active.\n",
+				num_dropped_eps, drop_flags,
+				xhci->num_active_eps);
+}
+
 /*
  * This submits a Reset Device Command, which will set the device state to 0,
  * set the device address to 0, and disable all the endpoints except the default
@@ -2406,6 +2580,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 		goto command_cleanup;
 	}
 
+	/* Free up host controller endpoint resources */
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* Don't delete the default control endpoint resources */
+		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+
 	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
 	last_freed_endpoint = 1;
 	for (i = 1; i < 31; ++i) {
@@ -2478,6 +2660,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	 */
 }
 
+/*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+		xhci_dbg(xhci, "Not enough ep ctxs: "
+				"%u active, need to add 1, limit is %u.\n",
+				xhci->num_active_eps, xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += 1;
+	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+			xhci->num_active_eps);
+	return 0;
+}
+
+
 /*
  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
  * timed out, or allocating memory failed. Returns 1 on success.
@@ -2513,24 +2716,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		xhci_err(xhci, "Error while assigning device slot ID\n");
 		return 0;
 	}
-	/* xhci_alloc_virt_device() does not touch rings; no need to lock.
-	 * Use GFP_NOIO, since this function can be called from
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		ret = xhci_reserve_host_control_ep_resources(xhci);
+		if (ret) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			goto disable_slot;
+		}
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	/* Use GFP_NOIO, since this function can be called from
 	 * xhci_discover_or_reset_device(), which may be called as part of
 	 * mass storage driver error handling.
 	 */
 	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
-		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
-		spin_lock_irqsave(&xhci->lock, flags);
-		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
-			xhci_ring_cmd_db(xhci);
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		return 0;
+		goto disable_slot;
 	}
 	udev->slot_id = xhci->slot_id;
 	/* Is this a LS or FS device under a HS hub? */
 	/* Hub or peripherial? */
 	return 1;
+
+disable_slot:
+	/* Disable slot, if we can do it without mem alloc */
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+		xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return 0;
 }
 
 /*
@@ -1123,6 +1123,7 @@ struct xhci_ring
 	 */
 	u32 cycle_state;
 	unsigned int stream_id;
+	bool last_td_was_short;
 };
 
 struct xhci_erst_entry {
@@ -1290,6 +1291,19 @@ struct xhci_hcd
 #define XHCI_RESET_EP_QUIRK (1 << 1)
 #define XHCI_NEC_HOST (1 << 2)
 #define XHCI_AMD_PLL_FIX (1 << 3)
+#define XHCI_SPURIOUS_SUCCESS (1 << 4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle. Ideally, they would signal that they can't handle
+ * anymore endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't. Instead they expect software to keep track
+ * of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK (1 << 5)
+	unsigned int num_active_eps;
+	unsigned int limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
 	struct xhci_bus_state bus_state[2];
 	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
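The comment block above describes bookkeeping rather than hardware: with XHCI_EP_LIMIT_QUIRK set, the driver itself enforces limit_active_eps (64 on Panther Point, per the xhci-pci.c hunk earlier), reserving added endpoints up front and settling dropped ones only after the command succeeds. A toy model of that reserve-then-settle sequence, in standalone C for illustration (not kernel code):

#include <stdio.h>

static unsigned int num_active;
static const unsigned int limit = 64;	/* xhci->limit_active_eps */

/* Reserve added endpoints before the configure command completes. */
static int reserve(unsigned int added)
{
	if (num_active + added > limit)
		return -1;	/* would oversubscribe the host */
	num_active += added;
	return 0;
}

int main(void)
{
	num_active = 62;
	if (reserve(3))
		printf("rejected: %u active, limit %u\n", num_active, limit);
	if (!reserve(2)) {
		/* Command succeeded and also dropped one endpoint, so the
		 * estimate is cleaned up only now. */
		num_active -= 1;
		printf("now %u active\n", num_active);
	}
	return 0;
}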
@@ -1338,9 +1352,6 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 static inline void xhci_writel(struct xhci_hcd *xhci,
 		const unsigned int val, __le32 __iomem *regs)
 {
-	xhci_dbg(xhci,
-		"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
-		regs, val);
 	writel(val, regs);
 }
 
@@ -1368,9 +1379,6 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
 	u32 val_lo = lower_32_bits(val);
 	u32 val_hi = upper_32_bits(val);
 
-	xhci_dbg(xhci,
-		"`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
-		regs, (long unsigned int) val);
 	writel(val_lo, ptr);
 	writel(val_hi, ptr + 1);
 }
@@ -1439,6 +1447,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
 		struct xhci_ep_ctx *ep_ctx,
 		struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev, bool drop_control_ep);
 struct xhci_ring *xhci_dma_to_transfer_ring(
 		struct xhci_virt_ep *ep,
 		u64 address);
@@ -2483,6 +2483,7 @@
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310