USB: xhci: Support for 64-byte contexts
Adds support for controllers that use 64-byte contexts. The following context data structures are affected: Device, Input, Input Control, Endpoint, and Slot. To accommodate either 32- or 64-byte contexts, a Device or Input context can now only be accessed through functions that look up and return pointers to the contexts it contains.

Signed-off-by: John Youn <johnyoun@synopsys.com>
Acked-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit d115b04818 (parent 28c2d2efb4)
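For orientation, the sketch below summarizes the offset arithmetic the new lookup helpers rely on: context entries are packed back-to-back with a stride of 32 or 64 bytes depending on the controller's CSZ capability, and an Input context carries one extra leading entry for the Input Control context. This is an illustrative summary of the helpers added in the patch below, not additional code from it; HCC_64BYTE_CONTEXT() and the XHCI_CTX_TYPE_* values are introduced by the patch, while ep_ctx_offset() is a made-up name used only for illustration.

/*
 * Illustrative only: byte offset of endpoint context 'ep_index' inside a
 * container context.  The real helper (xhci_get_ep_ctx() in the patch)
 * performs the same arithmetic on ctx->bytes.
 */
static unsigned int ep_ctx_offset(u32 hcc_params, unsigned int ctx_type,
        unsigned int ep_index)
{
    /* Each context entry is 32 bytes, or 64 when the CSZ capability is set. */
    unsigned int stride = HCC_64BYTE_CONTEXT(hcc_params) ? 64 : 32;
    /* Entry 0 of a Device context is the Slot context, so endpoint N is entry N + 1. */
    unsigned int entry = ep_index + 1;

    /* An Input context has one extra leading entry: the Input Control context. */
    if (ctx_type == XHCI_CTX_TYPE_INPUT)
        entry++;
    return entry * stride;
}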
@@ -393,103 +393,138 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
         upper_32_bits(val));
 }
 
-dma_addr_t xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_slot_ctx *slot, dma_addr_t dma)
+/* Print the last 32 bytes for 64-byte contexts */
+static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
+{
+    int i;
+    for (i = 0; i < 4; ++i) {
+        xhci_dbg(xhci, "@%p (virt) @%08llx "
+                "(dma) %#08llx - rsvd64[%d]\n",
+                &ctx[4 + i], (unsigned long long)dma,
+                ctx[4 + i], i);
+        dma += 8;
+    }
+}
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
     /* Fields are 32 bits wide, DMA addresses are in bytes */
     int field_size = 32 / 8;
     int i;
 
+    struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+    dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
+    int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
     xhci_dbg(xhci, "Slot Context:\n");
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
-            &slot->dev_info,
-            (unsigned long long)dma, slot->dev_info);
+            &slot_ctx->dev_info,
+            (unsigned long long)dma, slot_ctx->dev_info);
     dma += field_size;
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
-            &slot->dev_info2,
-            (unsigned long long)dma, slot->dev_info2);
+            &slot_ctx->dev_info2,
+            (unsigned long long)dma, slot_ctx->dev_info2);
     dma += field_size;
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
-            &slot->tt_info,
-            (unsigned long long)dma, slot->tt_info);
+            &slot_ctx->tt_info,
+            (unsigned long long)dma, slot_ctx->tt_info);
     dma += field_size;
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
-            &slot->dev_state,
-            (unsigned long long)dma, slot->dev_state);
+            &slot_ctx->dev_state,
+            (unsigned long long)dma, slot_ctx->dev_state);
     dma += field_size;
     for (i = 0; i < 4; ++i) {
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                &slot->reserved[i], (unsigned long long)dma,
-                slot->reserved[i], i);
+                &slot_ctx->reserved[i], (unsigned long long)dma,
+                slot_ctx->reserved[i], i);
         dma += field_size;
     }
 
-    return dma;
+    if (csz)
+        dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-dma_addr_t xhci_dbg_ep_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep, dma_addr_t dma, unsigned int last_ep)
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx,
+        unsigned int last_ep)
 {
     int i, j;
     int last_ep_ctx = 31;
     /* Fields are 32 bits wide, DMA addresses are in bytes */
     int field_size = 32 / 8;
+    int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
 
     if (last_ep < 31)
         last_ep_ctx = last_ep + 1;
     for (i = 0; i < last_ep_ctx; ++i) {
+        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+        dma_addr_t dma = ctx->dma +
+            ((unsigned long)ep_ctx - (unsigned long)ctx);
+
         xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
-                &ep[i].ep_info,
-                (unsigned long long)dma, ep[i].ep_info);
+                &ep_ctx->ep_info,
+                (unsigned long long)dma, ep_ctx->ep_info);
         dma += field_size;
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
-                &ep[i].ep_info2,
-                (unsigned long long)dma, ep[i].ep_info2);
+                &ep_ctx->ep_info2,
+                (unsigned long long)dma, ep_ctx->ep_info2);
         dma += field_size;
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
-                &ep[i].deq,
-                (unsigned long long)dma, ep[i].deq);
+                &ep_ctx->deq,
+                (unsigned long long)dma, ep_ctx->deq);
         dma += 2*field_size;
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
-                &ep[i].tx_info,
-                (unsigned long long)dma, ep[i].tx_info);
+                &ep_ctx->tx_info,
+                (unsigned long long)dma, ep_ctx->tx_info);
         dma += field_size;
         for (j = 0; j < 3; ++j) {
             xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                    &ep[i].reserved[j],
+                    &ep_ctx->reserved[j],
                     (unsigned long long)dma,
-                    ep[i].reserved[j], j);
+                    ep_ctx->reserved[j], j);
             dma += field_size;
         }
 
+        if (csz)
+            dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
     }
-    return dma;
 }
 
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+void xhci_dbg_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx,
+        unsigned int last_ep)
 {
     int i;
     /* Fields are 32 bits wide, DMA addresses are in bytes */
     int field_size = 32 / 8;
+    struct xhci_slot_ctx *slot_ctx;
+    dma_addr_t dma = ctx->dma;
+    int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
 
-    xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
-            &ctx->drop_flags, (unsigned long long)dma,
-            ctx->drop_flags);
-    dma += field_size;
-    xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
-            &ctx->add_flags, (unsigned long long)dma,
-            ctx->add_flags);
-    dma += field_size;
-    for (i = 0; i < 6; ++i) {
-        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                &ctx->rsvd[i], (unsigned long long)dma,
-                ctx->rsvd[i], i);
-        dma += field_size;
-    }
-
-    dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma);
-    dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep);
-}
-
-void xhci_dbg_device_ctx(struct xhci_hcd *xhci, struct xhci_device_ctx *ctx, dma_addr_t dma, unsigned int last_ep)
-{
-    dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma);
-    dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep);
-}
+    if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+        struct xhci_input_control_ctx *ctrl_ctx =
+            xhci_get_input_control_ctx(xhci, ctx);
+        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+                &ctrl_ctx->drop_flags, (unsigned long long)dma,
+                ctrl_ctx->drop_flags);
+        dma += field_size;
+        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+                &ctrl_ctx->add_flags, (unsigned long long)dma,
+                ctrl_ctx->add_flags);
+        dma += field_size;
+        for (i = 0; i < 6; ++i) {
+            xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+                    &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+                    ctrl_ctx->rsvd2[i], i);
+            dma += field_size;
+        }
+
+        if (csz)
+            dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
+    }
+
+    slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+    xhci_dbg_slot_ctx(xhci, ctx);
+    xhci_dbg_ep_ctx(xhci, ctx, last_ep);
+}
@@ -722,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         struct usb_host_endpoint *ep)
 {
     struct xhci_hcd *xhci;
-    struct xhci_device_control *in_ctx;
+    struct xhci_container_ctx *in_ctx, *out_ctx;
+    struct xhci_input_control_ctx *ctrl_ctx;
+    struct xhci_slot_ctx *slot_ctx;
     unsigned int last_ctx;
     unsigned int ep_index;
     struct xhci_ep_ctx *ep_ctx;
@@ -750,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
     }
 
     in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+    out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
     /* If the HC already knows the endpoint is disabled,
      * or the HCD has noted it is disabled, ignore this request
      */
     if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-            in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+            ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
         xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                 __func__, ep);
         return 0;
     }
 
-    in_ctx->drop_flags |= drop_flag;
-    new_drop_flags = in_ctx->drop_flags;
+    ctrl_ctx->drop_flags |= drop_flag;
+    new_drop_flags = ctrl_ctx->drop_flags;
 
-    in_ctx->add_flags = ~drop_flag;
-    new_add_flags = in_ctx->add_flags;
+    ctrl_ctx->add_flags = ~drop_flag;
+    new_add_flags = ctrl_ctx->add_flags;
 
-    last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+    last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+    slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
     /* Update the last valid endpoint context, if we deleted the last one */
-    if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-        in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-        in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+    if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+        slot_ctx->dev_info &= ~LAST_CTX_MASK;
+        slot_ctx->dev_info |= LAST_CTX(last_ctx);
     }
-    new_slot_info = in_ctx->slot.dev_info;
+    new_slot_info = slot_ctx->dev_info;
 
     xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
@@ -804,9 +809,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         struct usb_host_endpoint *ep)
 {
     struct xhci_hcd *xhci;
-    struct xhci_device_control *in_ctx;
+    struct xhci_container_ctx *in_ctx, *out_ctx;
     unsigned int ep_index;
     struct xhci_ep_ctx *ep_ctx;
+    struct xhci_slot_ctx *slot_ctx;
+    struct xhci_input_control_ctx *ctrl_ctx;
     u32 added_ctxs;
     unsigned int last_ctx;
     u32 new_add_flags, new_drop_flags, new_slot_info;
@@ -839,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
     }
 
     in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+    out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
     /* If the HCD has already noted the endpoint is enabled,
      * ignore this request.
     */
-    if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+    if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
         xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
                 __func__, ep);
         return 0;
@@ -862,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         return -ENOMEM;
     }
 
-    in_ctx->add_flags |= added_ctxs;
-    new_add_flags = in_ctx->add_flags;
+    ctrl_ctx->add_flags |= added_ctxs;
+    new_add_flags = ctrl_ctx->add_flags;
 
     /* If xhci_endpoint_disable() was called for this endpoint, but the
      * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -871,14 +880,15 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
      * descriptors. We must drop and re-add this endpoint, so we leave the
      * drop flags alone.
      */
-    new_drop_flags = in_ctx->drop_flags;
+    new_drop_flags = ctrl_ctx->drop_flags;
 
+    slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
     /* Update the last valid endpoint context, if we just added one past */
-    if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-        in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-        in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+    if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+        slot_ctx->dev_info &= ~LAST_CTX_MASK;
+        slot_ctx->dev_info |= LAST_CTX(last_ctx);
     }
-    new_slot_info = in_ctx->slot.dev_info;
+    new_slot_info = slot_ctx->dev_info;
 
     /* Store the usb_device pointer for later use */
     ep->hcpriv = udev;
@@ -892,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
     return 0;
 }
 
-static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
 {
+    struct xhci_input_control_ctx *ctrl_ctx;
     struct xhci_ep_ctx *ep_ctx;
+    struct xhci_slot_ctx *slot_ctx;
     int i;
 
     /* When a device's add flag and drop flag are zero, any subsequent
@@ -902,13 +914,15 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
     * untouched. Make sure we don't leave any old state in the input
     * endpoint contexts.
     */
-    virt_dev->in_ctx->drop_flags = 0;
-    virt_dev->in_ctx->add_flags = 0;
-    virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+    ctrl_ctx->drop_flags = 0;
+    ctrl_ctx->add_flags = 0;
+    slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+    slot_ctx->dev_info &= ~LAST_CTX_MASK;
     /* Endpoint 0 is always valid */
-    virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+    slot_ctx->dev_info |= LAST_CTX(1);
     for (i = 1; i < 31; ++i) {
-        ep_ctx = &virt_dev->in_ctx->ep[i];
+        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
         ep_ctx->ep_info = 0;
         ep_ctx->ep_info2 = 0;
         ep_ctx->deq = 0;
@@ -934,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     unsigned long flags;
     struct xhci_hcd *xhci;
     struct xhci_virt_device *virt_dev;
+    struct xhci_input_control_ctx *ctrl_ctx;
+    struct xhci_slot_ctx *slot_ctx;
 
     ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
     if (ret <= 0)
@@ -949,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     virt_dev = xhci->devs[udev->slot_id];
 
     /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
-    virt_dev->in_ctx->add_flags |= SLOT_FLAG;
-    virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
-    virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
-    virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+    ctrl_ctx->add_flags |= SLOT_FLAG;
+    ctrl_ctx->add_flags &= ~EP0_FLAG;
+    ctrl_ctx->drop_flags &= ~SLOT_FLAG;
+    ctrl_ctx->drop_flags &= ~EP0_FLAG;
     xhci_dbg(xhci, "New Input Control Context:\n");
-    xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
-            LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+    slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+    xhci_dbg_ctx(xhci, virt_dev->in_ctx,
+            LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
     spin_lock_irqsave(&xhci->lock, flags);
-    ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+    ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
             udev->slot_id);
     if (ret < 0) {
         spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1013,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     }
 
     xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
-    xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
-            LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+    xhci_dbg_ctx(xhci, virt_dev->out_ctx,
+            LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
-    xhci_zero_in_ctx(virt_dev);
+    xhci_zero_in_ctx(xhci, virt_dev);
     /* Free any old rings */
     for (i = 1; i < 31; ++i) {
         if (virt_dev->new_ep_rings[i]) {
@@ -1054,7 +1072,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
             virt_dev->new_ep_rings[i] = NULL;
         }
     }
-    xhci_zero_in_ctx(virt_dev);
+    xhci_zero_in_ctx(xhci, virt_dev);
 }
 
 /* Deal with stalled endpoints. The core should have sent the control message
@@ -1187,6 +1205,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
     struct xhci_virt_device *virt_dev;
     int ret = 0;
     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+    struct xhci_slot_ctx *slot_ctx;
+    struct xhci_input_control_ctx *ctrl_ctx;
     u64 temp_64;
 
     if (!udev->slot_id) {
@@ -1201,11 +1221,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
         xhci_setup_addressable_virt_dev(xhci, udev);
     /* Otherwise, assume the core has the device configured how it wants */
     xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-    xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+    xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
     spin_lock_irqsave(&xhci->lock, flags);
-    ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+    ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
             udev->slot_id);
     if (ret) {
         spin_unlock_irqrestore(&xhci->lock, flags);
         xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
@@ -1246,7 +1266,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
         xhci_err(xhci, "ERROR: unexpected command completion "
                 "code 0x%x.\n", virt_dev->cmd_status);
         xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-        xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+        xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
         ret = -EINVAL;
         break;
     }
@@ -1261,19 +1281,21 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
             (unsigned long long)
             xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
     xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
-            (unsigned long long)virt_dev->out_ctx_dma);
+            (unsigned long long)virt_dev->out_ctx->dma);
     xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-    xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+    xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
     xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-    xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+    xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
     /*
      * USB core uses address 1 for the roothubs, so we add one to the
      * address given back to us by the HC.
     */
-    udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+    slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+    udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
     /* Zero the input context control for later use */
-    virt_dev->in_ctx->add_flags = 0;
-    virt_dev->in_ctx->drop_flags = 0;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+    ctrl_ctx->add_flags = 0;
+    ctrl_ctx->drop_flags = 0;
 
     xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
     /* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1315,7 +1337,6 @@ static int __init xhci_hcd_init(void)
     /* xhci_device_control has eight fields, and also
     * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
     */
-    BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
     BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
     BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
     BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
@@ -189,6 +189,63 @@ fail:
     return 0;
 }
 
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+        int type, gfp_t flags)
+{
+    struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+    if (!ctx)
+        return NULL;
+
+    BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
+    ctx->type = type;
+    ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+    if (type == XHCI_CTX_TYPE_INPUT)
+        ctx->size += CTX_SIZE(xhci->hcc_params);
+
+    ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+    memset(ctx->bytes, 0, ctx->size);
+    return ctx;
+}
+
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx)
+{
+    dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+    kfree(ctx);
+}
+
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx)
+{
+    BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+    return (struct xhci_input_control_ctx *)ctx->bytes;
+}
+
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx)
+{
+    if (ctx->type == XHCI_CTX_TYPE_DEVICE)
+        return (struct xhci_slot_ctx *)ctx->bytes;
+
+    return (struct xhci_slot_ctx *)
+        (ctx->bytes + CTX_SIZE(xhci->hcc_params));
+}
+
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx,
+        unsigned int ep_index)
+{
+    /* increment ep index by offset of start of ep ctx array */
+    ep_index++;
+    if (ctx->type == XHCI_CTX_TYPE_INPUT)
+        ep_index++;
+
+    return (struct xhci_ep_ctx *)
+        (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
+}
+
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
@@ -209,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
             xhci_ring_free(xhci, dev->ep_rings[i]);
 
     if (dev->in_ctx)
-        dma_pool_free(xhci->device_pool,
-                dev->in_ctx, dev->in_ctx_dma);
+        xhci_free_container_ctx(xhci, dev->in_ctx);
     if (dev->out_ctx)
-        dma_pool_free(xhci->device_pool,
-                dev->out_ctx, dev->out_ctx_dma);
+        xhci_free_container_ctx(xhci, dev->out_ctx);
     kfree(xhci->devs[slot_id]);
     xhci->devs[slot_id] = 0;
 }
@@ -221,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         struct usb_device *udev, gfp_t flags)
 {
-    dma_addr_t dma;
     struct xhci_virt_device *dev;
 
     /* Slot ID 0 is reserved */
@@ -235,26 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         return 0;
     dev = xhci->devs[slot_id];
 
-    /* Allocate the (output) device context that will be used in the HC.
-     * The structure is 32 bytes smaller than the input context, but that's
-     * fine.
-     */
-    dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+    /* Allocate the (output) device context that will be used in the HC. */
+    dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
     if (!dev->out_ctx)
         goto fail;
-    dev->out_ctx_dma = dma;
     xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
-            (unsigned long long)dma);
-    memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+            (unsigned long long)dev->out_ctx->dma);
 
     /* Allocate the (input) device context for address device command */
-    dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+    dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
     if (!dev->in_ctx)
         goto fail;
-    dev->in_ctx_dma = dma;
     xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
-            (unsigned long long)dma);
-    memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+            (unsigned long long)dev->in_ctx->dma);
 
     /* Allocate endpoint 0 ring */
     dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -264,7 +314,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
     init_completion(&dev->cmd_completion);
 
     /* Point to output device context in dcbaa. */
-    xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx_dma;
+    xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
     xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
             slot_id,
             &xhci->dcbaa->dev_context_ptrs[slot_id],
@@ -282,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
     struct xhci_virt_device *dev;
     struct xhci_ep_ctx *ep0_ctx;
     struct usb_device *top_dev;
+    struct xhci_slot_ctx *slot_ctx;
+    struct xhci_input_control_ctx *ctrl_ctx;
 
     dev = xhci->devs[udev->slot_id];
     /* Slot ID 0 is reserved */
@@ -290,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
                 udev->slot_id);
         return -EINVAL;
     }
-    ep0_ctx = &dev->in_ctx->ep[0];
+    ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
+    slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
     /* 2) New slot context and endpoint 0 context are valid*/
-    dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+    ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
 
     /* 3) Only the control endpoint is valid - one endpoint context */
-    dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+    slot_ctx->dev_info |= LAST_CTX(1);
 
     switch (udev->speed) {
     case USB_SPEED_SUPER:
-        dev->in_ctx->slot.dev_info |= (u32) udev->route;
-        dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+        slot_ctx->dev_info |= (u32) udev->route;
+        slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
         break;
     case USB_SPEED_HIGH:
-        dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+        slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
        break;
     case USB_SPEED_FULL:
-        dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+        slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
         break;
     case USB_SPEED_LOW:
-        dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+        slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
         break;
     case USB_SPEED_VARIABLE:
         xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -324,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
     for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
             top_dev = top_dev->parent)
         /* Found device below root hub */;
-    dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+    slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
     xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
 
     /* Is this a LS/FS device under a HS hub? */
@@ -334,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
     */
     if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
             udev->tt) {
-        dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
-        dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+        slot_ctx->tt_info = udev->tt->hub->slot_id;
+        slot_ctx->tt_info |= udev->ttport << 8;
     }
     xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
     xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -466,7 +520,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
     unsigned int max_burst;
 
     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
     /* Set up the endpoint ring */
     virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
@@ -533,7 +587,7 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
     struct xhci_ep_ctx *ep_ctx;
 
     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
     ep_ctx->ep_info = 0;
     ep_ctx->ep_info2 = 0;
@@ -753,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
     */
    xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
            SEGMENT_SIZE, 64, xhci->page_size);
 
    /* See Table 46 and Note on Figure 55 */
-   /* FIXME support 64-byte contexts */
    xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
-           sizeof(struct xhci_device_control),
-           64, xhci->page_size);
+           2112, 64, xhci->page_size);
    if (!xhci->segment_pool || !xhci->device_pool)
        goto fail;
@@ -362,6 +362,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
    struct xhci_virt_device *dev = xhci->devs[slot_id];
    struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
    struct xhci_generic_trb *trb;
+   struct xhci_ep_ctx *ep_ctx;
 
    state->new_cycle_state = 0;
    state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -370,7 +371,8 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
    if (!state->new_deq_seg)
        BUG();
    /* Dig out the cycle state saved by the xHC during the stop ep cmd */
-   state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq;
+   ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+   state->new_cycle_state = 0x1 & ep_ctx->deq;
 
    state->new_deq_ptr = cur_td->last_trb;
    state->new_deq_seg = find_trb_seg(state->new_deq_seg,
@@ -570,11 +572,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
    unsigned int ep_index;
    struct xhci_ring *ep_ring;
    struct xhci_virt_device *dev;
+   struct xhci_ep_ctx *ep_ctx;
+   struct xhci_slot_ctx *slot_ctx;
 
    slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
    ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
    dev = xhci->devs[slot_id];
    ep_ring = dev->ep_rings[ep_index];
+   ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+   slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
    if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
        unsigned int ep_state;
@@ -588,9 +594,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
        case COMP_CTX_STATE:
            xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
                    "to incorrect slot or ep state.\n");
-           ep_state = dev->out_ctx->ep[ep_index].ep_info;
+           ep_state = ep_ctx->ep_info;
            ep_state &= EP_STATE_MASK;
-           slot_state = dev->out_ctx->slot.dev_state;
+           slot_state = slot_ctx->dev_state;
            slot_state = GET_SLOT_STATE(slot_state);
            xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
                    slot_state, ep_state);
@@ -613,7 +619,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
        */
    } else {
        xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
-               dev->out_ctx->ep[ep_index].deq);
+               ep_ctx->deq);
    }
 
    ep_ring->state &= ~SET_DEQ_PENDING;
@@ -795,6 +801,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
    union xhci_trb *event_trb;
    struct urb *urb = 0;
    int status = -EINPROGRESS;
+   struct xhci_ep_ctx *ep_ctx;
 
    xhci_dbg(xhci, "In %s\n", __func__);
    xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
@@ -807,7 +814,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
    ep_index = TRB_TO_EP_ID(event->flags) - 1;
    xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
    ep_ring = xdev->ep_rings[ep_index];
-   if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+   ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+   if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
        xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
        return -ENODEV;
    }
@@ -1193,9 +1201,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
        gfp_t mem_flags)
 {
    int ret;
+   struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
    ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
-           xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+           ep_ctx->ep_info & EP_STATE_MASK,
            num_trbs, mem_flags);
    if (ret)
        return ret;
@@ -446,6 +446,27 @@ struct xhci_doorbell_array {
 #define EPI_TO_DB(p) (((p) + 1) & 0xff)
 
+
+/**
+ * struct xhci_container_ctx
+ * @type: Type of context.  Used to calculated offsets to contained contexts.
+ * @size: Size of the context data
+ * @bytes: The raw context data given to HW
+ * @dma: dma address of the bytes
+ *
+ * Represents either a Device or Input context.  Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct xhci_container_ctx {
+    unsigned type;
+#define XHCI_CTX_TYPE_DEVICE  0x1
+#define XHCI_CTX_TYPE_INPUT   0x2
+
+    int size;
+
+    u8 *bytes;
+    dma_addr_t dma;
+};
+
 /**
  * struct xhci_slot_ctx
  * @dev_info: Route string, device speed, hub info, and last valid endpoint
@@ -583,32 +604,16 @@ struct xhci_ep_ctx {
 
 
 /**
- * struct xhci_device_control
- * Input context; see section 6.2.5.
+ * struct xhci_input_control_context
+ * Input control context; see section 6.2.5.
  *
  * @drop_context: set the bit of the endpoint context you want to disable
  * @add_context: set the bit of the endpoint context you want to enable
 */
-struct xhci_device_control {
-    /* Input control context */
+struct xhci_input_control_ctx {
     u32 drop_flags;
    u32 add_flags;
-    u32 rsvd[6];
-    /* Copy of device context */
-    struct xhci_slot_ctx slot;
-    struct xhci_ep_ctx ep[31];
-};
-
-/**
- * struct xhci_device_ctx
- * Device context; see section 6.2.1.
- *
- * @slot: slot context for the device.
- * @ep: array of endpoint contexts for the device.
- */
-struct xhci_device_ctx {
-    struct xhci_slot_ctx slot;
-    struct xhci_ep_ctx ep[31];
+    u32 rsvd2[6];
 };
 
 /* drop context bitmasks */
@@ -616,7 +621,6 @@ struct xhci_device_ctx {
 /* add context bitmasks */
 #define ADD_EP(x) (0x1 << x)
 
-
 struct xhci_virt_device {
    /*
     * Commands to the hardware are passed an "input context" that
@@ -626,11 +630,10 @@ struct xhci_virt_device {
     * track of input and output contexts separately because
     * these commands might fail and we don't trust the hardware.
     */
-    struct xhci_device_ctx *out_ctx;
-    dma_addr_t out_ctx_dma;
+    struct xhci_container_ctx *out_ctx;
    /* Used for addressing devices and configuration changes */
-    struct xhci_device_control *in_ctx;
-    dma_addr_t in_ctx_dma;
+    struct xhci_container_ctx *in_ctx;
    /* FIXME when stream support is added */
    struct xhci_ring *ep_rings[31];
    /* Temporary storage in case the configure endpoint command fails and we
@@ -1139,8 +1142,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
-void xhci_dbg_device_ctx(struct xhci_hcd *xhci, struct xhci_device_ctx *ctx, dma_addr_t dma, unsigned int last_ep);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
 
 /* xHCI memory managment */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1207,4 +1209,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
        char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 
+/* xHCI contexts */
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+
 #endif /* __LINUX_XHCI_HCD_H */
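As a usage note, the call sites above all follow the same pattern once the container-context helpers exist. The sketch below is illustrative only (a condensed paraphrase of xhci_add_endpoint(), with locking, error handling, and the surrounding function arguments assumed); it is not additional code from this commit.

/* Illustrative only: the accessor pattern used by xhci_add_endpoint() above. */
static void example_enable_ep(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
        struct usb_host_endpoint *ep, unsigned int ep_index, unsigned int last_ctx)
{
    struct xhci_container_ctx *in_ctx = virt_dev->in_ctx;
    struct xhci_input_control_ctx *ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
    struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
    struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

    /* Flag the endpoint as added in the input control context. */
    ctrl_ctx->add_flags |= xhci_get_endpoint_flag(&ep->desc);
    /* Keep the "last valid endpoint context" field of the slot context current. */
    slot_ctx->dev_info &= ~LAST_CTX_MASK;
    slot_ctx->dev_info |= LAST_CTX(last_ctx);
    /* ep_ctx is then filled in (by xhci_endpoint_init() in the real driver),
     * and in_ctx->dma is what gets queued to the controller. */
    (void)ep_ctx;
}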