Merge branch 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6

* 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (42 commits)
  usb: gadget: composite: avoid access beyond array max length
  USB: serial: handle Data Carrier Detect changes
  USB: gadget: Fix endpoint representation in ci13xxx_udc
  USB: gadget: Fix error path in ci13xxx_udc gadget probe function
  usb: pch_udc: Fix the warning log issue at gadget driver remove
  USB: serial: Updated support for ICOM devices
  USB: ehci-mxc: add work-around for efika mx/sb bug
  USB: unbreak ehci-mxc on otg port of i.MX27
  drivers: update to pl2303 usb-serial to support Motorola cables
  USB: adding USB support for Cinterion's HC2x, EU3 and PH8 products
  USB serial: add missing .usb_driver field in serial drivers
  USB: ehci-fsl: Fix 'have_sysif_regs' detection
  USB: g_printer: fix bug in module parameter definitions
  USB: g_printer: fix bug in unregistration
  USB: uss720: remove duplicate USB device
  MAINTAINERS: add ueagle-atm entry
  USB: EHCI: fix DMA deallocation bug
  USB: pch_udc: support new device ML7213 IOH
  usb: pch_udc: Fixed issue which does not work with g_serial
  usb: set ep_dev async suspend should be later than device_initialize
  ...
Committed by Linus Torvalds on 2011-02-01 08:07:40 +10:00
48 changed files with 567 additions and 380 deletions


@@ -3139,6 +3139,12 @@ S: Maintained
F: net/ieee802154/
F: drivers/ieee802154/
IKANOS/ADI EAGLE ADSL USB DRIVER
M: Matthieu Castet <castet.matthieu@free.fr>
M: Stanislaw Gruszka <stf_xl@wp.pl>
S: Maintained
F: drivers/usb/atm/ueagle-atm.c
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar <zohar@us.ibm.com>
S: Supported


@@ -342,7 +342,7 @@ static ssize_t wdm_write
goto outnp;
}
if (!file->f_flags && O_NONBLOCK)
if (!(file->f_flags & O_NONBLOCK))
r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
&desc->flags));
else
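
The hunk above swaps a logical test for a bitmask test: the old condition parses as (!file->f_flags) && O_NONBLOCK, which only asks whether no open flags are set at all, so a writer that opened the device in blocking mode would still skip the wait. The new form isolates the O_NONBLOCK bit. A minimal userspace sketch (not driver code, illustrative flag values only) contrasting the two:

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int cases[] = { O_RDWR, O_RDWR | O_NONBLOCK };

	for (int i = 0; i < 2; i++) {
		int flags = cases[i];
		printf("flags=%#x  buggy(block?)=%d  fixed(block?)=%d\n",
		       flags,
		       !flags && O_NONBLOCK,     /* parses as (!flags) && O_NONBLOCK */
		       !(flags & O_NONBLOCK));   /* true only when O_NONBLOCK is clear */
	}
	return 0;
}

For the plain O_RDWR case the buggy test reports 0 (never wait), while the fixed test correctly reports 1.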


@@ -192,12 +192,12 @@ int usb_create_ep_devs(struct device *parent,
ep_dev->dev.parent = parent;
ep_dev->dev.release = ep_device_release;
dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
device_enable_async_suspend(&ep_dev->dev);
retval = device_register(&ep_dev->dev);
if (retval)
goto error_register;
device_enable_async_suspend(&ep_dev->dev);
endpoint->ep_dev = ep_dev;
return retval;


@@ -405,7 +405,12 @@ static int suspend_common(struct device *dev, bool do_wakeup)
return retval;
}
synchronize_irq(pci_dev->irq);
/* If MSI-X is enabled, the driver will have synchronized all vectors
* in pci_suspend(). If MSI or legacy PCI is enabled, that will be
* synchronized here.
*/
if (!hcd->msix_enabled)
synchronize_irq(pci_dev->irq);
/* Downstream ports from this root hub should already be quiesced, so
* there will be no DMA activity. Now we can shut down the upstream
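
The hunk above makes the legacy synchronize_irq() conditional on !hcd->msix_enabled: with MSI-X, each vector is synchronized in the controller's own suspend path (see the xhci_suspend() change later in this diff), and pci_dev->irq is not the vector actually in use. A hedged sketch of the combined pattern; the helper name is hypothetical and the msix_entries/msix_count parameters mirror the xHCI fields used below:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/usb/hcd.h>

/* Hypothetical helper: quiesce host-controller interrupts before cutting
 * power.  Per-vector synchronization when MSI-X is in use, single-IRQ
 * synchronization for MSI or legacy PCI interrupts. */
static void quiesce_hc_irqs(struct usb_hcd *hcd, struct pci_dev *pci_dev,
			    struct msix_entry *msix_entries, int msix_count)
{
	int i;

	if (hcd->msix_enabled) {
		for (i = 0; i < msix_count; i++)
			synchronize_irq(msix_entries[i].vector);
	} else {
		synchronize_irq(pci_dev->irq);
	}
}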


@@ -676,6 +676,8 @@ static void hub_init_func3(struct work_struct *ws);
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd;
int ret;
int port1;
int status;
bool need_debounce_delay = false;
@@ -714,6 +716,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
usb_autopm_get_interface_no_resume(
to_usb_interface(hub->intfdev));
return; /* Continues at init2: below */
} else if (type == HUB_RESET_RESUME) {
/* The internal host controller state for the hub device
* may be gone after a host power loss on system resume.
* Update the device's info so the HW knows it's a hub.
*/
hcd = bus_to_hcd(hdev->bus);
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_NOIO);
if (ret < 0) {
dev_err(hub->intfdev, "Host not "
"accepting hub info "
"update.\n");
dev_err(hub->intfdev, "LS/FS devices "
"and hubs may not work "
"under this hub.\n");
}
}
hub_power_on(hub, true);
} else {
hub_power_on(hub, true);
}
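
The new HUB_RESET_RESUME branch re-registers the hub with the host controller before the ports are powered back on: after a power loss the controller may have dropped its internal context for the hub, and update_hub_device is an optional hc_driver hook, so it is only called when the controller implements it. A sketch of that guarded-callback pattern, assuming the hub.c-internal struct usb_hub fields used in the hunk:

/* Sketch only: re-push hub/TT information to the host controller,
 * tolerating controllers that keep no per-hub state. */
static int hub_refresh_hc_info(struct usb_hub *hub)
{
	struct usb_device *hdev = hub->hdev;
	struct usb_hcd *hcd = bus_to_hcd(hdev->bus);

	if (!hcd->driver->update_hub_device)
		return 0;	/* nothing for this controller to update */

	return hcd->driver->update_hub_device(hcd, hdev, &hub->tt, GFP_NOIO);
}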


@@ -509,7 +509,7 @@ config USB_LANGWELL
select USB_GADGET_SELECTED
config USB_GADGET_EG20T
boolean "Intel EG20T(Topcliff) USB Device controller"
boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -525,6 +525,11 @@ config USB_GADGET_EG20T
This driver does not support interrupt transfer or isochronous
transfer modes.
This driver can also be used for OKI SEMICONDUCTOR's ML7213, which is
intended for IVI (In-Vehicle Infotainment) use.
ML7213 is a companion chip for the Intel Atom E6xx series and is
fully compatible with the Intel EG20T PCH.
config USB_EG20T
tristate
depends on USB_GADGET_EG20T

File diff suppressed because it is too large.


@@ -20,7 +20,7 @@
* DEFINE
*****************************************************************************/
#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
#define ENDPT_MAX (16)
#define ENDPT_MAX (32)
#define CTRL_PAYLOAD_MAX (64)
#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
#define TX (1) /* similar to USB_DIR_IN but can be used as an index */
@@ -88,8 +88,7 @@ struct ci13xxx_ep {
struct list_head queue;
struct ci13xxx_qh *ptr;
dma_addr_t dma;
} qh[2];
struct usb_request *status;
} qh;
int wedge;
/* global resources */
@@ -119,9 +118,13 @@ struct ci13xxx {
struct dma_pool *qh_pool; /* DMA pool for queue heads */
struct dma_pool *td_pool; /* DMA pool for transfer descs */
struct usb_request *status; /* ep0 status request */
struct usb_gadget gadget; /* USB slave device */
struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
u32 ep0_dir; /* ep0 direction */
#define ep0out ci13xxx_ep[0]
#define ep0in ci13xxx_ep[16]
struct usb_gadget_driver *driver; /* 3rd party gadget driver */
struct ci13xxx_udc_driver *udc_driver; /* device controller driver */


@@ -928,8 +928,9 @@ unknown:
*/
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
if (cdev->config)
f = cdev->config->interface[intf];
if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
break;
case USB_RECIP_ENDPOINT:
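
The composite fix refuses to index cdev->config->interface[] when the interface number is out of range, instead of dereferencing past the end of the array for any unknown control request. A standalone sketch of the guard; the struct definitions are pared down and the bound value is assumed for illustration:

#include <stddef.h>

#define MAX_CONFIG_INTERFACES 16	/* illustrative bound */

struct usb_function;

struct usb_configuration {
	struct usb_function *interface[MAX_CONFIG_INTERFACES];
};

/* Return the function bound to interface 'intf', or NULL when the index,
 * which ultimately comes from the untrusted wIndex field, is out of range. */
struct usb_function *lookup_interface(struct usb_configuration *config,
				      unsigned int intf)
{
	if (!config || intf >= MAX_CONFIG_INTERFACES)
		return NULL;
	return config->interface[intf];
}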


@@ -198,10 +198,10 @@
#define PCH_UDC_BRLEN 0x0F /* Burst length */
#define PCH_UDC_THLEN 0x1F /* Threshold length */
/* Value of EP Buffer Size */
#define UDC_EP0IN_BUFF_SIZE 64
#define UDC_EPIN_BUFF_SIZE 512
#define UDC_EP0OUT_BUFF_SIZE 64
#define UDC_EPOUT_BUFF_SIZE 512
#define UDC_EP0IN_BUFF_SIZE 16
#define UDC_EPIN_BUFF_SIZE 256
#define UDC_EP0OUT_BUFF_SIZE 16
#define UDC_EPOUT_BUFF_SIZE 256
/* Value of EP maximum packet size */
#define UDC_EP0IN_MAX_PKT_SIZE 64
#define UDC_EP0OUT_MAX_PKT_SIZE 64
@@ -351,7 +351,7 @@ struct pch_udc_dev {
struct pci_pool *data_requests;
struct pci_pool *stp_requests;
dma_addr_t dma_addr;
unsigned long ep0out_buf[64];
void *ep0out_buf;
struct usb_ctrlrequest setup_data;
unsigned long phys_addr;
void __iomem *base_addr;
@@ -361,6 +361,8 @@ struct pch_udc_dev {
#define PCH_UDC_PCI_BAR 1
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -1219,11 +1221,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
dev = ep->dev;
if (req->dma_mapped) {
if (ep->in)
pci_unmap_single(dev->pdev, req->req.dma,
req->req.length, PCI_DMA_TODEVICE);
dma_unmap_single(&dev->pdev->dev, req->req.dma,
req->req.length, DMA_TO_DEVICE);
else
pci_unmap_single(dev->pdev, req->req.dma,
req->req.length, PCI_DMA_FROMDEVICE);
dma_unmap_single(&dev->pdev->dev, req->req.dma,
req->req.length, DMA_FROM_DEVICE);
req->dma_mapped = 0;
req->req.dma = DMA_ADDR_INVALID;
}
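
The pch_udc hunks above replace the legacy pci_map_single()/pci_unmap_single() wrappers with the generic DMA API: the device argument becomes &pdev->dev and the PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE constants become DMA_TO_DEVICE/DMA_FROM_DEVICE. A hedged sketch of the map/unmap pair (helper names hypothetical; the dma_mapping_error() check is an addition the original hunk does not make):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Map a request buffer: an IN endpoint's buffer is read by the controller,
 * so it maps DMA_TO_DEVICE; an OUT endpoint's buffer maps DMA_FROM_DEVICE. */
static int map_req_buffer(struct pci_dev *pdev, void *buf, size_t len,
			  bool in, dma_addr_t *dma)
{
	enum dma_data_direction dir = in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	*dma = dma_map_single(&pdev->dev, buf, len, dir);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;
	return 0;
}

static void unmap_req_buffer(struct pci_dev *pdev, dma_addr_t dma, size_t len,
			     bool in)
{
	dma_unmap_single(&pdev->dev, dma, len,
			 in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}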
@@ -1414,7 +1416,6 @@ static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
td_data = req->td_data;
ep->td_data = req->td_data;
/* Set the status bits for all descriptors */
while (1) {
td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
@@ -1613,15 +1614,19 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
if (usbreq->length &&
((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
if (ep->in)
usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
usbreq->length, PCI_DMA_TODEVICE);
usbreq->dma = dma_map_single(&dev->pdev->dev,
usbreq->buf,
usbreq->length,
DMA_TO_DEVICE);
else
usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
usbreq->length, PCI_DMA_FROMDEVICE);
usbreq->dma = dma_map_single(&dev->pdev->dev,
usbreq->buf,
usbreq->length,
DMA_FROM_DEVICE);
req->dma_mapped = 1;
}
if (usbreq->length > 0) {
retval = prepare_dma(ep, req, gfp);
retval = prepare_dma(ep, req, GFP_ATOMIC);
if (retval)
goto probe_end;
}
@@ -1646,7 +1651,6 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
pch_udc_wait_ep_stall(ep);
pch_udc_ep_clear_nak(ep);
pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
pch_udc_set_dma(dev, DMA_DIR_TX);
}
}
/* Now add this request to the ep's pending requests */
@@ -1926,6 +1930,7 @@ static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
PCH_UDC_BS_DMA_DONE)
return;
pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
pch_udc_ep_set_ddptr(ep, 0);
if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
PCH_UDC_RTS_SUCC) {
dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
@@ -1963,7 +1968,7 @@ static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
u32 epsts;
struct pch_udc_ep *ep;
ep = &dev->ep[2*ep_num];
ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
epsts = ep->epsts;
ep->epsts = 0;
@@ -2008,7 +2013,7 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
struct pch_udc_ep *ep;
struct pch_udc_request *req = NULL;
ep = &dev->ep[2*ep_num + 1];
ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
epsts = ep->epsts;
ep->epsts = 0;
@@ -2025,10 +2030,11 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
}
if (epsts & UDC_EPSTS_HE)
return;
if (epsts & UDC_EPSTS_RSS)
if (epsts & UDC_EPSTS_RSS) {
pch_udc_ep_set_stall(ep);
pch_udc_enable_ep_interrupts(ep->dev,
PCH_UDC_EPINT(ep->in, ep->num));
}
if (epsts & UDC_EPSTS_RCS) {
if (!dev->prot_stall) {
pch_udc_ep_clear_stall(ep);
@@ -2060,8 +2066,10 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
u32 epsts;
struct pch_udc_ep *ep;
struct pch_udc_ep *ep_out;
ep = &dev->ep[UDC_EP0IN_IDX];
ep_out = &dev->ep[UDC_EP0OUT_IDX];
epsts = ep->epsts;
ep->epsts = 0;
@@ -2073,8 +2081,16 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
return;
if (epsts & UDC_EPSTS_HE)
return;
if ((epsts & UDC_EPSTS_TDC) && (!dev->stall))
if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
pch_udc_complete_transfer(ep);
pch_udc_clear_dma(dev, DMA_DIR_RX);
ep_out->td_data->status = (ep_out->td_data->status &
~PCH_UDC_BUFF_STS) |
PCH_UDC_BS_HST_RDY;
pch_udc_ep_clear_nak(ep_out);
pch_udc_set_dma(dev, DMA_DIR_RX);
pch_udc_ep_set_rrdy(ep_out);
}
/* On IN interrupt, provide data if we have any */
if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
!(epsts & UDC_EPSTS_TXEMPTY))
@@ -2102,11 +2118,9 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
dev->stall = 0;
dev->ep[UDC_EP0IN_IDX].halted = 0;
dev->ep[UDC_EP0OUT_IDX].halted = 0;
/* In data not ready */
pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
dev->setup_data = ep->td_stp->request;
pch_udc_init_setup_buff(ep->td_stp);
pch_udc_clear_dma(dev, DMA_DIR_TX);
pch_udc_clear_dma(dev, DMA_DIR_RX);
pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
dev->ep[UDC_EP0IN_IDX].in);
if ((dev->setup_data.bRequestType & USB_DIR_IN))
@@ -2122,14 +2136,23 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
setup_supported = dev->driver->setup(&dev->gadget,
&dev->setup_data);
spin_lock(&dev->lock);
if (dev->setup_data.bRequestType & USB_DIR_IN) {
ep->td_data->status = (ep->td_data->status &
~PCH_UDC_BUFF_STS) |
PCH_UDC_BS_HST_RDY;
pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
}
/* ep0 in returns data on IN phase */
if (setup_supported >= 0 && setup_supported <
UDC_EP0IN_MAX_PKT_SIZE) {
pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
/* Gadget would have queued a request when
* we called the setup */
pch_udc_set_dma(dev, DMA_DIR_RX);
pch_udc_ep_clear_nak(ep);
if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
pch_udc_set_dma(dev, DMA_DIR_RX);
pch_udc_ep_clear_nak(ep);
}
} else if (setup_supported < 0) {
/* if unsupported request, then stall */
pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
@@ -2142,22 +2165,13 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
}
} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
UDC_EPSTS_OUT_DATA) && !dev->stall) {
if (list_empty(&ep->queue)) {
dev_err(&dev->pdev->dev, "%s: No request\n", __func__);
ep->td_data->status = (ep->td_data->status &
~PCH_UDC_BUFF_STS) |
PCH_UDC_BS_HST_RDY;
pch_udc_set_dma(dev, DMA_DIR_RX);
} else {
/* control write */
/* next function will pickuo an clear the status */
pch_udc_clear_dma(dev, DMA_DIR_RX);
pch_udc_ep_set_ddptr(ep, 0);
if (!list_empty(&ep->queue)) {
ep->epsts = stat;
pch_udc_svc_data_out(dev, 0);
/* re-program desc. pointer for possible ZLPs */
pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
pch_udc_set_dma(dev, DMA_DIR_RX);
pch_udc_svc_data_out(dev, PCH_UDC_EP0);
}
pch_udc_set_dma(dev, DMA_DIR_RX);
}
pch_udc_ep_set_rrdy(ep);
}
@@ -2174,7 +2188,7 @@ static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
struct pch_udc_ep *ep;
struct pch_udc_request *req;
ep = &dev->ep[2*ep_num];
ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
if (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct pch_udc_request, queue);
pch_udc_enable_ep_interrupts(ep->dev,
@@ -2196,13 +2210,13 @@ static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
/* IN */
if (ep_intr & (0x1 << i)) {
ep = &dev->ep[2*i];
ep = &dev->ep[UDC_EPIN_IDX(i)];
ep->epsts = pch_udc_read_ep_status(ep);
pch_udc_clear_ep_status(ep, ep->epsts);
}
/* OUT */
if (ep_intr & (0x10000 << i)) {
ep = &dev->ep[2*i+1];
ep = &dev->ep[UDC_EPOUT_IDX(i)];
ep->epsts = pch_udc_read_ep_status(ep);
pch_udc_clear_ep_status(ep, ep->epsts);
}
@@ -2563,9 +2577,6 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
PCI_DMA_FROMDEVICE);
/* remove ep0 in and out from the list. They have own pointer */
list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
@@ -2637,6 +2648,13 @@ static int init_dma_pools(struct pch_udc_dev *dev)
dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
dev->ep[UDC_EP0IN_IDX].td_data = NULL;
dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
if (!dev->ep0out_buf)
return -ENOMEM;
dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
UDC_EP0OUT_BUFF_SIZE * 4,
DMA_FROM_DEVICE);
return 0;
}
@@ -2700,7 +2718,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
/* Assues that there are no pending requets with this driver */
/* Assures that there are no pending requests with this driver */
driver->disconnect(&dev->gadget);
driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@@ -2750,6 +2769,11 @@ static void pch_udc_remove(struct pci_dev *pdev)
pci_pool_destroy(dev->stp_requests);
}
if (dev->dma_addr)
dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
kfree(dev->ep0out_buf);
pch_udc_exit(dev);
if (dev->irq_registered)
@@ -2792,11 +2816,7 @@ static int pch_udc_resume(struct pci_dev *pdev)
int ret;
pci_set_power_state(pdev, PCI_D0);
ret = pci_restore_state(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
return ret;
}
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
@@ -2914,6 +2934,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
.class_mask = 0xffffffff,
},
{
PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
.class_mask = 0xffffffff,
},
{ 0 },
};


@@ -131,31 +131,31 @@ static struct printer_dev usb_printer_gadget;
* parameters are in UTF-8 (superset of ASCII's 7 bit characters).
*/
static ushort __initdata idVendor;
static ushort idVendor;
module_param(idVendor, ushort, S_IRUGO);
MODULE_PARM_DESC(idVendor, "USB Vendor ID");
static ushort __initdata idProduct;
static ushort idProduct;
module_param(idProduct, ushort, S_IRUGO);
MODULE_PARM_DESC(idProduct, "USB Product ID");
static ushort __initdata bcdDevice;
static ushort bcdDevice;
module_param(bcdDevice, ushort, S_IRUGO);
MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
static char *__initdata iManufacturer;
static char *iManufacturer;
module_param(iManufacturer, charp, S_IRUGO);
MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
static char *__initdata iProduct;
static char *iProduct;
module_param(iProduct, charp, S_IRUGO);
MODULE_PARM_DESC(iProduct, "USB Product string");
static char *__initdata iSerialNum;
static char *iSerialNum;
module_param(iSerialNum, charp, S_IRUGO);
MODULE_PARM_DESC(iSerialNum, "1");
static char *__initdata iPNPstring;
static char *iPNPstring;
module_param(iPNPstring, charp, S_IRUGO);
MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
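
The g_printer hunk above drops __initdata from the module parameters: parameters exported with S_IRUGO stay readable through sysfs after init, so their storage (and, for charp parameters, the strings they point to) must not live in memory that is discarded once module init completes. A minimal sketch of the corrected pattern with a hypothetical parameter, not one of the gadget's own:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Hypothetical example parameter: ordinary static storage, not __initdata,
 * because S_IRUGO keeps it reachable via /sys/module/.../parameters/ for
 * the whole lifetime of the module. */
static ushort example_id;
module_param(example_id, ushort, S_IRUGO);
MODULE_PARM_DESC(example_id, "example device ID (illustrative only)");

static int __init example_init(void)
{
	pr_info("example_id=%u\n", example_id);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL");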
@@ -1596,13 +1596,12 @@ cleanup(void)
int status;
mutex_lock(&usb_printer_gadget.lock_printer_io);
class_destroy(usb_gadget_class);
unregister_chrdev_region(g_printer_devno, 2);
status = usb_gadget_unregister_driver(&printer_driver);
if (status)
ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
unregister_chrdev_region(g_printer_devno, 2);
class_destroy(usb_gadget_class);
mutex_unlock(&usb_printer_gadget.lock_printer_io);
}
module_exit(cleanup);


@@ -52,7 +52,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
struct resource *res;
int irq;
int retval;
unsigned int temp;
pr_debug("initializing FSL-SOC USB Controller\n");
@@ -126,18 +125,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
goto err3;
}
/*
* Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
* flag for 83xx or 8536 system interface registers.
*/
if (pdata->big_endian_mmio)
temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
else
temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
pdata->have_sysif_regs = 1;
/* Enable USB controller, 83xx or 8536 */
if (pdata->have_sysif_regs)
setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);


@@ -19,9 +19,6 @@
#define _EHCI_FSL_H
/* offsets for the non-ehci registers in the FSL SOC USB controller */
#define FSL_SOC_USB_ID 0x0
#define ID_MSK 0x3f
#define NID_MSK 0x3f00
#define FSL_SOC_USB_ULPIVP 0x170
#define FSL_SOC_USB_PORTSC1 0x184
#define PORT_PTS_MSK (3<<30)


@@ -572,6 +572,8 @@ static int ehci_init(struct usb_hcd *hcd)
ehci->iaa_watchdog.function = ehci_iaa_watchdog;
ehci->iaa_watchdog.data = (unsigned long) ehci;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -579,11 +581,20 @@ static int ehci_init(struct usb_hcd *hcd)
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
switch (EHCI_TUNE_FLS) {
case 0: ehci->periodic_size = 1024; break;
case 1: ehci->periodic_size = 512; break;
case 2: ehci->periodic_size = 256; break;
default: BUG();
}
}
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
ehci->i_thresh = 2 + 8;
else // N microframes cached
@@ -637,12 +648,6 @@ static int ehci_init(struct usb_hcd *hcd)
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
switch (EHCI_TUNE_FLS) {
case 0: ehci->periodic_size = 1024; break;
case 1: ehci->periodic_size = 512; break;
case 2: ehci->periodic_size = 256; break;
default: BUG();
}
}
if (HCC_LPM(hcc_params)) {
/* support link power management EHCI 1.1 addendum */


@@ -21,10 +21,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <linux/slab.h>
#include <mach/mxc_ehci.h>
#include <asm/mach-types.h>
#define ULPI_VIEWPORT_OFFSET 0x170
struct ehci_mxc_priv {
@@ -114,6 +117,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
struct resource *res;
int irq, ret;
unsigned int flags;
struct ehci_mxc_priv *priv;
struct device *dev = &pdev->dev;
struct ehci_hcd *ehci;
@@ -177,8 +181,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
clk_enable(priv->ahbclk);
}
/* "dr" device has its own clock */
if (pdev->id == 0) {
/* "dr" device has its own clock on i.MX51 */
if (cpu_is_mx51() && (pdev->id == 0)) {
priv->phy1clk = clk_get(dev, "usb_phy1");
if (IS_ERR(priv->phy1clk)) {
ret = PTR_ERR(priv->phy1clk);
@@ -240,6 +244,23 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
if (ret)
goto err_add;
if (pdata->otg) {
/*
* efikamx and efikasb have a hardware bug which prevents
* USB from working unless CHRGVBUS is set.
* This is in violation of the USB specs.
*/
if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
flags = otg_io_read(pdata->otg, ULPI_OTG_CTRL);
flags |= ULPI_OTG_CTRL_CHRGVBUS;
ret = otg_io_write(pdata->otg, flags, ULPI_OTG_CTRL);
if (ret) {
dev_err(dev, "unable to set CHRGVBUS\n");
goto err_add;
}
}
}
return 0;
err_add:


@@ -44,28 +44,35 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
return 0;
}
static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
{
struct pci_dev *amd_smbus_dev;
u8 rev = 0;
amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
if (!amd_smbus_dev)
return 0;
pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
if (rev < 0x40) {
pci_dev_put(amd_smbus_dev);
amd_smbus_dev = NULL;
return 0;
if (amd_smbus_dev) {
pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
if (rev < 0x40) {
pci_dev_put(amd_smbus_dev);
amd_smbus_dev = NULL;
return 0;
}
} else {
amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
if (!amd_smbus_dev)
return 0;
pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
if (rev < 0x11 || rev > 0x18) {
pci_dev_put(amd_smbus_dev);
amd_smbus_dev = NULL;
return 0;
}
}
if (!amd_nb_dev)
amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
if (!amd_nb_dev)
ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
pci_dev_put(amd_smbus_dev);
amd_smbus_dev = NULL;
@@ -131,7 +138,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
if (ehci_quirk_amd_SB800(ehci))
if (ehci_quirk_amd_hudson(ehci))
ehci->amd_l1_fix = 1;
retval = ehci_halt(ehci);


@@ -262,19 +262,24 @@ static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
}
}
struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
.big_endian_desc = 1,
.big_endian_mmio = 1,
.es = 1,
.have_sysif_regs = 0,
.le_setup_buf = 1,
.init = fsl_usb2_mpc5121_init,
.exit = fsl_usb2_mpc5121_exit,
};
#endif /* CONFIG_PPC_MPC512x */
static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
.have_sysif_regs = 1,
};
static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
{ .compatible = "fsl-usb2-mph", },
{ .compatible = "fsl-usb2-dr", },
{ .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
{ .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
#ifdef CONFIG_PPC_MPC512x
{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
#endif


@@ -308,11 +308,8 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
u32 temp;
xhci_dbg(xhci, "// Ding dong!\n");
temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
@@ -322,26 +319,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int ep_index,
unsigned int stream_id)
{
struct xhci_virt_ep *ep;
unsigned int ep_state;
u32 field;
__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
unsigned int ep_state = ep->ep_state;
ep = &xhci->devs[slot_id]->eps[ep_index];
ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because the we don't want to interrupt processing.
* cancellations because we don't want to interrupt processing.
* We don't want to restart any stream rings if there's a set dequeue
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
* FIXME - check all the stream rings for pending cancellations.
*/
if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
&& !(ep_state & EP_HALTED)) {
field = xhci_readl(xhci, db_addr) & DB_MASK;
field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
xhci_writel(xhci, field, db_addr);
}
if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED))
return;
xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
*/
}
/* Ring the doorbell for any rings with pending URBs */
@@ -1188,7 +1183,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
temp = xhci_readl(xhci, addr);
if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
}
@@ -1710,8 +1705,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Others already handled above */
break;
}
dev_dbg(&td->urb->dev->dev,
"ep %#x - asked for %d bytes, "
xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
"%d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
td->urb->transfer_buffer_length,
@@ -2389,7 +2383,8 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
}
xhci_dbg(xhci, "\n");
if (!in_interrupt())
dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
"num_trbs = %d\n",
urb->ep->desc.bEndpointAddress,
urb->transfer_buffer_length,
num_trbs);
@@ -2414,14 +2409,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id, int start_cycle,
struct xhci_generic_trb *start_trb, struct xhci_td *td)
struct xhci_generic_trb *start_trb)
{
/*
* Pass all the TRBs to the hardware at once and make sure this write
* isn't reordered.
*/
wmb();
start_trb->field[3] |= start_cycle;
if (start_cycle)
start_trb->field[3] |= start_cycle;
else
start_trb->field[3] &= ~0x1;
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
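
giveback_first_trb() now takes the ring's start_cycle and, after the wmb(), either sets or explicitly clears bit 0 (the cycle bit) of the first TRB's control word. Every later TRB of the TD has already been written, so this single final write is what hands the whole TD to the controller. A standalone sketch of that last step (userspace, memory barrier omitted):

#include <stdint.h>

/* Commit a fully written TD: flip only the first TRB's cycle bit last, so
 * the controller never observes a half-built TD.  When the producer cycle
 * state is 0, ownership is signalled by a *clear* cycle bit, hence the
 * explicit clear instead of a plain OR. */
static void commit_first_trb(uint32_t *control_word, unsigned int start_cycle)
{
	/* the driver issues wmb() here so earlier TRB writes land first */
	if (start_cycle)
		*control_word |= 0x1;
	else
		*control_word &= ~(uint32_t)0x1;
}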
@@ -2449,7 +2447,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
if (!printk_ratelimit())
if (printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",
@@ -2551,9 +2549,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 remainder = 0;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb)
if (first_trb) {
first_trb = false;
else
if (start_cycle == 0)
field |= 0x1;
} else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
@@ -2625,7 +2625,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb, td);
start_cycle, start_trb);
return 0;
}
@@ -2671,7 +2671,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
if (!in_interrupt())
dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
"addr = %#llx, num_trbs = %d\n",
urb->ep->desc.bEndpointAddress,
urb->transfer_buffer_length,
urb->transfer_buffer_length,
@@ -2711,9 +2712,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb)
if (first_trb) {
first_trb = false;
else
if (start_cycle == 0)
field |= 0x1;
} else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2760,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb, td);
start_cycle, start_trb);
return 0;
}
@@ -2818,13 +2821,17 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Queue setup TRB - see section 6.4.1.2.1 */
/* FIXME better way to translate setup_packet into two u32 fields? */
setup = (struct usb_ctrlrequest *) urb->setup_packet;
field = 0;
field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
if (start_cycle == 0)
field |= 0x1;
queue_trb(xhci, ep_ring, false, true,
/* FIXME endianness is probably going to bite my ass here. */
setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
setup->wIndex | setup->wLength << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
/* Immediate data in pointer */
TRB_IDT | TRB_TYPE(TRB_SETUP));
field);
/* If there's data, queue data TRBs */
field = 0;
@@ -2859,7 +2866,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
giveback_first_trb(xhci, slot_id, ep_index, 0,
start_cycle, start_trb, td);
start_cycle, start_trb);
return 0;
}
@@ -2900,6 +2907,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int running_total, trb_buff_len, td_len, td_remain_len, ret;
u64 start_addr, addr;
int i, j;
bool more_trbs_coming;
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -2910,7 +2918,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
if (!in_interrupt())
dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
" addr = %#llx, num_tds = %d\n",
urb->ep->desc.bEndpointAddress,
urb->transfer_buffer_length,
@@ -2950,7 +2958,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_TYPE(TRB_ISOC);
/* Assume URB_ISO_ASAP is set */
field |= TRB_SIA;
if (i > 0)
if (i == 0) {
if (start_cycle == 0)
field |= 0x1;
} else
field |= ep_ring->cycle_state;
first_trb = false;
} else {
@@ -2965,9 +2976,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
*/
if (j < trbs_per_td - 1) {
field |= TRB_CHAIN;
more_trbs_coming = true;
} else {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
more_trbs_coming = false;
}
/* Calculate TRB length */
@@ -2980,7 +2993,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, false, false,
queue_trb(xhci, ep_ring, false, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3003,10 +3016,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
wmb();
start_trb->field[3] |= start_cycle;
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
}
@@ -3064,7 +3075,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
if (!printk_ratelimit())
if (printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",


@@ -226,7 +226,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
int i, ret = 0;
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
/*
* calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
goto disable_msix;
}
hcd->msix_enabled = 1;
return ret;
disable_msix:
@@ -280,7 +282,8 @@ free_entries:
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
xhci_free_irq(xhci);
@@ -292,6 +295,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
pci_disable_msi(pdev);
}
hcd->msix_enabled = 0;
return;
}
@@ -508,9 +512,10 @@ void xhci_stop(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
xhci_reset(xhci);
xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Tell the event ring poll function not to reschedule */
xhci->zombie = 1;
@@ -544,9 +549,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
xhci_readl(xhci, &xhci->op_regs->status));
}
@@ -647,6 +653,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
int rc = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
int i;
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@ int xhci_suspend(struct xhci_hcd *xhci)
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
/* step 5: remove core well power */
xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
/* step 5: remove core well power */
/* synchronize irq when using MSI-X */
if (xhci->msix_entries) {
for (i = 0; i < xhci->msix_count; i++)
synchronize_irq(xhci->msix_entries[i].vector);
}
return rc;
}
@@ -694,7 +706,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int old_state, retval;
old_state = hcd->state;
@@ -729,9 +740,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_reset(xhci);
if (hibernated)
xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Tell the event ring poll function not to reschedule */
@@ -765,30 +775,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
return retval;
}
spin_unlock_irq(&xhci->lock);
/* Re-setup MSI-X */
if (hcd->irq)
free_irq(hcd->irq, hcd);
hcd->irq = -1;
retval = xhci_setup_msix(xhci);
if (retval)
/* fall back to msi*/
retval = xhci_setup_msi(xhci);
if (retval) {
/* fall back to legacy interrupt*/
retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
if (retval) {
xhci_err(xhci, "request interrupt %d failed\n",
pdev->irq);
return retval;
}
hcd->irq = pdev->irq;
}
spin_lock_irq(&xhci->lock);
/* step 4: set Run/Stop bit */
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_RUN;
@@ -2445,8 +2431,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Error while assigning device slot ID\n");
return 0;
}
/* xhci_alloc_virt_device() does not touch rings; no need to lock */
if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
/* xhci_alloc_virt_device() does not touch rings; no need to lock.
* Use GFP_NOIO, since this function can be called from
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
/* Disable slot, if we can do it without mem alloc */
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
spin_lock_irqsave(&xhci->lock, flags);


@@ -436,22 +436,18 @@ struct xhci_run_regs {
/**
* struct doorbell_array
*
* Bits 0 - 7: Endpoint target
* Bits 8 - 15: RsvdZ
* Bits 16 - 31: Stream ID
*
* Section 5.6
*/
struct xhci_doorbell_array {
u32 doorbell[256];
};
#define DB_TARGET_MASK 0xFFFFFF00
#define DB_STREAM_ID_MASK 0x0000FFFF
#define DB_TARGET_HOST 0x0
#define DB_STREAM_ID_HOST 0x0
#define DB_MASK (0xff << 8)
/* Endpoint Target - bits 0:7 */
#define EPI_TO_DB(p) (((p) + 1) & 0xff)
#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST 0x00000000
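
DB_VALUE() packs a doorbell write as described in the struct comment above: the endpoint target (endpoint index plus one) in bits 0-7 and the stream ID in bits 16-31, while DB_VALUE_HOST (zero) rings the command-ring doorbell. A small standalone check of the encoding, with illustrative values:

#include <stdio.h>

#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST		0x00000000

int main(void)
{
	/* ep_index 2 and stream ID 5 -> target 3 in bits 0-7, stream 5 in
	 * bits 16-31, i.e. 0x00050003. */
	unsigned int db = DB_VALUE(2, 5);

	printf("doorbell word = 0x%08x (target=%u, stream=%u)\n",
	       db, db & 0xff, db >> 16);
	printf("command-ring doorbell = 0x%08x\n", (unsigned int)DB_VALUE_HOST);
	return 0;
}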
/**
* struct xhci_protocol_caps

Some files were not shown because too many files have changed in this diff.