From 50f798506e53c7005ac62a1430cf3ea82ec7d03e Mon Sep 17 00:00:00 2001 From: caicheng Date: Fri, 10 Jan 2025 15:39:47 +0800 Subject: [PATCH 1/5] Import dwc3/xhci driver source code in linux Signed-off-by: caicheng --- usb/dwc3/core.c | 2082 +++++++++++++++ usb/dwc3/core.h | 1667 ++++++++++++ usb/dwc3/host.c | 136 + usb/dwc3/io.h | 57 + usb/host/hcd.c | 296 +++ usb/host/xhci-ext-caps.h | 125 + usb/host/xhci-hub.c | 1882 +++++++++++++ usb/host/xhci-mem.c | 2701 +++++++++++++++++++ usb/host/xhci-plat.c | 601 +++++ usb/host/xhci-plat.h | 33 + usb/host/xhci-ring.c | 4407 ++++++++++++++++++++++++++++++ usb/host/xhci.c | 5472 ++++++++++++++++++++++++++++++++++++++ usb/host/xhci.h | 2833 ++++++++++++++++++++ usb/usb_platform.c | 78 + 14 files changed, 22370 insertions(+) create mode 100644 usb/dwc3/core.c create mode 100644 usb/dwc3/core.h create mode 100644 usb/dwc3/host.c create mode 100644 usb/dwc3/io.h create mode 100644 usb/host/hcd.c create mode 100644 usb/host/xhci-ext-caps.h create mode 100644 usb/host/xhci-hub.c create mode 100644 usb/host/xhci-mem.c create mode 100644 usb/host/xhci-plat.c create mode 100644 usb/host/xhci-plat.h create mode 100644 usb/host/xhci-ring.c create mode 100644 usb/host/xhci.c create mode 100644 usb/host/xhci.h create mode 100644 usb/usb_platform.c diff --git a/usb/dwc3/core.c b/usb/dwc3/core.c new file mode 100644 index 0000000..6c5d2bb --- /dev/null +++ b/usb/dwc3/core.c @@ -0,0 +1,2082 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * core.c - DesignWare USB3 DRD Controller Core file + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com + * + * Authors: Felipe Balbi , + * Sebastian Andrzej Siewior + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "core.h" +#include "gadget.h" +#include "io.h" + +#include "debug.h" + +#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */ + +/** + * dwc3_get_dr_mode - Validates and sets dr_mode + * @dwc: pointer to our context structure + */ +static int dwc3_get_dr_mode(struct dwc3 *dwc) +{ + enum usb_dr_mode mode; + struct device *dev = dwc->dev; + unsigned int hw_mode; + + if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) + dwc->dr_mode = USB_DR_MODE_OTG; + + mode = dwc->dr_mode; + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + + switch (hw_mode) { + case DWC3_GHWPARAMS0_MODE_GADGET: + if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) { + dev_err(dev, + "Controller does not support host mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_PERIPHERAL; + break; + case DWC3_GHWPARAMS0_MODE_HOST: + if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) { + dev_err(dev, + "Controller does not support device mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_HOST; + break; + default: + if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) + mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) + mode = USB_DR_MODE_PERIPHERAL; + + /* + * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG + * mode. If the controller supports DRD but the dr_mode is not + * specified or set to OTG, then set the mode to peripheral. + */ + if (mode == USB_DR_MODE_OTG && + (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) || + !device_property_read_bool(dwc->dev, "usb-role-switch")) && + !DWC3_VER_IS_PRIOR(DWC3, 330A)) + mode = USB_DR_MODE_PERIPHERAL; + } + + if (mode != dwc->dr_mode) { + dev_warn(dev, + "Configuration mismatch. 
dr_mode forced to %s\n", + mode == USB_DR_MODE_HOST ? "host" : "gadget"); + + dwc->dr_mode = mode; + } + + return 0; +} + +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) +{ + u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_GCTL); + reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); + reg |= DWC3_GCTL_PRTCAPDIR(mode); + dwc3_writel(dwc->regs, DWC3_GCTL, reg); + + dwc->current_dr_role = mode; +} + +static int dwc3_core_soft_reset(struct dwc3 *dwc); + +static void __dwc3_set_mode(struct work_struct *work) +{ + struct dwc3 *dwc = work_to_dwc(work); + unsigned long flags; + int ret; + u32 reg; + + mutex_lock(&dwc->mutex); + + pm_runtime_get_sync(dwc->dev); + + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) + dwc3_otg_update(dwc, 0); + + if (!dwc->desired_dr_role) + goto out; + + if (dwc->desired_dr_role == dwc->current_dr_role) + goto out; + + if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) + goto out; + + switch (dwc->current_dr_role) { + case DWC3_GCTL_PRTCAP_HOST: + dwc3_host_exit(dwc); + break; + case DWC3_GCTL_PRTCAP_DEVICE: + dwc3_gadget_exit(dwc); + dwc3_event_buffers_cleanup(dwc); + break; + case DWC3_GCTL_PRTCAP_OTG: + dwc3_otg_exit(dwc); + spin_lock_irqsave(&dwc->lock, flags); + dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE; + spin_unlock_irqrestore(&dwc->lock, flags); + dwc3_otg_update(dwc, 1); + break; + default: + break; + } + + /* For DRD host or device mode only */ + if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) { + reg = dwc3_readl(dwc->regs, DWC3_GCTL); + reg |= DWC3_GCTL_CORESOFTRESET; + dwc3_writel(dwc->regs, DWC3_GCTL, reg); + + /* + * Wait for internal clocks to synchronized. DWC_usb31 and + * DWC_usb32 may need at least 50ms (less for DWC_usb3). To + * keep it consistent across different IPs, let's wait up to + * 100ms before clearing GCTL.CORESOFTRESET. 
+ */ + msleep(100); + + reg = dwc3_readl(dwc->regs, DWC3_GCTL); + reg &= ~DWC3_GCTL_CORESOFTRESET; + dwc3_writel(dwc->regs, DWC3_GCTL, reg); + } + + spin_lock_irqsave(&dwc->lock, flags); + + dwc3_set_prtcap(dwc, dwc->desired_dr_role); + + spin_unlock_irqrestore(&dwc->lock, flags); + + switch (dwc->desired_dr_role) { + case DWC3_GCTL_PRTCAP_HOST: + ret = dwc3_host_init(dwc); + if (ret) { + dev_err(dwc->dev, "failed to initialize host\n"); + } else { + if (dwc->usb2_phy) + otg_set_vbus(dwc->usb2_phy->otg, true); + phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST); + phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST); + if (dwc->dis_split_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); + reg |= DWC3_GUCTL3_SPLITDISABLE; + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); + } + } + break; + case DWC3_GCTL_PRTCAP_DEVICE: + dwc3_core_soft_reset(dwc); + + dwc3_event_buffers_setup(dwc); + + if (dwc->usb2_phy) + otg_set_vbus(dwc->usb2_phy->otg, false); + phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE); + phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE); + + ret = dwc3_gadget_init(dwc); + if (ret) + dev_err(dwc->dev, "failed to initialize peripheral\n"); + break; + case DWC3_GCTL_PRTCAP_OTG: + dwc3_otg_init(dwc); + dwc3_otg_update(dwc, 0); + break; + default: + break; + } + +out: + pm_runtime_mark_last_busy(dwc->dev); + pm_runtime_put_autosuspend(dwc->dev); + mutex_unlock(&dwc->mutex); +} + +void dwc3_set_mode(struct dwc3 *dwc, u32 mode) +{ + unsigned long flags; + + if (dwc->dr_mode != USB_DR_MODE_OTG) + return; + + spin_lock_irqsave(&dwc->lock, flags); + dwc->desired_dr_role = mode; + spin_unlock_irqrestore(&dwc->lock, flags); + + queue_work(system_freezable_wq, &dwc->drd_work); +} + +u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) +{ + struct dwc3 *dwc = dep->dwc; + u32 reg; + + dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE, + DWC3_GDBGFIFOSPACE_NUM(dep->number) | + DWC3_GDBGFIFOSPACE_TYPE(type)); + + reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE); + + return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg); +} + +/** + * dwc3_core_soft_reset - Issues core soft reset and PHY reset + * @dwc: pointer to our context structure + */ +static int dwc3_core_soft_reset(struct dwc3 *dwc) +{ + u32 reg; + int retries = 1000; + + /* + * We're resetting only the device side because, if we're in host mode, + * XHCI driver will reset the host block. If dwc3 was configured for + * host-only mode, then we can return early. + */ + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) + return 0; + + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + reg |= DWC3_DCTL_CSFTRST; + dwc3_writel(dwc->regs, DWC3_DCTL, reg); + + /* + * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit + * is cleared only after all the clocks are synchronized. This can + * take a little more than 50ms. Set the polling rate at 20ms + * for 10 times instead. + */ + if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32)) + retries = 10; + + do { + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + if (!(reg & DWC3_DCTL_CSFTRST)) + goto done; + + if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32)) + msleep(20); + else + udelay(1); + } while (--retries); + + return -ETIMEDOUT; + +done: + /* + * For DWC_usb31 controller 1.80a and prior, once DCTL.CSFRST bit + * is cleared, we must wait at least 50ms before accessing the PHY + * domain (synchronization delay). 
+ */ + if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A)) + msleep(50); + + return 0; +} + +/* + * dwc3_frame_length_adjustment - Adjusts frame length if required + * @dwc3: Pointer to our controller context structure + */ +static void dwc3_frame_length_adjustment(struct dwc3 *dwc) +{ + u32 reg; + u32 dft; + + if (DWC3_VER_IS_PRIOR(DWC3, 250A)) + return; + + if (dwc->fladj == 0) + return; + + reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); + dft = reg & DWC3_GFLADJ_30MHZ_MASK; + if (dft != dwc->fladj) { + reg &= ~DWC3_GFLADJ_30MHZ_MASK; + reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; + dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); + } +} + +/** + * dwc3_free_one_event_buffer - Frees one event buffer + * @dwc: Pointer to our controller context structure + * @evt: Pointer to event buffer to be freed + */ +static void dwc3_free_one_event_buffer(struct dwc3 *dwc, + struct dwc3_event_buffer *evt) +{ + dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma); +} + +/** + * dwc3_alloc_one_event_buffer - Allocates one event buffer structure + * @dwc: Pointer to our controller context structure + * @length: size of the event buffer + * + * Returns a pointer to the allocated event buffer structure on success + * otherwise ERR_PTR(errno). + */ +static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc, + unsigned length) +{ + struct dwc3_event_buffer *evt; + + evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL); + if (!evt) + return ERR_PTR(-ENOMEM); + + evt->dwc = dwc; + evt->length = length; + evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL); + if (!evt->cache) + return ERR_PTR(-ENOMEM); + + evt->buf = dma_alloc_coherent(dwc->sysdev, length, + &evt->dma, GFP_KERNEL); + if (!evt->buf) + return ERR_PTR(-ENOMEM); + + return evt; +} + +/** + * dwc3_free_event_buffers - frees all allocated event buffers + * @dwc: Pointer to our controller context structure + */ +static void dwc3_free_event_buffers(struct dwc3 *dwc) +{ + struct dwc3_event_buffer *evt; + + evt = dwc->ev_buf; + if (evt) + dwc3_free_one_event_buffer(dwc, evt); +} + +/** + * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length + * @dwc: pointer to our controller context structure + * @length: size of event buffer + * + * Returns 0 on success otherwise negative errno. In the error case, dwc + * may contain some buffers allocated but not all which were requested. + */ +static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length) +{ + struct dwc3_event_buffer *evt; + + evt = dwc3_alloc_one_event_buffer(dwc, length); + if (IS_ERR(evt)) { + dev_err(dwc->dev, "can't allocate event buffer\n"); + return PTR_ERR(evt); + } + dwc->ev_buf = evt; + + return 0; +} + +/** + * dwc3_event_buffers_setup - setup our allocated event buffers + * @dwc: pointer to our controller context structure + * + * Returns 0 on success otherwise negative errno. 
+ */ +int dwc3_event_buffers_setup(struct dwc3 *dwc) +{ + struct dwc3_event_buffer *evt; + + evt = dwc->ev_buf; + evt->lpos = 0; + dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), + lower_32_bits(evt->dma)); + dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), + upper_32_bits(evt->dma)); + dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), + DWC3_GEVNTSIZ_SIZE(evt->length)); + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0); + + return 0; +} + +void dwc3_event_buffers_cleanup(struct dwc3 *dwc) +{ + struct dwc3_event_buffer *evt; + + evt = dwc->ev_buf; + + evt->lpos = 0; + + dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0); + dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0); + dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK + | DWC3_GEVNTSIZ_SIZE(0)); + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0); +} + +static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc) +{ + if (!dwc->has_hibernation) + return 0; + + if (!dwc->nr_scratch) + return 0; + + dwc->scratchbuf = kmalloc_array(dwc->nr_scratch, + DWC3_SCRATCHBUF_SIZE, GFP_KERNEL); + if (!dwc->scratchbuf) + return -ENOMEM; + + return 0; +} + +static int dwc3_setup_scratch_buffers(struct dwc3 *dwc) +{ + dma_addr_t scratch_addr; + u32 param; + int ret; + + if (!dwc->has_hibernation) + return 0; + + if (!dwc->nr_scratch) + return 0; + + /* should never fall here */ + if (!WARN_ON(dwc->scratchbuf)) + return 0; + + scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf, + dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dwc->sysdev, scratch_addr)) { + dev_err(dwc->sysdev, "failed to map scratch buffer\n"); + ret = -EFAULT; + goto err0; + } + + dwc->scratch_addr = scratch_addr; + + param = lower_32_bits(scratch_addr); + + ret = dwc3_send_gadget_generic_command(dwc, + DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param); + if (ret < 0) + goto err1; + + param = upper_32_bits(scratch_addr); + + ret = dwc3_send_gadget_generic_command(dwc, + DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param); + if (ret < 0) + goto err1; + + return 0; + +err1: + dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch * + DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); + +err0: + return ret; +} + +static void dwc3_free_scratch_buffers(struct dwc3 *dwc) +{ + if (!dwc->has_hibernation) + return; + + if (!dwc->nr_scratch) + return; + + /* should never fall here */ + if (!WARN_ON(dwc->scratchbuf)) + return; + + dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch * + DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); + kfree(dwc->scratchbuf); +} + +static void dwc3_core_num_eps(struct dwc3 *dwc) +{ + struct dwc3_hwparams *parms = &dwc->hwparams; + + dwc->num_eps = DWC3_NUM_EPS(parms); +} + +static void dwc3_cache_hwparams(struct dwc3 *dwc) +{ + struct dwc3_hwparams *parms = &dwc->hwparams; + + parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0); + parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1); + parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2); + parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3); + parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4); + parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5); + parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6); + parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7); + parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); + + if (DWC3_IP_IS(DWC32)) + parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9); +} + +static int dwc3_core_ulpi_init(struct dwc3 *dwc) +{ + int intf; + int ret = 0; + + intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3); + + 
if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI || + (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI && + dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "ulpi", 4))) + ret = dwc3_ulpi_init(dwc); + + return ret; +} + +/** + * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core + * @dwc: Pointer to our controller context structure + * + * Returns 0 on success. The USB PHY interfaces are configured but not + * initialized. The PHY interfaces and the PHYs get initialized together with + * the core in dwc3_core_init. + */ +static int dwc3_phy_setup(struct dwc3 *dwc) +{ + unsigned int hw_mode; + u32 reg; + + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + + reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + + /* + * Make sure UX_EXIT_PX is cleared as that causes issues with some + * PHYs. Also, this bit is not supposed to be used in normal operation. + */ + reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; + + /* + * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY + * to '0' during coreConsultant configuration. So default value + * will be '0' when the core is reset. Application needs to set it + * to '1' after the core initialization is completed. + */ + if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) + reg |= DWC3_GUSB3PIPECTL_SUSPHY; + + /* + * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after + * power-on reset, and it can be set after core initialization, which is + * after device soft-reset during initialization. + */ + if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD) + reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; + + if (dwc->u2ss_inp3_quirk) + reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK; + + if (dwc->dis_rxdet_inp3_quirk) + reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3; + + if (dwc->req_p1p2p3_quirk) + reg |= DWC3_GUSB3PIPECTL_REQP1P2P3; + + if (dwc->del_p1p2p3_quirk) + reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN; + + if (dwc->del_phy_power_chg_quirk) + reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE; + + if (dwc->lfps_filter_quirk) + reg |= DWC3_GUSB3PIPECTL_LFPSFILT; + + if (dwc->rx_detect_poll_quirk) + reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL; + + if (dwc->tx_de_emphasis_quirk) + reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis); + + if (dwc->dis_u3_susphy_quirk) + reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; + + if (dwc->dis_del_phy_power_chg_quirk) + reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE; + + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); + + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + + /* Select the HS PHY interface */ + switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) { + case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI: + if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "utmi", 4)) { + reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI; + break; + } else if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "ulpi", 4)) { + reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + } else { + /* Relying on default value. 
*/ + if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI)) + break; + } + fallthrough; + case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: + default: + break; + } + + switch (dwc->hsphy_mode) { + case USBPHY_INTERFACE_MODE_UTMI: + reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK | + DWC3_GUSB2PHYCFG_USBTRDTIM_MASK); + reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) | + DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT); + break; + case USBPHY_INTERFACE_MODE_UTMIW: + reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK | + DWC3_GUSB2PHYCFG_USBTRDTIM_MASK); + reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) | + DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT); + break; + default: + break; + } + + /* + * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to + * '0' during coreConsultant configuration. So default value will + * be '0' when the core is reset. Application needs to set it to + * '1' after the core initialization is completed. + */ + if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) + reg |= DWC3_GUSB2PHYCFG_SUSPHY; + + /* + * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after + * power-on reset, and it can be set after core initialization, which is + * after device soft-reset during initialization. + */ + if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD) + reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; + + if (dwc->dis_u2_susphy_quirk) + reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; + + if (dwc->dis_enblslpm_quirk) + reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; + else + reg |= DWC3_GUSB2PHYCFG_ENBLSLPM; + + if (dwc->dis_u2_freeclk_exists_quirk) + reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS; + + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + + return 0; +} + +static void dwc3_core_exit(struct dwc3 *dwc) +{ + dwc3_event_buffers_cleanup(dwc); + + usb_phy_shutdown(dwc->usb2_phy); + usb_phy_shutdown(dwc->usb3_phy); + phy_exit(dwc->usb2_generic_phy); + phy_exit(dwc->usb3_generic_phy); + + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); + reset_control_assert(dwc->reset); +} + +static bool dwc3_core_is_valid(struct dwc3 *dwc) +{ + u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_GSNPSID); + dwc->ip = DWC3_GSNPS_ID(reg); + + /* This should read as U3 followed by revision number */ + if (DWC3_IP_IS(DWC3)) { + dwc->revision = reg; + } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) { + dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER); + dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE); + } else { + return false; + } + + return true; +} + +static void dwc3_core_setup_global_control(struct dwc3 *dwc) +{ + u32 hwparams4 = dwc->hwparams.hwparams4; + u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_GCTL); + reg &= ~DWC3_GCTL_SCALEDOWN_MASK; + + switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) { + case DWC3_GHWPARAMS1_EN_PWROPT_CLK: + /** + * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an + * issue which would cause xHCI compliance tests to fail. + * + * Because of that we cannot enable clock gating on such + * configurations. 
+ * + * Refers to: + * + * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based + * SOF/ITP Mode Used + */ + if ((dwc->dr_mode == USB_DR_MODE_HOST || + dwc->dr_mode == USB_DR_MODE_OTG) && + DWC3_VER_IS_WITHIN(DWC3, 210A, 250A)) + reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC; + else + reg &= ~DWC3_GCTL_DSBLCLKGTNG; + break; + case DWC3_GHWPARAMS1_EN_PWROPT_HIB: + /* enable hibernation here */ + dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4); + + /* + * REVISIT Enabling this bit so that host-mode hibernation + * will work. Device-mode hibernation is not yet implemented. + */ + reg |= DWC3_GCTL_GBLHIBERNATIONEN; + break; + default: + /* nothing */ + break; + } + + /* check if current dwc3 is on simulation board */ + if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) { + dev_info(dwc->dev, "Running with FPGA optimizations\n"); + dwc->is_fpga = true; + } + + WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga, + "disable_scramble cannot be used on non-FPGA builds\n"); + + if (dwc->disable_scramble_quirk && dwc->is_fpga) + reg |= DWC3_GCTL_DISSCRAMBLE; + else + reg &= ~DWC3_GCTL_DISSCRAMBLE; + + if (dwc->u2exit_lfps_quirk) + reg |= DWC3_GCTL_U2EXIT_LFPS; + + /* + * WORKAROUND: DWC3 revisions <1.90a have a bug + * where the device can fail to connect at SuperSpeed + * and falls back to high-speed mode which causes + * the device to enter a Connect/Disconnect loop + */ + if (DWC3_VER_IS_PRIOR(DWC3, 190A)) + reg |= DWC3_GCTL_U2RSTECN; + + dwc3_writel(dwc->regs, DWC3_GCTL, reg); +} + +static int dwc3_core_get_phy(struct dwc3 *dwc); +static int dwc3_core_ulpi_init(struct dwc3 *dwc); + +/* set global incr burst type configuration registers */ +static void dwc3_set_incr_burst_type(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + /* incrx_mode : for INCR burst type. */ + bool incrx_mode; + /* incrx_size : for size of INCRX burst. */ + u32 incrx_size; + u32 *vals; + u32 cfg; + int ntype; + int ret; + int i; + + cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0); + + /* + * Handle property "snps,incr-burst-type-adjustment". + * Get the number of value from this property: + * result <= 0, means this property is not supported. + * result = 1, means INCRx burst mode supported. + * result > 1, means undefined length burst mode supported. 
+ */ + ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment"); + if (ntype <= 0) + return; + + vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL); + if (!vals) { + dev_err(dev, "Error to get memory\n"); + return; + } + + /* Get INCR burst type, and parse it */ + ret = device_property_read_u32_array(dev, + "snps,incr-burst-type-adjustment", vals, ntype); + if (ret) { + kfree(vals); + dev_err(dev, "Error to get property\n"); + return; + } + + incrx_size = *vals; + + if (ntype > 1) { + /* INCRX (undefined length) burst mode */ + incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE; + for (i = 1; i < ntype; i++) { + if (vals[i] > incrx_size) + incrx_size = vals[i]; + } + } else { + /* INCRX burst mode */ + incrx_mode = INCRX_BURST_MODE; + } + + kfree(vals); + + /* Enable Undefined Length INCR Burst and Enable INCRx Burst */ + cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK; + if (incrx_mode) + cfg |= DWC3_GSBUSCFG0_INCRBRSTENA; + switch (incrx_size) { + case 256: + cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA; + break; + case 128: + cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA; + break; + case 64: + cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA; + break; + case 32: + cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA; + break; + case 16: + cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA; + break; + case 8: + cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA; + break; + case 4: + cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA; + break; + case 1: + break; + default: + dev_err(dev, "Invalid property\n"); + break; + } + + dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg); +} + +/** + * dwc3_core_init - Low-level initialization of DWC3 Core + * @dwc: Pointer to our controller context structure + * + * Returns 0 on success otherwise negative errno. + */ +static int dwc3_core_init(struct dwc3 *dwc) +{ + unsigned int hw_mode; + u32 reg; + int ret; + + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + + /* + * Write Linux Version Code to our GUID register so it's easy to figure + * out which kernel version a bug was found. 
+ */ + dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); + + ret = dwc3_phy_setup(dwc); + if (ret) + goto err0; + + if (!dwc->ulpi_ready) { + ret = dwc3_core_ulpi_init(dwc); + if (ret) + goto err0; + dwc->ulpi_ready = true; + } + + if (!dwc->phys_ready) { + ret = dwc3_core_get_phy(dwc); + if (ret) + goto err0a; + dwc->phys_ready = true; + } + + usb_phy_init(dwc->usb2_phy); + usb_phy_init(dwc->usb3_phy); + ret = phy_init(dwc->usb2_generic_phy); + if (ret < 0) + goto err0a; + + ret = phy_init(dwc->usb3_generic_phy); + if (ret < 0) { + phy_exit(dwc->usb2_generic_phy); + goto err0a; + } + + ret = dwc3_core_soft_reset(dwc); + if (ret) + goto err1; + + if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && + !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) { + if (!dwc->dis_u3_susphy_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + reg |= DWC3_GUSB3PIPECTL_SUSPHY; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); + } + + if (!dwc->dis_u2_susphy_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + reg |= DWC3_GUSB2PHYCFG_SUSPHY; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + } + } + + dwc3_core_setup_global_control(dwc); + dwc3_core_num_eps(dwc); + + ret = dwc3_setup_scratch_buffers(dwc); + if (ret) + goto err1; + + /* Adjust Frame Length */ + dwc3_frame_length_adjustment(dwc); + + dwc3_set_incr_burst_type(dwc); + + usb_phy_set_suspend(dwc->usb2_phy, 0); + usb_phy_set_suspend(dwc->usb3_phy, 0); + ret = phy_power_on(dwc->usb2_generic_phy); + if (ret < 0) + goto err2; + + ret = phy_power_on(dwc->usb3_generic_phy); + if (ret < 0) + goto err3; + + ret = dwc3_event_buffers_setup(dwc); + if (ret) { + dev_err(dwc->dev, "failed to setup event buffers\n"); + goto err4; + } + + /* + * ENDXFER polling is available on version 3.10a and later of + * the DWC_usb3 controller. It is NOT available in the + * DWC_usb31 controller. + */ + if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); + reg |= DWC3_GUCTL2_RST_ACTBITLATER; + dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); + } + + if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); + + /* + * Enable hardware control of sending remote wakeup + * in HS when the device is in the L1 state. + */ + if (!DWC3_VER_IS_PRIOR(DWC3, 290A)) + reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW; + + /* + * Decouple USB 2.0 L1 & L2 events which will allow for + * gadget driver to only receive U3/L2 suspend & wakeup + * events and prevent the more frequent L1 LPM transitions + * from interrupting the driver. + */ + if (!DWC3_VER_IS_PRIOR(DWC3, 300A)) + reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT; + + if (dwc->dis_tx_ipgap_linecheck_quirk) + reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; + + if (dwc->parkmode_disable_ss_quirk) + reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; + + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); + } + + if (dwc->dr_mode == USB_DR_MODE_HOST || + dwc->dr_mode == USB_DR_MODE_OTG) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL); + + /* + * Enable Auto retry Feature to make the controller operating in + * Host mode on seeing transaction errors(CRC errors or internal + * overrun scenerios) on IN transfers to reply to the device + * with a non-terminating retry ACK (i.e, an ACK transcation + * packet with Retry=1 & Nump != 0) + */ + reg |= DWC3_GUCTL_HSTINAUTORETRY; + + dwc3_writel(dwc->regs, DWC3_GUCTL, reg); + } + + /* + * Must config both number of packets and max burst settings to enable + * RX and/or TX threshold. 
+ */ + if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) { + u8 rx_thr_num = dwc->rx_thr_num_pkt_prd; + u8 rx_maxburst = dwc->rx_max_burst_prd; + u8 tx_thr_num = dwc->tx_thr_num_pkt_prd; + u8 tx_maxburst = dwc->tx_max_burst_prd; + + if (rx_thr_num && rx_maxburst) { + reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); + reg |= DWC31_RXTHRNUMPKTSEL_PRD; + + reg &= ~DWC31_RXTHRNUMPKT_PRD(~0); + reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num); + + reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0); + reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst); + + dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); + } + + if (tx_thr_num && tx_maxburst) { + reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); + reg |= DWC31_TXTHRNUMPKTSEL_PRD; + + reg &= ~DWC31_TXTHRNUMPKT_PRD(~0); + reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num); + + reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0); + reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst); + + dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); + } + } + + return 0; + +err4: + phy_power_off(dwc->usb3_generic_phy); + +err3: + phy_power_off(dwc->usb2_generic_phy); + +err2: + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + +err1: + usb_phy_shutdown(dwc->usb2_phy); + usb_phy_shutdown(dwc->usb3_phy); + phy_exit(dwc->usb2_generic_phy); + phy_exit(dwc->usb3_generic_phy); + +err0a: + dwc3_ulpi_exit(dwc); + +err0: + return ret; +} + +static int dwc3_core_get_phy(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + struct device_node *node = dev->of_node; + int ret; + + if (node) { + dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0); + dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1); + } else { + dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); + dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3); + } + + if (IS_ERR(dwc->usb2_phy)) { + ret = PTR_ERR(dwc->usb2_phy); + if (ret == -ENXIO || ret == -ENODEV) { + dwc->usb2_phy = NULL; + } else { + return dev_err_probe(dev, ret, "no usb2 phy configured\n"); + } + } + + if (IS_ERR(dwc->usb3_phy)) { + ret = PTR_ERR(dwc->usb3_phy); + if (ret == -ENXIO || ret == -ENODEV) { + dwc->usb3_phy = NULL; + } else { + return dev_err_probe(dev, ret, "no usb3 phy configured\n"); + } + } + + dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy"); + if (IS_ERR(dwc->usb2_generic_phy)) { + ret = PTR_ERR(dwc->usb2_generic_phy); + if (ret == -ENOSYS || ret == -ENODEV) { + dwc->usb2_generic_phy = NULL; + } else { + return dev_err_probe(dev, ret, "no usb2 phy configured\n"); + } + } + + dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy"); + if (IS_ERR(dwc->usb3_generic_phy)) { + ret = PTR_ERR(dwc->usb3_generic_phy); + if (ret == -ENOSYS || ret == -ENODEV) { + dwc->usb3_generic_phy = NULL; + } else { + return dev_err_probe(dev, ret, "no usb3 phy configured\n"); + } + } + + return 0; +} + +static int dwc3_core_init_mode(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + int ret; + + switch (dwc->dr_mode) { + case USB_DR_MODE_PERIPHERAL: + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + + if (dwc->usb2_phy) + otg_set_vbus(dwc->usb2_phy->otg, false); + phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE); + phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE); + + ret = dwc3_gadget_init(dwc); + if (ret) + return dev_err_probe(dev, ret, "failed to initialize gadget\n"); + break; + case USB_DR_MODE_HOST: + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + + if (dwc->usb2_phy) + otg_set_vbus(dwc->usb2_phy->otg, true); + phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST); + phy_set_mode(dwc->usb3_generic_phy, 
PHY_MODE_USB_HOST); + + ret = dwc3_host_init(dwc); + if (ret) + return dev_err_probe(dev, ret, "failed to initialize host\n"); + break; + case USB_DR_MODE_OTG: + INIT_WORK(&dwc->drd_work, __dwc3_set_mode); + ret = dwc3_drd_init(dwc); + if (ret) + return dev_err_probe(dev, ret, "failed to initialize dual-role\n"); + break; + default: + dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode); + return -EINVAL; + } + + return 0; +} + +static void dwc3_core_exit_mode(struct dwc3 *dwc) +{ + switch (dwc->dr_mode) { + case USB_DR_MODE_PERIPHERAL: + dwc3_gadget_exit(dwc); + break; + case USB_DR_MODE_HOST: + dwc3_host_exit(dwc); + break; + case USB_DR_MODE_OTG: + dwc3_drd_exit(dwc); + break; + default: + /* do nothing */ + break; + } + + /* de-assert DRVVBUS for HOST and OTG mode */ + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); +} + +static void dwc3_get_properties(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + u8 lpm_nyet_threshold; + u8 tx_de_emphasis; + u8 hird_threshold; + u8 rx_thr_num_pkt_prd; + u8 rx_max_burst_prd; + u8 tx_thr_num_pkt_prd; + u8 tx_max_burst_prd; + u8 tx_fifo_resize_max_num; + const char *usb_psy_name; + int ret; + + /* default to highest possible threshold */ + lpm_nyet_threshold = 0xf; + + /* default to -3.5dB de-emphasis */ + tx_de_emphasis = 1; + + /* + * default to assert utmi_sleep_n and use maximum allowed HIRD + * threshold value of 0b1100 + */ + hird_threshold = 12; + + /* + * default to a TXFIFO size large enough to fit 6 max packets. This + * allows for systems with larger bus latencies to have some headroom + * for endpoints that have a large bMaxBurst value. + */ + tx_fifo_resize_max_num = 6; + + dwc->maximum_speed = usb_get_maximum_speed(dev); + dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev); + dwc->dr_mode = usb_get_dr_mode(dev); + dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); + + dwc->sysdev_is_parent = device_property_read_bool(dev, + "linux,sysdev_is_parent"); + if (dwc->sysdev_is_parent) + dwc->sysdev = dwc->dev->parent; + else + dwc->sysdev = dwc->dev; + + ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name); + if (ret >= 0) { + dwc->usb_psy = power_supply_get_by_name(usb_psy_name); + if (!dwc->usb_psy) + dev_err(dev, "couldn't get usb power supply\n"); + } + + dwc->has_lpm_erratum = device_property_read_bool(dev, + "snps,has-lpm-erratum"); + device_property_read_u8(dev, "snps,lpm-nyet-threshold", + &lpm_nyet_threshold); + dwc->is_utmi_l1_suspend = device_property_read_bool(dev, + "snps,is-utmi-l1-suspend"); + device_property_read_u8(dev, "snps,hird-threshold", + &hird_threshold); + dwc->dis_start_transfer_quirk = device_property_read_bool(dev, + "snps,dis-start-transfer-quirk"); + dwc->usb3_lpm_capable = device_property_read_bool(dev, + "snps,usb3_lpm_capable"); + dwc->usb2_lpm_disable = device_property_read_bool(dev, + "snps,usb2-lpm-disable"); + dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev, + "snps,usb2-gadget-lpm-disable"); + device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd", + &rx_thr_num_pkt_prd); + device_property_read_u8(dev, "snps,rx-max-burst-prd", + &rx_max_burst_prd); + device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd", + &tx_thr_num_pkt_prd); + device_property_read_u8(dev, "snps,tx-max-burst-prd", + &tx_max_burst_prd); + dwc->do_fifo_resize = device_property_read_bool(dev, + "tx-fifo-resize"); + if (dwc->do_fifo_resize) + device_property_read_u8(dev, "tx-fifo-max-num", + &tx_fifo_resize_max_num); + + dwc->disable_scramble_quirk = device_property_read_bool(dev, + 
"snps,disable_scramble_quirk"); + dwc->u2exit_lfps_quirk = device_property_read_bool(dev, + "snps,u2exit_lfps_quirk"); + dwc->u2ss_inp3_quirk = device_property_read_bool(dev, + "snps,u2ss_inp3_quirk"); + dwc->req_p1p2p3_quirk = device_property_read_bool(dev, + "snps,req_p1p2p3_quirk"); + dwc->del_p1p2p3_quirk = device_property_read_bool(dev, + "snps,del_p1p2p3_quirk"); + dwc->del_phy_power_chg_quirk = device_property_read_bool(dev, + "snps,del_phy_power_chg_quirk"); + dwc->lfps_filter_quirk = device_property_read_bool(dev, + "snps,lfps_filter_quirk"); + dwc->rx_detect_poll_quirk = device_property_read_bool(dev, + "snps,rx_detect_poll_quirk"); + dwc->dis_u3_susphy_quirk = device_property_read_bool(dev, + "snps,dis_u3_susphy_quirk"); + dwc->dis_u2_susphy_quirk = device_property_read_bool(dev, + "snps,dis_u2_susphy_quirk"); + dwc->dis_enblslpm_quirk = device_property_read_bool(dev, + "snps,dis_enblslpm_quirk"); + dwc->dis_u1_entry_quirk = device_property_read_bool(dev, + "snps,dis-u1-entry-quirk"); + dwc->dis_u2_entry_quirk = device_property_read_bool(dev, + "snps,dis-u2-entry-quirk"); + dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev, + "snps,dis_rxdet_inp3_quirk"); + dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev, + "snps,dis-u2-freeclk-exists-quirk"); + dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev, + "snps,dis-del-phy-power-chg-quirk"); + dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, + "snps,dis-tx-ipgap-linecheck-quirk"); + dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, + "snps,parkmode-disable-ss-quirk"); + + dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, + "snps,tx_de_emphasis_quirk"); + device_property_read_u8(dev, "snps,tx_de_emphasis", + &tx_de_emphasis); + device_property_read_string(dev, "snps,hsphy_interface", + &dwc->hsphy_interface); + device_property_read_u32(dev, "snps,quirk-frame-length-adjustment", + &dwc->fladj); + + dwc->dis_metastability_quirk = device_property_read_bool(dev, + "snps,dis_metastability_quirk"); + + dwc->dis_split_quirk = device_property_read_bool(dev, + "snps,dis-split-quirk"); + + dwc->lpm_nyet_threshold = lpm_nyet_threshold; + dwc->tx_de_emphasis = tx_de_emphasis; + + dwc->hird_threshold = hird_threshold; + + dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd; + dwc->rx_max_burst_prd = rx_max_burst_prd; + + dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; + dwc->tx_max_burst_prd = tx_max_burst_prd; + + dwc->imod_interval = 0; + + dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; +} + +/* check whether the core supports IMOD */ +bool dwc3_has_imod(struct dwc3 *dwc) +{ + return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) || + DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) || + DWC3_IP_IS(DWC32); +} + +static void dwc3_check_params(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + unsigned int hwparam_gen = + DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); + + /* Check for proper value of imod_interval */ + if (dwc->imod_interval && !dwc3_has_imod(dwc)) { + dev_warn(dwc->dev, "Interrupt moderation not supported\n"); + dwc->imod_interval = 0; + } + + /* + * Workaround for STAR 9000961433 which affects only version + * 3.00a of the DWC_usb3 core. This prevents the controller + * interrupt from being masked while handling events. IMOD + * allows us to work around this issue. Enable it for the + * affected version. 
+ */ + if (!dwc->imod_interval && + DWC3_VER_IS(DWC3, 300A)) + dwc->imod_interval = 1; + + /* Check the maximum_speed parameter */ + switch (dwc->maximum_speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + break; + case USB_SPEED_SUPER: + if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) + dev_warn(dev, "UDC doesn't support Gen 1\n"); + break; + case USB_SPEED_SUPER_PLUS: + if ((DWC3_IP_IS(DWC32) && + hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) || + (!DWC3_IP_IS(DWC32) && + hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) + dev_warn(dev, "UDC doesn't support SSP\n"); + break; + default: + dev_err(dev, "invalid maximum_speed parameter %d\n", + dwc->maximum_speed); + fallthrough; + case USB_SPEED_UNKNOWN: + switch (hwparam_gen) { + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: + dwc->maximum_speed = USB_SPEED_SUPER_PLUS; + break; + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: + if (DWC3_IP_IS(DWC32)) + dwc->maximum_speed = USB_SPEED_SUPER_PLUS; + else + dwc->maximum_speed = USB_SPEED_SUPER; + break; + case DWC3_GHWPARAMS3_SSPHY_IFC_DIS: + dwc->maximum_speed = USB_SPEED_HIGH; + break; + default: + dwc->maximum_speed = USB_SPEED_SUPER; + break; + } + break; + } + + /* + * Currently the controller does not have visibility into the HW + * parameter to determine the maximum number of lanes the HW supports. + * If the number of lanes is not specified in the device property, then + * set the default to support dual-lane for DWC_usb32 and single-lane + * for DWC_usb31 for super-speed-plus. + */ + if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) { + switch (dwc->max_ssp_rate) { + case USB_SSP_GEN_2x1: + if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1) + dev_warn(dev, "UDC only supports Gen 1\n"); + break; + case USB_SSP_GEN_1x2: + case USB_SSP_GEN_2x2: + if (DWC3_IP_IS(DWC31)) + dev_warn(dev, "UDC only supports single lane\n"); + break; + case USB_SSP_GEN_UNKNOWN: + default: + switch (hwparam_gen) { + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: + if (DWC3_IP_IS(DWC32)) + dwc->max_ssp_rate = USB_SSP_GEN_2x2; + else + dwc->max_ssp_rate = USB_SSP_GEN_2x1; + break; + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: + if (DWC3_IP_IS(DWC32)) + dwc->max_ssp_rate = USB_SSP_GEN_1x2; + break; + } + break; + } + } +} + +static void dwc3_resume_quirk(struct dwc3 *dwc); + +static void dwc3_set_ncr(struct dwc3 *dwc) +{ + if (device_property_read_bool(dwc->dev, "SOC_Thang")) { + dwc->soc_taihang = true; + if (device_property_read_bool(dwc->dev, "usb-role-switch")) + sd_otg_enable(dwc); + } + + dwc3_resume_quirk(dwc); +} + +static int dwc3_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res, dwc_res; + struct dwc3_vendor *vdwc; + struct dwc3 *dwc; + + int ret; + + void __iomem *regs; + + vdwc = devm_kzalloc(dev, sizeof(*vdwc), GFP_KERNEL); + if (!vdwc) + return -ENOMEM; + dwc = &vdwc->dwc; + + dwc->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "missing memory resource\n"); + return -ENODEV; + } + + dwc->xhci_resources[0].start = res->start; + dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + + DWC3_XHCI_REGS_END; + dwc->xhci_resources[0].flags = res->flags; + dwc->xhci_resources[0].name = res->name; + + /* + * Request memory region but exclude xHCI regs, + * since it will be requested by the xhci-plat driver. 
+ */ + dwc_res = *res; + dwc_res.start += DWC3_GLOBALS_REGS_START; + + regs = devm_ioremap_resource(dev, &dwc_res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + dwc->regs = regs; + dwc->regs_size = resource_size(&dwc_res); + + dwc3_get_properties(dwc); + + dwc->reset = devm_reset_control_array_get_optional_shared(dev); + if (IS_ERR(dwc->reset)) + return PTR_ERR(dwc->reset); + + if (dev->of_node) { + ret = devm_clk_bulk_get_all(dev, &dwc->clks); + if (ret == -EPROBE_DEFER) + return ret; + /* + * Clocks are optional, but new DT platforms should support all + * clocks as required by the DT-binding. + */ + if (ret < 0) + dwc->num_clks = 0; + else + dwc->num_clks = ret; + + } + + ret = reset_control_deassert(dwc->reset); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks); + if (ret) + goto assert_reset; + + if (!dwc3_core_is_valid(dwc)) { + dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); + ret = -ENODEV; + goto disable_clks; + } + + platform_set_drvdata(pdev, dwc); + dwc3_cache_hwparams(dwc); + + spin_lock_init(&dwc->lock); + mutex_init(&dwc->mutex); + + pm_runtime_set_active(dev); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err1; + + pm_runtime_forbid(dev); + + ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE); + if (ret) { + dev_err(dwc->dev, "failed to allocate event buffers\n"); + ret = -ENOMEM; + goto err2; + } + + ret = dwc3_get_dr_mode(dwc); + if (ret) + goto err3; + + ret = dwc3_alloc_scratch_buffers(dwc); + if (ret) + goto err3; + + ret = dwc3_core_init(dwc); + if (ret) { + dev_err_probe(dev, ret, "failed to initialize core\n"); + goto err4; + } + + dwc3_check_params(dwc); + dwc3_debugfs_init(dwc); + + ret = dwc3_core_init_mode(dwc); + if (ret) + goto err5; + + dwc3_set_ncr(dwc); + + pm_runtime_put(dev); + + return 0; + +err5: + dwc3_debugfs_exit(dwc); + dwc3_event_buffers_cleanup(dwc); + + usb_phy_shutdown(dwc->usb2_phy); + usb_phy_shutdown(dwc->usb3_phy); + phy_exit(dwc->usb2_generic_phy); + phy_exit(dwc->usb3_generic_phy); + + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); + + dwc3_ulpi_exit(dwc); + +err4: + dwc3_free_scratch_buffers(dwc); + +err3: + dwc3_free_event_buffers(dwc); + +err2: + pm_runtime_allow(&pdev->dev); + +err1: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + +disable_clks: + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); +assert_reset: + reset_control_assert(dwc->reset); + + if (dwc->usb_psy) + power_supply_put(dwc->usb_psy); + + return ret; +} + +static int dwc3_remove(struct platform_device *pdev) +{ + struct dwc3 *dwc = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + dwc3_core_exit_mode(dwc); + dwc3_debugfs_exit(dwc); + + dwc3_core_exit(dwc); + dwc3_ulpi_exit(dwc); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + + dwc3_free_event_buffers(dwc); + dwc3_free_scratch_buffers(dwc); + + if (dwc->usb_psy) + power_supply_put(dwc->usb_psy); + + return 0; +} + +#ifdef CONFIG_PM +static int dwc3_core_init_for_resume(struct dwc3 *dwc) +{ + int ret; + + ret = reset_control_deassert(dwc->reset); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks); + if (ret) + goto assert_reset; + + ret = dwc3_core_init(dwc); + 
if (ret) + goto disable_clks; + + return 0; + +disable_clks: + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); +assert_reset: + reset_control_assert(dwc->reset); + + return ret; +} + +static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) +{ + unsigned long flags; + u32 reg; + + switch (dwc->current_dr_role) { + case DWC3_GCTL_PRTCAP_DEVICE: + if (pm_runtime_suspended(dwc->dev)) + break; + spin_lock_irqsave(&dwc->lock, flags); + dwc3_gadget_suspend(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + synchronize_irq(dwc->irq_gadget); + dwc3_core_exit(dwc); + break; + case DWC3_GCTL_PRTCAP_HOST: + if (!PMSG_IS_AUTO(msg)) { + dwc3_core_exit(dwc); + break; + } + + /* Let controller to suspend HSPHY before PHY driver suspends */ + if (dwc->dis_u2_susphy_quirk || + dwc->dis_enblslpm_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + reg |= DWC3_GUSB2PHYCFG_ENBLSLPM | + DWC3_GUSB2PHYCFG_SUSPHY; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + + /* Give some time for USB2 PHY to suspend */ + usleep_range(5000, 6000); + } + + phy_pm_runtime_put_sync(dwc->usb2_generic_phy); + phy_pm_runtime_put_sync(dwc->usb3_generic_phy); + break; + case DWC3_GCTL_PRTCAP_OTG: + /* do nothing during runtime_suspend */ + if (PMSG_IS_AUTO(msg)) + break; + + if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { + spin_lock_irqsave(&dwc->lock, flags); + dwc3_gadget_suspend(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + synchronize_irq(dwc->irq_gadget); + } + + dwc3_otg_exit(dwc); + dwc3_core_exit(dwc); + break; + default: + /* do nothing */ + break; + } + + return 0; +} + +static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) +{ + unsigned long flags; + int ret; + u32 reg; + + switch (dwc->current_dr_role) { + case DWC3_GCTL_PRTCAP_DEVICE: + ret = dwc3_core_init_for_resume(dwc); + if (ret) + return ret; + + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + spin_lock_irqsave(&dwc->lock, flags); + dwc3_gadget_resume(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + break; + case DWC3_GCTL_PRTCAP_HOST: + if (!PMSG_IS_AUTO(msg)) { + ret = dwc3_core_init_for_resume(dwc); + if (ret) + return ret; + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + break; + } + /* Restore GUSB2PHYCFG bits that were modified in suspend */ + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + if (dwc->dis_u2_susphy_quirk) + reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; + + if (dwc->dis_enblslpm_quirk) + reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; + + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + + phy_pm_runtime_get_sync(dwc->usb2_generic_phy); + phy_pm_runtime_get_sync(dwc->usb3_generic_phy); + break; + case DWC3_GCTL_PRTCAP_OTG: + /* nothing to do on runtime_resume */ + if (PMSG_IS_AUTO(msg)) + break; + + ret = dwc3_core_init_for_resume(dwc); + if (ret) + return ret; + + dwc3_set_prtcap(dwc, dwc->current_dr_role); + + dwc3_otg_init(dwc); + if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) { + dwc3_otg_host_init(dwc); + } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { + spin_lock_irqsave(&dwc->lock, flags); + dwc3_gadget_resume(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + } + + break; + default: + /* do nothing */ + break; + } + + return 0; +} + +static int dwc3_runtime_checks(struct dwc3 *dwc) +{ + switch (dwc->current_dr_role) { + case DWC3_GCTL_PRTCAP_DEVICE: + if (dwc->connected) + return -EBUSY; + break; + case DWC3_GCTL_PRTCAP_HOST: + default: + /* do nothing */ + break; + } + + return 0; +} + +static int dwc3_runtime_suspend(struct device *dev) +{ + struct dwc3 *dwc = 
dev_get_drvdata(dev); + int ret; + + if (dwc3_runtime_checks(dwc)) + return -EBUSY; + + ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND); + if (ret) + return ret; + + device_init_wakeup(dev, true); + + return 0; +} + +static int dwc3_runtime_resume(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + int ret; + + device_init_wakeup(dev, false); + + ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); + if (ret) + return ret; + + switch (dwc->current_dr_role) { + case DWC3_GCTL_PRTCAP_DEVICE: + dwc3_gadget_process_pending_events(dwc); + break; + case DWC3_GCTL_PRTCAP_HOST: + default: + /* do nothing */ + break; + } + + pm_runtime_mark_last_busy(dev); + + return 0; +} + +static int dwc3_runtime_idle(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + + switch (dwc->current_dr_role) { + case DWC3_GCTL_PRTCAP_DEVICE: + if (dwc3_runtime_checks(dwc)) + return -EBUSY; + break; + case DWC3_GCTL_PRTCAP_HOST: + default: + /* do nothing */ + break; + } + + pm_runtime_mark_last_busy(dev); + pm_runtime_autosuspend(dev); + + return 0; +} +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_SLEEP +static int dwc3_suspend(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + int ret; + + ret = dwc3_suspend_common(dwc, PMSG_SUSPEND); + if (ret) + return ret; + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static void dwc3_resume_quirk(struct dwc3 *dwc) +{ + u32 reg; + + if (!dwc->soc_taihang) + dwc3_writel(dwc->regs, DWC3_NCR_INTEN, 0x3F); + else { + if (device_property_read_bool(dwc->dev, "usb-role-switch")) + dwc3_writel(dwc->regs, DWC3_THANG_NCR_INTEN, 0xBF); + else + dwc3_writel(dwc->regs, DWC3_THANG_NCR_INTEN, 0x3F); + + reg = dwc3_readl(dwc->regs, DWC3_THANG_NCR_CTRL_2); + reg |= U3DRD_NCR_CTRL_2_SW_RESET_WAKEUP; + reg &= ~U3DRD_NCR_CTRL_2_UTMI_SUSPENDM_OVRD_EN; + dwc3_writel(dwc->regs, DWC3_THANG_NCR_CTRL_2, reg); + } +} + +static int dwc3_resume(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + int ret; + + pinctrl_pm_select_default_state(dev); + + dwc3_resume_quirk(dwc); + ret = dwc3_resume_common(dwc, PMSG_RESUME); + if (ret) + return ret; + + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; +} + +static void dwc3_complete(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + u32 reg; + + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && + dwc->dis_split_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); + reg |= DWC3_GUCTL3_SPLITDISABLE; + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); + } +} +#else +#define dwc3_complete NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops dwc3_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume) + .complete = dwc3_complete, + SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume, + dwc3_runtime_idle) +}; + +#ifdef CONFIG_OF +static const struct of_device_id of_dwc3_match[] = { + { + .compatible = "snps,dwc3" + }, + { + .compatible = "synopsys,dwc3" + }, + { }, +}; +MODULE_DEVICE_TABLE(of, of_dwc3_match); +#endif + +#ifdef CONFIG_ACPI + +#define ACPI_ID_INTEL_BSW "808622B7" + +static const struct acpi_device_id dwc3_acpi_match[] = { + { ACPI_ID_INTEL_BSW, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match); +#endif + +static struct platform_driver dwc3_driver = { + .probe = dwc3_probe, + .remove = dwc3_remove, + .driver = { + .name = "dwc3", + .of_match_table = of_match_ptr(of_dwc3_match), + .acpi_match_table = ACPI_PTR(dwc3_acpi_match), + .pm = &dwc3_dev_pm_ops, + }, +}; + 
+module_platform_driver(dwc3_driver); + +MODULE_ALIAS("platform:dwc3"); +MODULE_AUTHOR("Felipe Balbi "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver"); diff --git a/usb/dwc3/core.h b/usb/dwc3/core.h new file mode 100644 index 0000000..9ce3fa0 --- /dev/null +++ b/usb/dwc3/core.h @@ -0,0 +1,1667 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * core.h - DesignWare USB3 DRD Core Header + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com + * + * Authors: Felipe Balbi , + * Sebastian Andrzej Siewior + */ + +#ifndef __DRIVERS_USB_DWC3_CORE_H +#define __DRIVERS_USB_DWC3_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#define DWC3_MSG_MAX 500 + +/* Global constants */ +#define DWC3_PULL_UP_TIMEOUT 500 /* ms */ +#define DWC3_BOUNCE_SIZE 1024 /* size of a superspeed bulk */ +#define DWC3_EP0_SETUP_SIZE 512 +#define DWC3_ENDPOINTS_NUM 32 +#define DWC3_XHCI_RESOURCES_NUM 2 +#define DWC3_ISOC_MAX_RETRIES 5 + +#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */ +#define DWC3_EVENT_BUFFERS_SIZE 4096 +#define DWC3_EVENT_TYPE_MASK 0xfe + +#define DWC3_EVENT_TYPE_DEV 0 +#define DWC3_EVENT_TYPE_CARKIT 3 +#define DWC3_EVENT_TYPE_I2C 4 + +#define DWC3_DEVICE_EVENT_DISCONNECT 0 +#define DWC3_DEVICE_EVENT_RESET 1 +#define DWC3_DEVICE_EVENT_CONNECT_DONE 2 +#define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE 3 +#define DWC3_DEVICE_EVENT_WAKEUP 4 +#define DWC3_DEVICE_EVENT_HIBER_REQ 5 +#define DWC3_DEVICE_EVENT_SUSPEND 6 +#define DWC3_DEVICE_EVENT_SOF 7 +#define DWC3_DEVICE_EVENT_ERRATIC_ERROR 9 +#define DWC3_DEVICE_EVENT_CMD_CMPL 10 +#define DWC3_DEVICE_EVENT_OVERFLOW 11 + +/* Controller's role while using the OTG block */ +#define DWC3_OTG_ROLE_IDLE 0 +#define DWC3_OTG_ROLE_HOST 1 +#define DWC3_OTG_ROLE_DEVICE 2 + +#define DWC3_GEVNTCOUNT_MASK 0xfffc +#define DWC3_GEVNTCOUNT_EHB BIT(31) +#define DWC3_GSNPSID_MASK 0xffff0000 +#define DWC3_GSNPSREV_MASK 0xffff +#define DWC3_GSNPS_ID(p) (((p) & DWC3_GSNPSID_MASK) >> 16) + +/* DWC3 registers memory space boundries */ +#define DWC3_XHCI_REGS_START 0x0 +#define DWC3_XHCI_REGS_END 0x7fff +#define DWC3_GLOBALS_REGS_START 0xc100 +#define DWC3_GLOBALS_REGS_END 0xc6ff +#define DWC3_DEVICE_REGS_START 0xc700 +#define DWC3_DEVICE_REGS_END 0xcbff +#define DWC3_OTG_REGS_START 0xcc00 +#define DWC3_OTG_REGS_END 0xccff + +/* Global Registers */ +#define DWC3_GSBUSCFG0 0xc100 +#define DWC3_GSBUSCFG1 0xc104 +#define DWC3_GTXTHRCFG 0xc108 +#define DWC3_GRXTHRCFG 0xc10c +#define DWC3_GCTL 0xc110 +#define DWC3_GEVTEN 0xc114 +#define DWC3_GSTS 0xc118 +#define DWC3_GUCTL1 0xc11c +#define DWC3_GSNPSID 0xc120 +#define DWC3_GGPIO 0xc124 +#define DWC3_GUID 0xc128 +#define DWC3_GUCTL 0xc12c +#define DWC3_GBUSERRADDR0 0xc130 +#define DWC3_GBUSERRADDR1 0xc134 +#define DWC3_GPRTBIMAP0 0xc138 +#define DWC3_GPRTBIMAP1 0xc13c +#define DWC3_GHWPARAMS0 0xc140 +#define DWC3_GHWPARAMS1 0xc144 +#define DWC3_GHWPARAMS2 0xc148 +#define DWC3_GHWPARAMS3 0xc14c +#define DWC3_GHWPARAMS4 0xc150 +#define DWC3_GHWPARAMS5 0xc154 +#define DWC3_GHWPARAMS6 0xc158 +#define DWC3_GHWPARAMS7 0xc15c +#define DWC3_GDBGFIFOSPACE 0xc160 +#define DWC3_GDBGLTSSM 0xc164 +#define DWC3_GDBGBMU 0xc16c +#define DWC3_GDBGLSPMUX 0xc170 +#define DWC3_GDBGLSP 0xc174 +#define DWC3_GDBGEPINFO0 0xc178 +#define DWC3_GDBGEPINFO1 0xc17c +#define DWC3_GPRTBIMAP_HS0 0xc180 +#define 
DWC3_GPRTBIMAP_HS1 0xc184 +#define DWC3_GPRTBIMAP_FS0 0xc188 +#define DWC3_GPRTBIMAP_FS1 0xc18c +#define DWC3_GUCTL2 0xc19c + +#define DWC3_VER_NUMBER 0xc1a0 +#define DWC3_VER_TYPE 0xc1a4 + +#define DWC3_GUSB2PHYCFG(n) (0xc200 + ((n) * 0x04)) +#define DWC3_GUSB2I2CCTL(n) (0xc240 + ((n) * 0x04)) + +#define DWC3_GUSB2PHYACC(n) (0xc280 + ((n) * 0x04)) + +#define DWC3_GUSB3PIPECTL(n) (0xc2c0 + ((n) * 0x04)) + +#define DWC3_GTXFIFOSIZ(n) (0xc300 + ((n) * 0x04)) +#define DWC3_GRXFIFOSIZ(n) (0xc380 + ((n) * 0x04)) + +#define DWC3_GEVNTADRLO(n) (0xc400 + ((n) * 0x10)) +#define DWC3_GEVNTADRHI(n) (0xc404 + ((n) * 0x10)) +#define DWC3_GEVNTSIZ(n) (0xc408 + ((n) * 0x10)) +#define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10)) + +#define DWC3_GHWPARAMS8 0xc600 +#define DWC3_GUCTL3 0xc60c +#define DWC3_GFLADJ 0xc630 +#define DWC3_GHWPARAMS9 0xc680 + +/* Device Registers */ +#define DWC3_DCFG 0xc700 +#define DWC3_DCTL 0xc704 +#define DWC3_DEVTEN 0xc708 +#define DWC3_DSTS 0xc70c +#define DWC3_DGCMDPAR 0xc710 +#define DWC3_DGCMD 0xc714 +#define DWC3_DALEPENA 0xc720 + +#define DWC3_DEP_BASE(n) (0xc800 + ((n) * 0x10)) +#define DWC3_DEPCMDPAR2 0x00 +#define DWC3_DEPCMDPAR1 0x04 +#define DWC3_DEPCMDPAR0 0x08 +#define DWC3_DEPCMD 0x0c + +#define DWC3_DEV_IMOD(n) (0xca00 + ((n) * 0x4)) + +/* OTG Registers */ +#define DWC3_OCFG 0xcc00 +#define DWC3_OCTL 0xcc04 +#define DWC3_OEVT 0xcc08 +#define DWC3_OEVTEN 0xcc0C +#define DWC3_OSTS 0xcc10 + +/* NCR Registers */ +#define DWC3_NCR_INTEN 0xD000 +#define DWC3_NCR_INTR 0xD080 + +#define DWC3_THANG_NCR_INTEN 0xE000 +#define DWC3_THANG_NCR_INTR 0xE080 +#define DWC3_THANG_NCR_CTRL_2 0xE018 +#define DWC3_THANG_NCR_CTRL_5 0xE024 +#define U3DRD_NCR_INTR_IDCHG (BIT(7)) +#define U3DRD_NCR_CTRL_2_SW_RESET_WAKEUP (BIT(31)) +#define U3DRD_NCR_CTRL_2_UTMI_SUSPENDM_OVRD_EN (BIT(0)) +#define U3DRD_NCR_CTRL_5_IDCHGINTCTL (BIT(12) | BIT(13)) + +/* Phy NCR Registers */ +#define USB_PHY_NCR_STS0 0x10080 +#define USB_PHY_NCR_STS0_IDDIG (BIT(8)) + +/* Bit fields */ + +/* Global SoC Bus Configuration INCRx Register 0 */ +#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */ +#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */ +#define DWC3_GSBUSCFG0_INCR64BRSTENA (1 << 5) /* INCR64 burst */ +#define DWC3_GSBUSCFG0_INCR32BRSTENA (1 << 4) /* INCR32 burst */ +#define DWC3_GSBUSCFG0_INCR16BRSTENA (1 << 3) /* INCR16 burst */ +#define DWC3_GSBUSCFG0_INCR8BRSTENA (1 << 2) /* INCR8 burst */ +#define DWC3_GSBUSCFG0_INCR4BRSTENA (1 << 1) /* INCR4 burst */ +#define DWC3_GSBUSCFG0_INCRBRSTENA (1 << 0) /* undefined length enable */ +#define DWC3_GSBUSCFG0_INCRBRST_MASK 0xff + +/* Global Debug LSP MUX Select */ +#define DWC3_GDBGLSPMUX_ENDBC BIT(15) /* Host only */ +#define DWC3_GDBGLSPMUX_HOSTSELECT(n) ((n) & 0x3fff) +#define DWC3_GDBGLSPMUX_DEVSELECT(n) (((n) & 0xf) << 4) +#define DWC3_GDBGLSPMUX_EPSELECT(n) ((n) & 0xf) + +/* Global Debug Queue/FIFO Space Available Register */ +#define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f) +#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) +#define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) + +#define DWC3_TXFIFO 0 +#define DWC3_RXFIFO 1 +#define DWC3_TXREQQ 2 +#define DWC3_RXREQQ 3 +#define DWC3_RXINFOQ 4 +#define DWC3_PSTATQ 5 +#define DWC3_DESCFETCHQ 6 +#define DWC3_EVENTQ 7 +#define DWC3_AUXEVENTQ 8 + +/* Global RX Threshold Configuration Register */ +#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) +#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24) +#define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29) + +/* 
Global RX Threshold Configuration Register for DWC_usb31 only */ +#define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16) +#define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21) +#define DWC31_GRXTHRCFG_PKTCNTSEL BIT(26) +#define DWC31_RXTHRNUMPKTSEL_HS_PRD BIT(15) +#define DWC31_RXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13) +#define DWC31_RXTHRNUMPKTSEL_PRD BIT(10) +#define DWC31_RXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5) +#define DWC31_MAXRXBURSTSIZE_PRD(n) ((n) & 0x1f) + +/* Global TX Threshold Configuration Register for DWC_usb31 only */ +#define DWC31_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0x1f) << 16) +#define DWC31_GTXTHRCFG_TXPKTCNT(n) (((n) & 0x1f) << 21) +#define DWC31_GTXTHRCFG_PKTCNTSEL BIT(26) +#define DWC31_TXTHRNUMPKTSEL_HS_PRD BIT(15) +#define DWC31_TXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13) +#define DWC31_TXTHRNUMPKTSEL_PRD BIT(10) +#define DWC31_TXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5) +#define DWC31_MAXTXBURSTSIZE_PRD(n) ((n) & 0x1f) + +/* Global Configuration Register */ +#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19) +#define DWC3_GCTL_U2RSTECN BIT(16) +#define DWC3_GCTL_RAMCLKSEL(x) (((x) & DWC3_GCTL_CLK_MASK) << 6) +#define DWC3_GCTL_CLK_BUS (0) +#define DWC3_GCTL_CLK_PIPE (1) +#define DWC3_GCTL_CLK_PIPEHALF (2) +#define DWC3_GCTL_CLK_MASK (3) + +#define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12) +#define DWC3_GCTL_PRTCAPDIR(n) ((n) << 12) +#define DWC3_GCTL_PRTCAP_HOST 1 +#define DWC3_GCTL_PRTCAP_DEVICE 2 +#define DWC3_GCTL_PRTCAP_OTG 3 + +#define DWC3_GCTL_CORESOFTRESET BIT(11) +#define DWC3_GCTL_SOFITPSYNC BIT(10) +#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4) +#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3) +#define DWC3_GCTL_DISSCRAMBLE BIT(3) +#define DWC3_GCTL_U2EXIT_LFPS BIT(2) +#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1) +#define DWC3_GCTL_DSBLCLKGTNG BIT(0) + +/* Global User Control Register */ +#define DWC3_GUCTL_HSTINAUTORETRY BIT(14) + +/* Global User Control 1 Register */ +#define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT BIT(31) +#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) +#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24) +#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) + +/* Global Status Register */ +#define DWC3_GSTS_OTG_IP BIT(10) +#define DWC3_GSTS_BC_IP BIT(9) +#define DWC3_GSTS_ADP_IP BIT(8) +#define DWC3_GSTS_HOST_IP BIT(7) +#define DWC3_GSTS_DEVICE_IP BIT(6) +#define DWC3_GSTS_CSR_TIMEOUT BIT(5) +#define DWC3_GSTS_BUS_ERR_ADDR_VLD BIT(4) +#define DWC3_GSTS_CURMOD(n) ((n) & 0x3) +#define DWC3_GSTS_CURMOD_DEVICE 0 +#define DWC3_GSTS_CURMOD_HOST 1 + +/* Global USB2 PHY Configuration Register */ +#define DWC3_GUSB2PHYCFG_PHYSOFTRST BIT(31) +#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS BIT(30) +#define DWC3_GUSB2PHYCFG_SUSPHY BIT(6) +#define DWC3_GUSB2PHYCFG_ULPI_UTMI BIT(4) +#define DWC3_GUSB2PHYCFG_ENBLSLPM BIT(8) +#define DWC3_GUSB2PHYCFG_PHYIF(n) (n << 3) +#define DWC3_GUSB2PHYCFG_PHYIF_MASK DWC3_GUSB2PHYCFG_PHYIF(1) +#define DWC3_GUSB2PHYCFG_USBTRDTIM(n) (n << 10) +#define DWC3_GUSB2PHYCFG_USBTRDTIM_MASK DWC3_GUSB2PHYCFG_USBTRDTIM(0xf) +#define USBTRDTIM_UTMI_8_BIT 9 +#define USBTRDTIM_UTMI_16_BIT 5 +#define UTMI_PHYIF_16_BIT 1 +#define UTMI_PHYIF_8_BIT 0 + +/* Global USB2 PHY Vendor Control Register */ +#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25) +#define DWC3_GUSB2PHYACC_DONE BIT(24) +#define DWC3_GUSB2PHYACC_BUSY BIT(23) +#define DWC3_GUSB2PHYACC_WRITE BIT(22) +#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16) +#define DWC3_GUSB2PHYACC_EXTEND_ADDR(n) (n << 8) +#define DWC3_GUSB2PHYACC_DATA(n) (n & 0xff) + +/* Global USB3 PIPE Control Register */ +#define 
DWC3_GUSB3PIPECTL_PHYSOFTRST BIT(31) +#define DWC3_GUSB3PIPECTL_U2SSINP3OK BIT(29) +#define DWC3_GUSB3PIPECTL_DISRXDETINP3 BIT(28) +#define DWC3_GUSB3PIPECTL_UX_EXIT_PX BIT(27) +#define DWC3_GUSB3PIPECTL_REQP1P2P3 BIT(24) +#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19) +#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7) +#define DWC3_GUSB3PIPECTL_DEP1P2P3_EN DWC3_GUSB3PIPECTL_DEP1P2P3(1) +#define DWC3_GUSB3PIPECTL_DEPOCHANGE BIT(18) +#define DWC3_GUSB3PIPECTL_SUSPHY BIT(17) +#define DWC3_GUSB3PIPECTL_LFPSFILT BIT(9) +#define DWC3_GUSB3PIPECTL_RX_DETOPOLL BIT(8) +#define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK DWC3_GUSB3PIPECTL_TX_DEEPH(3) +#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1) + +/* Global TX Fifo Size Register */ +#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */ +#define DWC31_GTXFIFOSIZ_TXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ +#define DWC3_GTXFIFOSIZ_TXFDEP(n) ((n) & 0xffff) +#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) + +/* Global RX Fifo Size Register */ +#define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ +#define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff) + +/* Global Event Size Registers */ +#define DWC3_GEVNTSIZ_INTMASK BIT(31) +#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff) + +/* Global HWPARAMS0 Register */ +#define DWC3_GHWPARAMS0_MODE(n) ((n) & 0x3) +#define DWC3_GHWPARAMS0_MODE_GADGET 0 +#define DWC3_GHWPARAMS0_MODE_HOST 1 +#define DWC3_GHWPARAMS0_MODE_DRD 2 +#define DWC3_GHWPARAMS0_MBUS_TYPE(n) (((n) >> 3) & 0x7) +#define DWC3_GHWPARAMS0_SBUS_TYPE(n) (((n) >> 6) & 0x3) +#define DWC3_GHWPARAMS0_MDWIDTH(n) (((n) >> 8) & 0xff) +#define DWC3_GHWPARAMS0_SDWIDTH(n) (((n) >> 16) & 0xff) +#define DWC3_GHWPARAMS0_AWIDTH(n) (((n) >> 24) & 0xff) + +/* Global HWPARAMS1 Register */ +#define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24) +#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0 +#define DWC3_GHWPARAMS1_EN_PWROPT_CLK 1 +#define DWC3_GHWPARAMS1_EN_PWROPT_HIB 2 +#define DWC3_GHWPARAMS1_PWROPT(n) ((n) << 24) +#define DWC3_GHWPARAMS1_PWROPT_MASK DWC3_GHWPARAMS1_PWROPT(3) +#define DWC3_GHWPARAMS1_ENDBC BIT(31) + +/* Global HWPARAMS3 Register */ +#define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3) +#define DWC3_GHWPARAMS3_SSPHY_IFC_DIS 0 +#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN1 1 +#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN2 2 /* DWC_usb31 only */ +#define DWC3_GHWPARAMS3_HSPHY_IFC(n) (((n) & (3 << 2)) >> 2) +#define DWC3_GHWPARAMS3_HSPHY_IFC_DIS 0 +#define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI 1 +#define DWC3_GHWPARAMS3_HSPHY_IFC_ULPI 2 +#define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI 3 +#define DWC3_GHWPARAMS3_FSPHY_IFC(n) (((n) & (3 << 4)) >> 4) +#define DWC3_GHWPARAMS3_FSPHY_IFC_DIS 0 +#define DWC3_GHWPARAMS3_FSPHY_IFC_ENA 1 + +/* Global HWPARAMS4 Register */ +#define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13) +#define DWC3_MAX_HIBER_SCRATCHBUFS 15 + +/* Global HWPARAMS6 Register */ +#define DWC3_GHWPARAMS6_BCSUPPORT BIT(14) +#define DWC3_GHWPARAMS6_OTG3SUPPORT BIT(13) +#define DWC3_GHWPARAMS6_ADPSUPPORT BIT(12) +#define DWC3_GHWPARAMS6_HNPSUPPORT BIT(11) +#define DWC3_GHWPARAMS6_SRPSUPPORT BIT(10) +#define DWC3_GHWPARAMS6_EN_FPGA BIT(7) + +/* DWC_usb32 only */ +#define DWC3_GHWPARAMS6_MDWIDTH(n) ((n) & (0x3 << 8)) + +/* Global HWPARAMS7 Register */ +#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff) +#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff) + +/* Global HWPARAMS9 Register */ +#define DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS BIT(0) + +/* Global Frame Length Adjustment Register */ +#define 
DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7) +#define DWC3_GFLADJ_30MHZ_MASK 0x3f + +/* Global User Control Register 2 */ +#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) + +/* Global User Control Register 3 */ +#define DWC3_GUCTL3_SPLITDISABLE BIT(14) + +/* Device Configuration Register */ +#define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */ + +#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) +#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) + +#define DWC3_DCFG_SPEED_MASK (7 << 0) +#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ +#define DWC3_DCFG_SUPERSPEED (4 << 0) +#define DWC3_DCFG_HIGHSPEED (0 << 0) +#define DWC3_DCFG_FULLSPEED BIT(0) +#define DWC3_DCFG_LOWSPEED (2 << 0) + +#define DWC3_DCFG_NUMP_SHIFT 17 +#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f) +#define DWC3_DCFG_NUMP_MASK (0x1f << DWC3_DCFG_NUMP_SHIFT) +#define DWC3_DCFG_LPM_CAP BIT(22) +#define DWC3_DCFG_IGNSTRMPP BIT(23) + +/* Device Control Register */ +#define DWC3_DCTL_RUN_STOP BIT(31) +#define DWC3_DCTL_CSFTRST BIT(30) +#define DWC3_DCTL_LSFTRST BIT(29) + +#define DWC3_DCTL_HIRD_THRES_MASK (0x1f << 24) +#define DWC3_DCTL_HIRD_THRES(n) ((n) << 24) + +#define DWC3_DCTL_APPL1RES BIT(23) + +/* These apply for core versions 1.87a and earlier */ +#define DWC3_DCTL_TRGTULST_MASK (0x0f << 17) +#define DWC3_DCTL_TRGTULST(n) ((n) << 17) +#define DWC3_DCTL_TRGTULST_U2 (DWC3_DCTL_TRGTULST(2)) +#define DWC3_DCTL_TRGTULST_U3 (DWC3_DCTL_TRGTULST(3)) +#define DWC3_DCTL_TRGTULST_SS_DIS (DWC3_DCTL_TRGTULST(4)) +#define DWC3_DCTL_TRGTULST_RX_DET (DWC3_DCTL_TRGTULST(5)) +#define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6)) + +/* These apply for core versions 1.94a and later */ +#define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20) + +#define DWC3_DCTL_KEEP_CONNECT BIT(19) +#define DWC3_DCTL_L1_HIBER_EN BIT(18) +#define DWC3_DCTL_CRS BIT(17) +#define DWC3_DCTL_CSS BIT(16) + +#define DWC3_DCTL_INITU2ENA BIT(12) +#define DWC3_DCTL_ACCEPTU2ENA BIT(11) +#define DWC3_DCTL_INITU1ENA BIT(10) +#define DWC3_DCTL_ACCEPTU1ENA BIT(9) +#define DWC3_DCTL_TSTCTRL_MASK (0xf << 1) + +#define DWC3_DCTL_ULSTCHNGREQ_MASK (0x0f << 5) +#define DWC3_DCTL_ULSTCHNGREQ(n) (((n) << 5) & DWC3_DCTL_ULSTCHNGREQ_MASK) + +#define DWC3_DCTL_ULSTCHNG_NO_ACTION (DWC3_DCTL_ULSTCHNGREQ(0)) +#define DWC3_DCTL_ULSTCHNG_SS_DISABLED (DWC3_DCTL_ULSTCHNGREQ(4)) +#define DWC3_DCTL_ULSTCHNG_RX_DETECT (DWC3_DCTL_ULSTCHNGREQ(5)) +#define DWC3_DCTL_ULSTCHNG_SS_INACTIVE (DWC3_DCTL_ULSTCHNGREQ(6)) +#define DWC3_DCTL_ULSTCHNG_RECOVERY (DWC3_DCTL_ULSTCHNGREQ(8)) +#define DWC3_DCTL_ULSTCHNG_COMPLIANCE (DWC3_DCTL_ULSTCHNGREQ(10)) +#define DWC3_DCTL_ULSTCHNG_LOOPBACK (DWC3_DCTL_ULSTCHNGREQ(11)) + +/* Device Event Enable Register */ +#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN BIT(12) +#define DWC3_DEVTEN_EVNTOVERFLOWEN BIT(11) +#define DWC3_DEVTEN_CMDCMPLTEN BIT(10) +#define DWC3_DEVTEN_ERRTICERREN BIT(9) +#define DWC3_DEVTEN_SOFEN BIT(7) +#define DWC3_DEVTEN_U3L2L1SUSPEN BIT(6) +#define DWC3_DEVTEN_HIBERNATIONREQEVTEN BIT(5) +#define DWC3_DEVTEN_WKUPEVTEN BIT(4) +#define DWC3_DEVTEN_ULSTCNGEN BIT(3) +#define DWC3_DEVTEN_CONNECTDONEEN BIT(2) +#define DWC3_DEVTEN_USBRSTEN BIT(1) +#define DWC3_DEVTEN_DISCONNEVTEN BIT(0) + +#define DWC3_DSTS_CONNLANES(n) (((n) >> 30) & 0x3) /* DWC_usb32 only */ + +/* Device Status Register */ +#define DWC3_DSTS_DCNRD BIT(29) + +/* This applies for core versions 1.87a and earlier */ +#define DWC3_DSTS_PWRUPREQ BIT(24) + +/* These apply for core versions 1.94a and later */ +#define DWC3_DSTS_RSS BIT(25) +#define 
DWC3_DSTS_SSS BIT(24) + +#define DWC3_DSTS_COREIDLE BIT(23) +#define DWC3_DSTS_DEVCTRLHLT BIT(22) + +#define DWC3_DSTS_USBLNKST_MASK (0x0f << 18) +#define DWC3_DSTS_USBLNKST(n) (((n) & DWC3_DSTS_USBLNKST_MASK) >> 18) + +#define DWC3_DSTS_RXFIFOEMPTY BIT(17) + +#define DWC3_DSTS_SOFFN_MASK (0x3fff << 3) +#define DWC3_DSTS_SOFFN(n) (((n) & DWC3_DSTS_SOFFN_MASK) >> 3) + +#define DWC3_DSTS_CONNECTSPD (7 << 0) + +#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ +#define DWC3_DSTS_SUPERSPEED (4 << 0) +#define DWC3_DSTS_HIGHSPEED (0 << 0) +#define DWC3_DSTS_FULLSPEED BIT(0) +#define DWC3_DSTS_LOWSPEED (2 << 0) + +/* Device Generic Command Register */ +#define DWC3_DGCMD_SET_LMP 0x01 +#define DWC3_DGCMD_SET_PERIODIC_PAR 0x02 +#define DWC3_DGCMD_XMIT_FUNCTION 0x03 + +/* These apply for core versions 1.94a and later */ +#define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO 0x04 +#define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI 0x05 + +#define DWC3_DGCMD_SELECTED_FIFO_FLUSH 0x09 +#define DWC3_DGCMD_ALL_FIFO_FLUSH 0x0a +#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c +#define DWC3_DGCMD_SET_ENDPOINT_PRIME 0x0d +#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10 + +#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F) +#define DWC3_DGCMD_CMDACT BIT(10) +#define DWC3_DGCMD_CMDIOC BIT(8) + +/* Device Generic Command Parameter Register */ +#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT BIT(0) +#define DWC3_DGCMDPAR_FIFO_NUM(n) ((n) << 0) +#define DWC3_DGCMDPAR_RX_FIFO (0 << 5) +#define DWC3_DGCMDPAR_TX_FIFO BIT(5) +#define DWC3_DGCMDPAR_LOOPBACK_DIS (0 << 0) +#define DWC3_DGCMDPAR_LOOPBACK_ENA BIT(0) + +/* Device Endpoint Command Register */ +#define DWC3_DEPCMD_PARAM_SHIFT 16 +#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT) +#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) +#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) +#define DWC3_DEPCMD_HIPRI_FORCERM BIT(11) +#define DWC3_DEPCMD_CLEARPENDIN BIT(11) +#define DWC3_DEPCMD_CMDACT BIT(10) +#define DWC3_DEPCMD_CMDIOC BIT(8) + +#define DWC3_DEPCMD_DEPSTARTCFG (0x09 << 0) +#define DWC3_DEPCMD_ENDTRANSFER (0x08 << 0) +#define DWC3_DEPCMD_UPDATETRANSFER (0x07 << 0) +#define DWC3_DEPCMD_STARTTRANSFER (0x06 << 0) +#define DWC3_DEPCMD_CLEARSTALL (0x05 << 0) +#define DWC3_DEPCMD_SETSTALL (0x04 << 0) +/* This applies for core versions 1.90a and earlier */ +#define DWC3_DEPCMD_GETSEQNUMBER (0x03 << 0) +/* This applies for core versions 1.94a and later */ +#define DWC3_DEPCMD_GETEPSTATE (0x03 << 0) +#define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0) +#define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0) + +#define DWC3_DEPCMD_CMD(x) ((x) & 0xf) + +/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */ +#define DWC3_DALEPENA_EP(n) BIT(n) + +#define DWC3_DEPCMD_TYPE_CONTROL 0 +#define DWC3_DEPCMD_TYPE_ISOC 1 +#define DWC3_DEPCMD_TYPE_BULK 2 +#define DWC3_DEPCMD_TYPE_INTR 3 + +#define DWC3_DEV_IMOD_COUNT_SHIFT 16 +#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16) +#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0 +#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0) + +/* OTG Configuration Register */ +#define DWC3_OCFG_DISPWRCUTTOFF BIT(5) +#define DWC3_OCFG_HIBDISMASK BIT(4) +#define DWC3_OCFG_SFTRSTMASK BIT(3) +#define DWC3_OCFG_OTGVERSION BIT(2) +#define DWC3_OCFG_HNPCAP BIT(1) +#define DWC3_OCFG_SRPCAP BIT(0) + +/* OTG CTL Register */ +#define DWC3_OCTL_OTG3GOERR BIT(7) +#define DWC3_OCTL_PERIMODE BIT(6) +#define DWC3_OCTL_PRTPWRCTL BIT(5) +#define DWC3_OCTL_HNPREQ BIT(4) +#define DWC3_OCTL_SESREQ BIT(3) +#define DWC3_OCTL_TERMSELIDPULSE BIT(2) +#define 
DWC3_OCTL_DEVSETHNPEN BIT(1) +#define DWC3_OCTL_HSTSETHNPEN BIT(0) + +/* OTG Event Register */ +#define DWC3_OEVT_DEVICEMODE BIT(31) +#define DWC3_OEVT_XHCIRUNSTPSET BIT(27) +#define DWC3_OEVT_DEVRUNSTPSET BIT(26) +#define DWC3_OEVT_HIBENTRY BIT(25) +#define DWC3_OEVT_CONIDSTSCHNG BIT(24) +#define DWC3_OEVT_HRRCONFNOTIF BIT(23) +#define DWC3_OEVT_HRRINITNOTIF BIT(22) +#define DWC3_OEVT_ADEVIDLE BIT(21) +#define DWC3_OEVT_ADEVBHOSTEND BIT(20) +#define DWC3_OEVT_ADEVHOST BIT(19) +#define DWC3_OEVT_ADEVHNPCHNG BIT(18) +#define DWC3_OEVT_ADEVSRPDET BIT(17) +#define DWC3_OEVT_ADEVSESSENDDET BIT(16) +#define DWC3_OEVT_BDEVBHOSTEND BIT(11) +#define DWC3_OEVT_BDEVHNPCHNG BIT(10) +#define DWC3_OEVT_BDEVSESSVLDDET BIT(9) +#define DWC3_OEVT_BDEVVBUSCHNG BIT(8) +#define DWC3_OEVT_BSESSVLD BIT(3) +#define DWC3_OEVT_HSTNEGSTS BIT(2) +#define DWC3_OEVT_SESREQSTS BIT(1) +#define DWC3_OEVT_ERROR BIT(0) + +/* OTG Event Enable Register */ +#define DWC3_OEVTEN_XHCIRUNSTPSETEN BIT(27) +#define DWC3_OEVTEN_DEVRUNSTPSETEN BIT(26) +#define DWC3_OEVTEN_HIBENTRYEN BIT(25) +#define DWC3_OEVTEN_CONIDSTSCHNGEN BIT(24) +#define DWC3_OEVTEN_HRRCONFNOTIFEN BIT(23) +#define DWC3_OEVTEN_HRRINITNOTIFEN BIT(22) +#define DWC3_OEVTEN_ADEVIDLEEN BIT(21) +#define DWC3_OEVTEN_ADEVBHOSTENDEN BIT(20) +#define DWC3_OEVTEN_ADEVHOSTEN BIT(19) +#define DWC3_OEVTEN_ADEVHNPCHNGEN BIT(18) +#define DWC3_OEVTEN_ADEVSRPDETEN BIT(17) +#define DWC3_OEVTEN_ADEVSESSENDDETEN BIT(16) +#define DWC3_OEVTEN_BDEVBHOSTENDEN BIT(11) +#define DWC3_OEVTEN_BDEVHNPCHNGEN BIT(10) +#define DWC3_OEVTEN_BDEVSESSVLDDETEN BIT(9) +#define DWC3_OEVTEN_BDEVVBUSCHNGEN BIT(8) + +/* OTG Status Register */ +#define DWC3_OSTS_DEVRUNSTP BIT(13) +#define DWC3_OSTS_XHCIRUNSTP BIT(12) +#define DWC3_OSTS_PERIPHERALSTATE BIT(4) +#define DWC3_OSTS_XHCIPRTPOWER BIT(3) +#define DWC3_OSTS_BSESVLD BIT(2) +#define DWC3_OSTS_VBUSVLD BIT(1) +#define DWC3_OSTS_CONIDSTS BIT(0) + +/* Structures */ + +struct dwc3_trb; + +/** + * struct dwc3_event_buffer - Software event buffer representation + * @buf: _THE_ buffer + * @cache: The buffer cache used in the threaded interrupt + * @length: size of this buffer + * @lpos: event offset + * @count: cache of last read event count register + * @flags: flags related to this event buffer + * @dma: dma_addr_t + * @dwc: pointer to DWC controller + */ +struct dwc3_event_buffer { + void *buf; + void *cache; + unsigned int length; + unsigned int lpos; + unsigned int count; + unsigned int flags; + +#define DWC3_EVENT_PENDING BIT(0) + + dma_addr_t dma; + + struct dwc3 *dwc; + + ANDROID_KABI_RESERVE(1); +}; + +#define DWC3_EP_FLAG_STALLED BIT(0) +#define DWC3_EP_FLAG_WEDGED BIT(1) + +#define DWC3_EP_DIRECTION_TX true +#define DWC3_EP_DIRECTION_RX false + +#define DWC3_TRB_NUM 256 + +/** + * struct dwc3_ep - device side endpoint representation + * @endpoint: usb endpoint + * @cancelled_list: list of cancelled requests for this endpoint + * @pending_list: list of pending requests for this endpoint + * @started_list: list of started requests on this endpoint + * @regs: pointer to first endpoint register + * @trb_pool: array of transaction buffers + * @trb_pool_dma: dma address of @trb_pool + * @trb_enqueue: enqueue 'pointer' into TRB array + * @trb_dequeue: dequeue 'pointer' into TRB array + * @dwc: pointer to DWC controller + * @saved_state: ep state saved during hibernation + * @flags: endpoint flags (wedged, stalled, ...) 
+ * @number: endpoint number (1 - 15) + * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK + * @resource_index: Resource transfer index + * @frame_number: set to the frame number we want this transfer to start (ISOC) + * @interval: the interval on which the ISOC transfer is started + * @name: a human readable name e.g. ep1out-bulk + * @direction: true for TX, false for RX + * @stream_capable: true when streams are enabled + * @combo_num: the test combination BIT[15:14] of the frame number to test + * isochronous START TRANSFER command failure workaround + * @start_cmd_status: the status of testing START TRANSFER command with + * combo_num = 'b00 + */ +struct dwc3_ep { + struct usb_ep endpoint; + struct list_head cancelled_list; + struct list_head pending_list; + struct list_head started_list; + + void __iomem *regs; + + struct dwc3_trb *trb_pool; + dma_addr_t trb_pool_dma; + struct dwc3 *dwc; + + u32 saved_state; + unsigned int flags; +#define DWC3_EP_ENABLED BIT(0) +#define DWC3_EP_STALL BIT(1) +#define DWC3_EP_WEDGE BIT(2) +#define DWC3_EP_TRANSFER_STARTED BIT(3) +#define DWC3_EP_END_TRANSFER_PENDING BIT(4) +#define DWC3_EP_PENDING_REQUEST BIT(5) +#define DWC3_EP_DELAY_START BIT(6) +#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7) +#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8) +#define DWC3_EP_FORCE_RESTART_STREAM BIT(9) +#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10) +#define DWC3_EP_PENDING_CLEAR_STALL BIT(11) +#define DWC3_EP_TXFIFO_RESIZED BIT(12) + + /* This last one is specific to EP0 */ +#define DWC3_EP0_DIR_IN BIT(31) + + /* + * IMPORTANT: we *know* we have 256 TRBs in our @trb_pool, so we will + * use a u8 type here. If anybody decides to increase number of TRBs to + * anything larger than 256 - I can't see why people would want to do + * this though - then this type needs to be changed. + * + * By using u8 types we ensure that our % operator when incrementing + * enqueue and dequeue get optimized away by the compiler. 
+ */ + u8 trb_enqueue; + u8 trb_dequeue; + + u8 number; + u8 type; + u8 resource_index; + u32 frame_number; + u32 interval; + + char name[20]; + + unsigned direction:1; + unsigned stream_capable:1; + + /* For isochronous START TRANSFER workaround only */ + u8 combo_num; + int start_cmd_status; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); +}; + +enum dwc3_phy { + DWC3_PHY_UNKNOWN = 0, + DWC3_PHY_USB3, + DWC3_PHY_USB2, +}; + +enum dwc3_ep0_next { + DWC3_EP0_UNKNOWN = 0, + DWC3_EP0_COMPLETE, + DWC3_EP0_NRDY_DATA, + DWC3_EP0_NRDY_STATUS, +}; + +enum dwc3_ep0_state { + EP0_UNCONNECTED = 0, + EP0_SETUP_PHASE, + EP0_DATA_PHASE, + EP0_STATUS_PHASE, +}; + +enum dwc3_link_state { + /* In SuperSpeed */ + DWC3_LINK_STATE_U0 = 0x00, /* in HS, means ON */ + DWC3_LINK_STATE_U1 = 0x01, + DWC3_LINK_STATE_U2 = 0x02, /* in HS, means SLEEP */ + DWC3_LINK_STATE_U3 = 0x03, /* in HS, means SUSPEND */ + DWC3_LINK_STATE_SS_DIS = 0x04, + DWC3_LINK_STATE_RX_DET = 0x05, /* in HS, means Early Suspend */ + DWC3_LINK_STATE_SS_INACT = 0x06, + DWC3_LINK_STATE_POLL = 0x07, + DWC3_LINK_STATE_RECOV = 0x08, + DWC3_LINK_STATE_HRESET = 0x09, + DWC3_LINK_STATE_CMPLY = 0x0a, + DWC3_LINK_STATE_LPBK = 0x0b, + DWC3_LINK_STATE_RESET = 0x0e, + DWC3_LINK_STATE_RESUME = 0x0f, + DWC3_LINK_STATE_MASK = 0x0f, +}; + +/* TRB Length, PCM and Status */ +#define DWC3_TRB_SIZE_MASK (0x00ffffff) +#define DWC3_TRB_SIZE_LENGTH(n) ((n) & DWC3_TRB_SIZE_MASK) +#define DWC3_TRB_SIZE_PCM1(n) (((n) & 0x03) << 24) +#define DWC3_TRB_SIZE_TRBSTS(n) (((n) & (0x0f << 28)) >> 28) + +#define DWC3_TRBSTS_OK 0 +#define DWC3_TRBSTS_MISSED_ISOC 1 +#define DWC3_TRBSTS_SETUP_PENDING 2 +#define DWC3_TRB_STS_XFER_IN_PROG 4 + +/* TRB Control */ +#define DWC3_TRB_CTRL_HWO BIT(0) +#define DWC3_TRB_CTRL_LST BIT(1) +#define DWC3_TRB_CTRL_CHN BIT(2) +#define DWC3_TRB_CTRL_CSP BIT(3) +#define DWC3_TRB_CTRL_TRBCTL(n) (((n) & 0x3f) << 4) +#define DWC3_TRB_CTRL_ISP_IMI BIT(10) +#define DWC3_TRB_CTRL_IOC BIT(11) +#define DWC3_TRB_CTRL_SID_SOFN(n) (((n) & 0xffff) << 14) +#define DWC3_TRB_CTRL_GET_SID_SOFN(n) (((n) & (0xffff << 14)) >> 14) + +#define DWC3_TRBCTL_TYPE(n) ((n) & (0x3f << 4)) +#define DWC3_TRBCTL_NORMAL DWC3_TRB_CTRL_TRBCTL(1) +#define DWC3_TRBCTL_CONTROL_SETUP DWC3_TRB_CTRL_TRBCTL(2) +#define DWC3_TRBCTL_CONTROL_STATUS2 DWC3_TRB_CTRL_TRBCTL(3) +#define DWC3_TRBCTL_CONTROL_STATUS3 DWC3_TRB_CTRL_TRBCTL(4) +#define DWC3_TRBCTL_CONTROL_DATA DWC3_TRB_CTRL_TRBCTL(5) +#define DWC3_TRBCTL_ISOCHRONOUS_FIRST DWC3_TRB_CTRL_TRBCTL(6) +#define DWC3_TRBCTL_ISOCHRONOUS DWC3_TRB_CTRL_TRBCTL(7) +#define DWC3_TRBCTL_LINK_TRB DWC3_TRB_CTRL_TRBCTL(8) + +/** + * struct dwc3_trb - transfer request block (hw format) + * @bpl: DW0-3 + * @bph: DW4-7 + * @size: DW8-B + * @ctrl: DWC-F + */ +struct dwc3_trb { + u32 bpl; + u32 bph; + u32 size; + u32 ctrl; +} __packed; + +/** + * struct dwc3_hwparams - copy of HWPARAMS registers + * @hwparams0: GHWPARAMS0 + * @hwparams1: GHWPARAMS1 + * @hwparams2: GHWPARAMS2 + * @hwparams3: GHWPARAMS3 + * @hwparams4: GHWPARAMS4 + * @hwparams5: GHWPARAMS5 + * @hwparams6: GHWPARAMS6 + * @hwparams7: GHWPARAMS7 + * @hwparams8: GHWPARAMS8 + * @hwparams9: GHWPARAMS9 + */ +struct dwc3_hwparams { + u32 hwparams0; + u32 hwparams1; + u32 hwparams2; + u32 hwparams3; + u32 hwparams4; + u32 hwparams5; + u32 hwparams6; + u32 hwparams7; + u32 hwparams8; + u32 hwparams9; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); +}; + +/* HWPARAMS0 */ +#define DWC3_MODE(n) ((n) & 0x7) + +/* HWPARAMS1 */ +#define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15) + +/* 
HWPARAMS3 */ +#define DWC3_NUM_IN_EPS_MASK (0x1f << 18) +#define DWC3_NUM_EPS_MASK (0x3f << 12) +#define DWC3_NUM_EPS(p) (((p)->hwparams3 & \ + (DWC3_NUM_EPS_MASK)) >> 12) +#define DWC3_NUM_IN_EPS(p) (((p)->hwparams3 & \ + (DWC3_NUM_IN_EPS_MASK)) >> 18) + +/* HWPARAMS7 */ +#define DWC3_RAM1_DEPTH(n) ((n) & 0xffff) + +/** + * struct dwc3_request - representation of a transfer request + * @request: struct usb_request to be transferred + * @list: a list_head used for request queueing + * @dep: struct dwc3_ep owning this request + * @sg: pointer to first incomplete sg + * @start_sg: pointer to the sg which should be queued next + * @num_pending_sgs: counter to pending sgs + * @num_queued_sgs: counter to the number of sgs which already got queued + * @remaining: amount of data remaining + * @status: internal dwc3 request status tracking + * @epnum: endpoint number to which this request refers + * @trb: pointer to struct dwc3_trb + * @trb_dma: DMA address of @trb + * @num_trbs: number of TRBs used by this request + * @needs_extra_trb: true when request needs one extra TRB (either due to ZLP + * or unaligned OUT) + * @direction: IN or OUT direction flag + * @mapped: true when request has been dma-mapped + */ +struct dwc3_request { + struct usb_request request; + struct list_head list; + struct dwc3_ep *dep; + struct scatterlist *sg; + struct scatterlist *start_sg; + + unsigned int num_pending_sgs; + unsigned int num_queued_sgs; + unsigned int remaining; + + unsigned int status; +#define DWC3_REQUEST_STATUS_QUEUED 0 +#define DWC3_REQUEST_STATUS_STARTED 1 +#define DWC3_REQUEST_STATUS_DISCONNECTED 2 +#define DWC3_REQUEST_STATUS_DEQUEUED 3 +#define DWC3_REQUEST_STATUS_STALLED 4 +#define DWC3_REQUEST_STATUS_COMPLETED 5 +#define DWC3_REQUEST_STATUS_UNKNOWN -1 + + u8 epnum; + struct dwc3_trb *trb; + dma_addr_t trb_dma; + + unsigned int num_trbs; + + unsigned int needs_extra_trb:1; + unsigned int direction:1; + unsigned int mapped:1; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); +}; + +/* + * struct dwc3_scratchpad_array - hibernation scratchpad array + * (format defined by hw) + */ +struct dwc3_scratchpad_array { + __le64 dma_adr[DWC3_MAX_HIBER_SCRATCHBUFS]; +}; + +/** + * struct dwc3 - representation of our controller + * @drd_work: workqueue used for role swapping + * @ep0_trb: trb which is used for the ctrl_req + * @bounce: address of bounce buffer + * @scratchbuf: address of scratch buffer + * @setup_buf: used while precessing STD USB requests + * @ep0_trb_addr: dma address of @ep0_trb + * @bounce_addr: dma address of @bounce + * @ep0_usb_req: dummy req used while handling STD USB requests + * @scratch_addr: dma address of scratchbuf + * @ep0_in_setup: one control transfer is completed and enter setup phase + * @lock: for synchronizing + * @mutex: for mode switching + * @dev: pointer to our struct device + * @sysdev: pointer to the DMA-capable device + * @xhci: pointer to our xHCI child + * @xhci_resources: struct resources for our @xhci child + * @ev_buf: struct dwc3_event_buffer pointer + * @eps: endpoint array + * @gadget: device side representation of the peripheral controller + * @gadget_driver: pointer to the gadget driver + * @clks: array of clocks + * @num_clks: number of clocks + * @reset: reset control + * @regs: base address for our registers + * @regs_size: address space size + * @fladj: frame length adjustment + * @irq_gadget: peripheral controller's IRQ number + * @otg_irq: IRQ number for OTG IRQs + * @current_otg_role: current role of operation while using the OTG block + * 
@desired_otg_role: desired role of operation while using the OTG block + * @otg_restart_host: flag that OTG controller needs to restart host + * @nr_scratch: number of scratch buffers + * @u1u2: only used on revisions <1.83a for workaround + * @maximum_speed: maximum speed requested (mainly for testing purposes) + * @max_ssp_rate: SuperSpeed Plus maximum signaling rate and lane count + * @gadget_max_speed: maximum gadget speed requested + * @gadget_ssp_rate: Gadget driver's maximum supported SuperSpeed Plus signaling + * rate and lane count. + * @ip: controller's ID + * @revision: controller's version of an IP + * @version_type: VERSIONTYPE register contents, a sub release of a revision + * @dr_mode: requested mode of operation + * @current_dr_role: current role of operation when in dual-role mode + * @desired_dr_role: desired role of operation when in dual-role mode + * @edev: extcon handle + * @edev_nb: extcon notifier + * @hsphy_mode: UTMI phy mode, one of following: + * - USBPHY_INTERFACE_MODE_UTMI + * - USBPHY_INTERFACE_MODE_UTMIW + * @role_sw: usb_role_switch handle + * @role_switch_default_mode: default operation mode of controller while + * usb role is USB_ROLE_NONE. + * @usb_psy: pointer to power supply interface. + * @usb2_phy: pointer to USB2 PHY + * @usb3_phy: pointer to USB3 PHY + * @usb2_generic_phy: pointer to USB2 PHY + * @usb3_generic_phy: pointer to USB3 PHY + * @phys_ready: flag to indicate that PHYs are ready + * @ulpi: pointer to ulpi interface + * @ulpi_ready: flag to indicate that ULPI is initialized + * @u2sel: parameter from Set SEL request. + * @u2pel: parameter from Set SEL request. + * @u1sel: parameter from Set SEL request. + * @u1pel: parameter from Set SEL request. + * @num_eps: number of endpoints + * @ep0_next_event: hold the next expected event + * @ep0state: state of endpoint zero + * @link_state: link state + * @speed: device speed (super, high, full, low) + * @hwparams: copy of hwparams registers + * @root: debugfs root folder pointer + * @regset: debugfs pointer to regdump file + * @dbg_lsp_select: current debug lsp mux register selection + * @test_mode: true when we're entering a USB test mode + * @test_mode_nr: test feature selector + * @lpm_nyet_threshold: LPM NYET response threshold + * @hird_threshold: HIRD threshold + * @rx_thr_num_pkt_prd: periodic ESS receive packet count + * @rx_max_burst_prd: max periodic ESS receive burst size + * @tx_thr_num_pkt_prd: periodic ESS transmit packet count + * @tx_max_burst_prd: max periodic ESS transmit burst size + * @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize + * @hsphy_interface: "utmi" or "ulpi" + * @connected: true when we're connected to a host, false otherwise + * @delayed_status: true when gadget driver asks for delayed status + * @ep0_bounced: true when we used bounce buffer + * @ep0_expect_in: true when we expect a DATA IN transfer + * @has_hibernation: true when dwc3 was configured with Hibernation + * @sysdev_is_parent: true when dwc3 device has a parent driver + * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that + * there's now way for software to detect this in runtime. 
+ * @is_utmi_l1_suspend: the core asserts output signal + * 0 - utmi_sleep_n + * 1 - utmi_l1_suspend_n + * @is_fpga: true when we are using the FPGA board + * @pending_events: true when we have pending IRQs to be handled + * @do_fifo_resize: true when txfifo resizing is enabled for dwc3 endpoints + * @pullups_connected: true when Run/Stop bit is set + * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround + * @three_stage_setup: set if we perform a three phase setup + * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is + * not needed for DWC_usb31 version 1.70a-ea06 and below + * @usb3_lpm_capable: set if hadrware supports Link Power Management + * @usb2_lpm_disable: set to disable usb2 lpm for host + * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget + * @disable_scramble_quirk: set if we enable the disable scramble quirk + * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk + * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk + * @req_p1p2p3_quirk: set if we enable request p1p2p3 quirk + * @del_p1p2p3_quirk: set if we enable delay p1p2p3 quirk + * @del_phy_power_chg_quirk: set if we enable delay phy power change quirk + * @lfps_filter_quirk: set if we enable LFPS filter quirk + * @rx_detect_poll_quirk: set if we enable rx_detect to polling lfps quirk + * @dis_u3_susphy_quirk: set if we disable usb3 suspend phy + * @dis_u2_susphy_quirk: set if we disable usb2 suspend phy + * @dis_enblslpm_quirk: set if we clear enblslpm in GUSB2PHYCFG, + * disabling the suspend signal to the PHY. + * @dis_u1_entry_quirk: set if link entering into U1 state needs to be disabled. + * @dis_u2_entry_quirk: set if link entering into U2 state needs to be disabled. + * @dis_rxdet_inp3_quirk: set if we disable Rx.Detect in P3 + * @dis_u2_freeclk_exists_quirk : set if we clear u2_freeclk_exists + * in GUSB2PHYCFG, specify that USB2 PHY doesn't + * provide a free-running PHY clock. + * @dis_del_phy_power_chg_quirk: set if we disable delay phy power + * change quirk. + * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate + * check during HS transmit. + * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed + * instances in park mode. + * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk + * @tx_de_emphasis: Tx de-emphasis value + * 0 - -6dB de-emphasis + * 1 - -3.5dB de-emphasis + * 2 - No de-emphasis + * 3 - Reserved + * @dis_metastability_quirk: set to disable metastability quirk. + * @dis_split_quirk: set to disable split boundary. + * @imod_interval: set the interrupt moderation interval in 250ns + * increments or 0 to disable. + * @max_cfg_eps: current max number of IN eps used across all USB configs. + * @last_fifo_depth: last fifo depth used to determine next fifo ram start + * address. + * @num_ep_resized: carries the current number endpoints which have had its tx + * fifo resized. 
+ */ +struct dwc3 { + struct work_struct drd_work; + struct dwc3_trb *ep0_trb; + void *bounce; + void *scratchbuf; + u8 *setup_buf; + dma_addr_t ep0_trb_addr; + dma_addr_t bounce_addr; + dma_addr_t scratch_addr; + struct dwc3_request ep0_usb_req; + struct completion ep0_in_setup; + + /* device lock */ + spinlock_t lock; + + /* mode switching lock */ + struct mutex mutex; + + struct device *dev; + struct device *sysdev; + + struct platform_device *xhci; + struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM]; + + struct dwc3_event_buffer *ev_buf; + struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM]; + + struct usb_gadget *gadget; + struct usb_gadget_driver *gadget_driver; + + struct clk_bulk_data *clks; + int num_clks; + + struct reset_control *reset; + + struct usb_phy *usb2_phy; + struct usb_phy *usb3_phy; + + struct phy *usb2_generic_phy; + struct phy *usb3_generic_phy; + + bool phys_ready; + + struct ulpi *ulpi; + bool ulpi_ready; + + void __iomem *regs; + size_t regs_size; + bool soc_taihang; + void __iomem *phy_base; + + enum usb_dr_mode dr_mode; + u32 current_dr_role; + u32 desired_dr_role; + struct extcon_dev *edev; + struct notifier_block edev_nb; + enum usb_phy_interface hsphy_mode; + struct usb_role_switch *role_sw; + enum usb_dr_mode role_switch_default_mode; + + struct power_supply *usb_psy; + + u32 fladj; + u32 irq_gadget; + u32 otg_irq; + u32 current_otg_role; + u32 desired_otg_role; + bool otg_restart_host; + u32 nr_scratch; + u32 u1u2; + u32 maximum_speed; + u32 gadget_max_speed; + enum usb_ssp_rate max_ssp_rate; + enum usb_ssp_rate gadget_ssp_rate; + + u32 ip; + +#define DWC3_IP 0x5533 +#define DWC31_IP 0x3331 +#define DWC32_IP 0x3332 + + u32 revision; + +#define DWC3_REVISION_ANY 0x0 +#define DWC3_REVISION_173A 0x5533173a +#define DWC3_REVISION_175A 0x5533175a +#define DWC3_REVISION_180A 0x5533180a +#define DWC3_REVISION_183A 0x5533183a +#define DWC3_REVISION_185A 0x5533185a +#define DWC3_REVISION_187A 0x5533187a +#define DWC3_REVISION_188A 0x5533188a +#define DWC3_REVISION_190A 0x5533190a +#define DWC3_REVISION_194A 0x5533194a +#define DWC3_REVISION_200A 0x5533200a +#define DWC3_REVISION_202A 0x5533202a +#define DWC3_REVISION_210A 0x5533210a +#define DWC3_REVISION_220A 0x5533220a +#define DWC3_REVISION_230A 0x5533230a +#define DWC3_REVISION_240A 0x5533240a +#define DWC3_REVISION_250A 0x5533250a +#define DWC3_REVISION_260A 0x5533260a +#define DWC3_REVISION_270A 0x5533270a +#define DWC3_REVISION_280A 0x5533280a +#define DWC3_REVISION_290A 0x5533290a +#define DWC3_REVISION_300A 0x5533300a +#define DWC3_REVISION_310A 0x5533310a +#define DWC3_REVISION_330A 0x5533330a + +#define DWC31_REVISION_ANY 0x0 +#define DWC31_REVISION_110A 0x3131302a +#define DWC31_REVISION_120A 0x3132302a +#define DWC31_REVISION_160A 0x3136302a +#define DWC31_REVISION_170A 0x3137302a +#define DWC31_REVISION_180A 0x3138302a +#define DWC31_REVISION_190A 0x3139302a + +#define DWC32_REVISION_ANY 0x0 +#define DWC32_REVISION_100A 0x3130302a + + u32 version_type; + +#define DWC31_VERSIONTYPE_ANY 0x0 +#define DWC31_VERSIONTYPE_EA01 0x65613031 +#define DWC31_VERSIONTYPE_EA02 0x65613032 +#define DWC31_VERSIONTYPE_EA03 0x65613033 +#define DWC31_VERSIONTYPE_EA04 0x65613034 +#define DWC31_VERSIONTYPE_EA05 0x65613035 +#define DWC31_VERSIONTYPE_EA06 0x65613036 + + enum dwc3_ep0_next ep0_next_event; + enum dwc3_ep0_state ep0state; + enum dwc3_link_state link_state; + + u16 u2sel; + u16 u2pel; + u8 u1sel; + u8 u1pel; + + u8 speed; + + u8 num_eps; + + struct dwc3_hwparams hwparams; + struct dentry *root; + struct 
debugfs_regset32 *regset; + + u32 dbg_lsp_select; + + u8 test_mode; + u8 test_mode_nr; + u8 lpm_nyet_threshold; + u8 hird_threshold; + u8 rx_thr_num_pkt_prd; + u8 rx_max_burst_prd; + u8 tx_thr_num_pkt_prd; + u8 tx_max_burst_prd; + u8 tx_fifo_resize_max_num; + + const char *hsphy_interface; + + unsigned connected:1; + unsigned delayed_status:1; + unsigned ep0_bounced:1; + unsigned ep0_expect_in:1; + unsigned has_hibernation:1; + unsigned sysdev_is_parent:1; + unsigned has_lpm_erratum:1; + unsigned is_utmi_l1_suspend:1; + unsigned is_fpga:1; + unsigned pending_events:1; + unsigned do_fifo_resize:1; + unsigned pullups_connected:1; + unsigned setup_packet_pending:1; + unsigned three_stage_setup:1; + unsigned dis_start_transfer_quirk:1; + unsigned usb3_lpm_capable:1; + unsigned usb2_lpm_disable:1; + unsigned usb2_gadget_lpm_disable:1; + + unsigned disable_scramble_quirk:1; + unsigned u2exit_lfps_quirk:1; + unsigned u2ss_inp3_quirk:1; + unsigned req_p1p2p3_quirk:1; + unsigned del_p1p2p3_quirk:1; + unsigned del_phy_power_chg_quirk:1; + unsigned lfps_filter_quirk:1; + unsigned rx_detect_poll_quirk:1; + unsigned dis_u3_susphy_quirk:1; + unsigned dis_u2_susphy_quirk:1; + unsigned dis_enblslpm_quirk:1; + unsigned dis_u1_entry_quirk:1; + unsigned dis_u2_entry_quirk:1; + unsigned dis_rxdet_inp3_quirk:1; + unsigned dis_u2_freeclk_exists_quirk:1; + unsigned dis_del_phy_power_chg_quirk:1; + unsigned dis_tx_ipgap_linecheck_quirk:1; + unsigned parkmode_disable_ss_quirk:1; + + unsigned tx_de_emphasis_quirk:1; + unsigned tx_de_emphasis:2; + + unsigned dis_metastability_quirk:1; + + unsigned dis_split_quirk:1; + unsigned async_callbacks:1; + + u16 imod_interval; + + int max_cfg_eps; + int last_fifo_depth; + int num_ep_resized; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); +}; + +/** + * struct dwc3_vendor - contains parameters without modifying the format of DWC3 core + * @dwc: contains dwc3 core reference + * @softconnect: true when gadget connect is called, false when disconnect runs + */ +struct dwc3_vendor { + struct dwc3 dwc; + unsigned softconnect:1; +}; + +#define INCRX_BURST_MODE 0 +#define INCRX_UNDEF_LENGTH_BURST_MODE 1 + +#define work_to_dwc(w) (container_of((w), struct dwc3, drd_work)) + +/* -------------------------------------------------------------------------- */ + +struct dwc3_event_type { + u32 is_devspec:1; + u32 type:7; + u32 reserved8_31:24; +} __packed; + +#define DWC3_DEPEVT_XFERCOMPLETE 0x01 +#define DWC3_DEPEVT_XFERINPROGRESS 0x02 +#define DWC3_DEPEVT_XFERNOTREADY 0x03 +#define DWC3_DEPEVT_RXTXFIFOEVT 0x04 +#define DWC3_DEPEVT_STREAMEVT 0x06 +#define DWC3_DEPEVT_EPCMDCMPLT 0x07 + +/** + * struct dwc3_event_depevt - Device Endpoint Events + * @one_bit: indicates this is an endpoint event (not used) + * @endpoint_number: number of the endpoint + * @endpoint_event: The event we have: + * 0x00 - Reserved + * 0x01 - XferComplete + * 0x02 - XferInProgress + * 0x03 - XferNotReady + * 0x04 - RxTxFifoEvt (IN->Underrun, OUT->Overrun) + * 0x05 - Reserved + * 0x06 - StreamEvt + * 0x07 - EPCmdCmplt + * @reserved11_10: Reserved, don't use. + * @status: Indicates the status of the event. Refer to databook for + * more information. + * @parameters: Parameters of the current event. Refer to databook for + * more information. 
+ */ +struct dwc3_event_depevt { + u32 one_bit:1; + u32 endpoint_number:5; + u32 endpoint_event:4; + u32 reserved11_10:2; + u32 status:4; + +/* Within XferNotReady */ +#define DEPEVT_STATUS_TRANSFER_ACTIVE BIT(3) + +/* Within XferComplete or XferInProgress */ +#define DEPEVT_STATUS_BUSERR BIT(0) +#define DEPEVT_STATUS_SHORT BIT(1) +#define DEPEVT_STATUS_IOC BIT(2) +#define DEPEVT_STATUS_LST BIT(3) /* XferComplete */ +#define DEPEVT_STATUS_MISSED_ISOC BIT(3) /* XferInProgress */ + +/* Stream event only */ +#define DEPEVT_STREAMEVT_FOUND 1 +#define DEPEVT_STREAMEVT_NOTFOUND 2 + +/* Stream event parameter */ +#define DEPEVT_STREAM_PRIME 0xfffe +#define DEPEVT_STREAM_NOSTREAM 0x0 + +/* Control-only Status */ +#define DEPEVT_STATUS_CONTROL_DATA 1 +#define DEPEVT_STATUS_CONTROL_STATUS 2 +#define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3) + +/* In response to Start Transfer */ +#define DEPEVT_TRANSFER_NO_RESOURCE 1 +#define DEPEVT_TRANSFER_BUS_EXPIRY 2 + + u32 parameters:16; + +/* For Command Complete Events */ +#define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8) +} __packed; + +/** + * struct dwc3_event_devt - Device Events + * @one_bit: indicates this is a non-endpoint event (not used) + * @device_event: indicates it's a device event. Should read as 0x00 + * @type: indicates the type of device event. + * 0 - DisconnEvt + * 1 - USBRst + * 2 - ConnectDone + * 3 - ULStChng + * 4 - WkUpEvt + * 5 - Reserved + * 6 - Suspend (EOPF on revisions 2.10a and prior) + * 7 - SOF + * 8 - Reserved + * 9 - ErrticErr + * 10 - CmdCmplt + * 11 - EvntOverflow + * 12 - VndrDevTstRcved + * @reserved15_12: Reserved, not used + * @event_info: Information about this event + * @reserved31_25: Reserved, not used + */ +struct dwc3_event_devt { + u32 one_bit:1; + u32 device_event:7; + u32 type:4; + u32 reserved15_12:4; + u32 event_info:9; + u32 reserved31_25:7; +} __packed; + +/** + * struct dwc3_event_gevt - Other Core Events + * @one_bit: indicates this is a non-endpoint event (not used) + * @device_event: indicates it's (0x03) Carkit or (0x04) I2C event. + * @phy_port_number: self-explanatory + * @reserved31_12: Reserved, not used. 
+ */ +struct dwc3_event_gevt { + u32 one_bit:1; + u32 device_event:7; + u32 phy_port_number:4; + u32 reserved31_12:20; +} __packed; + +/** + * union dwc3_event - representation of Event Buffer contents + * @raw: raw 32-bit event + * @type: the type of the event + * @depevt: Device Endpoint Event + * @devt: Device Event + * @gevt: Global Event + */ +union dwc3_event { + u32 raw; + struct dwc3_event_type type; + struct dwc3_event_depevt depevt; + struct dwc3_event_devt devt; + struct dwc3_event_gevt gevt; +}; + +/** + * struct dwc3_gadget_ep_cmd_params - representation of endpoint command + * parameters + * @param2: third parameter + * @param1: second parameter + * @param0: first parameter + */ +struct dwc3_gadget_ep_cmd_params { + u32 param2; + u32 param1; + u32 param0; +}; + +/* + * DWC3 Features to be used as Driver Data + */ + +#define DWC3_HAS_PERIPHERAL BIT(0) +#define DWC3_HAS_XHCI BIT(1) +#define DWC3_HAS_OTG BIT(3) + +/* prototypes */ +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode); +void dwc3_set_mode(struct dwc3 *dwc, u32 mode); +u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); + +#define DWC3_IP_IS(_ip) \ + (dwc->ip == _ip##_IP) + +#define DWC3_VER_IS(_ip, _ver) \ + (DWC3_IP_IS(_ip) && dwc->revision == _ip##_REVISION_##_ver) + +#define DWC3_VER_IS_PRIOR(_ip, _ver) \ + (DWC3_IP_IS(_ip) && dwc->revision < _ip##_REVISION_##_ver) + +#define DWC3_VER_IS_WITHIN(_ip, _from, _to) \ + (DWC3_IP_IS(_ip) && \ + dwc->revision >= _ip##_REVISION_##_from && \ + (!(_ip##_REVISION_##_to) || \ + dwc->revision <= _ip##_REVISION_##_to)) + +#define DWC3_VER_TYPE_IS_WITHIN(_ip, _ver, _from, _to) \ + (DWC3_VER_IS(_ip, _ver) && \ + dwc->version_type >= _ip##_VERSIONTYPE_##_from && \ + (!(_ip##_VERSIONTYPE_##_to) || \ + dwc->version_type <= _ip##_VERSIONTYPE_##_to)) + +/** + * dwc3_mdwidth - get MDWIDTH value in bits + * @dwc: pointer to our context structure + * + * Return MDWIDTH configuration value in bits. 
+ */ +static inline u32 dwc3_mdwidth(struct dwc3 *dwc) +{ + u32 mdwidth; + + mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); + if (DWC3_IP_IS(DWC32)) + mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6); + + return mdwidth; +} + +bool dwc3_has_imod(struct dwc3 *dwc); + +int dwc3_event_buffers_setup(struct dwc3 *dwc); +void dwc3_event_buffers_cleanup(struct dwc3 *dwc); + +#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) +int dwc3_host_init(struct dwc3 *dwc); +void dwc3_host_exit(struct dwc3 *dwc); +#else +static inline int dwc3_host_init(struct dwc3 *dwc) +{ return 0; } +static inline void dwc3_host_exit(struct dwc3 *dwc) +{ } +#endif + +#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) +int dwc3_gadget_init(struct dwc3 *dwc); +void dwc3_gadget_exit(struct dwc3 *dwc); +int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode); +int dwc3_gadget_get_link_state(struct dwc3 *dwc); +int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state); +int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, + struct dwc3_gadget_ep_cmd_params *params); +int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd, + u32 param); +void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt); +void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc); +#else +static inline int dwc3_gadget_init(struct dwc3 *dwc) +{ return 0; } +static inline void dwc3_gadget_exit(struct dwc3 *dwc) +{ } +static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) +{ return 0; } +static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc) +{ return 0; } +static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc, + enum dwc3_link_state state) +{ return 0; } + +static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, + struct dwc3_gadget_ep_cmd_params *params) +{ return 0; } +static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc, + int cmd, u32 param) +{ return 0; } +static inline void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, + bool interrupt) +{ } +static inline void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc) +{ } +#endif + +#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) +int dwc3_drd_init(struct dwc3 *dwc); +void dwc3_drd_exit(struct dwc3 *dwc); +int sd_otg_enable(struct dwc3 *dwc); +void dwc3_otg_init(struct dwc3 *dwc); +void dwc3_otg_exit(struct dwc3 *dwc); +void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus); +void dwc3_otg_host_init(struct dwc3 *dwc); +#else +static inline int dwc3_drd_init(struct dwc3 *dwc) +{ return 0; } +static inline void dwc3_drd_exit(struct dwc3 *dwc) +{ } +static inline int sd_otg_enable(struct dwc3 *dwc) +{ return 0; } +static inline void dwc3_otg_init(struct dwc3 *dwc) +{ } +static inline void dwc3_otg_exit(struct dwc3 *dwc) +{ } +static inline void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus) +{ } +static inline void dwc3_otg_host_init(struct dwc3 *dwc) +{ } +#endif + +/* power management interface */ +#if !IS_ENABLED(CONFIG_USB_DWC3_HOST) +int dwc3_gadget_suspend(struct dwc3 *dwc); +int dwc3_gadget_resume(struct dwc3 *dwc); +void dwc3_gadget_process_pending_events(struct dwc3 *dwc); +#else +static inline int dwc3_gadget_suspend(struct dwc3 *dwc) +{ + return 0; +} + +static inline int dwc3_gadget_resume(struct dwc3 *dwc) +{ + return 0; +} + +static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc) +{ +} +#endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */ + +#if 
IS_ENABLED(CONFIG_USB_DWC3_ULPI) +int dwc3_ulpi_init(struct dwc3 *dwc); +void dwc3_ulpi_exit(struct dwc3 *dwc); +#else +static inline int dwc3_ulpi_init(struct dwc3 *dwc) +{ return 0; } +static inline void dwc3_ulpi_exit(struct dwc3 *dwc) +{ } +#endif + +#endif /* __DRIVERS_USB_DWC3_CORE_H */ diff --git a/usb/dwc3/host.c b/usb/dwc3/host.c new file mode 100644 index 0000000..4cbcf87 --- /dev/null +++ b/usb/dwc3/host.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * host.c - DesignWare USB3 DRD Controller Host Glue + * + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com + * + * Authors: Felipe Balbi , + */ + +#include +#include + +#include "core.h" + +static int dwc3_host_get_irq(struct dwc3 *dwc) +{ + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int irq; + + irq = platform_get_irq_byname_optional(dwc3_pdev, "host"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq(dwc3_pdev, 0); + if (irq > 0) + goto out; + + if (!irq) + irq = -EINVAL; + +out: + return irq; +} + +int dwc3_host_init(struct dwc3 *dwc) +{ + struct property_entry props[4]; + struct platform_device *xhci; + int ret, irq; + struct resource *res; + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int prop_idx = 0; + + irq = dwc3_host_get_irq(dwc); + if (irq < 0) + return irq; + + res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host"); + if (!res) + res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, + "dwc_usb3"); + if (!res) + res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0); + if (!res) + return -ENOMEM; + + dwc->xhci_resources[1].start = irq; + dwc->xhci_resources[1].end = irq; + dwc->xhci_resources[1].flags = res->flags; + dwc->xhci_resources[1].name = res->name; + + xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO); + if (!xhci) { + dev_err(dwc->dev, "couldn't allocate xHCI device\n"); + return -ENOMEM; + } + + xhci->dev.parent = dwc->dev; + ACPI_COMPANION_SET(&xhci->dev, ACPI_COMPANION(dwc->dev)); + + dwc->xhci = xhci; + + ret = platform_device_add_resources(xhci, dwc->xhci_resources, + DWC3_XHCI_RESOURCES_NUM); + if (ret) { + dev_err(dwc->dev, "couldn't add resources to xHCI device\n"); + goto err; + } + + memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); + + if (dwc->usb3_lpm_capable) + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable"); + + if (dwc->usb2_lpm_disable) + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable"); + + /** + * WORKAROUND: dwc3 revisions <=3.00a have a limitation + * where Port Disable command doesn't work. + * + * The suggested workaround is that we avoid Port Disable + * completely. + * + * This following flag tells XHCI to do just that. 
+ */ + if (DWC3_VER_IS_WITHIN(DWC3, ANY, 300A)) + props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped"); + + if (dwc->soc_taihang) + props[prop_idx++] = PROPERTY_ENTRY_BOOL("semidrive-taihang"); + + if (prop_idx) { + ret = platform_device_add_properties(xhci, props); + if (ret) { + dev_err(dwc->dev, "failed to add properties to xHCI\n"); + goto err; + } + } + + ret = platform_device_add(xhci); + if (ret) { + dev_err(dwc->dev, "failed to register xHCI device\n"); + goto err; + } + + return 0; +err: + platform_device_put(xhci); + return ret; +} + +void dwc3_host_exit(struct dwc3 *dwc) +{ + platform_device_unregister(dwc->xhci); +} diff --git a/usb/dwc3/io.h b/usb/dwc3/io.h new file mode 100644 index 0000000..76b73b1 --- /dev/null +++ b/usb/dwc3/io.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * io.h - DesignWare USB3 DRD IO Header + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com + * + * Authors: Felipe Balbi , + * Sebastian Andrzej Siewior + */ + +#ifndef __DRIVERS_USB_DWC3_IO_H +#define __DRIVERS_USB_DWC3_IO_H + +#include +#include "trace.h" +#include "debug.h" +#include "core.h" + +static inline u32 dwc3_readl(void __iomem *base, u32 offset) +{ + u32 value; + + /* + * We requested the mem region starting from the Globals address + * space, see dwc3_probe in core.c. + * However, the offsets are given starting from xHCI address space. + */ + value = readl(base + offset - DWC3_GLOBALS_REGS_START); + + /* + * When tracing we want to make it easy to find the correct address on + * documentation, so we revert it back to the proper addresses, the + * same way they are described on SNPS documentation + */ + trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value); + + return value; +} + +static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value) +{ + /* + * We requested the mem region starting from the Globals address + * space, see dwc3_probe in core.c. + * However, the offsets are given starting from xHCI address space. + */ + writel(value, base + offset - DWC3_GLOBALS_REGS_START); + + /* + * When tracing we want to make it easy to find the correct address on + * documentation, so we revert it back to the proper addresses, the + * same way they are described on SNPS documentation + */ + trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value); +} + +#endif /* __DRIVERS_USB_DWC3_IO_H */ diff --git a/usb/host/hcd.c b/usb/host/hcd.c new file mode 100644 index 0000000..4908718 --- /dev/null +++ b/usb/host/hcd.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * (C) Copyright Linus Torvalds 1999 + * (C) Copyright Johannes Erdfelt 1999-2001 + * (C) Copyright Andreas Gal 1999 + * (C) Copyright Gregory P. 
Smith 1999 + * (C) Copyright Deti Fliegl 1999 + * (C) Copyright Randy Dunlap 2000 + * (C) Copyright David Brownell 2000-2002 + */ + +#include +#include +#include +#include +#include +#include "xhci.h" +#include "hcd.h" + +struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, + struct device *sysdev, struct device *dev, const char *bus_name, + struct usb_hcd *primary_hcd) +{ + struct usb_hcd *hcd; + + // hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL); + hcd = (struct usb_hcd *)uk_calloc(uk_alloc_get_default(), 1, sizeof(*hcd)); +#if 0 + if (!hcd) + return NULL; + if (primary_hcd == NULL) { + hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex), + GFP_KERNEL); + if (!hcd->address0_mutex) { + kfree(hcd); + dev_dbg(dev, "hcd address0 mutex alloc failed\n"); + return NULL; + } + mutex_init(hcd->address0_mutex); + hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), + GFP_KERNEL); + if (!hcd->bandwidth_mutex) { + kfree(hcd->address0_mutex); + kfree(hcd); + dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); + return NULL; + } + mutex_init(hcd->bandwidth_mutex); + // dev_set_drvdata(dev, hcd); + } else { + mutex_lock(&usb_port_peer_mutex); + hcd->address0_mutex = primary_hcd->address0_mutex; + hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex; + hcd->primary_hcd = primary_hcd; + primary_hcd->primary_hcd = primary_hcd; + hcd->shared_hcd = primary_hcd; + primary_hcd->shared_hcd = hcd; + mutex_unlock(&usb_port_peer_mutex); + } + + kref_init(&hcd->kref); + + usb_bus_init(&hcd->self); + hcd->self.controller = dev; + hcd->self.sysdev = sysdev; + hcd->self.bus_name = bus_name; + + timer_setup(&hcd->rh_timer, rh_timer_func, 0); +#ifdef CONFIG_PM + INIT_WORK(&hcd->wakeup_work, hcd_resume_work); +#endif + + INIT_WORK(&hcd->died_work, hcd_died_work); +#endif + hcd->driver = driver; + hcd->speed = driver->flags & HCD_MASK; + hcd->product_desc = (driver->product_desc) ? driver->product_desc : + "USB Host Controller"; + return hcd; +} + +/** + * usb_add_hcd - finish generic HCD structure initialization and register + * @hcd: the usb_hcd structure to initialize + * @irqnum: Interrupt line to allocate + * @irqflags: Interrupt type flags + * + * Finish the remaining parts of generic HCD initialization: allocate the + * buffers of consistent memory, register the bus, request the IRQ line, + * and call the driver's reset() and start() routines. 
+ */ +int usb_add_hcd(struct usb_hcd *hcd, + unsigned int irqnum, unsigned long irqflags __unused) +{ + int retval = 0; +#if 0 + struct usb_device *rhdev; + + if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { + hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); + if (IS_ERR(hcd->phy_roothub)) + return PTR_ERR(hcd->phy_roothub); + + retval = usb_phy_roothub_init(hcd->phy_roothub); + if (retval) + return retval; + + retval = usb_phy_roothub_set_mode(hcd->phy_roothub, + PHY_MODE_USB_HOST_SS); + if (retval) + retval = usb_phy_roothub_set_mode(hcd->phy_roothub, + PHY_MODE_USB_HOST); + if (retval) + goto err_usb_phy_roothub_power_on; + + retval = usb_phy_roothub_power_on(hcd->phy_roothub); + if (retval) + goto err_usb_phy_roothub_power_on; + } + dev_info(hcd->self.controller, "%s\n", hcd->product_desc); + + switch (authorized_default) { + case USB_AUTHORIZE_NONE: + hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE; + break; + + case USB_AUTHORIZE_ALL: + hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL; + break; + + case USB_AUTHORIZE_INTERNAL: + hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL; + break; + + case USB_AUTHORIZE_WIRED: + default: + hcd->dev_policy = hcd->wireless ? + USB_DEVICE_AUTHORIZE_NONE : USB_DEVICE_AUTHORIZE_ALL; + break; + } + + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + + /* per default all interfaces are authorized */ + set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + + /* HC is in reset state, but accessible. Now do the one-time init, + * bottom up so that hcds can customize the root hubs before hub_wq + * starts talking to them. (Note, bus id is assigned early too.) + */ + retval = hcd_buffer_create(hcd); + if (retval != 0) { + dev_dbg(hcd->self.sysdev, "pool alloc failed\n"); + goto err_create_buf; + } + + retval = usb_register_bus(&hcd->self); + if (retval < 0) + goto err_register_bus; + + rhdev = usb_alloc_dev(NULL, &hcd->self, 0); + if (rhdev == NULL) { + dev_err(hcd->self.sysdev, "unable to allocate root hub\n"); + retval = -ENOMEM; + goto err_allocate_root_hub; + } + mutex_lock(&usb_port_peer_mutex); + hcd->self.root_hub = rhdev; + mutex_unlock(&usb_port_peer_mutex); + + rhdev->rx_lanes = 1; + rhdev->tx_lanes = 1; + + switch (hcd->speed) { + case HCD_USB11: + rhdev->speed = USB_SPEED_FULL; + break; + case HCD_USB2: + rhdev->speed = USB_SPEED_HIGH; + break; + case HCD_USB25: + rhdev->speed = USB_SPEED_WIRELESS; + break; + case HCD_USB3: + rhdev->speed = USB_SPEED_SUPER; + break; + case HCD_USB32: + rhdev->rx_lanes = 2; + rhdev->tx_lanes = 2; + fallthrough; + case HCD_USB31: + rhdev->speed = USB_SPEED_SUPER_PLUS; + break; + default: + retval = -EINVAL; + goto err_set_rh_speed; + } + + /* wakeup flag init defaults to "everything works" for root hubs, + * but drivers can override it in reset() if needed, along with + * recording the overall controller's system wakeup capability. + */ + device_set_wakeup_capable(&rhdev->dev, 1); + + /* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is + * registered. But since the controller can die at any time, + * let's initialize the flag before touching the hardware. + */ + set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + /* "reset" is misnamed; its role is now one-time init. the controller + * should already have been reset (and boot firmware kicked off etc). 
+ */ + if (hcd->driver->reset) { + retval = hcd->driver->reset(hcd); + if (retval < 0) { + uk_pr_err("can't setup: %d\n", retval); + goto err_hcd_driver_setup; + } + } +#if 0 + hcd->rh_pollable = 1; + + retval = usb_phy_roothub_calibrate(hcd->phy_roothub); + if (retval) + goto err_hcd_driver_setup; + + /* NOTE: root hub and controller capabilities may not be the same */ + if (device_can_wakeup(hcd->self.controller) + && device_can_wakeup(&hcd->self.root_hub->dev)) + dev_dbg(hcd->self.controller, "supports USB remote wakeup\n"); + + /* initialize tasklets */ + init_giveback_urb_bh(&hcd->high_prio_bh); + init_giveback_urb_bh(&hcd->low_prio_bh); +#endif + + /* enable irqs just before we start the controller, + * if the BIOS provides legacy PCI irqs. + */ + // if (usb_hcd_is_primary_hcd(hcd) && irqnum) { + if ((hcd->primary_hcd) && (hcd == hcd->primary_hcd) && irqnum) { + // retval = usb_hcd_request_irqs(hcd, irqnum, irqflags); + retval = uk_intctlr_irq_register(irqnum, dummy_handler, NULL); + if (retval) + goto err_request_irq; + } + + hcd->state = HC_STATE_RUNNING; + retval = hcd->driver->start(hcd); + if (retval < 0) { + uk_pr_err("startup error %d\n", retval); + goto err_hcd_driver_start; + } + +#if 0 + /* starting here, usbcore will pay attention to this root hub */ + retval = register_root_hub(hcd); + if (retval != 0) + goto err_register_root_hub; + + if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) + usb_hcd_poll_rh_status(hcd); +#endif + return retval; +#if 0 +err_register_root_hub: + hcd->rh_pollable = 0; + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + del_timer_sync(&hcd->rh_timer); + hcd->driver->stop(hcd); + hcd->state = HC_STATE_HALT; + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + del_timer_sync(&hcd->rh_timer); +#endif +err_hcd_driver_start: + // if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0) + // free_irq(irqnum, hcd); + if ((hcd->primary_hcd) && (hcd == hcd->primary_hcd)) + uk_intctlr_irq_unregister(irqnum, dummy_handler); +err_request_irq: +#if 0 +err_hcd_driver_setup: +err_set_rh_speed: + usb_put_invalidate_rhdev(hcd); +err_allocate_root_hub: + usb_deregister_bus(&hcd->self); +err_register_bus: + hcd_buffer_destroy(hcd); +err_create_buf: + usb_phy_roothub_power_off(hcd->phy_roothub); +err_usb_phy_roothub_power_on: + usb_phy_roothub_exit(hcd->phy_roothub); +#endif +#endif + return retval; +} \ No newline at end of file diff --git a/usb/host/xhci-ext-caps.h b/usb/host/xhci-ext-caps.h new file mode 100644 index 0000000..8e8dba9 --- /dev/null +++ b/usb/host/xhci-ext-caps.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + */ + +/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */ +#define XHCI_MAX_HALT_USEC (32 * 1000) +/* HC not running - set to 1 when run/stop bit is cleared. 
*/ +#define XHCI_STS_HALT (1<<0) + +/* HCCPARAMS offset from PCI base address */ +#define XHCI_HCC_PARAMS_OFFSET 0x10 +/* HCCPARAMS contains the first extended capability pointer */ +#define XHCI_HCC_EXT_CAPS(p) (((p)>>16)&0xffff) + +/* Command and Status registers offset from the Operational Registers address */ +#define XHCI_CMD_OFFSET 0x00 +#define XHCI_STS_OFFSET 0x04 + +#define XHCI_MAX_EXT_CAPS 50 + +/* Capability Register */ +/* bits 7:0 - how long is the Capabilities register */ +#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff) + +/* Extended capability register fields */ +#define XHCI_EXT_CAPS_ID(p) (((p)>>0)&0xff) +#define XHCI_EXT_CAPS_NEXT(p) (((p)>>8)&0xff) +#define XHCI_EXT_CAPS_VAL(p) ((p)>>16) +/* Extended capability IDs - ID 0 reserved */ +#define XHCI_EXT_CAPS_LEGACY 1 +#define XHCI_EXT_CAPS_PROTOCOL 2 +#define XHCI_EXT_CAPS_PM 3 +#define XHCI_EXT_CAPS_VIRT 4 +#define XHCI_EXT_CAPS_ROUTE 5 +/* IDs 6-9 reserved */ +#define XHCI_EXT_CAPS_DEBUG 10 +/* Vendor caps */ +#define XHCI_EXT_CAPS_VENDOR_INTEL 192 +/* USB Legacy Support Capability - section 7.1.1 */ +#define XHCI_HC_BIOS_OWNED (1 << 16) +#define XHCI_HC_OS_OWNED (1 << 24) + +/* USB Legacy Support Capability - section 7.1.1 */ +/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */ +#define XHCI_LEGACY_SUPPORT_OFFSET (0x00) + +/* USB Legacy Support Control and Status Register - section 7.1.2 */ +/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */ +#define XHCI_LEGACY_CONTROL_OFFSET (0x04) +/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */ +#define XHCI_LEGACY_DISABLE_SMI ((0x7 << 1) + (0xff << 5) + (0x7 << 17)) +#define XHCI_LEGACY_SMI_EVENTS (0x7 << 29) + +/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */ +#define XHCI_L1C (1 << 16) + +/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */ +#define XHCI_HLC (1 << 19) +#define XHCI_BLC (1 << 20) + +/* command register values to disable interrupts and halt the HC */ +/* start/stop HC execution - do not write unless HC is halted*/ +#define XHCI_CMD_RUN (1 << 0) +/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */ +#define XHCI_CMD_EIE (1 << 2) +/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */ +#define XHCI_CMD_HSEIE (1 << 3) +/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */ +#define XHCI_CMD_EWE (1 << 10) + +#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE) + +/* true: Controller Not Ready to accept doorbell or op reg writes after reset */ +#define XHCI_STS_CNR (1 << 11) + +// #include + +/** + * Find the offset of the extended capabilities with capability ID id. + * + * @base PCI MMIO registers base address. + * @start address at which to start looking, (0 or HCC_PARAMS to start at + * beginning of list) + * @id Extended capability ID to search for, or 0 for the next + * capability + * + * Returns the offset of the next matching extended capability structure. + * Some capabilities can occur several times, e.g., the XHCI_EXT_CAPS_PROTOCOL, + * and this provides a way to find them all. 
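+ *
+ * Illustrative example (not part of the original header; parse_cap() is a
+ * hypothetical caller): walking every Supported Protocol capability could
+ * look like
+ *
+ *	offset = 0;
+ *	while ((offset = xhci_find_next_ext_cap(base, offset,
+ *						XHCI_EXT_CAPS_PROTOCOL)))
+ *		parse_cap(base + offset);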
+ */ + +static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id) +{ + u32 val; + u32 next; + u32 offset; + + offset = start; + if (!start || start == XHCI_HCC_PARAMS_OFFSET) { + val = readl(base + XHCI_HCC_PARAMS_OFFSET); + if (val == ~0) + return 0; + offset = XHCI_HCC_EXT_CAPS(val) << 2; + if (!offset) + return 0; + } + do { + val = readl(base + offset); + if (val == ~0) + return 0; + if (offset != start && (id == 0 || XHCI_EXT_CAPS_ID(val) == id)) + return offset; + + next = XHCI_EXT_CAPS_NEXT(val); + offset += next << 2; + } while (next); + + return 0; +} diff --git a/usb/host/xhci-hub.c b/usb/host/xhci-hub.c new file mode 100644 index 0000000..15d543f --- /dev/null +++ b/usb/host/xhci-hub.c @@ -0,0 +1,1882 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + */ + + +#include +#include + +#include "xhci.h" +#include "xhci-trace.h" + +#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) +#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ + PORT_RC | PORT_PLC | PORT_PE) + +/* USB 3 BOS descriptor and a capability descriptors, combined. + * Fields will be adjusted and added later in xhci_create_usb3_bos_desc() + */ +static u8 usb_bos_descriptor [] = { + USB_DT_BOS_SIZE, /* __u8 bLength, 5 bytes */ + USB_DT_BOS, /* __u8 bDescriptorType */ + 0x0F, 0x00, /* __le16 wTotalLength, 15 bytes */ + 0x1, /* __u8 bNumDeviceCaps */ + /* First device capability, SuperSpeed */ + USB_DT_USB_SS_CAP_SIZE, /* __u8 bLength, 10 bytes */ + USB_DT_DEVICE_CAPABILITY, /* Device Capability */ + USB_SS_CAP_TYPE, /* bDevCapabilityType, SUPERSPEED_USB */ + 0x00, /* bmAttributes, LTM off by default */ + USB_5GBPS_OPERATION, 0x00, /* wSpeedsSupported, 5Gbps only */ + 0x03, /* bFunctionalitySupport, + USB 3.0 speed only */ + 0x00, /* bU1DevExitLat, set later. */ + 0x00, 0x00, /* __le16 bU2DevExitLat, set later. */ + /* Second device capability, SuperSpeedPlus */ + 0x1c, /* bLength 28, will be adjusted later */ + USB_DT_DEVICE_CAPABILITY, /* Device Capability */ + USB_SSP_CAP_TYPE, /* bDevCapabilityType SUPERSPEED_PLUS */ + 0x00, /* bReserved 0 */ + 0x23, 0x00, 0x00, 0x00, /* bmAttributes, SSAC=3 SSIC=1 */ + 0x01, 0x00, /* wFunctionalitySupport */ + 0x00, 0x00, /* wReserved 0 */ + /* Default Sublink Speed Attributes, overwrite if custom PSI exists */ + 0x34, 0x00, 0x05, 0x00, /* 5Gbps, symmetric, rx, ID = 4 */ + 0xb4, 0x00, 0x05, 0x00, /* 5Gbps, symmetric, tx, ID = 4 */ + 0x35, 0x40, 0x0a, 0x00, /* 10Gbps, SSP, symmetric, rx, ID = 5 */ + 0xb5, 0x40, 0x0a, 0x00, /* 10Gbps, SSP, symmetric, tx, ID = 5 */ +}; + +static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, + u16 wLength) +{ + struct xhci_port_cap *port_cap = NULL; + int i, ssa_count; + u32 temp; + u16 desc_size, ssp_cap_size, ssa_size = 0; + bool usb3_1 = false; + + desc_size = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; + ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size; + + /* does xhci support USB 3.1 Enhanced SuperSpeed */ + for (i = 0; i < xhci->num_port_caps; i++) { + if (xhci->port_caps[i].maj_rev == 0x03 && + xhci->port_caps[i].min_rev >= 0x01) { + usb3_1 = true; + port_cap = &xhci->port_caps[i]; + break; + } + } + + if (usb3_1) { + /* does xhci provide a PSI table for SSA speed attributes? 
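+		 * If so, the default 5Gbps/10Gbps sublink speed attribute
+		 * entries in the template above are skipped and rebuilt from
+		 * the PSI registers further below.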
*/ + if (port_cap->psi_count) { + /* two SSA entries for each unique PSI ID, RX and TX */ + ssa_count = port_cap->psi_uid_count * 2; + ssa_size = ssa_count * sizeof(u32); + ssp_cap_size -= 16; /* skip copying the default SSA */ + } + desc_size += ssp_cap_size; + } + memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength)); + + if (usb3_1) { + /* modify bos descriptor bNumDeviceCaps and wTotalLength */ + buf[4] += 1; + put_unaligned_le16(desc_size + ssa_size, &buf[2]); + } + + if (wLength < USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE) + return wLength; + + /* Indicate whether the host has LTM support. */ + temp = readl(&xhci->cap_regs->hcc_params); + if (HCC_LTC(temp)) + buf[8] |= USB_LTM_SUPPORT; + + /* Set the U1 and U2 exit latencies. */ + if ((xhci->quirks & XHCI_LPM_SUPPORT)) { + temp = readl(&xhci->cap_regs->hcs_params3); + buf[12] = HCS_U1_LATENCY(temp); + put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]); + } + + /* If PSI table exists, add the custom speed attributes from it */ + if (usb3_1 && port_cap->psi_count) { + u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp; + int offset; + + ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; + + if (wLength < desc_size) + return wLength; + buf[ssp_cap_base] = ssp_cap_size + ssa_size; + + /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */ + bm_attrib = (ssa_count - 1) & 0x1f; + bm_attrib |= (port_cap->psi_uid_count - 1) << 5; + put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]); + + if (wLength < desc_size + ssa_size) + return wLength; + /* + * Create the Sublink Speed Attributes (SSA) array. + * The xhci PSI field and USB 3.1 SSA fields are very similar, + * but link type bits 7:6 differ for values 01b and 10b. + * xhci has also only one PSI entry for a symmetric link when + * USB 3.1 requires two SSA entries (RX and TX) for every link + */ + offset = desc_size; + for (i = 0; i < port_cap->psi_count; i++) { + psi = port_cap->psi[i]; + psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; + psi_exp = XHCI_EXT_PORT_PSIE(psi); + psi_mant = XHCI_EXT_PORT_PSIM(psi); + + /* Shift to Gbps and set SSP Link BIT(14) if 10Gpbs */ + for (; psi_exp < 3; psi_exp++) + psi_mant /= 1000; + if (psi_mant >= 10) + psi |= BIT(14); + + if ((psi & PLT_MASK) == PLT_SYM) { + /* Symmetric, create SSA RX and TX from one PSI entry */ + put_unaligned_le32(psi, &buf[offset]); + psi |= 1 << 7; /* turn entry to TX */ + offset += 4; + if (offset >= desc_size + ssa_size) + return desc_size + ssa_size; + } else if ((psi & PLT_MASK) == PLT_ASYM_RX) { + /* Asymetric RX, flip bits 7:6 for SSA */ + psi ^= PLT_MASK; + } + put_unaligned_le32(psi, &buf[offset]); + offset += 4; + if (offset >= desc_size + ssa_size) + return desc_size + ssa_size; + } + } + /* ssa_size is 0 for other than usb 3.1 hosts */ + return desc_size + ssa_size; +} + +static void xhci_common_hub_descriptor(struct xhci_hcd *xhci, + struct usb_hub_descriptor *desc, int ports) +{ + u16 temp; + + desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */ + desc->bHubContrCurrent = 0; + + desc->bNbrPorts = ports; + temp = 0; + /* Bits 1:0 - support per-port power switching, or power always on */ + if (HCC_PPC(xhci->hcc_params)) + temp |= HUB_CHAR_INDV_PORT_LPSM; + else + temp |= HUB_CHAR_NO_LPSM; + /* Bit 2 - root hubs are not part of a compound device */ + /* Bits 4:3 - individual port over current protection */ + temp |= HUB_CHAR_INDV_PORT_OCPM; + /* Bits 6:5 - no TTs in root ports */ + /* Bit 7 - no port indicators */ + desc->wHubCharacteristics = cpu_to_le16(temp); +} + +/* Fill in the USB 2.0 
roothub descriptor */ +static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, + struct usb_hub_descriptor *desc) +{ + int ports; + u16 temp; + __u8 port_removable[(USB_MAXCHILDREN + 1 + 7) / 8]; + u32 portsc; + unsigned int i; + struct xhci_hub *rhub; + + rhub = &xhci->usb2_rhub; + ports = rhub->num_ports; + xhci_common_hub_descriptor(xhci, desc, ports); + desc->bDescriptorType = USB_DT_HUB; + temp = 1 + (ports / 8); + desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp; + + /* The Device Removable bits are reported on a byte granularity. + * If the port doesn't exist within that byte, the bit is set to 0. + */ + memset(port_removable, 0, sizeof(port_removable)); + for (i = 0; i < ports; i++) { + portsc = readl(rhub->ports[i]->addr); + /* If a device is removable, PORTSC reports a 0, same as in the + * hub descriptor DeviceRemovable bits. + */ + if (portsc & PORT_DEV_REMOVE) + /* This math is hairy because bit 0 of DeviceRemovable + * is reserved, and bit 1 is for port 1, etc. + */ + port_removable[(i + 1) / 8] |= 1 << ((i + 1) % 8); + } + + /* ch11.h defines a hub descriptor that has room for USB_MAXCHILDREN + * ports on it. The USB 2.0 specification says that there are two + * variable length fields at the end of the hub descriptor: + * DeviceRemovable and PortPwrCtrlMask. But since we can have less than + * USB_MAXCHILDREN ports, we may need to use the DeviceRemovable array + * to set PortPwrCtrlMask bits. PortPwrCtrlMask must always be set to + * 0xFF, so we initialize the both arrays (DeviceRemovable and + * PortPwrCtrlMask) to 0xFF. Then we set the DeviceRemovable for each + * set of ports that actually exist. + */ + memset(desc->u.hs.DeviceRemovable, 0xff, + sizeof(desc->u.hs.DeviceRemovable)); + memset(desc->u.hs.PortPwrCtrlMask, 0xff, + sizeof(desc->u.hs.PortPwrCtrlMask)); + + for (i = 0; i < (ports + 1 + 7) / 8; i++) + memset(&desc->u.hs.DeviceRemovable[i], port_removable[i], + sizeof(__u8)); +} + +/* Fill in the USB 3.0 roothub descriptor */ +static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, + struct usb_hub_descriptor *desc) +{ + int ports; + u16 port_removable; + u32 portsc; + unsigned int i; + struct xhci_hub *rhub; + + rhub = &xhci->usb3_rhub; + ports = rhub->num_ports; + xhci_common_hub_descriptor(xhci, desc, ports); + desc->bDescriptorType = USB_DT_SS_HUB; + desc->bDescLength = USB_DT_SS_HUB_SIZE; + + /* header decode latency should be zero for roothubs, + * see section 4.23.5.2. + */ + desc->u.ss.bHubHdrDecLat = 0; + desc->u.ss.wHubDelay = 0; + + port_removable = 0; + /* bit 0 is reserved, bit 1 is for port 1, etc. */ + for (i = 0; i < ports; i++) { + portsc = readl(rhub->ports[i]->addr); + if (portsc & PORT_DEV_REMOVE) + port_removable |= 1 << (i + 1); + } + + desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable); +} + +static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, + struct usb_hub_descriptor *desc) +{ + + if (hcd->speed >= HCD_USB3) + xhci_usb3_hub_descriptor(hcd, xhci, desc); + else + xhci_usb2_hub_descriptor(hcd, xhci, desc); + +} + +static unsigned int xhci_port_speed(unsigned int port_status) +{ + if (DEV_LOWSPEED(port_status)) + return USB_PORT_STAT_LOW_SPEED; + if (DEV_HIGHSPEED(port_status)) + return USB_PORT_STAT_HIGH_SPEED; + /* + * FIXME: Yes, we should check for full speed, but the core uses that as + * a default in portspeed() in usb/core/hub.c (which is the only place + * USB_PORT_STAT_*_SPEED is used). 
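+	 * Returning 0 here therefore makes usbcore treat the port as
+	 * full speed by default.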
+ */ + return 0; +} + +/* + * These bits are Read Only (RO) and should be saved and written to the + * registers: 0, 3, 10:13, 30 + * connect status, over-current status, port speed, and device removable. + * connect status and port speed are also sticky - meaning they're in + * the AUX well and they aren't changed by a hot, warm, or cold reset. + */ +#define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30)) +/* + * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit: + * bits 5:8, 9, 14:15, 25:27 + * link state, port power, port indicator state, "wake on" enable state + */ +#define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25)) +/* + * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect: + * bit 4 (port reset) + */ +#define XHCI_PORT_RW1S ((1<<4)) +/* + * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect: + * bits 1, 17, 18, 19, 20, 21, 22, 23 + * port enable/disable, and + * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports), + * over-current, reset, link state, and L1 change + */ +#define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17)) +/* + * Bit 16 is RW, and writing a '1' to it causes the link state control to be + * latched in + */ +#define XHCI_PORT_RW ((1<<16)) +/* + * These bits are Reserved Zero (RsvdZ) and zero should be written to them: + * bits 2, 24, 28:31 + */ +#define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28)) + +/* + * Given a port state, this function returns a value that would result in the + * port being in the same state, if the value was written to the port status + * control register. + * Save Read Only (RO) bits and save read/write bits where + * writing a 0 clears the bit and writing a 1 sets the bit (RWS). + * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect. + */ +u32 xhci_port_state_to_neutral(u32 state) +{ + /* Save read-only status and port state */ + return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); +} + +/* + * find slot id based on port number. + * @port: The one-based port number from one of the two split roothubs. + */ +int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, + u16 port) +{ + int slot_id; + int i; + enum usb_device_speed speed; + + slot_id = 0; + for (i = 0; i < MAX_HC_SLOTS; i++) { + if (!xhci->devs[i] || !xhci->devs[i]->udev) + continue; + speed = xhci->devs[i]->udev->speed; + if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) + && xhci->devs[i]->fake_port == port) { + slot_id = i; + break; + } + } + + return slot_id; +} + +/* + * Stop device + * It issues stop endpoint command for EP 0 to 30. And wait the last command + * to complete. + * suspend will set to 1, if suspend bit need to set in command. 
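+ *
+ * In other words: a Stop Endpoint command is queued for every endpoint of
+ * the slot that has a ring, and the routine blocks until the final command,
+ * issued for endpoint 0, completes (or is aborted).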
+ */ +static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) +{ + struct xhci_virt_device *virt_dev; + struct xhci_command *cmd; + unsigned long flags; + int ret; + int i; + + ret = 0; + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) + return -ENODEV; + + trace_xhci_stop_device(virt_dev); + + cmd = xhci_alloc_command(xhci, true, GFP_NOIO); + if (!cmd) + return -ENOMEM; + + spin_lock_irqsave(&xhci->lock, flags); + for (i = LAST_EP_INDEX; i > 0; i--) { + if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) { + struct xhci_ep_ctx *ep_ctx; + struct xhci_command *command; + + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i); + + /* Check ep is running, required by AMD SNPS 3.1 xHC */ + if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING) + continue; + + command = xhci_alloc_command(xhci, false, GFP_NOWAIT); + if (!command) { + spin_unlock_irqrestore(&xhci->lock, flags); + ret = -ENOMEM; + goto cmd_cleanup; + } + + ret = xhci_queue_stop_endpoint(xhci, command, slot_id, + i, suspend); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, command); + goto cmd_cleanup; + } + } + } + ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + goto cmd_cleanup; + } + + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Wait for last stop endpoint command to finish */ + wait_for_completion(cmd->completion); + + if (cmd->status == COMP_COMMAND_ABORTED || + cmd->status == COMP_COMMAND_RING_STOPPED) { + xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); + ret = -ETIME; + goto cmd_cleanup; + } + + ret = xhci_vendor_sync_dev_ctx(xhci, slot_id); + if (ret) + xhci_warn(xhci, "Sync device context failed, ret=%d\n", ret); + +cmd_cleanup: + xhci_free_command(xhci, cmd); + return ret; +} + +/* + * Ring device, it rings the all doorbells unconditionally. + */ +void xhci_ring_device(struct xhci_hcd *xhci, int slot_id) +{ + int i, s; + struct xhci_virt_ep *ep; + + for (i = 0; i < LAST_EP_INDEX + 1; i++) { + ep = &xhci->devs[slot_id]->eps[i]; + + if (ep->ep_state & EP_HAS_STREAMS) { + for (s = 1; s < ep->stream_info->num_streams; s++) + xhci_ring_ep_doorbell(xhci, slot_id, i, s); + } else if (ep->ring && ep->ring->dequeue) { + xhci_ring_ep_doorbell(xhci, slot_id, i, 0); + } + } + + return; +} + +static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, + u16 wIndex, __le32 __iomem *addr, u32 port_status) +{ + /* Don't allow the USB core to disable SuperSpeed ports. 
*/ + if (hcd->speed >= HCD_USB3) { + xhci_dbg(xhci, "Ignoring request to disable " + "SuperSpeed port.\n"); + return; + } + + if (xhci->quirks & XHCI_BROKEN_PORT_PED) { + xhci_dbg(xhci, + "Broken Port Enabled/Disabled, ignoring port disable request.\n"); + return; + } + + /* Write 1 to disable the port */ + writel(port_status | PORT_PE, addr); + port_status = readl(addr); + xhci_dbg(xhci, "disable port %d-%d, portsc: 0x%x\n", + hcd->self.busnum, wIndex + 1, port_status); +} + +static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, + u16 wIndex, __le32 __iomem *addr, u32 port_status) +{ + char *port_change_bit; + u32 status; + + switch (wValue) { + case USB_PORT_FEAT_C_RESET: + status = PORT_RC; + port_change_bit = "reset"; + break; + case USB_PORT_FEAT_C_BH_PORT_RESET: + status = PORT_WRC; + port_change_bit = "warm(BH) reset"; + break; + case USB_PORT_FEAT_C_CONNECTION: + status = PORT_CSC; + port_change_bit = "connect"; + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + status = PORT_OCC; + port_change_bit = "over-current"; + break; + case USB_PORT_FEAT_C_ENABLE: + status = PORT_PEC; + port_change_bit = "enable/disable"; + break; + case USB_PORT_FEAT_C_SUSPEND: + status = PORT_PLC; + port_change_bit = "suspend/resume"; + break; + case USB_PORT_FEAT_C_PORT_LINK_STATE: + status = PORT_PLC; + port_change_bit = "link state"; + break; + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: + status = PORT_CEC; + port_change_bit = "config error"; + break; + default: + /* Should never happen */ + return; + } + /* Change bits are all write 1 to clear */ + writel(port_status | status, addr); + port_status = readl(addr); + + xhci_dbg(xhci, "clear port%d %s change, portsc: 0x%x\n", + wIndex + 1, port_change_bit, port_status); +} + +struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + if (hcd->speed >= HCD_USB3) + return &xhci->usb3_rhub; + return &xhci->usb2_rhub; +} + +/* + * xhci_set_port_power() must be called with xhci->lock held. + * It will release and re-aquire the lock while calling ACPI + * method. + */ +static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, + u16 index, bool on, unsigned long *flags) + __must_hold(&xhci->lock) +{ + struct xhci_hub *rhub; + struct xhci_port *port; + u32 temp; + + rhub = xhci_get_rhub(hcd); + port = rhub->ports[index]; + temp = readl(port->addr); + + xhci_dbg(xhci, "set port power %d-%d %s, portsc: 0x%x\n", + hcd->self.busnum, index + 1, on ? 
"ON" : "OFF", temp); + + temp = xhci_port_state_to_neutral(temp); + + if (on) { + /* Power on */ + writel(temp | PORT_POWER, port->addr); + readl(port->addr); + } else { + /* Power off */ + writel(temp & ~PORT_POWER, port->addr); + } + + spin_unlock_irqrestore(&xhci->lock, *flags); + temp = usb_acpi_power_manageable(hcd->self.root_hub, + index); + if (temp) + usb_acpi_set_power_state(hcd->self.root_hub, + index, on); + spin_lock_irqsave(&xhci->lock, *flags); +} + +static void xhci_port_set_test_mode(struct xhci_hcd *xhci, + u16 test_mode, u16 wIndex) +{ + u32 temp; + struct xhci_port *port; + + /* xhci only supports test mode for usb2 ports */ + port = xhci->usb2_rhub.ports[wIndex]; + temp = readl(port->addr + PORTPMSC); + temp |= test_mode << PORT_TEST_MODE_SHIFT; + writel(temp, port->addr + PORTPMSC); + xhci->test_mode = test_mode; + if (test_mode == USB_TEST_FORCE_ENABLE) + xhci_start(xhci); +} + +static int xhci_enter_test_mode(struct xhci_hcd *xhci, + u16 test_mode, u16 wIndex, unsigned long *flags) + __must_hold(&xhci->lock) +{ + int i, retval; + + /* Disable all Device Slots */ + xhci_dbg(xhci, "Disable all slots\n"); + spin_unlock_irqrestore(&xhci->lock, *flags); + for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { + if (!xhci->devs[i]) + continue; + + retval = xhci_disable_slot(xhci, i); + if (retval) + xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", + i, retval); + } + spin_lock_irqsave(&xhci->lock, *flags); + /* Put all ports to the Disable state by clear PP */ + xhci_dbg(xhci, "Disable all port (PP = 0)\n"); + /* Power off USB3 ports*/ + for (i = 0; i < xhci->usb3_rhub.num_ports; i++) + xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags); + /* Power off USB2 ports*/ + for (i = 0; i < xhci->usb2_rhub.num_ports; i++) + xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags); + /* Stop the controller */ + xhci_dbg(xhci, "Stop controller\n"); + retval = xhci_halt(xhci); + if (retval) + return retval; + /* Disable runtime PM for test mode */ + pm_runtime_forbid(xhci_to_hcd(xhci)->self.controller); + /* Set PORTPMSC.PTC field to enter selected test mode */ + /* Port is selected by wIndex. 
port_id = wIndex + 1 */ + xhci_dbg(xhci, "Enter Test Mode: %d, Port_id=%d\n", + test_mode, wIndex + 1); + xhci_port_set_test_mode(xhci, test_mode, wIndex); + return retval; +} + +static int xhci_exit_test_mode(struct xhci_hcd *xhci) +{ + int retval; + + if (!xhci->test_mode) { + xhci_err(xhci, "Not in test mode, do nothing.\n"); + return 0; + } + if (xhci->test_mode == USB_TEST_FORCE_ENABLE && + !(xhci->xhc_state & XHCI_STATE_HALTED)) { + retval = xhci_halt(xhci); + if (retval) + return retval; + } + pm_runtime_allow(xhci_to_hcd(xhci)->self.controller); + xhci->test_mode = 0; + return xhci_reset(xhci); +} + +void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, + u32 link_state) +{ + u32 temp; + u32 portsc; + + portsc = readl(port->addr); + temp = xhci_port_state_to_neutral(portsc); + temp &= ~PORT_PLS_MASK; + temp |= PORT_LINK_STROBE | link_state; + writel(temp, port->addr); + + xhci_dbg(xhci, "Set port %d-%d link state, portsc: 0x%x, write 0x%x", + port->rhub->hcd->self.busnum, port->hcd_portnum + 1, + portsc, temp); +} + +static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci, + struct xhci_port *port, u16 wake_mask) +{ + u32 temp; + + temp = readl(port->addr); + temp = xhci_port_state_to_neutral(temp); + + if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_CONNECT) + temp |= PORT_WKCONN_E; + else + temp &= ~PORT_WKCONN_E; + + if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT) + temp |= PORT_WKDISC_E; + else + temp &= ~PORT_WKDISC_E; + + if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT) + temp |= PORT_WKOC_E; + else + temp &= ~PORT_WKOC_E; + + writel(temp, port->addr); +} + +/* Test and clear port RWC bit */ +void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port, + u32 port_bit) +{ + u32 temp; + + temp = readl(port->addr); + if (temp & port_bit) { + temp = xhci_port_state_to_neutral(temp); + temp |= port_bit; + writel(temp, port->addr); + } +} + +/* Updates Link Status for super Speed port */ +static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, + u32 *status, u32 status_reg) +{ + u32 pls = status_reg & PORT_PLS_MASK; + + /* When the CAS bit is set then warm reset + * should be performed on port + */ + if (status_reg & PORT_CAS) { + /* The CAS bit can be set while the port is + * in any link state. + * Only roothubs have CAS bit, so we + * pretend to be in compliance mode + * unless we're already in compliance + * or the inactive state. + */ + if (pls != USB_SS_PORT_LS_COMP_MOD && + pls != USB_SS_PORT_LS_SS_INACTIVE) { + pls = USB_SS_PORT_LS_COMP_MOD; + } + /* Return also connection bit - + * hub state machine resets port + * when this bit is set. + */ + pls |= USB_PORT_STAT_CONNECTION; + } else { + /* + * Resume state is an xHCI internal state. Do not report it to + * usb core, instead, pretend to be U3, thus usb core knows + * it's not ready for transfer. + */ + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; + return; + } + + /* + * If CAS bit isn't set but the Port is already at + * Compliance Mode, fake a connection so the USB core + * notices the Compliance state and resets the port. + * This resolves an issue generated by the SN65LVPE502CP + * in which sometimes the port enters compliance mode + * caused by a delay on the host-device negotiation. + */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (pls == USB_SS_PORT_LS_COMP_MOD)) + pls |= USB_PORT_STAT_CONNECTION; + } + + /* update status field */ + *status |= pls; +} + +/* + * Function for Compliance Mode Quirk. 
+ * + * This Function verifies if all xhc USB3 ports have entered U0, if so, + * the compliance mode timer is deleted. A port won't enter + * compliance mode if it has previously entered U0. + */ +static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, + u16 wIndex) +{ + u32 all_ports_seen_u0 = ((1 << xhci->usb3_rhub.num_ports) - 1); + bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0); + + if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK)) + return; + + if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) { + xhci->port_status_u0 |= 1 << wIndex; + if (xhci->port_status_u0 == all_ports_seen_u0) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "All USB3 ports have entered U0 already!"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Compliance Mode Recovery Timer Deleted."); + } + } +} + +static int xhci_handle_usb2_port_link_resume(struct xhci_port *port, + u32 *status, u32 portsc, + unsigned long *flags) +{ + struct xhci_bus_state *bus_state; + struct xhci_hcd *xhci; + struct usb_hcd *hcd; + int slot_id; + u32 wIndex; + + hcd = port->rhub->hcd; + bus_state = &port->rhub->bus_state; + xhci = hcd_to_xhci(hcd); + wIndex = port->hcd_portnum; + + if ((portsc & PORT_RESET) || !(portsc & PORT_PE)) { + *status = 0xffffffff; + return -EINVAL; + } + /* did port event handler already start resume timing? */ + if (!bus_state->resume_done[wIndex]) { + /* If not, maybe we are in a host initated resume? */ + if (test_bit(wIndex, &bus_state->resuming_ports)) { + /* Host initated resume doesn't time the resume + * signalling using resume_done[]. + * It manually sets RESUME state, sleeps 20ms + * and sets U0 state. This should probably be + * changed, but not right now. + */ + } else { + /* port resume was discovered now and here, + * start resume timing + */ + unsigned long timeout = jiffies + + msecs_to_jiffies(USB_RESUME_TIMEOUT); + + set_bit(wIndex, &bus_state->resuming_ports); + bus_state->resume_done[wIndex] = timeout; + mod_timer(&hcd->rh_timer, timeout); + usb_hcd_start_port_resume(&hcd->self, wIndex); + } + /* Has resume been signalled for USB_RESUME_TIME yet? */ + } else if (time_after_eq(jiffies, bus_state->resume_done[wIndex])) { + int time_left; + + xhci_dbg(xhci, "resume USB2 port %d-%d\n", + hcd->self.busnum, wIndex + 1); + + bus_state->resume_done[wIndex] = 0; + clear_bit(wIndex, &bus_state->resuming_ports); + + set_bit(wIndex, &bus_state->rexit_ports); + + xhci_test_and_clear_bit(xhci, port, PORT_PLC); + xhci_set_link_state(xhci, port, XDEV_U0); + + spin_unlock_irqrestore(&xhci->lock, *flags); + time_left = wait_for_completion_timeout( + &bus_state->rexit_done[wIndex], + msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS)); + spin_lock_irqsave(&xhci->lock, *flags); + + if (time_left) { + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + wIndex + 1); + if (!slot_id) { + xhci_dbg(xhci, "slot_id is zero\n"); + *status = 0xffffffff; + return -ENODEV; + } + xhci_ring_device(xhci, slot_id); + } else { + int port_status = readl(port->addr); + + xhci_warn(xhci, "Port resume timed out, port %d-%d: 0x%x\n", + hcd->self.busnum, wIndex + 1, port_status); + *status |= USB_PORT_STAT_SUSPEND; + clear_bit(wIndex, &bus_state->rexit_ports); + } + + usb_hcd_end_port_resume(&hcd->self, wIndex); + bus_state->port_c_suspend |= 1 << wIndex; + bus_state->suspended_ports &= ~(1 << wIndex); + } else { + /* + * The resume has been signaling for less than + * USB_RESUME_TIME. 
Report the port status as SUSPEND, + * let the usbcore check port status again and clear + * resume signaling later. + */ + *status |= USB_PORT_STAT_SUSPEND; + } + return 0; +} + +static u32 xhci_get_ext_port_status(u32 raw_port_status, u32 port_li) +{ + u32 ext_stat = 0; + int speed_id; + + /* only support rx and tx lane counts of 1 in usb3.1 spec */ + speed_id = DEV_PORT_SPEED(raw_port_status); + ext_stat |= speed_id; /* bits 3:0, RX speed id */ + ext_stat |= speed_id << 4; /* bits 7:4, TX speed id */ + + ext_stat |= PORT_RX_LANES(port_li) << 8; /* bits 11:8 Rx lane count */ + ext_stat |= PORT_TX_LANES(port_li) << 12; /* bits 15:12 Tx lane count */ + + return ext_stat; +} + +static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status, + u32 portsc) +{ + struct xhci_bus_state *bus_state; + struct xhci_hcd *xhci; + struct usb_hcd *hcd; + u32 link_state; + u32 portnum; + + bus_state = &port->rhub->bus_state; + xhci = hcd_to_xhci(port->rhub->hcd); + hcd = port->rhub->hcd; + link_state = portsc & PORT_PLS_MASK; + portnum = port->hcd_portnum; + + /* USB3 specific wPortChange bits + * + * Port link change with port in resume state should not be + * reported to usbcore, as this is an internal state to be + * handled by xhci driver. Reporting PLC to usbcore may + * cause usbcore clearing PLC first and port change event + * irq won't be generated. + */ + + if (portsc & PORT_PLC && (link_state != XDEV_RESUME)) + *status |= USB_PORT_STAT_C_LINK_STATE << 16; + if (portsc & PORT_WRC) + *status |= USB_PORT_STAT_C_BH_RESET << 16; + if (portsc & PORT_CEC) + *status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; + + /* USB3 specific wPortStatus bits */ + if (portsc & PORT_POWER) { + *status |= USB_SS_PORT_STAT_POWER; + /* link state handling */ + if (link_state == XDEV_U0) + bus_state->suspended_ports &= ~(1 << portnum); + } + + /* remote wake resume signaling complete */ + if (bus_state->port_remote_wakeup & (1 << portnum) && + link_state != XDEV_RESUME && + link_state != XDEV_RECOVERY) { + bus_state->port_remote_wakeup &= ~(1 << portnum); + usb_hcd_end_port_resume(&hcd->self, portnum); + } + + xhci_hub_report_usb3_link_state(xhci, status, portsc); + xhci_del_comp_mod_timer(xhci, portsc, portnum); +} + +static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, + u32 portsc, unsigned long *flags) +{ + struct xhci_bus_state *bus_state; + u32 link_state; + u32 portnum; + int ret; + + bus_state = &port->rhub->bus_state; + link_state = portsc & PORT_PLS_MASK; + portnum = port->hcd_portnum; + + /* USB2 wPortStatus bits */ + if (portsc & PORT_POWER) { + *status |= USB_PORT_STAT_POWER; + + /* link state is only valid if port is powered */ + if (link_state == XDEV_U3) + *status |= USB_PORT_STAT_SUSPEND; + if (link_state == XDEV_U2) + *status |= USB_PORT_STAT_L1; + if (link_state == XDEV_U0) { + bus_state->resume_done[portnum] = 0; + clear_bit(portnum, &bus_state->resuming_ports); + if (bus_state->suspended_ports & (1 << portnum)) { + bus_state->suspended_ports &= ~(1 << portnum); + bus_state->port_c_suspend |= 1 << portnum; + } + } + if (link_state == XDEV_RESUME) { + ret = xhci_handle_usb2_port_link_resume(port, status, + portsc, flags); + if (ret) + return; + } + } +} + +/* + * Converts a raw xHCI port status into the format that external USB 2.0 or USB + * 3.0 hubs use. + * + * Possible side effects: + * - Mark a port as being done with device resume, + * and ring the endpoint doorbells. + * - Stop the Synopsys redriver Compliance Mode polling. 
+ * - Drop and reacquire the xHCI lock, in order to wait for port resume. + */ +static u32 xhci_get_port_status(struct usb_hcd *hcd, + struct xhci_bus_state *bus_state, + u16 wIndex, u32 raw_port_status, + unsigned long *flags) + __releases(&xhci->lock) + __acquires(&xhci->lock) +{ + u32 status = 0; + struct xhci_hub *rhub; + struct xhci_port *port; + + rhub = xhci_get_rhub(hcd); + port = rhub->ports[wIndex]; + + /* common wPortChange bits */ + if (raw_port_status & PORT_CSC) + status |= USB_PORT_STAT_C_CONNECTION << 16; + if (raw_port_status & PORT_PEC) + status |= USB_PORT_STAT_C_ENABLE << 16; + if ((raw_port_status & PORT_OCC)) + status |= USB_PORT_STAT_C_OVERCURRENT << 16; + if ((raw_port_status & PORT_RC)) + status |= USB_PORT_STAT_C_RESET << 16; + + /* common wPortStatus bits */ + if (raw_port_status & PORT_CONNECT) { + status |= USB_PORT_STAT_CONNECTION; + status |= xhci_port_speed(raw_port_status); + } + if (raw_port_status & PORT_PE) + status |= USB_PORT_STAT_ENABLE; + if (raw_port_status & PORT_OC) + status |= USB_PORT_STAT_OVERCURRENT; + if (raw_port_status & PORT_RESET) + status |= USB_PORT_STAT_RESET; + + /* USB2 and USB3 specific bits, including Port Link State */ + if (hcd->speed >= HCD_USB3) + xhci_get_usb3_port_status(port, &status, raw_port_status); + else + xhci_get_usb2_port_status(port, &status, raw_port_status, + flags); + /* + * Clear stale usb2 resume signalling variables in case port changed + * state during resume signalling. For example on error + */ + if ((bus_state->resume_done[wIndex] || + test_bit(wIndex, &bus_state->resuming_ports)) && + (raw_port_status & PORT_PLS_MASK) != XDEV_U3 && + (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) { + bus_state->resume_done[wIndex] = 0; + clear_bit(wIndex, &bus_state->resuming_ports); + usb_hcd_end_port_resume(&hcd->self, wIndex); + } + + if (bus_state->port_c_suspend & (1 << wIndex)) + status |= USB_PORT_STAT_C_SUSPEND << 16; + + return status; +} + +int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int max_ports; + unsigned long flags; + u32 temp, status; + int retval = 0; + int slot_id; + struct xhci_bus_state *bus_state; + u16 link_state = 0; + u16 wake_mask = 0; + u16 timeout = 0; + u16 test_mode = 0; + struct xhci_hub *rhub; + struct xhci_port **ports; + + rhub = xhci_get_rhub(hcd); + ports = rhub->ports; + max_ports = rhub->num_ports; + bus_state = &rhub->bus_state; + + spin_lock_irqsave(&xhci->lock, flags); + switch (typeReq) { + case GetHubStatus: + /* No power source, over-current reported per port */ + memset(buf, 0, 4); + break; + case GetHubDescriptor: + /* Check to make sure userspace is asking for the USB 3.0 hub + * descriptor for the USB 3.0 roothub. If not, we stall the + * endpoint, like external hubs do. 
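+		 * A mismatch goes to the error label below, whose -EPIPE
+		 * return is reported to usbcore as a control-endpoint stall.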
+ */ + if (hcd->speed >= HCD_USB3 && + (wLength < USB_DT_SS_HUB_SIZE || + wValue != (USB_DT_SS_HUB << 8))) { + xhci_dbg(xhci, "Wrong hub descriptor type for " + "USB 3.0 roothub.\n"); + goto error; + } + xhci_hub_descriptor(hcd, xhci, + (struct usb_hub_descriptor *) buf); + break; + case DeviceRequest | USB_REQ_GET_DESCRIPTOR: + if ((wValue & 0xff00) != (USB_DT_BOS << 8)) + goto error; + + if (hcd->speed < HCD_USB3) + goto error; + + retval = xhci_create_usb3_bos_desc(xhci, buf, wLength); + spin_unlock_irqrestore(&xhci->lock, flags); + return retval; + case GetPortStatus: + if (!wIndex || wIndex > max_ports) + goto error; + wIndex--; + temp = readl(ports[wIndex]->addr); + if (temp == ~(u32)0) { + xhci_hc_died(xhci); + retval = -ENODEV; + break; + } + trace_xhci_get_port_status(wIndex, temp); + status = xhci_get_port_status(hcd, bus_state, wIndex, temp, + &flags); + if (status == 0xffffffff) + goto error; + + xhci_dbg(xhci, "Get port status %d-%d read: 0x%x, return 0x%x", + hcd->self.busnum, wIndex + 1, temp, status); + + put_unaligned(cpu_to_le32(status), (__le32 *) buf); + /* if USB 3.1 extended port status return additional 4 bytes */ + if (wValue == 0x02) { + u32 port_li; + + if (hcd->speed < HCD_USB31 || wLength != 8) { + xhci_err(xhci, "get ext port status invalid parameter\n"); + retval = -EINVAL; + break; + } + port_li = readl(ports[wIndex]->addr + PORTLI); + status = xhci_get_ext_port_status(temp, port_li); + put_unaligned_le32(status, &buf[4]); + } + break; + case SetPortFeature: + if (wValue == USB_PORT_FEAT_LINK_STATE) + link_state = (wIndex & 0xff00) >> 3; + if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK) + wake_mask = wIndex & 0xff00; + if (wValue == USB_PORT_FEAT_TEST) + test_mode = (wIndex & 0xff00) >> 8; + /* The MSB of wIndex is the U1/U2 timeout */ + timeout = (wIndex & 0xff00) >> 8; + wIndex &= 0xff; + if (!wIndex || wIndex > max_ports) + goto error; + wIndex--; + temp = readl(ports[wIndex]->addr); + if (temp == ~(u32)0) { + xhci_hc_died(xhci); + retval = -ENODEV; + break; + } + temp = xhci_port_state_to_neutral(temp); + /* FIXME: What new port features do we need to support? */ + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + temp = readl(ports[wIndex]->addr); + if ((temp & PORT_PLS_MASK) != XDEV_U0) { + /* Resume the port to U0 first */ + xhci_set_link_state(xhci, ports[wIndex], + XDEV_U0); + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(10); + spin_lock_irqsave(&xhci->lock, flags); + } + /* In spec software should not attempt to suspend + * a port unless the port reports that it is in the + * enabled (PED = ‘1’,PLS < ‘3’) state. 
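+			 * (PLS values 0-2 correspond to U0-U2; a value of 3 or
+			 * above means the link is already in U3 or another
+			 * non-suspendable state, which the check below rejects.)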
+ */ + temp = readl(ports[wIndex]->addr); + if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) + || (temp & PORT_PLS_MASK) >= XDEV_U3) { + xhci_warn(xhci, "USB core suspending port %d-%d not in U0/U1/U2\n", + hcd->self.busnum, wIndex + 1); + goto error; + } + + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + wIndex + 1); + if (!slot_id) { + xhci_warn(xhci, "slot_id is zero\n"); + goto error; + } + /* unlock to execute stop endpoint commands */ + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_stop_device(xhci, slot_id, 1); + spin_lock_irqsave(&xhci->lock, flags); + + xhci_set_link_state(xhci, ports[wIndex], XDEV_U3); + + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(10); /* wait device to enter */ + spin_lock_irqsave(&xhci->lock, flags); + + temp = readl(ports[wIndex]->addr); + bus_state->suspended_ports |= 1 << wIndex; + break; + case USB_PORT_FEAT_LINK_STATE: + temp = readl(ports[wIndex]->addr); + /* Disable port */ + if (link_state == USB_SS_PORT_LS_SS_DISABLED) { + xhci_dbg(xhci, "Disable port %d-%d\n", + hcd->self.busnum, wIndex + 1); + temp = xhci_port_state_to_neutral(temp); + /* + * Clear all change bits, so that we get a new + * connection event. + */ + temp |= PORT_CSC | PORT_PEC | PORT_WRC | + PORT_OCC | PORT_RC | PORT_PLC | + PORT_CEC; + writel(temp | PORT_PE, ports[wIndex]->addr); + temp = readl(ports[wIndex]->addr); + break; + } + + /* Put link in RxDetect (enable port) */ + if (link_state == USB_SS_PORT_LS_RX_DETECT) { + xhci_dbg(xhci, "Enable port %d-%d\n", + hcd->self.busnum, wIndex + 1); + xhci_set_link_state(xhci, ports[wIndex], + link_state); + temp = readl(ports[wIndex]->addr); + break; + } + + /* + * For xHCI 1.1 according to section 4.19.1.2.4.1 a + * root hub port's transition to compliance mode upon + * detecting LFPS timeout may be controlled by an + * Compliance Transition Enabled (CTE) flag (not + * software visible). This flag is set by writing 0xA + * to PORTSC PLS field which will allow transition to + * compliance mode the next time LFPS timeout is + * encountered. A warm reset will clear it. + * + * The CTE flag is only supported if the HCCPARAMS2 CTC + * flag is set, otherwise, the compliance substate is + * automatically entered as on 1.0 and prior. + */ + if (link_state == USB_SS_PORT_LS_COMP_MOD) { + if (!HCC2_CTC(xhci->hcc_params2)) { + xhci_dbg(xhci, "CTC flag is 0, port already supports entering compliance mode\n"); + break; + } + + if ((temp & PORT_CONNECT)) { + xhci_warn(xhci, "Can't set compliance mode when port is connected\n"); + goto error; + } + + xhci_dbg(xhci, "Enable compliance mode transition for port %d-%d\n", + hcd->self.busnum, wIndex + 1); + xhci_set_link_state(xhci, ports[wIndex], + link_state); + + temp = readl(ports[wIndex]->addr); + break; + } + /* Port must be enabled */ + if (!(temp & PORT_PE)) { + retval = -ENODEV; + break; + } + /* Can't set port link state above '3' (U3) */ + if (link_state > USB_SS_PORT_LS_U3) { + xhci_warn(xhci, "Cannot set port %d-%d link state %d\n", + hcd->self.busnum, wIndex + 1, + link_state); + goto error; + } + + /* + * set link to U0, steps depend on current link state. + * U3: set link to U0 and wait for u3exit completion. + * U1/U2: no PLC complete event, only set link to U0. 
+ * Resume/Recovery: device initiated U0, only wait for + * completion + */ + if (link_state == USB_SS_PORT_LS_U0) { + u32 pls = temp & PORT_PLS_MASK; + bool wait_u0 = false; + + /* already in U0 */ + if (pls == XDEV_U0) + break; + if (pls == XDEV_U3 || + pls == XDEV_RESUME || + pls == XDEV_RECOVERY) { + wait_u0 = true; + reinit_completion(&bus_state->u3exit_done[wIndex]); + } + if (pls <= XDEV_U3) /* U1, U2, U3 */ + xhci_set_link_state(xhci, ports[wIndex], + USB_SS_PORT_LS_U0); + if (!wait_u0) { + if (pls > XDEV_U3) + goto error; + break; + } + spin_unlock_irqrestore(&xhci->lock, flags); + if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], + msecs_to_jiffies(100))) + xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n", + hcd->self.busnum, wIndex + 1); + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); + break; + } + + if (link_state == USB_SS_PORT_LS_U3) { + int retries = 16; + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + wIndex + 1); + if (slot_id) { + /* unlock to execute stop endpoint + * commands */ + spin_unlock_irqrestore(&xhci->lock, + flags); + xhci_stop_device(xhci, slot_id, 1); + spin_lock_irqsave(&xhci->lock, flags); + } + xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3); + spin_unlock_irqrestore(&xhci->lock, flags); + while (retries--) { + usleep_range(4000, 8000); + temp = readl(ports[wIndex]->addr); + if ((temp & PORT_PLS_MASK) == XDEV_U3) + break; + } + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); + bus_state->suspended_ports |= 1 << wIndex; + } + break; + case USB_PORT_FEAT_POWER: + /* + * Turn on ports, even if there isn't per-port switching. + * HC will report connect events even before this is set. + * However, hub_wq will ignore the roothub events until + * the roothub is registered. 
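+			 * If the controller does not implement per-port power
+			 * switching (HCC_PPC clear), ports are always powered
+			 * and this write has no practical effect.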
+ */ + xhci_set_port_power(xhci, hcd, wIndex, true, &flags); + break; + case USB_PORT_FEAT_RESET: + temp = (temp | PORT_RESET); + writel(temp, ports[wIndex]->addr); + + temp = readl(ports[wIndex]->addr); + xhci_dbg(xhci, "set port reset, actual port %d-%d status = 0x%x\n", + hcd->self.busnum, wIndex + 1, temp); + break; + case USB_PORT_FEAT_REMOTE_WAKE_MASK: + xhci_set_remote_wake_mask(xhci, ports[wIndex], + wake_mask); + temp = readl(ports[wIndex]->addr); + xhci_dbg(xhci, "set port remote wake mask, actual port %d-%d status = 0x%x\n", + hcd->self.busnum, wIndex + 1, temp); + break; + case USB_PORT_FEAT_BH_PORT_RESET: + temp |= PORT_WR; + writel(temp, ports[wIndex]->addr); + temp = readl(ports[wIndex]->addr); + break; + case USB_PORT_FEAT_U1_TIMEOUT: + if (hcd->speed < HCD_USB3) + goto error; + temp = readl(ports[wIndex]->addr + PORTPMSC); + temp &= ~PORT_U1_TIMEOUT_MASK; + temp |= PORT_U1_TIMEOUT(timeout); + writel(temp, ports[wIndex]->addr + PORTPMSC); + break; + case USB_PORT_FEAT_U2_TIMEOUT: + if (hcd->speed < HCD_USB3) + goto error; + temp = readl(ports[wIndex]->addr + PORTPMSC); + temp &= ~PORT_U2_TIMEOUT_MASK; + temp |= PORT_U2_TIMEOUT(timeout); + writel(temp, ports[wIndex]->addr + PORTPMSC); + break; + case USB_PORT_FEAT_TEST: + /* 4.19.6 Port Test Modes (USB2 Test Mode) */ + if (hcd->speed != HCD_USB2) + goto error; + if (test_mode > USB_TEST_FORCE_ENABLE || + test_mode < USB_TEST_J) + goto error; + retval = xhci_enter_test_mode(xhci, test_mode, wIndex, + &flags); + break; + default: + goto error; + } + /* unblock any posted writes */ + temp = readl(ports[wIndex]->addr); + break; + case ClearPortFeature: + if (!wIndex || wIndex > max_ports) + goto error; + wIndex--; + temp = readl(ports[wIndex]->addr); + if (temp == ~(u32)0) { + xhci_hc_died(xhci); + retval = -ENODEV; + break; + } + /* FIXME: What new port features do we need to support? 
*/ + temp = xhci_port_state_to_neutral(temp); + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + temp = readl(ports[wIndex]->addr); + xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n"); + xhci_dbg(xhci, "PORTSC %04x\n", temp); + if (temp & PORT_RESET) + goto error; + if ((temp & PORT_PLS_MASK) == XDEV_U3) { + if ((temp & PORT_PE) == 0) + goto error; + + set_bit(wIndex, &bus_state->resuming_ports); + usb_hcd_start_port_resume(&hcd->self, wIndex); + xhci_set_link_state(xhci, ports[wIndex], + XDEV_RESUME); + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(USB_RESUME_TIMEOUT); + spin_lock_irqsave(&xhci->lock, flags); + xhci_set_link_state(xhci, ports[wIndex], + XDEV_U0); + clear_bit(wIndex, &bus_state->resuming_ports); + usb_hcd_end_port_resume(&hcd->self, wIndex); + } + bus_state->port_c_suspend |= 1 << wIndex; + + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + wIndex + 1); + if (!slot_id) { + xhci_dbg(xhci, "slot_id is zero\n"); + goto error; + } + xhci_ring_device(xhci, slot_id); + break; + case USB_PORT_FEAT_C_SUSPEND: + bus_state->port_c_suspend &= ~(1 << wIndex); + fallthrough; + case USB_PORT_FEAT_C_RESET: + case USB_PORT_FEAT_C_BH_PORT_RESET: + case USB_PORT_FEAT_C_CONNECTION: + case USB_PORT_FEAT_C_OVER_CURRENT: + case USB_PORT_FEAT_C_ENABLE: + case USB_PORT_FEAT_C_PORT_LINK_STATE: + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: + xhci_clear_port_change_bit(xhci, wValue, wIndex, + ports[wIndex]->addr, temp); + break; + case USB_PORT_FEAT_ENABLE: + xhci_disable_port(hcd, xhci, wIndex, + ports[wIndex]->addr, temp); + break; + case USB_PORT_FEAT_POWER: + xhci_set_port_power(xhci, hcd, wIndex, false, &flags); + break; + case USB_PORT_FEAT_TEST: + retval = xhci_exit_test_mode(xhci); + break; + default: + goto error; + } + break; + default: +error: + /* "stall" on error */ + retval = -EPIPE; + } + spin_unlock_irqrestore(&xhci->lock, flags); + return retval; +} + +/* + * Returns 0 if the status hasn't changed, or the number of bytes in buf. + * Ports are 0-indexed from the HCD point of view, + * and 1-indexed from the USB core pointer of view. + * + * Note that the status change bits will be cleared as soon as a port status + * change event is generated, so we use the saved status from that event. + */ +int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + unsigned long flags; + u32 temp, status; + u32 mask; + int i, retval; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int max_ports; + struct xhci_bus_state *bus_state; + bool reset_change = false; + struct xhci_hub *rhub; + struct xhci_port **ports; + + rhub = xhci_get_rhub(hcd); + ports = rhub->ports; + max_ports = rhub->num_ports; + bus_state = &rhub->bus_state; + + /* Initial status is no changes */ + retval = (max_ports + 8) / 8; + memset(buf, 0, retval); + + /* + * Inform the usbcore about resume-in-progress by returning + * a non-zero value even if there are no status changes. + */ + spin_lock_irqsave(&xhci->lock, flags); + + status = bus_state->resuming_ports; + + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; + + /* For each port, did anything change? If so, set that bit in buf. 
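+	 * Bit 0 of the status-change bitmap is reserved for the hub itself,
+	 * so port i maps to bit (i + 1) of buf.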
*/ + for (i = 0; i < max_ports; i++) { + temp = readl(ports[i]->addr); + if (temp == ~(u32)0) { + xhci_hc_died(xhci); + retval = -ENODEV; + break; + } + trace_xhci_hub_status_data(i, temp); + + if ((temp & mask) != 0 || + (bus_state->port_c_suspend & 1 << i) || + (bus_state->resume_done[i] && time_after_eq( + jiffies, bus_state->resume_done[i]))) { + buf[(i + 1) / 8] |= 1 << (i + 1) % 8; + status = 1; + } + if ((temp & PORT_RC)) + reset_change = true; + if (temp & PORT_OC) + status = 1; + } + if (!status && !reset_change) { + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + } + spin_unlock_irqrestore(&xhci->lock, flags); + return status ? retval : 0; +} + +#ifdef CONFIG_PM + +int xhci_bus_suspend(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int max_ports, port_index; + struct xhci_bus_state *bus_state; + unsigned long flags; + struct xhci_hub *rhub; + struct xhci_port **ports; + u32 portsc_buf[USB_MAXCHILDREN]; + bool wake_enabled; + + rhub = xhci_get_rhub(hcd); + ports = rhub->ports; + max_ports = rhub->num_ports; + bus_state = &rhub->bus_state; + wake_enabled = hcd->self.root_hub->do_remote_wakeup; + + spin_lock_irqsave(&xhci->lock, flags); + + if (wake_enabled) { + if (bus_state->resuming_ports || /* USB2 */ + bus_state->port_remote_wakeup) { /* USB3 */ + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "suspend failed because a port is resuming\n"); + return -EBUSY; + } + } + /* + * Prepare ports for suspend, but don't write anything before all ports + * are checked and we know bus suspend can proceed + */ + bus_state->bus_suspended = 0; + port_index = max_ports; + while (port_index--) { + u32 t1, t2; + int retries = 10; +retry: + t1 = readl(ports[port_index]->addr); + t2 = xhci_port_state_to_neutral(t1); + portsc_buf[port_index] = 0; + + /* + * Give a USB3 port in link training time to finish, but don't + * prevent suspend as port might be stuck + */ + if ((hcd->speed >= HCD_USB3) && retries-- && + (t1 & PORT_PLS_MASK) == XDEV_POLLING) { + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(XHCI_PORT_POLLING_LFPS_TIME); + spin_lock_irqsave(&xhci->lock, flags); + xhci_dbg(xhci, "port %d-%d polling in bus suspend, waiting\n", + hcd->self.busnum, port_index + 1); + goto retry; + } + /* bail out if port detected a over-current condition */ + if (t1 & PORT_OC) { + bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n"); + return -EBUSY; + } + /* suspend ports in U0, or bail out for new connect changes */ + if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { + if ((t1 & PORT_CSC) && wake_enabled) { + bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Bus suspend bailout, port connect change\n"); + return -EBUSY; + } + xhci_dbg(xhci, "port %d-%d not suspended\n", + hcd->self.busnum, port_index + 1); + t2 &= ~PORT_PLS_MASK; + t2 |= PORT_LINK_STROBE | XDEV_U3; + set_bit(port_index, &bus_state->bus_suspended); + } + /* USB core sets remote wake mask for USB 3.0 hubs, + * including the USB 3.0 roothub, but only if CONFIG_PM + * is enabled, so also enable remote wake here. 
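+	 * Below, ports with a device attached arm disconnect/over-current
+	 * wake, while empty ports arm connect/over-current wake instead.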
+ */ + if (wake_enabled) { + if (t1 & PORT_CONNECT) { + t2 |= PORT_WKOC_E | PORT_WKDISC_E; + t2 &= ~PORT_WKCONN_E; + } else { + t2 |= PORT_WKOC_E | PORT_WKCONN_E; + t2 &= ~PORT_WKDISC_E; + } + + if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && + (hcd->speed < HCD_USB3)) { + if (usb_amd_pt_check_port(hcd->self.controller, + port_index)) + t2 &= ~PORT_WAKE_BITS; + } + } else + t2 &= ~PORT_WAKE_BITS; + + t1 = xhci_port_state_to_neutral(t1); + if (t1 != t2) + portsc_buf[port_index] = t2; + } + + /* write port settings, stopping and suspending ports if needed */ + port_index = max_ports; + while (port_index--) { + if (!portsc_buf[port_index]) + continue; + if (test_bit(port_index, &bus_state->bus_suspended)) { + int slot_id; + + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + port_index + 1); + if (slot_id) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_stop_device(xhci, slot_id, 1); + spin_lock_irqsave(&xhci->lock, flags); + } + } + writel(portsc_buf[port_index], ports[port_index]->addr); + } + hcd->state = HC_STATE_SUSPENDED; + bus_state->next_statechange = jiffies + msecs_to_jiffies(10); + spin_unlock_irqrestore(&xhci->lock, flags); + + if (bus_state->bus_suspended) + usleep_range(5000, 10000); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_bus_suspend); + +/* + * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3. + * warm reset a USB3 device stuck in polling or compliance mode after resume. + * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8 + */ +static bool xhci_port_missing_cas_quirk(struct xhci_port *port) +{ + u32 portsc; + + portsc = readl(port->addr); + + /* if any of these are set we are not stuck */ + if (portsc & (PORT_CONNECT | PORT_CAS)) + return false; + + if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) && + ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE)) + return false; + + /* clear wakeup/change bits, and do a warm port reset */ + portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); + portsc |= PORT_WR; + writel(portsc, port->addr); + /* flush write */ + readl(port->addr); + return true; +} + +int xhci_bus_resume(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_bus_state *bus_state; + unsigned long flags; + int max_ports, port_index; + int slot_id; + int sret; + u32 next_state; + u32 temp, portsc; + struct xhci_hub *rhub; + struct xhci_port **ports; + + rhub = xhci_get_rhub(hcd); + ports = rhub->ports; + max_ports = rhub->num_ports; + bus_state = &rhub->bus_state; + + if (time_before(jiffies, bus_state->next_statechange)) + msleep(5); + + spin_lock_irqsave(&xhci->lock, flags); + if (!HCD_HW_ACCESSIBLE(hcd)) { + spin_unlock_irqrestore(&xhci->lock, flags); + return -ESHUTDOWN; + } + + /* delay the irqs */ + temp = readl(&xhci->op_regs->command); + temp &= ~CMD_EIE; + writel(temp, &xhci->op_regs->command); + + /* bus specific resume for ports we suspended at bus_suspend */ + if (hcd->speed >= HCD_USB3) + next_state = XDEV_U0; + else + next_state = XDEV_RESUME; + + port_index = max_ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); + + /* warm reset CAS limited ports stuck in polling/compliance */ + if ((xhci->quirks & XHCI_MISSING_CAS) && + (hcd->speed >= HCD_USB3) && + xhci_port_missing_cas_quirk(ports[port_index])) { + xhci_dbg(xhci, "reset stuck port %d-%d\n", + hcd->self.busnum, port_index + 1); + clear_bit(port_index, &bus_state->bus_suspended); + continue; + } + /* resume if we suspended the link, and it is still suspended */ + if (test_bit(port_index, 
&bus_state->bus_suspended)) + switch (portsc & PORT_PLS_MASK) { + case XDEV_U3: + portsc = xhci_port_state_to_neutral(portsc); + portsc &= ~PORT_PLS_MASK; + portsc |= PORT_LINK_STROBE | next_state; + break; + case XDEV_RESUME: + /* resume already initiated */ + break; + default: + /* not in a resumeable state, ignore it */ + clear_bit(port_index, + &bus_state->bus_suspended); + break; + } + /* disable wake for all ports, write new link state if needed */ + portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); + writel(portsc, ports[port_index]->addr); + } + + /* USB2 specific resume signaling delay and U0 link state transition */ + if (hcd->speed < HCD_USB3) { + if (bus_state->bus_suspended) { + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(USB_RESUME_TIMEOUT); + spin_lock_irqsave(&xhci->lock, flags); + } + for_each_set_bit(port_index, &bus_state->bus_suspended, + BITS_PER_LONG) { + /* Clear PLC to poll it later for U0 transition */ + xhci_test_and_clear_bit(xhci, ports[port_index], + PORT_PLC); + xhci_set_link_state(xhci, ports[port_index], XDEV_U0); + } + } + + /* poll for U0 link state complete, both USB2 and USB3 */ + for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) { + sret = xhci_handshake(ports[port_index]->addr, PORT_PLC, + PORT_PLC, 10 * 1000); + if (sret) { + xhci_warn(xhci, "port %d-%d resume PLC timeout\n", + hcd->self.busnum, port_index + 1); + continue; + } + xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC); + slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1); + if (slot_id) + xhci_ring_device(xhci, slot_id); + } + (void) readl(&xhci->op_regs->command); + + bus_state->next_statechange = jiffies + msecs_to_jiffies(5); + /* re-enable irqs */ + temp = readl(&xhci->op_regs->command); + temp |= CMD_EIE; + writel(temp, &xhci->op_regs->command); + temp = readl(&xhci->op_regs->command); + + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(xhci_bus_resume); + +unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd) +{ + struct xhci_hub *rhub = xhci_get_rhub(hcd); + + /* USB3 port wakeups are reported via usb_wakeup_notification() */ + return rhub->bus_state.resuming_ports; /* USB2 ports only */ +} + +#endif /* CONFIG_PM */ diff --git a/usb/host/xhci-mem.c b/usb/host/xhci-mem.c new file mode 100644 index 0000000..4324fd3 --- /dev/null +++ b/usb/host/xhci-mem.c @@ -0,0 +1,2701 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + */ + +#include +#include +#include +#include +#include + +#include "xhci.h" +#include "xhci-trace.h" +#include "xhci-debugfs.h" + +/* + * Allocates a generic ring segment from the ring pool, sets the dma address, + * initializes the segment to zero, and sets the private next pointer to NULL. 
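+ * Note that dma_pool_zalloc() already hands back zeroed TRBs, which is
+ * what the spec wording quoted below requires; the cycle_state == 0
+ * case additionally pre-sets the cycle bit in every TRB.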
+ * + * Section 4.11.1.1: + * "All components of all Command and Transfer TRBs shall be initialized to '0'" + */ +static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, + unsigned int cycle_state, + unsigned int max_packet, + gfp_t flags) +{ + struct xhci_segment *seg; + dma_addr_t dma; + int i; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); + if (!seg) + return NULL; + + seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma); + if (!seg->trbs) { + kfree(seg); + return NULL; + } + + if (max_packet) { + seg->bounce_buf = kzalloc_node(max_packet, flags, + dev_to_node(dev)); + if (!seg->bounce_buf) { + dma_pool_free(xhci->segment_pool, seg->trbs, dma); + kfree(seg); + return NULL; + } + } + /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ + if (cycle_state == 0) { + for (i = 0; i < TRBS_PER_SEGMENT; i++) + seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); + } + seg->dma = dma; + seg->next = NULL; + + return seg; +} + +void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) +{ + if (seg->trbs) { + dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); + seg->trbs = NULL; + } + kfree(seg->bounce_buf); + kfree(seg); +} +EXPORT_SYMBOL_GPL(xhci_segment_free); + +void xhci_free_segments_for_ring(struct xhci_hcd *xhci, + struct xhci_segment *first) +{ + struct xhci_segment *seg; + + seg = first->next; + while (seg != first) { + struct xhci_segment *next = seg->next; + xhci_segment_free(xhci, seg); + seg = next; + } + xhci_segment_free(xhci, first); +} + +/* + * Make the prev segment point to the next segment. + * + * Change the last TRB in the prev segment to be a Link TRB which points to the + * DMA address of the next segment. The caller needs to set any Link TRB + * related flags, such as End TRB, Toggle Cycle, and no snoop. + */ +void xhci_link_segments(struct xhci_segment *prev, + struct xhci_segment *next, + enum xhci_ring_type type, bool chain_links) +{ + u32 val; + + if (!prev || !next) + return; + prev->next = next; + if (type != TYPE_EVENT) { + prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = + cpu_to_le64(next->dma); + + /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ + val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); + val &= ~TRB_TYPE_BITMASK; + val |= TRB_TYPE(TRB_LINK); + if (chain_links) + val |= TRB_CHAIN; + prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); + } +} +EXPORT_SYMBOL_GPL(xhci_link_segments); + +/* + * Link the ring to the new segments. + * Set Toggle Cycle for the new ring if needed. 
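+ * The new chain is spliced in immediately after the current enqueue
+ * segment, and if the old last segment carried the Toggle Cycle bit in
+ * its link TRB, that bit is moved to the new last segment so the
+ * producer cycle state still flips exactly once per trip around the ring.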
+ */ +static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, + struct xhci_segment *first, struct xhci_segment *last, + unsigned int num_segs) +{ + struct xhci_segment *next; + bool chain_links; + + if (!ring || !first || !last) + return; + + /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */ + chain_links = !!(xhci_link_trb_quirk(xhci) || + (ring->type == TYPE_ISOC && + (xhci->quirks & XHCI_AMD_0x96_HOST))); + + next = ring->enq_seg->next; + xhci_link_segments(ring->enq_seg, first, ring->type, chain_links); + xhci_link_segments(last, next, ring->type, chain_links); + ring->num_segs += num_segs; + ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; + + if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { + ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control + &= ~cpu_to_le32(LINK_TOGGLE); + last->trbs[TRBS_PER_SEGMENT-1].link.control + |= cpu_to_le32(LINK_TOGGLE); + ring->last_seg = last; + } +} + +/* + * We need a radix tree for mapping physical addresses of TRBs to which stream + * ID they belong to. We need to do this because the host controller won't tell + * us which stream ring the TRB came from. We could store the stream ID in an + * event data TRB, but that doesn't help us for the cancellation case, since the + * endpoint may stop before it reaches that event data TRB. + * + * The radix tree maps the upper portion of the TRB DMA address to a ring + * segment that has the same upper portion of DMA addresses. For example, say I + * have segments of size 1KB, that are always 1KB aligned. A segment may + * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the + * key to the stream ID is 0x43244. I can use the DMA address of the TRB to + * pass the radix tree a key to get the right stream ID: + * + * 0x10c90fff >> 10 = 0x43243 + * 0x10c912c0 >> 10 = 0x43244 + * 0x10c91400 >> 10 = 0x43245 + * + * Obviously, only those TRBs with DMA addresses that are within the segment + * will make the radix tree return the stream ID for that ring. + * + * Caveats for the radix tree: + * + * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an + * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be + * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the + * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit + * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit + * extended systems (where the DMA address can be bigger than 32-bits), + * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. + */ +static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map, + struct xhci_ring *ring, + struct xhci_segment *seg, + gfp_t mem_flags) +{ + unsigned long key; + int ret; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + /* Skip any segments that were already added. 
*/ + if (radix_tree_lookup(trb_address_map, key)) + return 0; + + ret = radix_tree_maybe_preload(mem_flags); + if (ret) + return ret; + ret = radix_tree_insert(trb_address_map, + key, ring); + radix_tree_preload_end(); + return ret; +} + +static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map, + struct xhci_segment *seg) +{ + unsigned long key; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + if (radix_tree_lookup(trb_address_map, key)) + radix_tree_delete(trb_address_map, key); +} + +static int xhci_update_stream_segment_mapping( + struct radix_tree_root *trb_address_map, + struct xhci_ring *ring, + struct xhci_segment *first_seg, + struct xhci_segment *last_seg, + gfp_t mem_flags) +{ + struct xhci_segment *seg; + struct xhci_segment *failed_seg; + int ret; + + if (WARN_ON_ONCE(trb_address_map == NULL)) + return 0; + + seg = first_seg; + do { + ret = xhci_insert_segment_mapping(trb_address_map, + ring, seg, mem_flags); + if (ret) + goto remove_streams; + if (seg == last_seg) + return 0; + seg = seg->next; + } while (seg != first_seg); + + return 0; + +remove_streams: + failed_seg = seg; + seg = first_seg; + do { + xhci_remove_segment_mapping(trb_address_map, seg); + if (seg == failed_seg) + return ret; + seg = seg->next; + } while (seg != first_seg); + + return ret; +} + +static void xhci_remove_stream_mapping(struct xhci_ring *ring) +{ + struct xhci_segment *seg; + + if (WARN_ON_ONCE(ring->trb_address_map == NULL)) + return; + + seg = ring->first_seg; + do { + xhci_remove_segment_mapping(ring->trb_address_map, seg); + seg = seg->next; + } while (seg != ring->first_seg); +} + +static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags) +{ + return xhci_update_stream_segment_mapping(ring->trb_address_map, ring, + ring->first_seg, ring->last_seg, mem_flags); +} + +/* XXX: Do we need the hcd structure in all these functions? */ +void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) +{ + if (!ring) + return; + + trace_xhci_ring_free(ring); + + if (ring->first_seg) { + if (ring->type == TYPE_STREAM) + xhci_remove_stream_mapping(ring); + xhci_free_segments_for_ring(xhci, ring->first_seg); + } + + kfree(ring); +} +EXPORT_SYMBOL_GPL(xhci_ring_free); + +void xhci_initialize_ring_info(struct xhci_ring *ring, + unsigned int cycle_state) +{ + /* The ring is empty, so the enqueue pointer == dequeue pointer */ + ring->enqueue = ring->first_seg->trbs; + ring->enq_seg = ring->first_seg; + ring->dequeue = ring->enqueue; + ring->deq_seg = ring->first_seg; + /* The ring is initialized to 0. The producer must write 1 to the cycle + * bit to handover ownership of the TRB, so PCS = 1. The consumer must + * compare CCS to the cycle bit to check ownership, so CCS = 1. + * + * New rings are initialized with cycle state equal to 1; if we are + * handling ring expansion, set the cycle state equal to the old ring. 
+ */ + ring->cycle_state = cycle_state; + + /* + * Each segment has a link TRB, and leave an extra TRB for SW + * accounting purpose + */ + ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; +} +EXPORT_SYMBOL_GPL(xhci_initialize_ring_info); + +/* Allocate segments and link them for a ring */ +static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, + struct xhci_segment **first, struct xhci_segment **last, + unsigned int num_segs, unsigned int cycle_state, + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) +{ + struct xhci_segment *prev; + bool chain_links; + + /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */ + chain_links = !!(xhci_link_trb_quirk(xhci) || + (type == TYPE_ISOC && + (xhci->quirks & XHCI_AMD_0x96_HOST))); + + prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); + if (!prev) + return -ENOMEM; + num_segs--; + + *first = prev; + while (num_segs > 0) { + struct xhci_segment *next; + + next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); + if (!next) { + prev = *first; + while (prev) { + next = prev->next; + xhci_segment_free(xhci, prev); + prev = next; + } + return -ENOMEM; + } + xhci_link_segments(prev, next, type, chain_links); + + prev = next; + num_segs--; + } + xhci_link_segments(prev, *first, type, chain_links); + *last = prev; + + return 0; +} + +static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->free_container_ctx) + ops->free_container_ctx(xhci, ctx); +} + +static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + int type, gfp_t flags) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->alloc_container_ctx) + ops->alloc_container_ctx(xhci, ctx, type, flags); +} + +static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci, + u32 endpoint_type, enum xhci_ring_type ring_type, + unsigned int max_packet, gfp_t mem_flags) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->alloc_transfer_ring) + return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type, + max_packet, mem_flags); + return 0; +} + +void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, unsigned int ep_index) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->free_transfer_ring) + ops->free_transfer_ring(xhci, virt_dev, ep_index); +} + +bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, unsigned int ep_index) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->is_usb_offload_enabled) + return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index); + return false; +} + +/* + * Create a new ring with zero or more segments. + * + * Link each segment together into a ring. + * Set the end flag and the cycle toggle bit on the last segment. + * See section 4.9.1 and figures 15 and 16. 
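+ * Event rings are the exception: they carry no link TRB, because the
+ * controller walks their segments through the Event Ring Segment Table
+ * rather than by following link TRBs.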
+ */ +struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, + unsigned int num_segs, unsigned int cycle_state, + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) +{ + struct xhci_ring *ring; + int ret; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev)); + if (!ring) + return NULL; + + ring->num_segs = num_segs; + ring->bounce_buf_len = max_packet; + INIT_LIST_HEAD(&ring->td_list); + ring->type = type; + if (num_segs == 0) + return ring; + + ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, + &ring->last_seg, num_segs, cycle_state, type, + max_packet, flags); + if (ret) + goto fail; + + /* Only event ring does not use link TRB */ + if (type != TYPE_EVENT) { + /* See section 4.9.2.1 and 6.4.4.1 */ + ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= + cpu_to_le32(LINK_TOGGLE); + } + xhci_initialize_ring_info(ring, cycle_state); + trace_xhci_ring_alloc(ring); + return ring; + +fail: + kfree(ring); + return NULL; +} +EXPORT_SYMBOL_GPL(xhci_ring_alloc); + +void xhci_free_endpoint_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + unsigned int ep_index) +{ + if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index)) + xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index); + else + xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); + + virt_dev->eps[ep_index].ring = NULL; +} + +/* + * Expand an existing ring. + * Allocate a new ring which has same segment numbers and link the two rings. + */ +int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, + unsigned int num_trbs, gfp_t flags) +{ + struct xhci_segment *first; + struct xhci_segment *last; + unsigned int num_segs; + unsigned int num_segs_needed; + int ret; + + num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / + (TRBS_PER_SEGMENT - 1); + + /* Allocate number of segments we needed, or double the ring size */ + num_segs = ring->num_segs > num_segs_needed ? + ring->num_segs : num_segs_needed; + + ret = xhci_alloc_segments_for_ring(xhci, &first, &last, + num_segs, ring->cycle_state, ring->type, + ring->bounce_buf_len, flags); + if (ret) + return -ENOMEM; + + if (ring->type == TYPE_STREAM) + ret = xhci_update_stream_segment_mapping(ring->trb_address_map, + ring, first, last, flags); + if (ret) { + struct xhci_segment *next; + do { + next = first->next; + xhci_segment_free(xhci, first); + if (first == last) + break; + first = next; + } while (true); + return ret; + } + + xhci_link_rings(xhci, ring, first, last, num_segs); + trace_xhci_ring_expansion(ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, + "ring expansion succeed, now has %d segments", + ring->num_segs); + + return 0; +} + +struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, + int type, gfp_t flags) +{ + struct xhci_container_ctx *ctx; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)) + return NULL; + + ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev)); + if (!ctx) + return NULL; + + ctx->type = type; + ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 
2048 : 1024; + if (type == XHCI_CTX_TYPE_INPUT) + ctx->size += CTX_SIZE(xhci->hcc_params); + + if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) && + (ops && ops->alloc_container_ctx)) + xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags); + else + ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma); + + if (!ctx->bytes) { + kfree(ctx); + return NULL; + } + return ctx; +} + +void xhci_free_container_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (!ctx) + return; + if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) && + (ops && ops->free_container_ctx)) + xhci_vendor_free_container_ctx(xhci, ctx); + else + dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); + + kfree(ctx); +} + +struct xhci_input_control_ctx *xhci_get_input_control_ctx( + struct xhci_container_ctx *ctx) +{ + if (ctx->type != XHCI_CTX_TYPE_INPUT) + return NULL; + + return (struct xhci_input_control_ctx *)ctx->bytes; +} + +struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + if (ctx->type == XHCI_CTX_TYPE_DEVICE) + return (struct xhci_slot_ctx *)ctx->bytes; + + return (struct xhci_slot_ctx *) + (ctx->bytes + CTX_SIZE(xhci->hcc_params)); +} +EXPORT_SYMBOL_GPL(xhci_get_slot_ctx); + +struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx, + unsigned int ep_index) +{ + /* increment ep index by offset of start of ep ctx array */ + ep_index++; + if (ctx->type == XHCI_CTX_TYPE_INPUT) + ep_index++; + + return (struct xhci_ep_ctx *) + (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); +} +EXPORT_SYMBOL_GPL(xhci_get_ep_ctx); + + +/***************** Streams structures manipulation *************************/ + +static void xhci_free_stream_ctx(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, + struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) +{ + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs; + + if (size > MEDIUM_STREAM_ARRAY_SIZE) + dma_free_coherent(dev, size, + stream_ctx, dma); + else if (size <= SMALL_STREAM_ARRAY_SIZE) + return dma_pool_free(xhci->small_streams_pool, + stream_ctx, dma); + else + return dma_pool_free(xhci->medium_streams_pool, + stream_ctx, dma); +} + +/* + * The stream context array for each endpoint with bulk streams enabled can + * vary in size, based on: + * - how many streams the endpoint supports, + * - the maximum primary stream array size the host controller supports, + * - and how many streams the device driver asks for. + * + * The stream context array must be a power of 2, and can be as small as + * 64 bytes or as large as 1MB. 
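+ * Small arrays are carved out of a dedicated dma_pool, medium-sized
+ * arrays out of a second pool, and anything larger than the medium
+ * pool's block size falls back to dma_alloc_coherent(), mirroring the
+ * free path above.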
+ */ +static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, dma_addr_t *dma, + gfp_t mem_flags) +{ + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs; + + if (size > MEDIUM_STREAM_ARRAY_SIZE) + return dma_alloc_coherent(dev, size, + dma, mem_flags); + else if (size <= SMALL_STREAM_ARRAY_SIZE) + return dma_pool_alloc(xhci->small_streams_pool, + mem_flags, dma); + else + return dma_pool_alloc(xhci->medium_streams_pool, + mem_flags, dma); +} + +struct xhci_ring *xhci_dma_to_transfer_ring( + struct xhci_virt_ep *ep, + u64 address) +{ + if (ep->ep_state & EP_HAS_STREAMS) + return radix_tree_lookup(&ep->stream_info->trb_address_map, + address >> TRB_SEGMENT_SHIFT); + return ep->ring; +} + +/* + * Change an endpoint's internal structure so it supports stream IDs. The + * number of requested streams includes stream 0, which cannot be used by device + * drivers. + * + * The number of stream contexts in the stream context array may be bigger than + * the number of streams the driver wants to use. This is because the number of + * stream context array entries must be a power of two. + */ +struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, + unsigned int num_streams, + unsigned int max_packet, gfp_t mem_flags) +{ + struct xhci_stream_info *stream_info; + u32 cur_stream; + struct xhci_ring *cur_ring; + u64 addr; + int ret; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + xhci_dbg(xhci, "Allocating %u streams and %u " + "stream context array entries.\n", + num_streams, num_stream_ctxs); + if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { + xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); + return NULL; + } + xhci->cmd_ring_reserved_trbs++; + + stream_info = kzalloc_node(sizeof(*stream_info), mem_flags, + dev_to_node(dev)); + if (!stream_info) + goto cleanup_trbs; + + stream_info->num_streams = num_streams; + stream_info->num_stream_ctxs = num_stream_ctxs; + + /* Initialize the array of virtual pointers to stream rings. */ + stream_info->stream_rings = kcalloc_node( + num_streams, sizeof(struct xhci_ring *), mem_flags, + dev_to_node(dev)); + if (!stream_info->stream_rings) + goto cleanup_info; + + /* Initialize the array of DMA addresses for stream rings for the HW. */ + stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, + num_stream_ctxs, &stream_info->ctx_array_dma, + mem_flags); + if (!stream_info->stream_ctx_array) + goto cleanup_ctx; + memset(stream_info->stream_ctx_array, 0, + sizeof(struct xhci_stream_ctx)*num_stream_ctxs); + + /* Allocate everything needed to free the stream rings later */ + stream_info->free_streams_command = + xhci_alloc_command_with_ctx(xhci, true, mem_flags); + if (!stream_info->free_streams_command) + goto cleanup_ctx; + + INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); + + /* Allocate rings for all the streams that the driver will use, + * and add their segment DMA addresses to the radix tree. + * Stream 0 is reserved. 
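+ * Each stream context entry packs the ring's first-segment DMA address
+ * together with the stream context type (SCT_PRI_TR) and the ring's
+ * cycle state; the radix tree lets the event handler map a TRB address
+ * from a completion event back to the stream ring it was queued on.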
+ */ + + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + stream_info->stream_rings[cur_stream] = + xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet, + mem_flags); + cur_ring = stream_info->stream_rings[cur_stream]; + if (!cur_ring) + goto cleanup_rings; + cur_ring->stream_id = cur_stream; + cur_ring->trb_address_map = &stream_info->trb_address_map; + /* Set deq ptr, cycle bit, and stream context type */ + addr = cur_ring->first_seg->dma | + SCT_FOR_CTX(SCT_PRI_TR) | + cur_ring->cycle_state; + stream_info->stream_ctx_array[cur_stream].stream_ring = + cpu_to_le64(addr); + xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", + cur_stream, (unsigned long long) addr); + + ret = xhci_update_stream_mapping(cur_ring, mem_flags); + if (ret) { + xhci_ring_free(xhci, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + goto cleanup_rings; + } + } + /* Leave the other unused stream ring pointers in the stream context + * array initialized to zero. This will cause the xHC to give us an + * error if the device asks for a stream ID we don't have setup (if it + * was any other way, the host controller would assume the ring is + * "empty" and wait forever for data to be queued to that stream ID). + */ + + return stream_info; + +cleanup_rings: + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + xhci_ring_free(xhci, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + xhci_free_command(xhci, stream_info->free_streams_command); +cleanup_ctx: + kfree(stream_info->stream_rings); +cleanup_info: + kfree(stream_info); +cleanup_trbs: + xhci->cmd_ring_reserved_trbs--; + return NULL; +} +/* + * Sets the MaxPStreams field and the Linear Stream Array field. + * Sets the dequeue pointer to the stream context array. + */ +void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + struct xhci_stream_info *stream_info) +{ + u32 max_primary_streams; + /* MaxPStreams is the number of stream context array entries, not the + * number we're actually using. Must be in 2^(MaxPstreams + 1) format. + * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. + */ + max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Setting number of stream ctx array entries to %u", + 1 << (max_primary_streams + 1)); + ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); + ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) + | EP_HAS_LSA); + ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); +} + +/* + * Sets the MaxPStreams field and the Linear Stream Array field to 0. + * Reinstalls the "normal" endpoint ring (at its previous dequeue mark, + * not at the beginning of the ring). + */ +void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx, + struct xhci_virt_ep *ep) +{ + dma_addr_t addr; + ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); + addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); + ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); +} + +/* Frees all stream contexts associated with the endpoint, + * + * Caller should fix the endpoint context streams fields. 
+ */ +void xhci_free_stream_info(struct xhci_hcd *xhci, + struct xhci_stream_info *stream_info) +{ + int cur_stream; + struct xhci_ring *cur_ring; + + if (!stream_info) + return; + + for (cur_stream = 1; cur_stream < stream_info->num_streams; + cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + xhci_ring_free(xhci, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + xhci_free_command(xhci, stream_info->free_streams_command); + xhci->cmd_ring_reserved_trbs--; + if (stream_info->stream_ctx_array) + xhci_free_stream_ctx(xhci, + stream_info->num_stream_ctxs, + stream_info->stream_ctx_array, + stream_info->ctx_array_dma); + + kfree(stream_info->stream_rings); + kfree(stream_info); +} + + +/***************** Device context manipulation *************************/ + +static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep) +{ + timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog, + 0); + ep->xhci = xhci; +} + +static void xhci_free_tt_info(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + int slot_id) +{ + struct list_head *tt_list_head; + struct xhci_tt_bw_info *tt_info, *next; + bool slot_found = false; + + /* If the device never made it past the Set Address stage, + * it may not have the real_port set correctly. + */ + if (virt_dev->real_port == 0 || + virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { + xhci_dbg(xhci, "Bad real port.\n"); + return; + } + + tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); + list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { + /* Multi-TT hubs will have more than one entry */ + if (tt_info->slot_id == slot_id) { + slot_found = true; + list_del(&tt_info->tt_list); + kfree(tt_info); + } else if (slot_found) { + break; + } + } +} + +int xhci_alloc_tt_info(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags) +{ + struct xhci_tt_bw_info *tt_info; + unsigned int num_ports; + int i, j; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + if (!tt->multi) + num_ports = 1; + else + num_ports = hdev->maxchild; + + for (i = 0; i < num_ports; i++, tt_info++) { + struct xhci_interval_bw_table *bw_table; + + tt_info = kzalloc_node(sizeof(*tt_info), mem_flags, + dev_to_node(dev)); + if (!tt_info) + goto free_tts; + INIT_LIST_HEAD(&tt_info->tt_list); + list_add(&tt_info->tt_list, + &xhci->rh_bw[virt_dev->real_port - 1].tts); + tt_info->slot_id = virt_dev->udev->slot_id; + if (tt->multi) + tt_info->ttport = i+1; + bw_table = &tt_info->bw_table; + for (j = 0; j < XHCI_MAX_INTERVAL; j++) + INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); + } + return 0; + +free_tts: + xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id); + return -ENOMEM; +} + + +/* All the xhci_tds in the ring's TD list should be freed at this point. + * Should be called with xhci->lock held if there is any chance the TT lists + * will be manipulated by the configure endpoint, allocate device, or update + * hub functions while this function is removing the TT entries from the list. 
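+ * Besides freeing the rings, stream info and contexts, this also clears
+ * the slot's DCBAA entry so the controller no longer holds a pointer to
+ * the output context being released.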
+ */ +void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) +{ + struct xhci_virt_device *dev; + int i; + int old_active_eps = 0; + + /* Slot ID 0 is reserved */ + if (slot_id == 0 || !xhci->devs[slot_id]) + return; + + dev = xhci->devs[slot_id]; + + xhci->dcbaa->dev_context_ptrs[slot_id] = 0; + if (!dev) + return; + + trace_xhci_free_virt_device(dev); + + if (dev->tt_info) + old_active_eps = dev->tt_info->active_eps; + + for (i = 0; i < 31; i++) { + if (dev->eps[i].ring) + xhci_free_endpoint_ring(xhci, dev, i); + if (dev->eps[i].stream_info) + xhci_free_stream_info(xhci, + dev->eps[i].stream_info); + /* Endpoints on the TT/root port lists should have been removed + * when usb_disable_device() was called for the device. + * We can't drop them anyway, because the udev might have gone + * away by this point, and we can't tell what speed it was. + */ + if (!list_empty(&dev->eps[i].bw_endpoint_list)) + xhci_warn(xhci, "Slot %u endpoint %u " + "not removed from BW list!\n", + slot_id, i); + } + /* If this is a hub, free the TT(s) from the TT list */ + xhci_free_tt_info(xhci, dev, slot_id); + /* If necessary, update the number of active TTs on this root port */ + xhci_update_tt_active_eps(xhci, dev, old_active_eps); + + if (dev->in_ctx) + xhci_free_container_ctx(xhci, dev->in_ctx); + if (dev->out_ctx) + xhci_free_container_ctx(xhci, dev->out_ctx); + + if (dev->udev && dev->udev->slot_id) + dev->udev->slot_id = 0; + kfree(xhci->devs[slot_id]); + xhci->devs[slot_id] = NULL; +} + +/* + * Free a virt_device structure. + * If the virt_device added a tt_info (a hub) and has children pointing to + * that tt_info, then free the child first. Recursive. + * We can't rely on udev at this point to find child-parent relationships. + */ +static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) +{ + struct xhci_virt_device *vdev; + struct list_head *tt_list_head; + struct xhci_tt_bw_info *tt_info, *next; + int i; + + vdev = xhci->devs[slot_id]; + if (!vdev) + return; + + if (vdev->real_port == 0 || + vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { + xhci_dbg(xhci, "Bad vdev->real_port.\n"); + goto out; + } + + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); + list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { + /* is this a hub device that added a tt_info to the tts list */ + if (tt_info->slot_id == slot_id) { + /* are any devices using this tt_info? */ + for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) { + vdev = xhci->devs[i]; + if (vdev && (vdev->tt_info == tt_info)) + xhci_free_virt_devices_depth_first( + xhci, i); + } + } + } +out: + /* we are now at a leaf device */ + xhci_debugfs_remove_slot(xhci, slot_id); + xhci_free_virt_device(xhci, slot_id); +} + +int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, + struct usb_device *udev, gfp_t flags) +{ + struct xhci_virt_device *dev; + int i; + + /* Slot ID 0 is reserved */ + if (slot_id == 0 || xhci->devs[slot_id]) { + xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); + return 0; + } + + dev = kzalloc(sizeof(*dev), flags); + if (!dev) + return 0; + + dev->slot_id = slot_id; + + /* Allocate the (output) device context that will be used in the HC. 
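+ * The output context is owned and updated by the controller itself;
+ * the input context allocated just below is what the driver fills in
+ * when issuing Address Device and Configure Endpoint commands.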
*/ + dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); + if (!dev->out_ctx) + goto fail; + + xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, + (unsigned long long)dev->out_ctx->dma); + + /* Allocate the (input) device context for address device command */ + dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); + if (!dev->in_ctx) + goto fail; + + xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, + (unsigned long long)dev->in_ctx->dma); + + /* Initialize the cancellation list and watchdog timers for each ep */ + for (i = 0; i < 31; i++) { + dev->eps[i].ep_index = i; + dev->eps[i].vdev = dev; + xhci_init_endpoint_timer(xhci, &dev->eps[i]); + INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); + INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); + } + + /* Allocate endpoint 0 ring */ + dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags); + if (!dev->eps[0].ring) + goto fail; + + dev->udev = udev; + + /* Point to output device context in dcbaa. */ + xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); + xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", + slot_id, + &xhci->dcbaa->dev_context_ptrs[slot_id], + le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); + + trace_xhci_alloc_virt_device(dev); + + xhci->devs[slot_id] = dev; + + return 1; +fail: + + if (dev->in_ctx) + xhci_free_container_ctx(xhci, dev->in_ctx); + if (dev->out_ctx) + xhci_free_container_ctx(xhci, dev->out_ctx); + kfree(dev); + + return 0; +} + +void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, + struct usb_device *udev) +{ + struct xhci_virt_device *virt_dev; + struct xhci_ep_ctx *ep0_ctx; + struct xhci_ring *ep_ring; + + virt_dev = xhci->devs[udev->slot_id]; + ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); + ep_ring = virt_dev->eps[0].ring; + /* + * FIXME we don't keep track of the dequeue pointer very well after a + * Set TR dequeue pointer, so we're setting the dequeue pointer of the + * host to our enqueue pointer. This should only be called after a + * configured device has reset, so all control transfers should have + * been completed or cancelled before the reset. + */ + ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, + ep_ring->enqueue) + | ep_ring->cycle_state); +} + +/* + * The xHCI roothub may have ports of differing speeds in any order in the port + * status registers. + * + * The xHCI hardware wants to know the roothub port number that the USB device + * is attached to (or the roothub port its ancestor hub is attached to). All we + * know is the index of that port under either the USB 2.0 or the USB 3.0 + * roothub, but that doesn't give us the real index into the HW port status + * registers. Call xhci_find_raw_port_number() to get real index. 
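+ * For example, on a controller that lists its USB3 ports first in the
+ * PORTSC register array, port 1 of the USB2 roothub may sit at a much
+ * higher raw register index, and that raw index is what the slot
+ * context needs.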
+ */ +static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, + struct usb_device *udev) +{ + struct usb_device *top_dev; + struct usb_hcd *hcd; + + if (udev->speed >= USB_SPEED_SUPER) + hcd = xhci->shared_hcd; + else + hcd = xhci->main_hcd; + + for (top_dev = udev; top_dev->parent && top_dev->parent->parent; + top_dev = top_dev->parent) + /* Found device below root hub */; + + return xhci_find_raw_port_number(hcd, top_dev->portnum); +} + +/* Setup an xHCI virtual device for a Set Address command */ +int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) +{ + struct xhci_virt_device *dev; + struct xhci_ep_ctx *ep0_ctx; + struct xhci_slot_ctx *slot_ctx; + u32 port_num; + u32 max_packets; + struct usb_device *top_dev; + + dev = xhci->devs[udev->slot_id]; + /* Slot ID 0 is reserved */ + if (udev->slot_id == 0 || !dev) { + xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", + udev->slot_id); + return -EINVAL; + } + ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); + slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); + + /* 3) Only the control endpoint is valid - one endpoint context */ + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); + switch (udev->speed) { + case USB_SPEED_SUPER_PLUS: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP); + max_packets = MAX_PACKET(512); + break; + case USB_SPEED_SUPER: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); + max_packets = MAX_PACKET(512); + break; + case USB_SPEED_HIGH: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS); + max_packets = MAX_PACKET(64); + break; + /* USB core guesses at a 64-byte max packet first for FS devices */ + case USB_SPEED_FULL: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS); + max_packets = MAX_PACKET(64); + break; + case USB_SPEED_LOW: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS); + max_packets = MAX_PACKET(8); + break; + case USB_SPEED_WIRELESS: + xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); + return -EINVAL; + break; + default: + /* Speed was set earlier, this shouldn't happen. */ + return -EINVAL; + } + /* Find the root hub port this device is under */ + port_num = xhci_find_real_port_number(xhci, udev); + if (!port_num) + return -EINVAL; + slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num)); + /* Set the port number in the virtual_device to the faked port number */ + for (top_dev = udev; top_dev->parent && top_dev->parent->parent; + top_dev = top_dev->parent) + /* Found device below root hub */; + dev->fake_port = top_dev->portnum; + dev->real_port = port_num; + xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num); + xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port); + + /* Find the right bandwidth table that this device will be a part of. + * If this is a full speed device attached directly to a root port (or a + * decendent of one), it counts as a primary bandwidth domain, not a + * secondary bandwidth domain under a TT. An xhci_tt_info structure + * will never be created for the HS root hub. + */ + if (!udev->tt || !udev->tt->hub->parent) { + dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table; + } else { + struct xhci_root_port_bw_info *rh_bw; + struct xhci_tt_bw_info *tt_bw; + + rh_bw = &xhci->rh_bw[port_num - 1]; + /* Find the right TT. 
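+ * A multi-TT hub gets one xhci_tt_bw_info per port, so both the hub's
+ * slot ID and the downstream port must match; a single-TT hub shares
+ * one entry across all of its ports.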
*/ + list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) { + if (tt_bw->slot_id != udev->tt->hub->slot_id) + continue; + + if (!dev->udev->tt->multi || + (udev->tt->multi && + tt_bw->ttport == dev->udev->ttport)) { + dev->bw_table = &tt_bw->bw_table; + dev->tt_info = tt_bw; + break; + } + } + if (!dev->tt_info) + xhci_warn(xhci, "WARN: Didn't find a matching TT\n"); + } + + /* Is this a LS/FS device under an external HS hub? */ + if (udev->tt && udev->tt->hub->parent) { + slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id | + (udev->ttport << 8)); + if (udev->tt->multi) + slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); + } + xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); + xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); + + /* Step 4 - ring already allocated */ + /* Step 5 */ + ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); + + /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ + ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) | + max_packets); + + ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | + dev->eps[0].ring->cycle_state); + + trace_xhci_setup_addressable_virt_device(dev); + + /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ + + return 0; +} + +/* + * Convert interval expressed as 2^(bInterval - 1) == interval into + * straight exponent value 2^n == interval. + * + */ +static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; + if (interval != ep->desc.bInterval - 1) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d %sframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + udev->speed == USB_SPEED_FULL ? "" : "micro"); + + if (udev->speed == USB_SPEED_FULL) { + /* + * Full speed isoc endpoints specify interval in frames, + * not microframes. We are using microframes everywhere, + * so adjust accordingly. + */ + interval += 3; /* 1 frame = 2^3 uframes */ + } + + return interval; +} + +/* + * Convert bInterval expressed in microframes (in 1-255 range) to exponent of + * microframes, rounded down to nearest power of 2. + */ +static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, + struct usb_host_endpoint *ep, unsigned int desc_interval, + unsigned int min_exponent, unsigned int max_exponent) +{ + unsigned int interval; + + interval = fls(desc_interval) - 1; + interval = clamp_val(interval, min_exponent, max_exponent); + if ((1 << interval) != desc_interval) + dev_dbg(&udev->dev, + "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + desc_interval); + + return interval; +} + +static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + if (ep->desc.bInterval == 0) + return 0; + return xhci_microframes_to_exponent(udev, ep, + ep->desc.bInterval, 0, 15); +} + + +static unsigned int xhci_parse_frame_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + return xhci_microframes_to_exponent(udev, ep, + ep->desc.bInterval * 8, 3, 10); +} + +/* Return the polling or NAK interval. + * + * The polling interval is expressed in "microframes". If xHCI's Interval field + * is set to N, it will service the endpoint every 2^(Interval)*125us. + * + * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval + * is set to 0. 
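+ * For example, a high-speed interrupt endpoint with bInterval = 4 is
+ * serviced every 2^(4-1) = 8 microframes, so the Interval field becomes
+ * 3. A full-speed isoc endpoint expresses bInterval in frames, so a
+ * further +3 converts the exponent from frames to microframes.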
+ */ +static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval = 0; + + switch (udev->speed) { + case USB_SPEED_HIGH: + /* Max NAK rate */ + if (usb_endpoint_xfer_control(&ep->desc) || + usb_endpoint_xfer_bulk(&ep->desc)) { + interval = xhci_parse_microframe_interval(udev, ep); + break; + } + fallthrough; /* SS and HS isoc/int have same decoding */ + + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + if (usb_endpoint_xfer_int(&ep->desc) || + usb_endpoint_xfer_isoc(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); + } + break; + + case USB_SPEED_FULL: + if (usb_endpoint_xfer_isoc(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); + break; + } + /* + * Fall through for interrupt endpoint interval decoding + * since it uses the same rules as low speed interrupt + * endpoints. + */ + fallthrough; + + case USB_SPEED_LOW: + if (usb_endpoint_xfer_int(&ep->desc) || + usb_endpoint_xfer_isoc(&ep->desc)) { + + interval = xhci_parse_frame_interval(udev, ep); + } + break; + + default: + BUG(); + } + return interval; +} + +/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. + * High speed endpoint descriptors can define "the number of additional + * transaction opportunities per microframe", but that goes in the Max Burst + * endpoint context field. + */ +static u32 xhci_get_endpoint_mult(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + if (udev->speed < USB_SPEED_SUPER || + !usb_endpoint_xfer_isoc(&ep->desc)) + return 0; + return ep->ss_ep_comp.bmAttributes; +} + +static u32 xhci_get_endpoint_max_burst(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + /* Super speed and Plus have max burst in ep companion desc */ + if (udev->speed >= USB_SPEED_SUPER) + return ep->ss_ep_comp.bMaxBurst; + + if (udev->speed == USB_SPEED_HIGH && + (usb_endpoint_xfer_isoc(&ep->desc) || + usb_endpoint_xfer_int(&ep->desc))) + return usb_endpoint_maxp_mult(&ep->desc) - 1; + + return 0; +} + +static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep) +{ + int in; + + in = usb_endpoint_dir_in(&ep->desc); + + switch (usb_endpoint_type(&ep->desc)) { + case USB_ENDPOINT_XFER_CONTROL: + return CTRL_EP; + case USB_ENDPOINT_XFER_BULK: + return in ? BULK_IN_EP : BULK_OUT_EP; + case USB_ENDPOINT_XFER_ISOC: + return in ? ISOC_IN_EP : ISOC_OUT_EP; + case USB_ENDPOINT_XFER_INT: + return in ? INT_IN_EP : INT_OUT_EP; + } + return 0; +} + +/* Return the maximum endpoint service interval time (ESIT) payload. + * Basically, this is the maxpacket size, multiplied by the burst size + * and mult size. 
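+ * For example, a high-speed isoc endpoint advertising a 1024-byte max
+ * packet with two additional transaction opportunities per microframe
+ * yields 1024 * 3 = 3072 bytes per ESIT, while SuperSpeed endpoints
+ * simply report the value in their endpoint companion descriptor.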
+ */ +static u32 xhci_get_max_esit_payload(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + int max_burst; + int max_packet; + + /* Only applies for interrupt or isochronous endpoints */ + if (usb_endpoint_xfer_control(&ep->desc) || + usb_endpoint_xfer_bulk(&ep->desc)) + return 0; + + /* SuperSpeedPlus Isoc ep sending over 48k per esit */ + if ((udev->speed >= USB_SPEED_SUPER_PLUS) && + USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) + return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval); + /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */ + else if (udev->speed >= USB_SPEED_SUPER) + return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); + + max_packet = usb_endpoint_maxp(&ep->desc); + max_burst = usb_endpoint_maxp_mult(&ep->desc); + /* A 0 in max burst means 1 transfer per ESIT */ + return max_packet * max_burst; +} + +/* Set up an endpoint with one ring segment. Do not allocate stream rings. + * Drivers will have to call usb_alloc_streams() to do that. + */ +int xhci_endpoint_init(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + struct usb_device *udev, + struct usb_host_endpoint *ep, + gfp_t mem_flags) +{ + unsigned int ep_index; + struct xhci_ep_ctx *ep_ctx; + struct xhci_ring *ep_ring; + unsigned int max_packet; + enum xhci_ring_type ring_type; + u32 max_esit_payload; + u32 endpoint_type; + unsigned int max_burst; + unsigned int interval; + unsigned int mult; + unsigned int avg_trb_len; + unsigned int err_count = 0; + + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + + endpoint_type = xhci_get_endpoint_type(ep); + if (!endpoint_type) + return -EINVAL; + + ring_type = usb_endpoint_type(&ep->desc); + + /* + * Get values to fill the endpoint context, mostly from ep descriptor. + * The average TRB buffer lengt for bulk endpoints is unclear as we + * have no clue on scatter gather list entry size. For Isoc and Int, + * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details. 
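+ * The Average TRB Length written below is a scheduling hint: the
+ * controller uses it to estimate TRB fetch and bandwidth needs, so an
+ * approximate value costs efficiency rather than correctness.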
+ */ + max_esit_payload = xhci_get_max_esit_payload(udev, ep); + interval = xhci_get_endpoint_interval(udev, ep); + + /* Periodic endpoint bInterval limit quirk */ + if (usb_endpoint_xfer_int(&ep->desc) || + usb_endpoint_xfer_isoc(&ep->desc)) { + if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && + udev->speed >= USB_SPEED_HIGH && + interval >= 7) { + interval = 6; + } + } + + mult = xhci_get_endpoint_mult(udev, ep); + max_packet = usb_endpoint_maxp(&ep->desc); + max_burst = xhci_get_endpoint_max_burst(udev, ep); + avg_trb_len = max_esit_payload; + + /* FIXME dig Mult and streams info out of ep companion desc */ + + /* Allow 3 retries for everything but isoc, set CErr = 3 */ + if (!usb_endpoint_xfer_isoc(&ep->desc)) + err_count = 3; + /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */ + if (usb_endpoint_xfer_bulk(&ep->desc)) { + if (udev->speed == USB_SPEED_HIGH) + max_packet = 512; + if (udev->speed == USB_SPEED_FULL) { + max_packet = rounddown_pow_of_two(max_packet); + max_packet = clamp_val(max_packet, 8, 64); + } + } + /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */ + if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) + avg_trb_len = 8; + /* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */ + if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2)) + mult = 0; + + /* Set up the endpoint ring */ + if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) && + usb_endpoint_xfer_isoc(&ep->desc)) { + virt_dev->eps[ep_index].new_ring = + xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type, + max_packet, mem_flags); + } else { + virt_dev->eps[ep_index].new_ring = + xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags); + } + + if (!virt_dev->eps[ep_index].new_ring) + return -ENOMEM; + + virt_dev->eps[ep_index].skip = false; + ep_ring = virt_dev->eps[ep_index].new_ring; + + /* Fill the endpoint context */ + ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) | + EP_INTERVAL(interval) | + EP_MULT(mult)); + ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) | + MAX_PACKET(max_packet) | + MAX_BURST(max_burst) | + ERROR_COUNT(err_count)); + ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | + ep_ring->cycle_state); + + ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) | + EP_AVG_TRB_LENGTH(avg_trb_len)); + + return 0; +} + +void xhci_endpoint_zero(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + struct usb_host_endpoint *ep) +{ + unsigned int ep_index; + struct xhci_ep_ctx *ep_ctx; + + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + + ep_ctx->ep_info = 0; + ep_ctx->ep_info2 = 0; + ep_ctx->deq = 0; + ep_ctx->tx_info = 0; + /* Don't free the endpoint ring until the set interface or configuration + * request succeeds. 
+ */ +} + +void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info) +{ + bw_info->ep_interval = 0; + bw_info->mult = 0; + bw_info->num_packets = 0; + bw_info->max_packet_size = 0; + bw_info->type = 0; + bw_info->max_esit_payload = 0; +} + +void xhci_update_bw_info(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_input_control_ctx *ctrl_ctx, + struct xhci_virt_device *virt_dev) +{ + struct xhci_bw_info *bw_info; + struct xhci_ep_ctx *ep_ctx; + unsigned int ep_type; + int i; + + for (i = 1; i < 31; i++) { + bw_info = &virt_dev->eps[i].bw_info; + + /* We can't tell what endpoint type is being dropped, but + * unconditionally clearing the bandwidth info for non-periodic + * endpoints should be harmless because the info will never be + * set in the first place. + */ + if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) { + /* Dropped endpoint */ + xhci_clear_endpoint_bw_info(bw_info); + continue; + } + + if (EP_IS_ADDED(ctrl_ctx, i)) { + ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i); + ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); + + /* Ignore non-periodic endpoints */ + if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && + ep_type != ISOC_IN_EP && + ep_type != INT_IN_EP) + continue; + + /* Added or changed endpoint */ + bw_info->ep_interval = CTX_TO_EP_INTERVAL( + le32_to_cpu(ep_ctx->ep_info)); + /* Number of packets and mult are zero-based in the + * input context, but we want one-based for the + * interval table. + */ + bw_info->mult = CTX_TO_EP_MULT( + le32_to_cpu(ep_ctx->ep_info)) + 1; + bw_info->num_packets = CTX_TO_MAX_BURST( + le32_to_cpu(ep_ctx->ep_info2)) + 1; + bw_info->max_packet_size = MAX_PACKET_DECODED( + le32_to_cpu(ep_ctx->ep_info2)); + bw_info->type = ep_type; + bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD( + le32_to_cpu(ep_ctx->tx_info)); + } + } +} + +/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. + * Useful when you want to change one particular aspect of the endpoint and then + * issue a configure endpoint command. + */ +void xhci_endpoint_copy(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_container_ctx *out_ctx, + unsigned int ep_index) +{ + struct xhci_ep_ctx *out_ep_ctx; + struct xhci_ep_ctx *in_ep_ctx; + + out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); + in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); + + in_ep_ctx->ep_info = out_ep_ctx->ep_info; + in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; + in_ep_ctx->deq = out_ep_ctx->deq; + in_ep_ctx->tx_info = out_ep_ctx->tx_info; + if (xhci->quirks & XHCI_MTK_HOST) { + in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0]; + in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1]; + } +} + +/* Copy output xhci_slot_ctx to the input xhci_slot_ctx. + * Useful when you want to change one particular aspect of the endpoint and then + * issue a configure endpoint command. Only the context entries field matters, + * but we'll copy the whole thing anyway. + */ +void xhci_slot_copy(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_container_ctx *out_ctx) +{ + struct xhci_slot_ctx *in_slot_ctx; + struct xhci_slot_ctx *out_slot_ctx; + + in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); + out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); + + in_slot_ctx->dev_info = out_slot_ctx->dev_info; + in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; + in_slot_ctx->tt_info = out_slot_ctx->tt_info; + in_slot_ctx->dev_state = out_slot_ctx->dev_state; +} + +/* Set up the scratchpad buffer array and scratchpad buffers, if needed. 
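+ * The required buffer count comes from HCSPARAMS2, and the DMA address
+ * of the scratchpad pointer array is handed to the controller through
+ * DCBAA entry 0, which is reserved for this purpose.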
*/ +static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) +{ + int i; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Allocating %d scratchpad buffers", num_sp); + + if (!num_sp) + return 0; + + xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags, + dev_to_node(dev)); + if (!xhci->scratchpad) + goto fail_sp; + + xhci->scratchpad->sp_array = dma_alloc_coherent(dev, + num_sp * sizeof(u64), + &xhci->scratchpad->sp_dma, flags); + if (!xhci->scratchpad->sp_array) + goto fail_sp2; + + xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *), + flags, dev_to_node(dev)); + if (!xhci->scratchpad->sp_buffers) + goto fail_sp3; + + xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); + for (i = 0; i < num_sp; i++) { + dma_addr_t dma; + void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, + flags); + if (!buf) + goto fail_sp4; + + xhci->scratchpad->sp_array[i] = dma; + xhci->scratchpad->sp_buffers[i] = buf; + } + + return 0; + + fail_sp4: + for (i = i - 1; i >= 0; i--) { + dma_free_coherent(dev, xhci->page_size, + xhci->scratchpad->sp_buffers[i], + xhci->scratchpad->sp_array[i]); + } + + kfree(xhci->scratchpad->sp_buffers); + + fail_sp3: + dma_free_coherent(dev, num_sp * sizeof(u64), + xhci->scratchpad->sp_array, + xhci->scratchpad->sp_dma); + + fail_sp2: + kfree(xhci->scratchpad); + xhci->scratchpad = NULL; + + fail_sp: + return -ENOMEM; +} + +static void scratchpad_free(struct xhci_hcd *xhci) +{ + int num_sp; + int i; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + if (!xhci->scratchpad) + return; + + num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); + + for (i = 0; i < num_sp; i++) { + dma_free_coherent(dev, xhci->page_size, + xhci->scratchpad->sp_buffers[i], + xhci->scratchpad->sp_array[i]); + } + kfree(xhci->scratchpad->sp_buffers); + dma_free_coherent(dev, num_sp * sizeof(u64), + xhci->scratchpad->sp_array, + xhci->scratchpad->sp_dma); + kfree(xhci->scratchpad); + xhci->scratchpad = NULL; +} + +struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, + bool allocate_completion, gfp_t mem_flags) +{ + struct xhci_command *command; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev)); + if (!command) + return NULL; + + if (allocate_completion) { + command->completion = + kzalloc_node(sizeof(struct completion), mem_flags, + dev_to_node(dev)); + if (!command->completion) { + kfree(command); + return NULL; + } + init_completion(command->completion); + } + + command->status = 0; + INIT_LIST_HEAD(&command->cmd_list); + return command; +} +EXPORT_SYMBOL_GPL(xhci_alloc_command); + +struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, + bool allocate_completion, gfp_t mem_flags) +{ + struct xhci_command *command; + + command = xhci_alloc_command(xhci, allocate_completion, mem_flags); + if (!command) + return NULL; + + command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, + mem_flags); + if (!command->in_ctx) { + kfree(command->completion); + kfree(command); + return NULL; + } + return command; +} + +void xhci_urb_free_priv(struct urb_priv *urb_priv) +{ + kfree(urb_priv); +} + +void xhci_free_command(struct xhci_hcd *xhci, + struct xhci_command *command) +{ + xhci_free_container_ctx(xhci, + command->in_ctx); + kfree(command->completion); + kfree(command); +} +EXPORT_SYMBOL_GPL(xhci_free_command); + +int 
xhci_alloc_erst(struct xhci_hcd *xhci, + struct xhci_ring *evt_ring, + struct xhci_erst *erst, + gfp_t flags) +{ + size_t size; + unsigned int val; + struct xhci_segment *seg; + struct xhci_erst_entry *entry; + + size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; + erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, + size, &erst->erst_dma_addr, flags); + if (!erst->entries) + return -ENOMEM; + + erst->num_entries = evt_ring->num_segs; + + seg = evt_ring->first_seg; + for (val = 0; val < evt_ring->num_segs; val++) { + entry = &erst->entries[val]; + entry->seg_addr = cpu_to_le64(seg->dma); + entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); + entry->rsvd = 0; + seg = seg->next; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_alloc_erst); + +void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) +{ + size_t size; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + size = sizeof(struct xhci_erst_entry) * (erst->num_entries); + if (erst->entries) + dma_free_coherent(dev, size, + erst->entries, + erst->erst_dma_addr); + erst->entries = NULL; +} +EXPORT_SYMBOL_GPL(xhci_free_erst); + +static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa( + struct xhci_hcd *xhci, gfp_t flags) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->alloc_dcbaa) + return ops->alloc_dcbaa(xhci, flags); + return 0; +} + +static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->free_dcbaa) + ops->free_dcbaa(xhci); +} + +void xhci_mem_cleanup(struct xhci_hcd *xhci) +{ + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + int i, j, num_ports; + + cancel_delayed_work_sync(&xhci->cmd_timer); + + xhci_free_erst(xhci, &xhci->erst); + + if (xhci->event_ring) + xhci_ring_free(xhci, xhci->event_ring); + xhci->event_ring = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring"); + + if (xhci->lpm_command) + xhci_free_command(xhci, xhci->lpm_command); + xhci->lpm_command = NULL; + if (xhci->cmd_ring) + xhci_ring_free(xhci, xhci->cmd_ring); + xhci->cmd_ring = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring"); + xhci_cleanup_command_queue(xhci); + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + for (i = 0; i < num_ports && xhci->rh_bw; i++) { + struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; + for (j = 0; j < XHCI_MAX_INTERVAL; j++) { + struct list_head *ep = &bwt->interval_bw[j].endpoints; + while (!list_empty(ep)) + list_del_init(ep->next); + } + } + + for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--) + xhci_free_virt_devices_depth_first(xhci, i); + + dma_pool_destroy(xhci->segment_pool); + xhci->segment_pool = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool"); + + dma_pool_destroy(xhci->device_pool); + xhci->device_pool = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool"); + + dma_pool_destroy(xhci->small_streams_pool); + xhci->small_streams_pool = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Freed small stream array pool"); + + dma_pool_destroy(xhci->medium_streams_pool); + xhci->medium_streams_pool = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Freed medium stream array pool"); + + if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) { + xhci_vendor_free_dcbaa(xhci); + } else { + if (xhci->dcbaa) + dma_free_coherent(dev, sizeof(*xhci->dcbaa), + xhci->dcbaa, xhci->dcbaa->dma); + } + xhci->dcbaa = NULL; + + scratchpad_free(xhci); + + if 
(!xhci->rh_bw) + goto no_bw; + + for (i = 0; i < num_ports; i++) { + struct xhci_tt_bw_info *tt, *n; + list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { + list_del(&tt->tt_list); + kfree(tt); + } + } + +no_bw: + xhci->cmd_ring_reserved_trbs = 0; + xhci->usb2_rhub.num_ports = 0; + xhci->usb3_rhub.num_ports = 0; + xhci->num_active_eps = 0; + kfree(xhci->usb2_rhub.ports); + kfree(xhci->usb3_rhub.ports); + kfree(xhci->hw_ports); + kfree(xhci->rh_bw); + kfree(xhci->ext_caps); + for (i = 0; i < xhci->num_port_caps; i++) + kfree(xhci->port_caps[i].psi); + kfree(xhci->port_caps); + xhci->num_port_caps = 0; + + xhci->usb2_rhub.ports = NULL; + xhci->usb3_rhub.ports = NULL; + xhci->hw_ports = NULL; + xhci->rh_bw = NULL; + xhci->ext_caps = NULL; + xhci->port_caps = NULL; + + xhci->page_size = 0; + xhci->page_shift = 0; + xhci->usb2_rhub.bus_state.bus_suspended = 0; + xhci->usb3_rhub.bus_state.bus_suspended = 0; +} + +static int xhci_test_trb_in_td(struct xhci_hcd *xhci, + struct xhci_segment *input_seg, + union xhci_trb *start_trb, + union xhci_trb *end_trb, + dma_addr_t input_dma, + struct xhci_segment *result_seg, + char *test_name, int test_number) +{ + unsigned long long start_dma; + unsigned long long end_dma; + struct xhci_segment *seg; + + start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); + end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); + + seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false); + if (seg != result_seg) { + xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", + test_name, test_number); + xhci_warn(xhci, "Tested TRB math w/ seg %p and " + "input DMA 0x%llx\n", + input_seg, + (unsigned long long) input_dma); + xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " + "ending TRB %p (0x%llx DMA)\n", + start_trb, start_dma, + end_trb, end_dma); + xhci_warn(xhci, "Expected seg %p, got seg %p\n", + result_seg, seg); + trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, + true); + return -1; + } + return 0; +} + +/* TRB math checks for xhci_trb_in_td(), using the command and event rings. 
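The TRB math tests that follow poke trb_in_td() with addresses just inside and just outside a segment, relying on the fact that one TRB is 16 bytes. A self-contained sketch of the per-segment bounds check those vectors exercise; TRBS_PER_SEG is an assumed segment size, and the real trb_in_td() additionally honours the TD's start/end TRBs and wrapped TDs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRBS_PER_SEG  256   /* assumed segment size for this sketch */
#define TRB_SIZE      16    /* one TRB is 16 bytes */

/* Does @dma point at one of the TRBs of the segment starting at @seg_dma? */
static bool dma_in_segment(uint64_t seg_dma, uint64_t dma)
{
        return dma >= seg_dma &&
               dma < seg_dma + (uint64_t)TRBS_PER_SEG * TRB_SIZE;
}

int main(void)
{
        uint64_t seg = 0x1000;  /* invented segment DMA base */

        printf("%d\n", dma_in_segment(seg, seg));                                  /* first TRB: 1 */
        printf("%d\n", dma_in_segment(seg, seg + (TRBS_PER_SEG - 1) * TRB_SIZE));  /* last TRB: 1 */
        printf("%d\n", dma_in_segment(seg, seg - TRB_SIZE));                       /* one TRB before: 0 */
        printf("%d\n", dma_in_segment(seg, seg + TRBS_PER_SEG * TRB_SIZE));        /* one TRB after: 0 */
        return 0;
}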
*/ +int xhci_check_trb_in_td_math(struct xhci_hcd *xhci) +{ + struct { + dma_addr_t input_dma; + struct xhci_segment *result_seg; + } simple_test_vector [] = { + /* A zeroed DMA field should fail */ + { 0, NULL }, + /* One TRB before the ring start should fail */ + { xhci->event_ring->first_seg->dma - 16, NULL }, + /* One byte before the ring start should fail */ + { xhci->event_ring->first_seg->dma - 1, NULL }, + /* Starting TRB should succeed */ + { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, + /* Ending TRB should succeed */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, + xhci->event_ring->first_seg }, + /* One byte after the ring end should fail */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, + /* One TRB after the ring end should fail */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, + /* An address of all ones should fail */ + { (dma_addr_t) (~0), NULL }, + }; + struct { + struct xhci_segment *input_seg; + union xhci_trb *start_trb; + union xhci_trb *end_trb; + dma_addr_t input_dma; + struct xhci_segment *result_seg; + } complex_test_vector [] = { + /* Test feeding a valid DMA address from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->event_ring->first_seg->trbs, + .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* Test feeding a valid end TRB from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->event_ring->first_seg->trbs, + .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* Test feeding a valid start and end TRB from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->cmd_ring->first_seg->trbs, + .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* TRB in this ring, but after this TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[0], + .end_trb = &xhci->event_ring->first_seg->trbs[3], + .input_dma = xhci->event_ring->first_seg->dma + 4*16, + .result_seg = NULL, + }, + /* TRB in this ring, but before this TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[3], + .end_trb = &xhci->event_ring->first_seg->trbs[6], + .input_dma = xhci->event_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + /* TRB in this ring, but after this wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->event_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + /* TRB in this ring, but before this wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, + .result_seg = NULL, + }, + /* TRB not in this ring, and we have a wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, + 
.result_seg = NULL, + }, + }; + + unsigned int num_tests; + int i, ret; + + num_tests = ARRAY_SIZE(simple_test_vector); + for (i = 0; i < num_tests; i++) { + ret = xhci_test_trb_in_td(xhci, + xhci->event_ring->first_seg, + xhci->event_ring->first_seg->trbs, + &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + simple_test_vector[i].input_dma, + simple_test_vector[i].result_seg, + "Simple", i); + if (ret < 0) + return ret; + } + + num_tests = ARRAY_SIZE(complex_test_vector); + for (i = 0; i < num_tests; i++) { + ret = xhci_test_trb_in_td(xhci, + complex_test_vector[i].input_seg, + complex_test_vector[i].start_trb, + complex_test_vector[i].end_trb, + complex_test_vector[i].input_dma, + complex_test_vector[i].result_seg, + "Complex", i); + if (ret < 0) + return ret; + } + xhci_dbg(xhci, "TRB math tests passed.\n"); + return 0; +} +EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math); + +static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) +{ + u64 temp; + dma_addr_t deq; + + deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, + xhci->event_ring->dequeue); + if (deq == 0 && !in_interrupt()) + xhci_warn(xhci, "WARN something wrong with SW event ring " + "dequeue ptr.\n"); + /* Update HC event ring dequeue pointer */ + temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp &= ERST_PTR_MASK; + /* Don't clear the EHB bit (which is RW1C) because + * there might be more events to service. + */ + temp &= ~ERST_EHB; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Write event ring dequeue pointer, " + "preserving EHB bit"); + xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, + &xhci->ir_set->erst_dequeue); +} + +static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, + __le32 __iomem *addr, int max_caps) +{ + u32 temp, port_offset, port_count; + int i; + u8 major_revision, minor_revision; + struct xhci_hub *rhub; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_port_cap *port_cap; + + temp = readl(addr); + major_revision = XHCI_EXT_PORT_MAJOR(temp); + minor_revision = XHCI_EXT_PORT_MINOR(temp); + + if (major_revision == 0x03) { + rhub = &xhci->usb3_rhub; + /* + * Some hosts incorrectly use sub-minor version for minor + * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01 + * for bcdUSB 0x310). Since there is no USB release with sub + * minor version 0x301 to 0x309, we can assume that they are + * incorrect and fix it here. + */ + if (minor_revision > 0x00 && minor_revision < 0x10) + minor_revision <<= 4; + } else if (major_revision <= 0x02) { + rhub = &xhci->usb2_rhub; + } else { + xhci_warn(xhci, "Ignoring unknown port speed, " + "Ext Cap %p, revision = 0x%x\n", + addr, major_revision); + /* Ignoring port protocol we can't understand. FIXME */ + return; + } + rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); + + if (rhub->min_rev < minor_revision) + rhub->min_rev = minor_revision; + + /* Port offset and count in the third dword, see section 7.2 */ + temp = readl(addr + 2); + port_offset = XHCI_EXT_PORT_OFF(temp); + port_count = XHCI_EXT_PORT_COUNT(temp); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Ext Cap %p, port offset = %u, " + "count = %u, revision = 0x%x", + addr, port_offset, port_count, major_revision); + /* Port count includes the current port offset */ + if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) + /* WTF? 
"Valid values are ‘1’ to MaxPorts" */ + return; + + port_cap = &xhci->port_caps[xhci->num_port_caps++]; + if (xhci->num_port_caps > max_caps) + return; + + port_cap->maj_rev = major_revision; + port_cap->min_rev = minor_revision; + port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp); + + if (port_cap->psi_count) { + port_cap->psi = kcalloc_node(port_cap->psi_count, + sizeof(*port_cap->psi), + GFP_KERNEL, dev_to_node(dev)); + if (!port_cap->psi) + port_cap->psi_count = 0; + + port_cap->psi_uid_count++; + for (i = 0; i < port_cap->psi_count; i++) { + port_cap->psi[i] = readl(addr + 4 + i); + + /* count unique ID values, two consecutive entries can + * have the same ID if link is assymetric + */ + if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) != + XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1]))) + port_cap->psi_uid_count++; + + xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", + XHCI_EXT_PORT_PSIV(port_cap->psi[i]), + XHCI_EXT_PORT_PSIE(port_cap->psi[i]), + XHCI_EXT_PORT_PLT(port_cap->psi[i]), + XHCI_EXT_PORT_PFD(port_cap->psi[i]), + XHCI_EXT_PORT_LP(port_cap->psi[i]), + XHCI_EXT_PORT_PSIM(port_cap->psi[i])); + } + } + /* cache usb2 port capabilities */ + if (major_revision < 0x03 && xhci->num_ext_caps < max_caps) + xhci->ext_caps[xhci->num_ext_caps++] = temp; + + if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) && + (temp & XHCI_HLC)) { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xHCI 1.0: support USB2 hardware lpm"); + xhci->hw_lpm_support = 1; + } + + port_offset--; + for (i = port_offset; i < (port_offset + port_count); i++) { + struct xhci_port *hw_port = &xhci->hw_ports[i]; + /* Duplicate entry. Ignore the port if the revisions differ. */ + if (hw_port->rhub) { + xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," + " port %u\n", addr, i); + xhci_warn(xhci, "Port was marked as USB %u, " + "duplicated as USB %u\n", + hw_port->rhub->maj_rev, major_revision); + /* Only adjust the roothub port counts if we haven't + * found a similar duplicate. + */ + if (hw_port->rhub != rhub && + hw_port->hcd_portnum != DUPLICATE_ENTRY) { + hw_port->rhub->num_ports--; + hw_port->hcd_portnum = DUPLICATE_ENTRY; + } + continue; + } + hw_port->rhub = rhub; + hw_port->port_cap = port_cap; + rhub->num_ports++; + } + /* FIXME: Should we disable ports not in the Extended Capabilities? */ +} + +static void xhci_create_rhub_port_array(struct xhci_hcd *xhci, + struct xhci_hub *rhub, gfp_t flags) +{ + int port_index = 0; + int i; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + if (!rhub->num_ports) + return; + rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports), + flags, dev_to_node(dev)); + if (!rhub->ports) + return; + + for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) { + if (xhci->hw_ports[i].rhub != rhub || + xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY) + continue; + xhci->hw_ports[i].hcd_portnum = port_index; + rhub->ports[port_index] = &xhci->hw_ports[i]; + port_index++; + if (port_index == rhub->num_ports) + break; + } +} + +/* + * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that + * specify what speeds each port is supposed to be. We can't count on the port + * speed bits in the PORTSC register being correct until a device is connected, + * but we need to set up the two fake roothubs with the correct number of USB + * 3.0 and USB 2.0 ports at host controller initialization time. 
+ */ +static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) +{ + void __iomem *base; + u32 offset; + unsigned int num_ports; + int i, j; + int cap_count = 0; + u32 cap_start; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports), + flags, dev_to_node(dev)); + if (!xhci->hw_ports) + return -ENOMEM; + + for (i = 0; i < num_ports; i++) { + xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base + + NUM_PORT_REGS * i; + xhci->hw_ports[i].hw_portnum = i; + } + + xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags, + dev_to_node(dev)); + if (!xhci->rh_bw) + return -ENOMEM; + for (i = 0; i < num_ports; i++) { + struct xhci_interval_bw_table *bw_table; + + INIT_LIST_HEAD(&xhci->rh_bw[i].tts); + bw_table = &xhci->rh_bw[i].bw_table; + for (j = 0; j < XHCI_MAX_INTERVAL; j++) + INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); + } + base = &xhci->cap_regs->hc_capbase; + + cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL); + if (!cap_start) { + xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n"); + return -ENODEV; + } + + offset = cap_start; + /* count extended protocol capability entries for later caching */ + while (offset) { + cap_count++; + offset = xhci_find_next_ext_cap(base, offset, + XHCI_EXT_CAPS_PROTOCOL); + } + + xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps), + flags, dev_to_node(dev)); + if (!xhci->ext_caps) + return -ENOMEM; + + xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps), + flags, dev_to_node(dev)); + if (!xhci->port_caps) + return -ENOMEM; + + offset = cap_start; + + while (offset) { + xhci_add_in_port(xhci, num_ports, base + offset, cap_count); + if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports == + num_ports) + break; + offset = xhci_find_next_ext_cap(base, offset, + XHCI_EXT_CAPS_PROTOCOL); + } + if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) { + xhci_warn(xhci, "No ports on the roothubs?\n"); + return -ENODEV; + } + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Found %u USB 2.0 ports and %u USB 3.0 ports.", + xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports); + + /* Place limits on the number of roothub ports so that the hub + * descriptors aren't longer than the USB core will allocate. + */ + if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Limiting USB 3.0 roothub ports to %u.", + USB_SS_MAXPORTS); + xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS; + } + if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Limiting USB 2.0 roothub ports to %u.", + USB_MAXCHILDREN); + xhci->usb2_rhub.num_ports = USB_MAXCHILDREN; + } + + /* + * Note we could have all USB 3.0 ports, or all USB 2.0 ports. + * Not sure how the USB core will handle a hub with no ports... 
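The cap_count loop above is a plain walk of the xHCI Extended Capabilities list. A self-contained sketch of that walk, with an array standing in for the MMIO capability space; the ID/next-pointer layout (ID in bits 7:0, next pointer in bits 15:8, counted in dwords) and the protocol capability ID 0x02 are assumptions taken from the spec:

#include <stdint.h>
#include <stdio.h>

#define CAP_ID(reg)    ((reg) & 0xff)
#define CAP_NEXT(reg)  (((reg) >> 8) & 0xff)   /* offset to the next cap, in dwords */

int main(void)
{
        /* Fake capability space: two protocol caps (ID 0x02), then end of list. */
        uint32_t regs[8] = {
                [0] = 0x02 | (4 << 8),  /* cap at dword 0, next cap 4 dwords away */
                [4] = 0x02 | (0 << 8),  /* last cap: next == 0 terminates the list */
        };
        unsigned int offset = 0, count = 0;

        while (1) {
                if (CAP_ID(regs[offset]) == 0x02)  /* Supported Protocol capability */
                        count++;
                if (!CAP_NEXT(regs[offset]))
                        break;
                offset += CAP_NEXT(regs[offset]);
        }
        printf("found %u protocol capabilities\n", count);
        return 0;
}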
+ */ + + xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags); + xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags); + + return 0; +} + +int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) +{ + dma_addr_t dma; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + unsigned int val, val2; + u64 val_64; + u32 page_size, temp; + int i, ret; + + INIT_LIST_HEAD(&xhci->cmd_list); + + /* init command timeout work */ + INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); + init_completion(&xhci->cmd_ring_stop_completion); + + page_size = readl(&xhci->op_regs->page_size); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Supported page size register = 0x%x", page_size); + for (i = 0; i < 16; i++) { + if ((0x1 & page_size) != 0) + break; + page_size = page_size >> 1; + } + if (i < 16) + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Supported page size of %iK", (1 << (i+12)) / 1024); + else + xhci_warn(xhci, "WARN: no supported page size\n"); + /* Use 4K pages, since that's common and the minimum the HC supports */ + xhci->page_shift = 12; + xhci->page_size = 1 << xhci->page_shift; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "HCD page size set to %iK", xhci->page_size / 1024); + + /* + * Program the Number of Device Slots Enabled field in the CONFIG + * register with the max value of slots the HC can handle. + */ + val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1)); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// xHC can handle at most %d device slots.", val); + val2 = readl(&xhci->op_regs->config_reg); + val |= (val2 & ~HCS_SLOTS_MASK); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Setting Max device slots reg = 0x%x.", val); + writel(val, &xhci->op_regs->config_reg); + + /* + * xHCI section 5.4.6 - doorbell array must be + * "physically contiguous and 64-byte (cache line) aligned". + */ + if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) { + xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags); + if (!xhci->dcbaa) + goto fail; + } else { + xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, + flags); + if (!xhci->dcbaa) + goto fail; + xhci->dcbaa->dma = dma; + } + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Device context base array address = 0x%llx (DMA), %p (virt)", + (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); + xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr); + + /* + * Initialize the ring segment pool. The ring must be a contiguous + * structure comprised of TRBs. The TRBs must be 16 byte aligned, + * however, the command ring segment needs 64-byte aligned segments + * and our use of dma addresses in the trb_address_map radix tree needs + * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. + */ + xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, + TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); + + /* See Table 46 and Note on Figure 55 */ + xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, + 2112, 64, xhci->page_size); + if (!xhci->segment_pool || !xhci->device_pool) + goto fail; + + /* Linear stream context arrays don't have any boundary restrictions, + * and only need to be 16-byte aligned. 
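A worked example of the PAGESIZE decode loop at the top of xhci_mem_init(): bit n set means the controller supports 2^(n+12)-byte pages, so bit 0 means 4K. The register value below is invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t page_size = 0x1;   /* hypothetical PAGESIZE register: only bit 0 set */
        int i;

        /* Find the lowest set bit, exactly like the loop in xhci_mem_init(). */
        for (i = 0; i < 16; i++) {
                if (page_size & 0x1)
                        break;
                page_size >>= 1;
        }

        if (i < 16)
                printf("smallest supported page: %u KiB\n", (1u << (i + 12)) / 1024);
        else
                printf("no supported page size advertised\n");
        return 0;
}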
+ */ + xhci->small_streams_pool = + dma_pool_create("xHCI 256 byte stream ctx arrays", + dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); + xhci->medium_streams_pool = + dma_pool_create("xHCI 1KB stream ctx arrays", + dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); + /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE + * will be allocated with dma_alloc_coherent() + */ + + if (!xhci->small_streams_pool || !xhci->medium_streams_pool) + goto fail; + + /* Set up the command ring to have one segments for now. */ + xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags); + if (!xhci->cmd_ring) + goto fail; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Allocated command ring at %p", xhci->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx", + (unsigned long long)xhci->cmd_ring->first_seg->dma); + + /* Set the address in the Command Ring Control register */ + val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | + (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | + xhci->cmd_ring->cycle_state; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Setting command ring address to 0x%016llx", val_64); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + + xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags); + if (!xhci->lpm_command) + goto fail; + + /* Reserve one command ring TRB for disabling LPM. + * Since the USB core grabs the shared usb_bus bandwidth mutex before + * disabling LPM, we only need to reserve one TRB for all devices. + */ + xhci->cmd_ring_reserved_trbs++; + + val = readl(&xhci->cap_regs->db_off); + val &= DBOFF_MASK; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Doorbell array is located at offset 0x%x" + " from cap regs base addr", val); + xhci->dba = (void __iomem *) xhci->cap_regs + val; + /* Set ir_set to interrupt register set 0 */ + xhci->ir_set = &xhci->run_regs->ir_set[0]; + + /* + * Event ring setup: Allocate a normal ring, but also setup + * the event ring segment table (ERST). Section 4.9.3. + */ + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring"); + xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, + 0, flags); + if (!xhci->event_ring) + goto fail; + if (xhci_check_trb_in_td_math(xhci) < 0) + goto fail; + + ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags); + if (ret) + goto fail; + + /* set ERST count with the number of entries in the segment table */ + val = readl(&xhci->ir_set->erst_size); + val &= ERST_SIZE_MASK; + val |= ERST_NUM_SEGS; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Write ERST size = %i to ir_set 0 (some bits preserved)", + val); + writel(val, &xhci->ir_set->erst_size); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Set ERST entries to point to event ring."); + /* set the segment table base address */ + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Set ERST base address for ir_set 0 = 0x%llx", + (unsigned long long)xhci->erst.erst_dma_addr); + val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); + val_64 &= ERST_PTR_MASK; + val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); + xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); + + /* Set the event ring dequeue address */ + xhci_set_hc_event_deq(xhci); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Wrote ERST address to ir_set 0."); + + /* + * XXX: Might need to set the Interrupter Moderation Register to + * something other than the default (~1ms minimum between interrupts). 
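The CRCR and ERST writes above share one pattern: read the register, keep the reserved/preserved bits, and merge in the new pointer. A minimal sketch of that read-modify-write with an invented mask and values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t rsvd_mask = 0x3f;        /* invented: low bits are reserved/status */
        uint64_t reg = 0x0000000000000025ULL;   /* invented: current register contents */
        uint64_t new_ptr = 0x12345680ULL;       /* invented: 64-byte aligned ring address */

        /* Keep reserved bits from the old value, take the pointer from the new one. */
        reg = (reg & rsvd_mask) | (new_ptr & ~rsvd_mask);

        printf("programmed value: 0x%llx\n", (unsigned long long)reg);
        return 0;
}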
+ * See section 5.5.1.2. + */ + for (i = 0; i < MAX_HC_SLOTS; i++) + xhci->devs[i] = NULL; + for (i = 0; i < USB_MAXCHILDREN; i++) { + xhci->usb2_rhub.bus_state.resume_done[i] = 0; + xhci->usb3_rhub.bus_state.resume_done[i] = 0; + /* Only the USB 2.0 completions will ever be used. */ + init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]); + init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]); + } + + if (scratchpad_alloc(xhci, flags)) + goto fail; + if (xhci_setup_port_arrays(xhci, flags)) + goto fail; + + /* Enable USB 3.0 device notifications for function remote wake, which + * is necessary for allowing USB 3.0 devices to do remote wakeup from + * U3 (device suspend). + */ + temp = readl(&xhci->op_regs->dev_notification); + temp &= ~DEV_NOTE_MASK; + temp |= DEV_NOTE_FWAKE; + writel(temp, &xhci->op_regs->dev_notification); + + return 0; + +fail: + xhci_halt(xhci); + xhci_reset(xhci); + xhci_mem_cleanup(xhci); + return -ENOMEM; +} diff --git a/usb/host/xhci-plat.c b/usb/host/xhci-plat.c new file mode 100644 index 0000000..60e72ca --- /dev/null +++ b/usb/host/xhci-plat.c @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * xhci-plat.c - xHCI host controller driver platform Bus Glue. + * + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com + * Author: Sebastian Andrzej Siewior + * + * A lot of code borrowed from the Linux xHCI driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "xhci.h" +#include "xhci-plat.h" +#include "xhci-mvebu.h" +#include "xhci-rcar.h" + +static struct hc_driver __read_mostly xhci_plat_hc_driver; + +static int xhci_plat_setup(struct usb_hcd *hcd); +static int xhci_plat_start(struct usb_hcd *hcd); + +static const struct xhci_driver_overrides xhci_plat_overrides __initconst = { + .extra_priv_size = sizeof(struct xhci_plat_priv), + .reset = xhci_plat_setup, + .start = xhci_plat_start, +}; + +static void xhci_priv_plat_start(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (priv->plat_start) + priv->plat_start(hcd); +} + +static int xhci_priv_plat_setup(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->plat_setup) + return 0; + + return priv->plat_setup(hcd); +} + +static int xhci_priv_init_quirk(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->init_quirk) + return 0; + + return priv->init_quirk(hcd); +} + +static int xhci_priv_suspend_quirk(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->suspend_quirk) + return 0; + + return priv->suspend_quirk(hcd); +} + +static int xhci_priv_resume_quirk(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->resume_quirk) + return 0; + + return priv->resume_quirk(hcd); +} + +static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) +{ + struct xhci_plat_priv *priv = xhci_to_priv(xhci); + + /* + * As of now platform drivers don't provide MSI support so we ensure + * here that the generic code does not try to make a pci_dev from our + * dev struct in order to setup MSI + */ + xhci->quirks |= XHCI_PLAT | priv->quirks; +} + +/* called during probe() after chip reset completes */ +static int xhci_plat_setup(struct usb_hcd *hcd) +{ + int ret; + + + ret = xhci_priv_init_quirk(hcd); + if (ret) + return ret; + + return xhci_gen_setup(hcd, xhci_plat_quirks); +} + +static int xhci_plat_start(struct 
usb_hcd *hcd) +{ + xhci_priv_plat_start(hcd); + return xhci_run(hcd); +} + +#ifdef CONFIG_OF +static const struct xhci_plat_priv xhci_plat_marvell_armada = { + .init_quirk = xhci_mvebu_mbus_init_quirk, +}; + +static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { + .plat_setup = xhci_mvebu_a3700_plat_setup, + .init_quirk = xhci_mvebu_a3700_init_quirk, +}; + +static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = { + SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1) +}; + +static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { + SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3) +}; + +static const struct xhci_plat_priv xhci_plat_brcm = { + .quirks = XHCI_RESET_ON_RESUME, +}; + +static const struct of_device_id usb_xhci_of_match[] = { + { + .compatible = "generic-xhci", + }, { + .compatible = "xhci-platform", + }, { + .compatible = "marvell,armada-375-xhci", + .data = &xhci_plat_marvell_armada, + }, { + .compatible = "marvell,armada-380-xhci", + .data = &xhci_plat_marvell_armada, + }, { + .compatible = "marvell,armada3700-xhci", + .data = &xhci_plat_marvell_armada3700, + }, { + .compatible = "renesas,xhci-r8a7790", + .data = &xhci_plat_renesas_rcar_gen2, + }, { + .compatible = "renesas,xhci-r8a7791", + .data = &xhci_plat_renesas_rcar_gen2, + }, { + .compatible = "renesas,xhci-r8a7793", + .data = &xhci_plat_renesas_rcar_gen2, + }, { + .compatible = "renesas,xhci-r8a7795", + .data = &xhci_plat_renesas_rcar_gen3, + }, { + .compatible = "renesas,xhci-r8a7796", + .data = &xhci_plat_renesas_rcar_gen3, + }, { + .compatible = "renesas,rcar-gen2-xhci", + .data = &xhci_plat_renesas_rcar_gen2, + }, { + .compatible = "renesas,rcar-gen3-xhci", + .data = &xhci_plat_renesas_rcar_gen3, + }, { + .compatible = "brcm,xhci-brcm-v2", + .data = &xhci_plat_brcm, + }, { + .compatible = "brcm,bcm7445-xhci", + .data = &xhci_plat_brcm, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, usb_xhci_of_match); +#endif + +static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite; + +int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops) +{ + if (vendor_ops == NULL) + return -EINVAL; + + xhci_plat_vendor_overwrite.vendor_ops = vendor_ops; + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_plat_register_vendor_ops); + +static int xhci_vendor_init(struct xhci_hcd *xhci) +{ + struct xhci_vendor_ops *ops = NULL; + + if (xhci_plat_vendor_overwrite.vendor_ops) + ops = xhci->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops; + + if (ops && ops->vendor_init) + return ops->vendor_init(xhci); + return 0; +} + +static void xhci_vendor_cleanup(struct xhci_hcd *xhci) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->vendor_cleanup) + ops->vendor_cleanup(xhci); + + xhci->vendor_ops = NULL; +} + +static int xhci_plat_ncr_init(struct usb_hcd *hcd, struct platform_device *pdev) +{ + int ret; + + if (device_property_read_bool(&pdev->dev, "semidrive-taihang")) + hcd->soc_taihang = true; + + if (!hcd->soc_taihang) { + if ((hcd->rsrc_start == 0x31260000 && hcd->rsrc_len == 0x8000) + || (hcd->rsrc_start == 0x31220000 && hcd->rsrc_len == 0x8000)) { + hcd->ncr_regs = devm_ioremap(&pdev->dev, + hcd->rsrc_start + 0xd000, 0x1000); + if (IS_ERR(hcd->ncr_regs)) { + ret = PTR_ERR(hcd->ncr_regs); + return ret; + } + } + } + return 0; +} + +static int xhci_plat_probe(struct platform_device *pdev) +{ + const struct xhci_plat_priv *priv_match; + const struct hc_driver *driver; + struct device *sysdev, *tmpdev; + struct xhci_hcd *xhci; + struct resource *res; + struct usb_hcd 
*hcd; + int ret; + int irq; + struct xhci_plat_priv *priv = NULL; + + + if (usb_disabled()) + return -ENODEV; + + driver = &xhci_plat_hc_driver; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + /* + * sysdev must point to a device that is known to the system firmware + * or PCI hardware. We handle these three cases here: + * 1. xhci_plat comes from firmware + * 2. xhci_plat is child of a device from firmware (dwc3-plat) + * 3. xhci_plat is grandchild of a pci device (dwc3-pci) + */ + for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) { + if (is_of_node(sysdev->fwnode) || + is_acpi_device_node(sysdev->fwnode)) + break; +#ifdef CONFIG_PCI + else if (sysdev->bus == &pci_bus_type) + break; +#endif + } + + if (!sysdev) + sysdev = &pdev->dev; + + /* Try to set 64-bit DMA first */ + if (WARN_ON(!sysdev->dma_mask)) + /* Platform did not initialize dma_mask */ + ret = dma_coerce_mask_and_coherent(sysdev, + DMA_BIT_MASK(64)); + else + ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64)); + + /* If seting 64-bit DMA mask fails, fall back to 32-bit DMA mask */ + if (ret) { + ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(32)); + if (ret) + return ret; + } + + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, + dev_name(&pdev->dev), NULL); + if (!hcd) { + ret = -ENOMEM; + goto disable_runtime; + } + + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(hcd->regs)) { + ret = PTR_ERR(hcd->regs); + goto put_hcd; + } + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + xhci = hcd_to_xhci(hcd); + + if (xhci_plat_ncr_init(hcd, pdev)) + goto put_hcd; + + /* + * Not all platforms have clks so it is not an error if the + * clock do not exist. + */ + xhci->reg_clk = devm_clk_get_optional(&pdev->dev, "reg"); + if (IS_ERR(xhci->reg_clk)) { + ret = PTR_ERR(xhci->reg_clk); + goto put_hcd; + } + + ret = clk_prepare_enable(xhci->reg_clk); + if (ret) + goto put_hcd; + + xhci->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(xhci->clk)) { + ret = PTR_ERR(xhci->clk); + goto disable_reg_clk; + } + + ret = clk_prepare_enable(xhci->clk); + if (ret) + goto disable_reg_clk; + + if (pdev->dev.of_node) + priv_match = of_device_get_match_data(&pdev->dev); + else + priv_match = dev_get_platdata(&pdev->dev); + + if (priv_match) { + priv = hcd_to_xhci_priv(hcd); + /* Just copy data for now */ + *priv = *priv_match; + } + + device_set_wakeup_capable(&pdev->dev, true); + + xhci->main_hcd = hcd; + xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, + dev_name(&pdev->dev), hcd); + if (!xhci->shared_hcd) { + ret = -ENOMEM; + goto disable_clk; + } + + /* imod_interval is the interrupt moderation value in nanoseconds. 
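xhci_plat_probe() acquires its resources in order and bails out with gotos; the matching labels later in the function release everything in reverse order. A self-contained sketch of that unwinding idiom with stand-in resources (the names and the simulated failure are invented):

#include <stdio.h>
#include <stdlib.h>

static int acquire(const char *what, int fail)  /* stand-in for clk/hcd/phy setup */
{
        if (fail) {
                fprintf(stderr, "failed to acquire %s\n", what);
                return -1;
        }
        printf("acquired %s\n", what);
        return 0;
}

static void release(const char *what)
{
        printf("released %s\n", what);
}

int main(void)
{
        int ret;

        ret = acquire("hcd", 0);
        if (ret)
                goto out;
        ret = acquire("reg clock", 0);
        if (ret)
                goto put_hcd;
        ret = acquire("phy", 1);        /* simulate a late failure */
        if (ret)
                goto disable_reg_clk;

        return 0;

disable_reg_clk:
        release("reg clock");
put_hcd:
        release("hcd");
out:
        return EXIT_FAILURE;
}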
*/ + xhci->imod_interval = 40000; + + /* Iterate over all parent nodes for finding quirks */ + for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) { + + if (device_property_read_bool(tmpdev, "usb2-lpm-disable")) + xhci->quirks |= XHCI_HW_LPM_DISABLE; + + if (device_property_read_bool(tmpdev, "usb3-lpm-capable")) + xhci->quirks |= XHCI_LPM_SUPPORT; + + if (device_property_read_bool(tmpdev, "quirk-broken-port-ped")) + xhci->quirks |= XHCI_BROKEN_PORT_PED; + + device_property_read_u32(tmpdev, "imod-interval-ns", + &xhci->imod_interval); + } + + hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); + if (IS_ERR(hcd->usb_phy)) { + ret = PTR_ERR(hcd->usb_phy); + if (ret == -EPROBE_DEFER) + goto put_usb3_hcd; + hcd->usb_phy = NULL; + } else { + ret = usb_phy_init(hcd->usb_phy); + if (ret) + goto put_usb3_hcd; + } + + ret = xhci_vendor_init(xhci); + if (ret) + goto disable_usb_phy; + + hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); + xhci->shared_hcd->tpl_support = hcd->tpl_support; + + if (priv) { + ret = xhci_priv_plat_setup(hcd); + if (ret) + goto disable_usb_phy; + } + + if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) + hcd->skip_phy_initialization = 1; + + if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) + xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK; + + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (ret) + goto disable_usb_phy; + + if (HCC_MAX_PSA(xhci->hcc_params) >= 4) + xhci->shared_hcd->can_do_streams = 1; + + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); + if (ret) + goto dealloc_usb2_hcd; + + device_enable_async_suspend(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + /* + * Prevent runtime pm from being on as default, users should enable + * runtime pm using power/control in sysfs. + */ + pm_runtime_forbid(&pdev->dev); + + return 0; + + +dealloc_usb2_hcd: + usb_remove_hcd(hcd); + +disable_usb_phy: + usb_phy_shutdown(hcd->usb_phy); + +put_usb3_hcd: + usb_put_hcd(xhci->shared_hcd); + +disable_clk: + clk_disable_unprepare(xhci->clk); + +disable_reg_clk: + clk_disable_unprepare(xhci->reg_clk); + +put_hcd: + usb_put_hcd(hcd); + +disable_runtime: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int xhci_plat_remove(struct platform_device *dev) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct clk *clk = xhci->clk; + struct clk *reg_clk = xhci->reg_clk; + struct usb_hcd *shared_hcd = xhci->shared_hcd; + + pm_runtime_get_sync(&dev->dev); + xhci->xhc_state |= XHCI_STATE_REMOVING; + + usb_remove_hcd(shared_hcd); + xhci->shared_hcd = NULL; + usb_phy_shutdown(hcd->usb_phy); + + usb_remove_hcd(hcd); + + xhci_vendor_cleanup(xhci); + + usb_put_hcd(shared_hcd); + clk_disable_unprepare(clk); + clk_disable_unprepare(reg_clk); + usb_put_hcd(hcd); + + pm_runtime_disable(&dev->dev); + pm_runtime_put_noidle(&dev->dev); + pm_runtime_set_suspended(&dev->dev); + + return 0; +} + +static int __maybe_unused xhci_plat_suspend(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int ret; + + ret = xhci_priv_suspend_quirk(hcd); + if (ret) + return ret; + /* + * xhci_suspend() needs `do_wakeup` to know whether host is allowed + * to do wakeup during suspend. 
+ */ + return xhci_suspend(xhci, device_may_wakeup(dev)); +} + +static int __maybe_unused xhci_plat_resume(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int ret; + + ret = xhci_priv_resume_quirk(hcd); + if (ret) + return ret; + + ret = xhci_resume(xhci, 0); + if (ret) + return ret; + + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; +} + +static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int ret; + + ret = xhci_priv_suspend_quirk(hcd); + if (ret) + return ret; + + return xhci_suspend(xhci, true); +} + +static int __maybe_unused xhci_plat_runtime_resume(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + return xhci_resume(xhci, 0); +} + +static const struct dev_pm_ops xhci_plat_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume) + + SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, + xhci_plat_runtime_resume, + NULL) +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id usb_xhci_acpi_match[] = { + /* XHCI-compliant USB Controller */ + { "PNP0D10", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); +#endif + +static struct platform_driver usb_xhci_driver = { + .probe = xhci_plat_probe, + .remove = xhci_plat_remove, + .shutdown = usb_hcd_platform_shutdown, + .driver = { + .name = "xhci-hcd", + .pm = &xhci_plat_pm_ops, + .of_match_table = of_match_ptr(usb_xhci_of_match), + .acpi_match_table = ACPI_PTR(usb_xhci_acpi_match), + }, +}; +MODULE_ALIAS("platform:xhci-hcd"); + +static int __init xhci_plat_init(void) +{ + xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides); + return platform_driver_register(&usb_xhci_driver); +} +module_init(xhci_plat_init); + +static void __exit xhci_plat_exit(void) +{ + platform_driver_unregister(&usb_xhci_driver); +} +module_exit(xhci_plat_exit); + +MODULE_DESCRIPTION("xHCI Platform Host Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/usb/host/xhci-plat.h b/usb/host/xhci-plat.h new file mode 100644 index 0000000..e726a57 --- /dev/null +++ b/usb/host/xhci-plat.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * xhci-plat.h - xHCI host controller driver platform Bus Glue. + * + * Copyright (C) 2015 Renesas Electronics Corporation + */ + +#ifndef _XHCI_PLAT_H +#define _XHCI_PLAT_H + +#include "xhci.h" /* for hcd_to_xhci() */ + +struct xhci_plat_priv { + const char *firmware_name; + unsigned long long quirks; + struct xhci_vendor_data *vendor_data; + int (*plat_setup)(struct usb_hcd *); + void (*plat_start)(struct usb_hcd *); + int (*init_quirk)(struct usb_hcd *); + int (*suspend_quirk)(struct usb_hcd *); + int (*resume_quirk)(struct usb_hcd *); +}; + +#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv) +#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv) + +struct xhci_plat_priv_overwrite { + struct xhci_vendor_ops *vendor_ops; +}; + +int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops); + +#endif /* _XHCI_PLAT_H */ diff --git a/usb/host/xhci-ring.c b/usb/host/xhci-ring.c new file mode 100644 index 0000000..3b43592 --- /dev/null +++ b/usb/host/xhci-ring.c @@ -0,0 +1,4407 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. 
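Tying the platform glue together: a vendor platform normally supplies a const struct xhci_plat_priv (see xhci-plat.h above) through the .data pointer of its of_device_id entry, as the Renesas and Broadcom entries already do. A hedged fragment showing the shape of such glue; the SoC name, compatible string and callback body are invented, and this is not compilable outside the driver:

/* Hypothetical SoC glue: every member used here mirrors fields that
 * xhci-plat.c already consumes (.init_quirk, .quirks, of .data matching). */
static int example_soc_init_quirk(struct usb_hcd *hcd)
{
        /* e.g. poke a vendor wrapper register before xhci_gen_setup() runs */
        return 0;
}

static const struct xhci_plat_priv xhci_plat_example_soc = {
        .quirks     = XHCI_RESET_ON_RESUME,
        .init_quirk = example_soc_init_quirk,
};

/* ...and the match table entry that hands it to xhci_plat_probe():
 * { .compatible = "vendor,example-xhci", .data = &xhci_plat_example_soc },
 */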
+ * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + */ + +/* + * Ring initialization rules: + * 1. Each segment is initialized to zero, except for link TRBs. + * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or + * Consumer Cycle State (CCS), depending on ring function. + * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment. + * + * Ring behavior rules: + * 1. A ring is empty if enqueue == dequeue. This means there will always be at + * least one free TRB in the ring. This is useful if you want to turn that + * into a link TRB and expand the ring. + * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a + * link TRB, then load the pointer with the address in the link TRB. If the + * link TRB had its toggle bit set, you may need to update the ring cycle + * state (see cycle bit rules). You may have to do this multiple times + * until you reach a non-link TRB. + * 3. A ring is full if enqueue++ (for the definition of increment above) + * equals the dequeue pointer. + * + * Cycle bit rules: + * 1. When a consumer increments a dequeue pointer and encounters a toggle bit + * in a link TRB, it must toggle the ring cycle state. + * 2. When a producer increments an enqueue pointer and encounters a toggle bit + * in a link TRB, it must toggle the ring cycle state. + * + * Producer rules: + * 1. Check if ring is full before you enqueue. + * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing. + * Update enqueue pointer between each write (which may update the ring + * cycle state). + * 3. Notify consumer. If SW is producer, it rings the doorbell for command + * and endpoint rings. If HC is the producer for the event ring, + * and it generates an interrupt according to interrupt modulation rules. + * + * Consumer rules: + * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state, + * the TRB is owned by the consumer. + * 2. Update dequeue pointer (which may update the ring cycle state) and + * continue processing TRBs until you reach a TRB which is not owned by you. + * 3. Notify the producer. SW is the consumer for the event ring, and it + * updates event ring dequeue pointer. HC is the consumer for the command and + * endpoint rings; it generates events on the event ring for these. + */ + +#include +#include +#include +#include "xhci.h" +#include "xhci-trace.h" + +static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 field1, u32 field2, + u32 field3, u32 field4, bool command_must_succeed); + +/* + * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA + * address of the TRB. 
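The producer/consumer rules above reduce to: a TRB belongs to the consumer only while its cycle bit matches the consumer's cycle state, and that state flips on every wrap. A self-contained toy model of an event-ring style producer and consumer; the segment size, TRB layout and data values are invented:

#include <stdint.h>
#include <stdio.h>

#define SEG_TRBS 4                      /* tiny segment so the wrap is visible */

struct toy_trb { uint32_t data; uint32_t cycle; };

static struct toy_trb ring[SEG_TRBS];   /* zero-initialized: cycle bit 0 everywhere */
static unsigned int enq, deq;
static unsigned int producer_cycle = 1, consumer_cycle = 1;

static void produce(uint32_t data)      /* the "HC" side of the event ring */
{
        ring[enq].data = data;
        ring[enq].cycle = producer_cycle;
        if (++enq == SEG_TRBS) {        /* wrap: toggle producer cycle state */
                enq = 0;
                producer_cycle ^= 1;
        }
}

static void consume(void)               /* the driver side */
{
        while (ring[deq].cycle == consumer_cycle) {     /* TRB is owned by us */
                printf("consumed %u\n", ring[deq].data);
                if (++deq == SEG_TRBS) {                /* wrap: toggle cycle state */
                        deq = 0;
                        consumer_cycle ^= 1;
                }
        }
}

int main(void)
{
        produce(100); produce(101); produce(102);
        consume();                      /* picks up 100..102, stops at a stale TRB */
        produce(103); produce(104); produce(105);
        consume();                      /* follows the wrap and the cycle flip */
        return 0;
}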
+ */ +dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, + union xhci_trb *trb) +{ + unsigned long segment_offset; + + if (!seg || !trb || trb < seg->trbs) + return 0; + /* offset in TRBs */ + segment_offset = trb - seg->trbs; + if (segment_offset >= TRBS_PER_SEGMENT) + return 0; + return seg->dma + (segment_offset * sizeof(*trb)); +} +EXPORT_SYMBOL_GPL(xhci_trb_virt_to_dma); + +static bool trb_is_noop(union xhci_trb *trb) +{ + return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); +} + +static bool trb_is_link(union xhci_trb *trb) +{ + return TRB_TYPE_LINK_LE32(trb->link.control); +} + +static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb) +{ + return trb == &seg->trbs[TRBS_PER_SEGMENT - 1]; +} + +static bool last_trb_on_ring(struct xhci_ring *ring, + struct xhci_segment *seg, union xhci_trb *trb) +{ + return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg); +} + +static bool link_trb_toggles_cycle(union xhci_trb *trb) +{ + return le32_to_cpu(trb->link.control) & LINK_TOGGLE; +} + +static bool last_td_in_urb(struct xhci_td *td) +{ + struct urb_priv *urb_priv = td->urb->hcpriv; + + return urb_priv->num_tds_done == urb_priv->num_tds; +} + +static void inc_td_cnt(struct urb *urb) +{ + struct urb_priv *urb_priv = urb->hcpriv; + + urb_priv->num_tds_done++; +} + +static void trb_to_noop(union xhci_trb *trb, u32 noop_type) +{ + if (trb_is_link(trb)) { + /* unchain chained link TRBs */ + trb->link.control &= cpu_to_le32(~TRB_CHAIN); + } else { + trb->generic.field[0] = 0; + trb->generic.field[1] = 0; + trb->generic.field[2] = 0; + /* Preserve only the cycle bit of this TRB */ + trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); + trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); + } +} + +/* Updates trb to point to the next TRB in the ring, and updates seg if the next + * TRB is in a new segment. This does not skip over link TRBs, and it does not + * effect the ring dequeue or enqueue pointers. + */ +static void next_trb(struct xhci_hcd *xhci, + struct xhci_ring *ring, + struct xhci_segment **seg, + union xhci_trb **trb) +{ + if (trb_is_link(*trb)) { + *seg = (*seg)->next; + *trb = ((*seg)->trbs); + } else { + (*trb)++; + } +} + +/* + * See Cycle bit rules. SW is the consumer for the event ring only. + */ +void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) +{ + unsigned int link_trb_count = 0; + + /* event ring doesn't have link trbs, check for last trb */ + if (ring->type == TYPE_EVENT) { + if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) { + ring->dequeue++; + goto out; + } + if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue)) + ring->cycle_state ^= 1; + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + goto out; + } + + /* All other rings have link trbs */ + if (!trb_is_link(ring->dequeue)) { + if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) { + xhci_warn(xhci, "Missing link TRB at end of segment\n"); + } else { + ring->dequeue++; + ring->num_trbs_free++; + } + } + + while (trb_is_link(ring->dequeue)) { + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + + if (link_trb_count++ > ring->num_segs) { + xhci_warn(xhci, "Ring is an endless link TRB loop\n"); + break; + } + } +out: + trace_xhci_inc_deq(ring); + + return; +} + +/* + * See Cycle bit rules. SW is the consumer for the event ring only. + * + * If we've just enqueued a TRB that is in the middle of a TD (meaning the + * chain bit is set), then set the chain bit in all the following link TRBs. 
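A note on the pointer arithmetic in xhci_trb_virt_to_dma() above: trb - seg->trbs counts TRBs, not bytes, hence the multiply by the TRB size. A standalone illustration with invented addresses:

#include <stdint.h>
#include <stdio.h>

struct toy_trb { uint32_t field[4]; };          /* 16 bytes, like a real TRB */

int main(void)
{
        struct toy_trb trbs[8];
        uint64_t seg_dma = 0x2000;              /* invented segment DMA base */
        struct toy_trb *trb = &trbs[5];

        /* Pointer subtraction yields an index in TRBs, so the DMA address is
         * base + index * sizeof(TRB) -- the same arithmetic the driver uses. */
        uint64_t dma = seg_dma + (uint64_t)(trb - trbs) * sizeof(*trb);

        printf("TRB %ld -> dma 0x%llx\n", (long)(trb - trbs),
               (unsigned long long)dma);
        return 0;
}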
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs + * have their chain bit cleared (so that each Link TRB is a separate TD). + * + * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit + * set, but other sections talk about dealing with the chain bit set. This was + * fixed in the 0.96 specification errata, but we have to assume that all 0.95 + * xHCI hardware can't handle the chain bit being cleared on a link TRB. + * + * @more_trbs_coming: Will you enqueue more TRBs before calling + * prepare_transfer()? + */ +static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, + bool more_trbs_coming) +{ + u32 chain; + union xhci_trb *next; + unsigned int link_trb_count = 0; + + chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; + /* If this is not event ring, there is one less usable TRB */ + if (!trb_is_link(ring->enqueue)) + ring->num_trbs_free--; + + if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) { + xhci_err(xhci, "Tried to move enqueue past ring segment\n"); + return; + } + + next = ++(ring->enqueue); + + /* Update the dequeue pointer further if that was a link TRB */ + while (trb_is_link(next)) { + + /* + * If the caller doesn't plan on enqueueing more TDs before + * ringing the doorbell, then we don't want to give the link TRB + * to the hardware just yet. We'll give the link TRB back in + * prepare_ring() just before we enqueue the TD at the top of + * the ring. + */ + if (!chain && !more_trbs_coming) + break; + + /* If we're not dealing with 0.95 hardware or isoc rings on + * AMD 0.96 host, carry over the chain bit of the previous TRB + * (which may mean the chain bit is cleared). + */ + if (!(ring->type == TYPE_ISOC && + (xhci->quirks & XHCI_AMD_0x96_HOST)) && + !xhci_link_trb_quirk(xhci)) { + next->link.control &= cpu_to_le32(~TRB_CHAIN); + next->link.control |= cpu_to_le32(chain); + } + /* Give this link TRB to the hardware */ + wmb(); + next->link.control ^= cpu_to_le32(TRB_CYCLE); + + /* Toggle the cycle bit after the last ring segment. */ + if (link_trb_toggles_cycle(next)) + ring->cycle_state ^= 1; + + ring->enq_seg = ring->enq_seg->next; + ring->enqueue = ring->enq_seg->trbs; + next = ring->enqueue; + + if (link_trb_count++ > ring->num_segs) { + xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__); + break; + } + } + + trace_xhci_inc_enq(ring); +} + +/* + * Check to see if there's room to enqueue num_trbs on the ring and make sure + * enqueue pointer will not advance into dequeue segment. See rules above. 
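Before the room check that follows, the empty/full rules from the ring documentation are worth seeing in isolation: empty means enqueue == dequeue, and one slot always stays free, so advancing enqueue onto dequeue would mean full. A toy model; the real code also tracks num_trbs_free and steps over link TRBs:

#include <stdio.h>

#define RING_TRBS 4

static unsigned int next(unsigned int idx) { return (idx + 1) % RING_TRBS; }

int main(void)
{
        unsigned int enq = 0, deq = 0, queued = 0;

        printf("empty: %d\n", enq == deq);              /* 1: enqueue == dequeue */

        while (next(enq) != deq) {                      /* stop before "full" */
                enq = next(enq);
                queued++;
        }
        printf("queued %u TRBs before full\n", queued); /* RING_TRBS - 1 */
        return 0;
}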
+ */ +static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, + unsigned int num_trbs) +{ + int num_trbs_in_deq_seg; + + if (ring->num_trbs_free < num_trbs) + return 0; + + if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) { + num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs; + if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg) + return 0; + } + + return 1; +} + +/* Ring the host controller doorbell after placing a command on the ring */ +void xhci_ring_cmd_db(struct xhci_hcd *xhci) +{ + if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) + return; + + xhci_dbg(xhci, "// Ding dong!\n"); + + trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST); + + writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]); + /* Flush PCI posted writes */ + readl(&xhci->dba->doorbell[0]); +} +EXPORT_SYMBOL_GPL(xhci_ring_cmd_db); + +static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay) +{ + return mod_delayed_work(system_wq, &xhci->cmd_timer, delay); +} + +static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci) +{ + return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command, + cmd_list); +} + +/* + * Turn all commands on command ring with status set to "aborted" to no-op trbs. + * If there are other commands waiting then restart the ring and kick the timer. + * This must be called with command ring stopped and xhci->lock held. + */ +static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, + struct xhci_command *cur_cmd) +{ + struct xhci_command *i_cmd; + + /* Turn all aborted commands in list to no-ops, then restart */ + list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) { + + if (i_cmd->status != COMP_COMMAND_ABORTED) + continue; + + i_cmd->status = COMP_COMMAND_RING_STOPPED; + + xhci_dbg(xhci, "Turn aborted command %p to no-op\n", + i_cmd->command_trb); + + trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP); + + /* + * caller waiting for completion is called when command + * completion event is received for these no-op commands + */ + } + + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; + + /* ring command ring doorbell to restart the command ring */ + if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && + !(xhci->xhc_state & XHCI_STATE_DYING)) { + xhci->current_cmd = cur_cmd; + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); + xhci_ring_cmd_db(xhci); + } +} + +/* Must be called with xhci->lock held, releases and aquires lock back */ +static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) +{ + u64 temp_64; + int ret; + + xhci_dbg(xhci, "Abort command ring\n"); + + reinit_completion(&xhci->cmd_ring_stop_completion); + + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, + &xhci->op_regs->cmd_ring); + + /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the + * completion of the Command Abort operation. If CRR is not negated in 5 + * seconds then driver handles it as if host died (-ENODEV). + * In the future we should distinguish between -ENODEV and -ETIMEDOUT + * and try to recover a -ETIMEDOUT with a host controller reset. 
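The abort path above relies on xhci_handshake() to poll the command ring register until CMD_RING_RUNNING clears or a timeout expires. A self-contained sketch of that polling pattern; the register is simulated, and the bit value and attempt count are invented:

#include <stdint.h>
#include <stdio.h>

/* Poll *reg until (*reg & mask) == done or we run out of attempts.
 * This mirrors the handshake used to wait for CRR to clear, with the
 * delay loop replaced by a simple attempt counter. */
static int toy_handshake(volatile uint32_t *reg, uint32_t mask,
                         uint32_t done, unsigned int attempts)
{
        while (attempts--) {
                if ((*reg & mask) == done)
                        return 0;
                *reg >>= 1;     /* stand-in for "hardware makes progress" */
        }
        return -1;              /* would be -ETIMEDOUT in the driver */
}

int main(void)
{
        volatile uint32_t fake_cmd_ring = 0x8;  /* bit 3 plays the CRR role here */

        if (toy_handshake(&fake_cmd_ring, 0x8, 0, 16) == 0)
                printf("command ring stopped\n");
        else
                printf("timed out waiting for the ring to stop\n");
        return 0;
}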
+ */ + ret = xhci_handshake(&xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 5 * 1000 * 1000); + if (ret < 0) { + xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret); + xhci_halt(xhci); + xhci_hc_died(xhci); + return ret; + } + /* + * Writing the CMD_RING_ABORT bit should cause a cmd completion event, + * however on some host hw the CMD_RING_RUNNING bit is correctly cleared + * but the completion event in never sent. Wait 2 secs (arbitrary + * number) to handle those cases after negation of CMD_RING_RUNNING. + */ + spin_unlock_irqrestore(&xhci->lock, flags); + ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion, + msecs_to_jiffies(2000)); + spin_lock_irqsave(&xhci->lock, flags); + if (!ret) { + xhci_dbg(xhci, "No stop event for abort, ring start fail?\n"); + xhci_cleanup_command_queue(xhci); + } else { + xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci)); + } + return 0; +} + +void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index, + unsigned int stream_id) +{ + __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; + struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; + unsigned int ep_state = ep->ep_state; + + /* Don't ring the doorbell for this endpoint if there are pending + * cancellations because we don't want to interrupt processing. + * We don't want to restart any stream rings if there's a set dequeue + * pointer command pending because the device can choose to start any + * stream once the endpoint is on the HW schedule. + */ + if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) || + (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT)) + return; + + trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id)); + + writel(DB_VALUE(ep_index, stream_id), db_addr); + /* flush the write */ + readl(db_addr); +} + +/* Ring the doorbell for any rings with pending URBs */ +static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index) +{ + unsigned int stream_id; + struct xhci_virt_ep *ep; + + ep = &xhci->devs[slot_id]->eps[ep_index]; + + /* A ring has pending URBs if its TD list is not empty */ + if (!(ep->ep_state & EP_HAS_STREAMS)) { + if (ep->ring && !(list_empty(&ep->ring->td_list))) + xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); + return; + } + + for (stream_id = 1; stream_id < ep->stream_info->num_streams; + stream_id++) { + struct xhci_stream_info *stream_info = ep->stream_info; + if (!list_empty(&stream_info->stream_rings[stream_id]->td_list)) + xhci_ring_ep_doorbell(xhci, slot_id, ep_index, + stream_id); + } +} + +void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index) +{ + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); +} + +static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index) +{ + if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) { + xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); + return NULL; + } + if (ep_index >= EP_CTX_PER_DEV) { + xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index); + return NULL; + } + if (!xhci->devs[slot_id]) { + xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id); + return NULL; + } + + return &xhci->devs[slot_id]->eps[ep_index]; +} + +static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep, + unsigned int stream_id) +{ + /* common case, no streams */ + if (!(ep->ep_state & EP_HAS_STREAMS)) + 
return ep->ring; + + if (!ep->stream_info) + return NULL; + + if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) { + xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n", + stream_id, ep->vdev->slot_id, ep->ep_index); + return NULL; + } + + return ep->stream_info->stream_rings[stream_id]; +} + +/* Get the right ring for the given slot_id, ep_index and stream_id. + * If the endpoint supports streams, boundary check the URB's stream ID. + * If the endpoint doesn't support streams, return the singular endpoint ring. + */ +struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id) +{ + struct xhci_virt_ep *ep; + + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return NULL; + + return xhci_virt_ep_to_ring(xhci, ep, stream_id); +} + + +/* + * Get the hw dequeue pointer xHC stopped on, either directly from the + * endpoint context, or if streams are in use from the stream context. + * The returned hw_dequeue contains the lowest four bits with cycle state + * and possbile stream context type. + */ +static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, + unsigned int ep_index, unsigned int stream_id) +{ + struct xhci_ep_ctx *ep_ctx; + struct xhci_stream_ctx *st_ctx; + struct xhci_virt_ep *ep; + + ep = &vdev->eps[ep_index]; + + if (ep->ep_state & EP_HAS_STREAMS) { + st_ctx = &ep->stream_info->stream_ctx_array[stream_id]; + return le64_to_cpu(st_ctx->stream_ring); + } + ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); + return le64_to_cpu(ep_ctx->deq); +} + +static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id, struct xhci_td *td) +{ + struct xhci_virt_device *dev = xhci->devs[slot_id]; + struct xhci_virt_ep *ep = &dev->eps[ep_index]; + struct xhci_ring *ep_ring; + struct xhci_command *cmd; + struct xhci_segment *new_seg; + union xhci_trb *new_deq; + int new_cycle; + dma_addr_t addr; + u64 hw_dequeue; + bool cycle_found = false; + bool td_last_trb_found = false; + u32 trb_sct = 0; + int ret; + + ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, + ep_index, stream_id); + if (!ep_ring) { + xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n", + stream_id); + return -ENODEV; + } + /* + * A cancelled TD can complete with a stall if HW cached the trb. + * In this case driver can't find td, but if the ring is empty we + * can move the dequeue pointer to the current enqueue position. + * We shouldn't hit this anymore as cached cancelled TRBs are given back + * after clearing the cache, but be on the safe side and keep it anyway + */ + if (!td) { + if (list_empty(&ep_ring->td_list)) { + new_seg = ep_ring->enq_seg; + new_deq = ep_ring->enqueue; + new_cycle = ep_ring->cycle_state; + xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue"); + goto deq_found; + } else { + xhci_warn(xhci, "Can't find new dequeue state, missing td\n"); + return -EINVAL; + } + } + + hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); + new_seg = ep_ring->deq_seg; + new_deq = ep_ring->dequeue; + new_cycle = hw_dequeue & 0x1; + + /* + * We want to find the pointer, segment and cycle state of the new trb + * (the one after current TD's last_trb). We know the cycle state at + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are + * found. 
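[Editor's note - illustrative annotation, not part of the patch.] The walk described above starts from the value read back from the endpoint (or stream) context, which packs more than an address: the TRB pointer is 16-byte aligned, so the low nibble carries the cycle state (bit 0) and, for streams, the stream context type. A minimal sketch of splitting that value before comparing it against TRB DMA addresses (names are illustrative, not the driver's):

#include <stdint.h>

/* Illustrative sketch only: separate the 16-byte aligned TRB address from the
 * state bits packed into the low nibble of the hardware dequeue value.
 */
static void split_hw_dequeue(uint64_t hw_dequeue, uint64_t *trb_dma, unsigned int *cycle)
{
	*trb_dma = hw_dequeue & ~0xfULL;		/* TRB the controller stopped on */
	*cycle   = (unsigned int)(hw_dequeue & 0x1);	/* cycle state to resume with */
}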
+ */ + do { + if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + == (dma_addr_t)(hw_dequeue & ~0xf)) { + cycle_found = true; + if (td_last_trb_found) + break; + } + if (new_deq == td->last_trb) + td_last_trb_found = true; + + if (cycle_found && trb_is_link(new_deq) && + link_trb_toggles_cycle(new_deq)) + new_cycle ^= 0x1; + + next_trb(xhci, ep_ring, &new_seg, &new_deq); + + /* Search wrapped around, bail out */ + if (new_deq == ep->ring->dequeue) { + xhci_err(xhci, "Error: Failed finding new dequeue state\n"); + return -EINVAL; + } + + } while (!cycle_found || !td_last_trb_found); + +deq_found: + + /* Don't update the ring cycle state for the producer (us). */ + addr = xhci_trb_virt_to_dma(new_seg, new_deq); + if (addr == 0) { + xhci_warn(xhci, "Can't find dma of new dequeue ptr\n"); + xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq); + return -EINVAL; + } + + if ((ep->ep_state & SET_DEQ_PENDING)) { + xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n", + &addr); + return -EBUSY; + } + + /* This function gets called from contexts where it cannot sleep */ + cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!cmd) { + xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr); + return -ENOMEM; + } + + if (stream_id) + trb_sct = SCT_FOR_TRB(SCT_PRI_TR); + ret = queue_command(xhci, cmd, + lower_32_bits(addr) | trb_sct | new_cycle, + upper_32_bits(addr), + STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) | + EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false); + if (ret < 0) { + xhci_free_command(xhci, cmd); + return ret; + } + ep->queued_deq_seg = new_seg; + ep->queued_deq_ptr = new_deq; + + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle); + + /* Stop the TD queueing code from ringing the doorbell until + * this command completes. The HC won't set the dequeue pointer + * if the ring is running, and ringing the doorbell starts the + * ring running. + */ + ep->ep_state |= SET_DEQ_PENDING; + xhci_ring_cmd_db(xhci); + return 0; +} + +/* flip_cycle means flip the cycle bit of all but the first and last TRB. + * (The last TRB actually points to the ring enqueue pointer, which is not part + * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. 
+ */ +static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, + struct xhci_td *td, bool flip_cycle) +{ + struct xhci_segment *seg = td->start_seg; + union xhci_trb *trb = td->first_trb; + + while (1) { + trb_to_noop(trb, TRB_TR_NOOP); + + /* flip cycle if asked to */ + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); + + if (trb == td->last_trb) + break; + + next_trb(xhci, ep_ring, &seg, &trb); + } +} + +static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep) +{ + ep->ep_state &= ~EP_STOP_CMD_PENDING; + /* Can't del_timer_sync in interrupt */ + del_timer(&ep->stop_cmd_timer); +} + +/* + * Must be called with xhci->lock held in interrupt context, + * releases and re-acquires xhci->lock + */ +static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, + struct xhci_td *cur_td, int status) +{ + struct urb *urb = cur_td->urb; + struct urb_priv *urb_priv = urb->hcpriv; + struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); + + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); + } + } + xhci_urb_free_priv(urb_priv); + usb_hcd_unlink_urb_from_ep(hcd, urb); + trace_xhci_urb_giveback(urb); + usb_hcd_giveback_urb(hcd, urb, status); +} + +static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, + struct xhci_ring *ring, struct xhci_td *td) +{ + struct device *dev = xhci_to_hcd(xhci)->self.controller; + struct xhci_segment *seg = td->bounce_seg; + struct urb *urb = td->urb; + size_t len; + + if (!ring || !seg || !urb) + return; + + if (usb_urb_dir_out(urb)) { + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_TO_DEVICE); + return; + } + + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ + if (urb->num_sgs) { + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, + seg->bounce_len, seg->bounce_offs); + if (len != seg->bounce_len) + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); + } else { + memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, + seg->bounce_len); + } + seg->bounce_len = 0; + seg->bounce_offs = 0; +} + +static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_ring *ep_ring, int status) +{ + struct urb *urb = NULL; + + /* Clean up the endpoint's TD list */ + urb = td->urb; + + /* if a bounce buffer was used to align this td then unmap it */ + xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); + + /* Do one last check of the actual transfer length. + * If the host controller said we transferred more data than the buffer + * length, urb->actual_length will be a very big number (since it's + * unsigned). Play it safe and say we didn't transfer anything. 
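[Editor's note - illustrative annotation, not part of the patch.] The guard motivated above exists because actual_length is computed elsewhere as an unsigned "requested - remaining"; if a confused controller reports more remaining bytes than were requested, the subtraction wraps around to a huge value. A small standalone demonstration of the wrap and the clamp (the numbers are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t requested = 512, remaining = 600;	/* bogus: remaining > requested */
	uint32_t actual = requested - remaining;	/* wraps to 4294967208 */

	if (actual > requested)
		actual = 0;				/* play it safe, report nothing */
	printf("actual_length = %u\n", actual);
	return 0;
}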
+ */ + if (urb->actual_length > urb->transfer_buffer_length) { + xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", + urb->transfer_buffer_length, urb->actual_length); + urb->actual_length = 0; + status = 0; + } + /* TD might be removed from td_list if we are giving back a cancelled URB */ + if (!list_empty(&td->td_list)) + list_del_init(&td->td_list); + /* Giving back a cancelled URB, or if a slated TD completed anyway */ + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + + inc_td_cnt(urb); + /* Giveback the urb when all the tds are completed */ + if (last_td_in_urb(td)) { + if ((urb->actual_length != urb->transfer_buffer_length && + (urb->transfer_flags & URB_SHORT_NOT_OK)) || + (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) + xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", + urb, urb->actual_length, + urb->transfer_buffer_length, status); + + /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + status = 0; + xhci_giveback_urb_in_irq(xhci, td, status); + } + + return 0; +} + + +/* Complete the cancelled URBs we unlinked from td_list. */ +static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) +{ + struct xhci_ring *ring; + struct xhci_td *td, *tmp_td; + + list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, + cancelled_td_list) { + + ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); + + if (td->cancel_status == TD_CLEARED) + xhci_td_cleanup(ep->xhci, td, ring, td->status); + + if (ep->xhci->xhc_state & XHCI_STATE_DYING) + return; + } +} + +static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, enum xhci_ep_reset_type reset_type) +{ + struct xhci_command *command; + int ret = 0; + + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!command) { + ret = -ENOMEM; + goto done; + } + + ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); +done: + if (ret) + xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n", + slot_id, ep_index, ret); + return ret; +} + +static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep, unsigned int stream_id, + struct xhci_td *td, + enum xhci_ep_reset_type reset_type) +{ + unsigned int slot_id = ep->vdev->slot_id; + int err; + + /* + * Avoid resetting endpoint if link is inactive. Can cause host hang. + * Device will be reset soon to recover the link so don't do anything + */ + if (ep->vdev->flags & VDEV_PORT_ERROR) + return -ENODEV; + + /* add td to cancelled list and let reset ep handler take care of it */ + if (reset_type == EP_HARD_RESET) { + ep->ep_state |= EP_HARD_CLEAR_TOGGLE; + if (td && list_empty(&td->cancelled_td_list)) { + list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); + td->cancel_status = TD_HALTED; + } + } + + if (ep->ep_state & EP_HALTED) { + xhci_dbg(xhci, "Reset ep command already pending\n"); + return 0; + } + + err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); + if (err) + return err; + + ep->ep_state |= EP_HALTED; + + xhci_ring_cmd_db(xhci); + + return 0; +} + +/* + * Fix up the ep ring first, so HW stops executing cancelled TDs. + * We have the xHCI lock, so nothing can modify this list until we drop it. + * We're also in the event handler, so we can't get re-interrupted if another + * Stop Endpoint command completes. 
+ * + * only call this when ring is not in a running state + */ + +static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) +{ + struct xhci_hcd *xhci; + struct xhci_td *td = NULL; + struct xhci_td *tmp_td = NULL; + struct xhci_td *cached_td = NULL; + struct xhci_ring *ring; + u64 hw_deq; + unsigned int slot_id = ep->vdev->slot_id; + int err; + + xhci = ep->xhci; + + list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Removing canceled TD starting at 0x%llx (dma).", + (unsigned long long)xhci_trb_virt_to_dma( + td->start_seg, td->first_trb)); + list_del_init(&td->td_list); + ring = xhci_urb_to_transfer_ring(xhci, td->urb); + if (!ring) { + xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n", + td->urb, td->urb->stream_id); + continue; + } + /* + * If a ring stopped on the TD we need to cancel then we have to + * move the xHC endpoint ring dequeue pointer past this TD. + * Rings halted due to STALL may show hw_deq is past the stalled + * TD, but still require a set TR Deq command to flush xHC cache. + */ + hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index, + td->urb->stream_id); + hw_deq &= ~0xf; + + if (td->cancel_status == TD_HALTED) { + cached_td = td; + } else if (trb_in_td(xhci, td->start_seg, td->first_trb, + td->last_trb, hw_deq, false)) { + switch (td->cancel_status) { + case TD_CLEARED: /* TD is already no-op */ + case TD_CLEARING_CACHE: /* set TR deq command already queued */ + break; + case TD_DIRTY: /* TD is cached, clear it */ + case TD_HALTED: + /* FIXME stream case, several stopped rings */ + cached_td = td; + break; + } + } else { + td_to_noop(xhci, ring, td, false); + td->cancel_status = TD_CLEARED; + } + } + if (cached_td) { + cached_td->cancel_status = TD_CLEARING_CACHE; + + err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index, + cached_td->urb->stream_id, + cached_td); + /* Failed to move past cached td, try just setting it noop */ + if (err) { + td_to_noop(xhci, ring, cached_td, false); + cached_td->cancel_status = TD_CLEARED; + } + cached_td = NULL; + } + return 0; +} + +/* + * Returns the TD the endpoint ring halted on. + * Only call for non-running rings without streams. + */ +static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep) +{ + struct xhci_td *td; + u64 hw_deq; + + if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */ + hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0); + hw_deq &= ~0xf; + td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list); + if (trb_in_td(ep->xhci, td->start_seg, td->first_trb, + td->last_trb, hw_deq, false)) + return td; + } + return NULL; +} + +/* + * When we get a command completion for a Stop Endpoint Command, we need to + * unlink any cancelled TDs from the ring. There are two ways to do that: + * + * 1. If the HW was in the middle of processing the TD that needs to be + * cancelled, then we must move the ring's dequeue pointer past the last TRB + * in the TD with a Set Dequeue Pointer Command. + * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain + * bit cleared) so that the HW will skip over them. 
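[Editor's note - illustrative annotation, not part of the patch.] Strategy 2 above keeps the TRBs in place but makes the controller skip them. A minimal host-endian sketch of that rewrite on a TRB control word, assuming the standard xHCI layout (cycle bit 0 left untouched, chain in bit 4, TRB type in bits 15:10, No Op transfer TRB type 8); the real driver operates on __le32 fields, so this is only an illustration:

#include <stdint.h>

#define TRB_CHAIN	(1u << 4)
#define TRB_TYPE_MASK	(0x3fu << 10)
#define TRB_TYPE_NOOP	(8u << 10)	/* No Op transfer TRB */

/* Illustrative sketch only: keep the producer cycle bit (bit 0), clear the
 * chain bit, and retype the TRB so the controller silently steps over it.
 */
static uint32_t trb_control_to_noop(uint32_t control)
{
	control &= ~(TRB_TYPE_MASK | TRB_CHAIN);
	return control | TRB_TYPE_NOOP;
}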
+ */ +static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, + union xhci_trb *trb, u32 comp_code) +{ + unsigned int ep_index; + struct xhci_virt_ep *ep; + struct xhci_ep_ctx *ep_ctx; + struct xhci_td *td = NULL; + enum xhci_ep_reset_type reset_type; + struct xhci_command *command; + int err; + + if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { + if (!xhci->devs[slot_id]) + xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", + slot_id); + return; + } + + ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; + + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); + + trace_xhci_handle_cmd_stop_ep(ep_ctx); + + if (comp_code == COMP_CONTEXT_STATE_ERROR) { + /* + * If stop endpoint command raced with a halting endpoint we need to + * reset the host side endpoint first. + * If the TD we halted on isn't cancelled the TD should be given back + * with a proper error code, and the ring dequeue moved past the TD. + * If streams case we can't find hw_deq, or the TD we halted on so do a + * soft reset. + * + * Proper error code is unknown here, it would be -EPIPE if device side + * of enadpoit halted (aka STALL), and -EPROTO if not (transaction error) + * We use -EPROTO, if device is stalled it should return a stall error on + * next transfer, which then will return -EPIPE, and device side stall is + * noted and cleared by class driver. + */ + switch (GET_EP_CTX_STATE(ep_ctx)) { + case EP_STATE_HALTED: + xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); + if (ep->ep_state & EP_HAS_STREAMS) { + reset_type = EP_SOFT_RESET; + } else { + reset_type = EP_HARD_RESET; + td = find_halted_td(ep); + if (td) + td->status = -EPROTO; + } + /* reset ep, reset handler cleans up cancelled tds */ + err = xhci_handle_halted_endpoint(xhci, ep, 0, td, + reset_type); + if (err) + break; + xhci_stop_watchdog_timer_in_irq(xhci, ep); + return; + case EP_STATE_RUNNING: + /* Race, HW handled stop ep cmd before ep was running */ + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!command) + xhci_stop_watchdog_timer_in_irq(xhci, ep); + + mod_timer(&ep->stop_cmd_timer, + jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ); + xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0); + xhci_ring_cmd_db(xhci); + + return; + default: + break; + } + } + /* will queue a set TR deq if stopped on a cancelled, uncleared TD */ + xhci_invalidate_cancelled_tds(ep); + xhci_stop_watchdog_timer_in_irq(xhci, ep); + + /* Otherwise ring the doorbell(s) to restart queued transfers */ + xhci_giveback_invalidated_tds(ep); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); +} + +static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) +{ + struct xhci_td *cur_td; + struct xhci_td *tmp; + + list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) { + list_del_init(&cur_td->td_list); + + if (!list_empty(&cur_td->cancelled_td_list)) + list_del_init(&cur_td->cancelled_td_list); + + xhci_unmap_td_bounce_buffer(xhci, ring, cur_td); + + inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + } +} + +static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, + int slot_id, int ep_index) +{ + struct xhci_td *cur_td; + struct xhci_td *tmp; + struct xhci_virt_ep *ep; + struct xhci_ring *ring; + + ep = &xhci->devs[slot_id]->eps[ep_index]; + if ((ep->ep_state & EP_HAS_STREAMS) || + (ep->ep_state & 
EP_GETTING_NO_STREAMS)) { + int stream_id; + + for (stream_id = 1; stream_id < ep->stream_info->num_streams; + stream_id++) { + ring = ep->stream_info->stream_rings[stream_id]; + if (!ring) + continue; + + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Killing URBs for slot ID %u, ep index %u, stream %u", + slot_id, ep_index, stream_id); + xhci_kill_ring_urbs(xhci, ring); + } + } else { + ring = ep->ring; + if (!ring) + return; + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Killing URBs for slot ID %u, ep index %u", + slot_id, ep_index); + xhci_kill_ring_urbs(xhci, ring); + } + + list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list, + cancelled_td_list) { + list_del_init(&cur_td->cancelled_td_list); + inc_td_cnt(cur_td->urb); + + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + } +} + +/* + * host controller died, register read returns 0xffffffff + * Complete pending commands, mark them ABORTED. + * URBs need to be given back as usb core might be waiting with device locks + * held for the URBs to finish during device disconnect, blocking host remove. + * + * Call with xhci->lock held. + * lock is relased and re-acquired while giving back urb. + */ +void xhci_hc_died(struct xhci_hcd *xhci) +{ + int i, j; + + if (xhci->xhc_state & XHCI_STATE_DYING) + return; + + xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); + xhci->xhc_state |= XHCI_STATE_DYING; + + xhci_cleanup_command_queue(xhci); + + /* return any pending urbs, remove may be waiting for them */ + for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { + if (!xhci->devs[i]) + continue; + for (j = 0; j < 31; j++) + xhci_kill_endpoint_urbs(xhci, i, j); + } + + /* inform usb core hc died if PCI remove isn't already handling it */ + if (!(xhci->xhc_state & XHCI_STATE_REMOVING)) + usb_hc_died(xhci_to_hcd(xhci)); +} + +/* Watchdog timer function for when a stop endpoint command fails to complete. + * In this case, we assume the host controller is broken or dying or dead. The + * host may still be completing some other events, so we have to be careful to + * let the event ring handler and the URB dequeueing/enqueueing functions know + * through xhci->state. + * + * The timer may also fire if the host takes a very long time to respond to the + * command, and the stop endpoint command completion handler cannot delete the + * timer before the timer function is called. Another endpoint cancellation may + * sneak in before the timer function can grab the lock, and that may queue + * another stop endpoint command and add the timer back. So we cannot use a + * simple flag to say whether there is a pending stop endpoint command for a + * particular endpoint. + * + * Instead we use a combination of that flag and checking if a new timer is + * pending. 
+ */ +void xhci_stop_endpoint_command_watchdog(struct timer_list *t) +{ + struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer); + struct xhci_hcd *xhci = ep->xhci; + unsigned long flags; + u32 usbsts; + char str[XHCI_MSG_MAX]; + + spin_lock_irqsave(&xhci->lock, flags); + + /* bail out if cmd completed but raced with stop ep watchdog timer.*/ + if (!(ep->ep_state & EP_STOP_CMD_PENDING) || + timer_pending(&ep->stop_cmd_timer)) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit"); + return; + } + usbsts = readl(&xhci->op_regs->status); + + xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); + xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(str, usbsts)); + + ep->ep_state &= ~EP_STOP_CMD_PENDING; + + xhci_halt(xhci); + + /* + * handle a stop endpoint cmd timeout as if host died (-ENODEV). + * In the future we could distinguish between -ENODEV and -ETIMEDOUT + * and try to recover a -ETIMEDOUT with a host controller reset + */ + xhci_hc_died(xhci); + + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "xHCI host controller is dead."); +} + +static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, + struct xhci_virt_device *dev, + struct xhci_ring *ep_ring, + unsigned int ep_index) +{ + union xhci_trb *dequeue_temp; + int num_trbs_free_temp; + bool revert = false; + + num_trbs_free_temp = ep_ring->num_trbs_free; + dequeue_temp = ep_ring->dequeue; + + /* If we get two back-to-back stalls, and the first stalled transfer + * ends just before a link TRB, the dequeue pointer will be left on + * the link TRB by the code in the while loop. So we have to update + * the dequeue pointer one segment further, or we'll jump off + * the segment into la-la-land. + */ + if (trb_is_link(ep_ring->dequeue)) { + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + + while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) { + /* We have more usable TRBs */ + ep_ring->num_trbs_free++; + ep_ring->dequeue++; + if (trb_is_link(ep_ring->dequeue)) { + if (ep_ring->dequeue == + dev->eps[ep_index].queued_deq_ptr) + break; + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + if (ep_ring->dequeue == dequeue_temp) { + revert = true; + break; + } + } + + if (revert) { + xhci_dbg(xhci, "Unable to find new dequeue pointer\n"); + ep_ring->num_trbs_free = num_trbs_free_temp; + } +} + +/* + * When we get a completion for a Set Transfer Ring Dequeue Pointer command, + * we need to clear the set deq pending flag in the endpoint ring state, so that + * the TD queueing code can ring the doorbell again. We also need to ring the + * endpoint doorbell to restart the ring, but only if there aren't more + * cancellations pending. 
+ */ +static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, + union xhci_trb *trb, u32 cmd_comp_code) +{ + unsigned int ep_index; + unsigned int stream_id; + struct xhci_ring *ep_ring; + struct xhci_virt_ep *ep; + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + struct xhci_td *td, *tmp_td; + + ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); + stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; + + ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id); + if (!ep_ring) { + xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", + stream_id); + /* XXX: Harmless??? */ + goto cleanup; + } + + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); + slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); + trace_xhci_handle_cmd_set_deq(slot_ctx); + trace_xhci_handle_cmd_set_deq_ep(ep_ctx); + + if (cmd_comp_code != COMP_SUCCESS) { + unsigned int ep_state; + unsigned int slot_state; + + switch (cmd_comp_code) { + case COMP_TRB_ERROR: + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n"); + break; + case COMP_CONTEXT_STATE_ERROR: + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); + ep_state = GET_EP_CTX_STATE(ep_ctx); + slot_state = le32_to_cpu(slot_ctx->dev_state); + slot_state = GET_SLOT_STATE(slot_state); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Slot state = %u, EP state = %u", + slot_state, ep_state); + break; + case COMP_SLOT_NOT_ENABLED_ERROR: + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n", + slot_id); + break; + default: + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n", + cmd_comp_code); + break; + } + /* OK what do we do now? The endpoint state is hosed, and we + * should never get to this point if the synchronization between + * queueing, and endpoint state are correct. This might happen + * if the device gets disconnected after we've finished + * cancelling URBs, which might not be an error... + */ + } else { + u64 deq; + /* 4.6.10 deq ptr is written to the stream ctx for streams */ + if (ep->ep_state & EP_HAS_STREAMS) { + struct xhci_stream_ctx *ctx = + &ep->stream_info->stream_ctx_array[stream_id]; + deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; + } else { + deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; + } + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq); + if (xhci_trb_virt_to_dma(ep->queued_deq_seg, + ep->queued_deq_ptr) == deq) { + /* Update the ring's dequeue segment and dequeue pointer + * to reflect the new position. 
+ */ + update_ring_for_set_deq_completion(xhci, ep->vdev, + ep_ring, ep_index); + } else { + xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); + xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", + ep->queued_deq_seg, ep->queued_deq_ptr); + } + } + /* HW cached TDs cleared from cache, give them back */ + list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, + cancelled_td_list) { + ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); + if (td->cancel_status == TD_CLEARING_CACHE) { + td->cancel_status = TD_CLEARED; + xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); + } + } +cleanup: + ep->ep_state &= ~SET_DEQ_PENDING; + ep->queued_deq_seg = NULL; + ep->queued_deq_ptr = NULL; + /* Restart any rings with pending URBs */ + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); +} + +static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, + union xhci_trb *trb, u32 cmd_comp_code) +{ + struct xhci_virt_ep *ep; + struct xhci_ep_ctx *ep_ctx; + unsigned int ep_index; + + ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; + + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); + trace_xhci_handle_cmd_reset_ep(ep_ctx); + + /* This command will only fail if the endpoint wasn't halted, + * but we don't care. + */ + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Ignoring reset ep completion code of %u", cmd_comp_code); + + /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */ + xhci_invalidate_cancelled_tds(ep); + + if (xhci->quirks & XHCI_RESET_EP_QUIRK) + xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw"); + /* Clear our internal halted state */ + ep->ep_state &= ~EP_HALTED; + + xhci_giveback_invalidated_tds(ep); + + /* if this was a soft reset, then restart */ + if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); +} + +static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, + struct xhci_command *command, u32 cmd_comp_code) +{ + if (cmd_comp_code == COMP_SUCCESS) + command->slot_id = slot_id; + else + command->slot_id = 0; +} + +static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) +{ + struct xhci_virt_device *virt_dev; + struct xhci_slot_ctx *slot_ctx; + + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) + return; + + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + trace_xhci_handle_cmd_disable_slot(slot_ctx); + + if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) + /* Delete default control endpoint resources */ + xhci_free_device_endpoint_resources(xhci, virt_dev, true); + xhci_free_virt_device(xhci, slot_id); +} + +static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, + u32 cmd_comp_code) +{ + struct xhci_virt_device *virt_dev; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_ep_ctx *ep_ctx; + unsigned int ep_index; + unsigned int ep_state; + u32 add_flags, drop_flags; + + /* + * Configure endpoint commands can come from the USB core + * configuration or alt setting changes, or because the HW + * needed an extra configure endpoint command after a reset + * endpoint command or streams were being configured. + * If the command was for a halted endpoint, the xHCI driver + * is not waiting on the configure endpoint command. 
+ */ + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) + return; + ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "Could not get input context, bad type.\n"); + return; + } + + add_flags = le32_to_cpu(ctrl_ctx->add_flags); + drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); + /* Input ctx add_flags are the endpoint index plus one */ + ep_index = xhci_last_valid_endpoint(add_flags) - 1; + + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index); + trace_xhci_handle_cmd_config_ep(ep_ctx); + + /* A usb_set_interface() call directly after clearing a halted + * condition may race on this quirky hardware. Not worth + * worrying about, since this is prototype hardware. Not sure + * if this will work for streams, but streams support was + * untested on this prototype. + */ + if (xhci->quirks & XHCI_RESET_EP_QUIRK && + ep_index != (unsigned int) -1 && + add_flags - SLOT_FLAG == drop_flags) { + ep_state = virt_dev->eps[ep_index].ep_state; + if (!(ep_state & EP_HALTED)) + return; + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Completed config ep cmd - " + "last ep index = %d, state = %d", + ep_index, ep_state); + /* Clear internal halted state and restart ring(s) */ + virt_dev->eps[ep_index].ep_state &= ~EP_HALTED; + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + return; + } + return; +} + +static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) +{ + struct xhci_virt_device *vdev; + struct xhci_slot_ctx *slot_ctx; + + vdev = xhci->devs[slot_id]; + if (!vdev) + return; + slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); + trace_xhci_handle_cmd_addr_dev(slot_ctx); +} + +static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) +{ + struct xhci_virt_device *vdev; + struct xhci_slot_ctx *slot_ctx; + + vdev = xhci->devs[slot_id]; + if (!vdev) { + xhci_warn(xhci, "Reset device command completion for disabled slot %u\n", + slot_id); + return; + } + slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); + trace_xhci_handle_cmd_reset_dev(slot_ctx); + + xhci_dbg(xhci, "Completed reset device command.\n"); +} + +static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, + struct xhci_event_cmd *event) +{ + if (!(xhci->quirks & XHCI_NEC_HOST)) { + xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n"); + return; + } + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "NEC firmware version %2x.%02x", + NEC_FW_MAJOR(le32_to_cpu(event->status)), + NEC_FW_MINOR(le32_to_cpu(event->status))); +} + +static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) +{ + list_del(&cmd->cmd_list); + + if (cmd->completion) { + cmd->status = status; + complete(cmd->completion); + } else { + kfree(cmd); + } +} + +void xhci_cleanup_command_queue(struct xhci_hcd *xhci) +{ + struct xhci_command *cur_cmd, *tmp_cmd; + xhci->current_cmd = NULL; + list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) + xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED); +} + +void xhci_handle_command_timeout(struct work_struct *work) +{ + struct xhci_hcd *xhci; + unsigned long flags; + u64 hw_ring_state; + + xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer); + + spin_lock_irqsave(&xhci->lock, flags); + + /* + * If timeout work is pending, or current_cmd is NULL, it means we + * raced with command completion. Command is handled so just return. 
+ */ + if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) { + spin_unlock_irqrestore(&xhci->lock, flags); + return; + } + /* mark this command to be cancelled */ + xhci->current_cmd->status = COMP_COMMAND_ABORTED; + + /* Make sure command ring is running before aborting it */ + hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + if (hw_ring_state == ~(u64)0) { + xhci_hc_died(xhci); + goto time_out_completed; + } + + if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && + (hw_ring_state & CMD_RING_RUNNING)) { + /* Prevent new doorbell, and start command abort */ + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; + xhci_dbg(xhci, "Command timeout\n"); + xhci_abort_cmd_ring(xhci, flags); + goto time_out_completed; + } + + /* host removed. Bail out */ + if (xhci->xhc_state & XHCI_STATE_REMOVING) { + xhci_dbg(xhci, "host removed, ring start fail?\n"); + xhci_cleanup_command_queue(xhci); + + goto time_out_completed; + } + + /* command timeout on stopped ring, ring can't be aborted */ + xhci_dbg(xhci, "Command timeout on stopped ring\n"); + xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); + +time_out_completed: + spin_unlock_irqrestore(&xhci->lock, flags); + return; +} + +static void handle_cmd_completion(struct xhci_hcd *xhci, + struct xhci_event_cmd *event) +{ + unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); + u64 cmd_dma; + dma_addr_t cmd_dequeue_dma; + u32 cmd_comp_code; + union xhci_trb *cmd_trb; + struct xhci_command *cmd; + u32 cmd_type; + + if (slot_id >= MAX_HC_SLOTS) { + xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); + return; + } + + cmd_dma = le64_to_cpu(event->cmd_trb); + cmd_trb = xhci->cmd_ring->dequeue; + + trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); + + cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, + cmd_trb); + /* + * Check whether the completion event is for our internal kept + * command. + */ + if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) { + xhci_warn(xhci, + "ERROR mismatched command completion event\n"); + return; + } + + cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list); + + cancel_delayed_work(&xhci->cmd_timer); + + cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); + + /* If CMD ring stopped we own the trbs between enqueue and dequeue */ + if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { + complete_all(&xhci->cmd_ring_stop_completion); + return; + } + + if (cmd->command_trb != xhci->cmd_ring->dequeue) { + xhci_err(xhci, + "Command completion event does not match command\n"); + return; + } + + /* + * Host aborted the command ring, check if the current command was + * supposed to be aborted, otherwise continue normally. + * The command ring is stopped now, but the xHC will issue a Command + * Ring Stopped event which will cause us to restart it. 
+ */ + if (cmd_comp_code == COMP_COMMAND_ABORTED) { + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + if (cmd->status == COMP_COMMAND_ABORTED) { + if (xhci->current_cmd == cmd) + xhci->current_cmd = NULL; + goto event_handled; + } + } + + cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); + switch (cmd_type) { + case TRB_ENABLE_SLOT: + xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); + break; + case TRB_DISABLE_SLOT: + xhci_handle_cmd_disable_slot(xhci, slot_id); + break; + case TRB_CONFIG_EP: + if (!cmd->completion) + xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code); + break; + case TRB_EVAL_CONTEXT: + break; + case TRB_ADDR_DEV: + xhci_handle_cmd_addr_dev(xhci, slot_id); + break; + case TRB_STOP_RING: + WARN_ON(slot_id != TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3]))); + if (!cmd->completion) + xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, + cmd_comp_code); + break; + case TRB_SET_DEQ: + WARN_ON(slot_id != TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3]))); + xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); + break; + case TRB_CMD_NOOP: + /* Is this an aborted command turned to NO-OP? */ + if (cmd->status == COMP_COMMAND_RING_STOPPED) + cmd_comp_code = COMP_COMMAND_RING_STOPPED; + break; + case TRB_RESET_EP: + WARN_ON(slot_id != TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3]))); + xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); + break; + case TRB_RESET_DEV: + /* SLOT_ID field in reset device cmd completion event TRB is 0. + * Use the SLOT_ID from the command TRB instead (xhci 4.6.11) + */ + slot_id = TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3])); + xhci_handle_cmd_reset_dev(xhci, slot_id); + break; + case TRB_NEC_GET_FW: + xhci_handle_cmd_nec_get_fw(xhci, event); + break; + default: + /* Skip over unknown commands on the event ring */ + xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); + break; + } + + /* restart timer if this wasn't the last command */ + if (!list_is_singular(&xhci->cmd_list)) { + xhci->current_cmd = list_first_entry(&cmd->cmd_list, + struct xhci_command, cmd_list); + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); + } else if (xhci->current_cmd == cmd) { + xhci->current_cmd = NULL; + } + +event_handled: + xhci_complete_del_and_free_cmd(cmd, cmd_comp_code); + + inc_deq(xhci, xhci->cmd_ring); +} + +static void handle_vendor_event(struct xhci_hcd *xhci, + union xhci_trb *event, u32 trb_type) +{ + xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); + if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) + handle_cmd_completion(xhci, &event->event_cmd); +} + +static void handle_device_notification(struct xhci_hcd *xhci, + union xhci_trb *event) +{ + u32 slot_id; + struct usb_device *udev; + + slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); + if (!xhci->devs[slot_id]) { + xhci_warn(xhci, "Device Notification event for " + "unused slot %u\n", slot_id); + return; + } + + xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n", + slot_id); + udev = xhci->devs[slot_id]->udev; + if (udev && udev->parent) + usb_wakeup_notification(udev->parent, udev->portnum); +} + +/* + * Quirk hanlder for errata seen on Cavium ThunderX2 processor XHCI + * Controller. + * As per ThunderX2errata-129 USB 2 device may come up as USB 1 + * If a connection to a USB 1 device is followed by another connection + * to a USB 2 device. 
+ * + * Reset the PHY after the USB device is disconnected if device speed + * is less than HCD_USB3. + * Retry the reset sequence max of 4 times checking the PLL lock status. + * + */ +static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) +{ + struct usb_hcd *hcd = xhci_to_hcd(xhci); + u32 pll_lock_check; + u32 retry_count = 4; + + do { + /* Assert PHY reset */ + writel(0x6F, hcd->regs + 0x1048); + udelay(10); + /* De-assert the PHY reset */ + writel(0x7F, hcd->regs + 0x1048); + udelay(200); + pll_lock_check = readl(hcd->regs + 0x1070); + } while (!(pll_lock_check & 0x1) && --retry_count); +} + +static void handle_port_status(struct xhci_hcd *xhci, + union xhci_trb *event) +{ + struct usb_hcd *hcd; + u32 port_id; + u32 portsc, cmd_reg; + int max_ports; + int slot_id; + unsigned int hcd_portnum; + struct xhci_bus_state *bus_state; + bool bogus_port_status = false; + struct xhci_port *port; + + /* Port status change events always have a successful completion code */ + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + xhci_warn(xhci, + "WARN: xHC returned failed port status event\n"); + + port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); + max_ports = HCS_MAX_PORTS(xhci->hcs_params1); + + if ((port_id <= 0) || (port_id > max_ports)) { + xhci_warn(xhci, "Port change event with invalid port ID %d\n", + port_id); + inc_deq(xhci, xhci->event_ring); + return; + } + + port = &xhci->hw_ports[port_id - 1]; + if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) { + xhci_warn(xhci, "Port change event, no port for port ID %u\n", + port_id); + bogus_port_status = true; + goto cleanup; + } + + /* We might get interrupts after shared_hcd is removed */ + if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) { + xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n"); + bogus_port_status = true; + goto cleanup; + } + + hcd = port->rhub->hcd; + bus_state = &port->rhub->bus_state; + hcd_portnum = port->hcd_portnum; + portsc = readl(port->addr); + + xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n", + hcd->self.busnum, hcd_portnum + 1, port_id, portsc); + + trace_xhci_handle_port_status(hcd_portnum, portsc); + + if (hcd->state == HC_STATE_SUSPENDED) { + xhci_dbg(xhci, "resume root hub\n"); + usb_hcd_resume_root_hub(hcd); + } + + if (hcd->speed >= HCD_USB3 && + (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) { + slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); + if (slot_id && xhci->devs[slot_id]) + xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; + } + + if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { + xhci_dbg(xhci, "port resume event for port %d\n", port_id); + + cmd_reg = readl(&xhci->op_regs->command); + if (!(cmd_reg & CMD_RUN)) { + xhci_warn(xhci, "xHC is not running.\n"); + goto cleanup; + } + + if (DEV_SUPERSPEED_ANY(portsc)) { + xhci_dbg(xhci, "remote wake SS port %d\n", port_id); + /* Set a flag to say the port signaled remote wakeup, + * so we can tell the difference between the end of + * device and host initiated resume. + */ + bus_state->port_remote_wakeup |= 1 << hcd_portnum; + xhci_test_and_clear_bit(xhci, port, PORT_PLC); + usb_hcd_start_port_resume(&hcd->self, hcd_portnum); + xhci_set_link_state(xhci, port, XDEV_U0); + /* Need to wait until the next link state change + * indicates the device is actually in U0. 
+ */ + bogus_port_status = true; + goto cleanup; + } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) { + xhci_dbg(xhci, "resume HS port %d\n", port_id); + bus_state->resume_done[hcd_portnum] = jiffies + + msecs_to_jiffies(USB_RESUME_TIMEOUT); + set_bit(hcd_portnum, &bus_state->resuming_ports); + /* Do the rest in GetPortStatus after resume time delay. + * Avoid polling roothub status before that so that a + * usb device auto-resume latency around ~40ms. + */ + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); + mod_timer(&hcd->rh_timer, + bus_state->resume_done[hcd_portnum]); + usb_hcd_start_port_resume(&hcd->self, hcd_portnum); + bogus_port_status = true; + } + } + + if ((portsc & PORT_PLC) && + DEV_SUPERSPEED_ANY(portsc) && + ((portsc & PORT_PLS_MASK) == XDEV_U0 || + (portsc & PORT_PLS_MASK) == XDEV_U1 || + (portsc & PORT_PLS_MASK) == XDEV_U2)) { + xhci_dbg(xhci, "resume SS port %d finished\n", port_id); + complete(&bus_state->u3exit_done[hcd_portnum]); + /* We've just brought the device into U0/1/2 through either the + * Resume state after a device remote wakeup, or through the + * U3Exit state after a host-initiated resume. If it's a device + * initiated remote wake, don't pass up the link state change, + * so the roothub behavior is consistent with external + * USB 3.0 hub behavior. + */ + slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); + if (slot_id && xhci->devs[slot_id]) + xhci_ring_device(xhci, slot_id); + if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) { + xhci_test_and_clear_bit(xhci, port, PORT_PLC); + usb_wakeup_notification(hcd->self.root_hub, + hcd_portnum + 1); + bogus_port_status = true; + goto cleanup; + } + } + + /* + * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or + * RExit to a disconnect state). If so, let the the driver know it's + * out of the RExit state. + */ + if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 && + test_and_clear_bit(hcd_portnum, + &bus_state->rexit_ports)) { + complete(&bus_state->rexit_done[hcd_portnum]); + bogus_port_status = true; + goto cleanup; + } + + if (hcd->speed < HCD_USB3) { + xhci_test_and_clear_bit(xhci, port, PORT_PLC); + if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) && + (portsc & PORT_CSC) && !(portsc & PORT_CONNECT)) + xhci_cavium_reset_phy_quirk(xhci); + } + +cleanup: + /* Update event ring dequeue pointer before dropping the lock */ + inc_deq(xhci, xhci->event_ring); + + /* Don't make the USB core poll the roothub if we got a bad port status + * change event. Besides, at that point we can't tell which roothub + * (USB 2.0 or USB 3.0) to kick. + */ + if (bogus_port_status) + return; + + /* + * xHCI port-status-change events occur when the "or" of all the + * status-change bits in the portsc register changes from 0 to 1. + * New status changes won't cause an event if any other change + * bits are still set. When an event occurs, switch over to + * polling to avoid losing status changes. + */ + xhci_dbg(xhci, "%s: starting port polling.\n", __func__); + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); + spin_unlock(&xhci->lock); + /* Pass this up to the core */ + usb_hcd_poll_rh_status(hcd); + spin_lock(&xhci->lock); +} + +/* + * This TD is defined by the TRBs starting at start_trb in start_seg and ending + * at end_trb, which may be in another segment. If the suspect DMA address is a + * TRB in this TD, this function returns that TRB's segment. Otherwise it + * returns 0. 
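[Editor's note - illustrative annotation, not part of the patch.] The per-segment test performed by trb_in_td() below reduces to a range check that must tolerate a TD wrapping past the end of the segment back to its first TRBs. A minimal sketch of just that check, with plain integers standing in for dma_addr_t:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch only: is 'suspect' inside the TD's TRB range within one
 * segment? If the TD wrapped around the segment end, the valid range splits
 * into [td_start, seg_end] and [seg_start, td_end].
 */
static bool dma_in_td_segment(uint64_t suspect, uint64_t td_start, uint64_t td_end,
			      uint64_t seg_start, uint64_t seg_end)
{
	if (td_start <= td_end)
		return suspect >= td_start && suspect <= td_end;

	return (suspect >= td_start && suspect <= seg_end) ||
	       (suspect >= seg_start && suspect <= td_end);
}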
+ */ +struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, + struct xhci_segment *start_seg, + union xhci_trb *start_trb, + union xhci_trb *end_trb, + dma_addr_t suspect_dma, + bool debug) +{ + dma_addr_t start_dma; + dma_addr_t end_seg_dma; + dma_addr_t end_trb_dma; + struct xhci_segment *cur_seg; + + start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); + cur_seg = start_seg; + + do { + if (start_dma == 0) + return NULL; + /* We may get an event for a Link TRB in the middle of a TD */ + end_seg_dma = xhci_trb_virt_to_dma(cur_seg, + &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); + /* If the end TRB isn't in this segment, this is set to 0 */ + end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); + + if (debug) + xhci_warn(xhci, + "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", + (unsigned long long)suspect_dma, + (unsigned long long)start_dma, + (unsigned long long)end_trb_dma, + (unsigned long long)cur_seg->dma, + (unsigned long long)end_seg_dma); + + if (end_trb_dma > 0) { + /* The end TRB is in this segment, so suspect should be here */ + if (start_dma <= end_trb_dma) { + if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) + return cur_seg; + } else { + /* Case for one segment with + * a TD wrapped around to the top + */ + if ((suspect_dma >= start_dma && + suspect_dma <= end_seg_dma) || + (suspect_dma >= cur_seg->dma && + suspect_dma <= end_trb_dma)) + return cur_seg; + } + return NULL; + } else { + /* Might still be somewhere in this segment */ + if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) + return cur_seg; + } + cur_seg = cur_seg->next; + start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); + } while (cur_seg != start_seg); + + return NULL; +} + +static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_virt_ep *ep) +{ + /* + * As part of low/full-speed endpoint-halt processing + * we must clear the TT buffer (USB 2.0 specification 11.17.5). + */ + if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) && + (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) && + !(ep->ep_state & EP_CLEARING_TT)) { + ep->ep_state |= EP_CLEARING_TT; + td->urb->ep->hcpriv = td->urb->dev; + if (usb_hub_clear_tt_buffer(td->urb)) + ep->ep_state &= ~EP_CLEARING_TT; + } +} + +/* Check if an error has halted the endpoint ring. The class driver will + * cleanup the halt for a non-default control endpoint if we indicate a stall. + * However, a babble and other errors also halt the endpoint ring, and the class + * driver won't clear the halt in that case, so we need to issue a Set Transfer + * Ring Dequeue Pointer command manually. + */ +static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + unsigned int trb_comp_code) +{ + /* TRB completion codes that may require a manual halt cleanup */ + if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || + trb_comp_code == COMP_BABBLE_DETECTED_ERROR || + trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) + /* The 0.95 spec says a babbling control endpoint + * is not halted. The 0.96 spec says it is. Some HW + * claims to be 0.95 compliant, but it halts the control + * endpoint anyway. Check if a babble halted the + * endpoint. 
+ */ + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) + return 1; + + return 0; +} + +int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) +{ + if (trb_comp_code >= 224 && trb_comp_code <= 255) { + /* Vendor defined "informational" completion code, + * treat as not-an-error. + */ + xhci_dbg(xhci, "Vendor defined info completion code %u\n", + trb_comp_code); + xhci_dbg(xhci, "Treating code as success.\n"); + return 1; + } + return 0; +} + +static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + u32 trb_comp_code) +{ + struct xhci_ep_ctx *ep_ctx; + + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); + + switch (trb_comp_code) { + case COMP_STOPPED_LENGTH_INVALID: + case COMP_STOPPED_SHORT_PACKET: + case COMP_STOPPED: + /* + * The "Stop Endpoint" completion will take care of any + * stopped TDs. A stopped TD may be restarted, so don't update + * the ring dequeue pointer or take this TD off any lists yet. + */ + return 0; + case COMP_USB_TRANSACTION_ERROR: + case COMP_BABBLE_DETECTED_ERROR: + case COMP_SPLIT_TRANSACTION_ERROR: + /* + * If endpoint context state is not halted we might be + * racing with a reset endpoint command issued by a unsuccessful + * stop endpoint completion (context error). In that case the + * td should be on the cancelled list, and EP_HALTED flag set. + * + * Or then it's not halted due to the 0.95 spec stating that a + * babbling control endpoint should not halt. The 0.96 spec + * again says it should. Some HW claims to be 0.95 compliant, + * but it halts the control endpoint anyway. + */ + if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) { + /* + * If EP_HALTED is set and TD is on the cancelled list + * the TD and dequeue pointer will be handled by reset + * ep command completion + */ + if ((ep->ep_state & EP_HALTED) && + !list_empty(&td->cancelled_td_list)) { + xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n", + (unsigned long long)xhci_trb_virt_to_dma( + td->start_seg, td->first_trb)); + return 0; + } + /* endpoint not halted, don't reset it */ + break; + } + /* Almost same procedure as for STALL_ERROR below */ + xhci_clear_hub_tt_buffer(xhci, td, ep); + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_HARD_RESET); + return 0; + case COMP_STALL_ERROR: + /* + * xhci internal endpoint state will go to a "halt" state for + * any stall, including default control pipe protocol stall. + * To clear the host side halt we need to issue a reset endpoint + * command, followed by a set dequeue command to move past the + * TD. + * Class drivers clear the device side halt from a functional + * stall later. Hub TT buffer should only be cleared for FS/LS + * devices behind HS hubs for functional stalls. 
+ */ + if (ep->ep_index != 0) + xhci_clear_hub_tt_buffer(xhci, td, ep); + + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_HARD_RESET); + + return 0; /* xhci_handle_halted_endpoint marked td cancelled */ + default: + break; + } + + /* Update ring dequeue pointer */ + ep_ring->dequeue = td->last_trb; + ep_ring->deq_seg = td->last_trb_seg; + ep_ring->num_trbs_free += td->num_trbs - 1; + inc_deq(xhci, ep_ring); + + return xhci_td_cleanup(xhci, td, ep_ring, td->status); +} + +/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ +static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, + union xhci_trb *stop_trb) +{ + u32 sum; + union xhci_trb *trb = ring->dequeue; + struct xhci_segment *seg = ring->deq_seg; + + for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { + if (!trb_is_noop(trb) && !trb_is_link(trb)) + sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); + } + return sum; +} + +/* + * Process control tds, update urb status and actual_length. + */ +static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) +{ + struct xhci_ep_ctx *ep_ctx; + u32 trb_comp_code; + u32 remaining, requested; + u32 trb_type; + + trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + requested = td->urb->transfer_buffer_length; + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + switch (trb_comp_code) { + case COMP_SUCCESS: + if (trb_type != TRB_STATUS) { + xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", + (trb_type == TRB_DATA) ? "data" : "setup"); + td->status = -ESHUTDOWN; + break; + } + td->status = 0; + break; + case COMP_SHORT_PACKET: + td->status = 0; + break; + case COMP_STOPPED_SHORT_PACKET: + if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) + td->urb->actual_length = remaining; + else + xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); + goto finish_td; + case COMP_STOPPED: + switch (trb_type) { + case TRB_SETUP: + td->urb->actual_length = 0; + goto finish_td; + case TRB_DATA: + case TRB_NORMAL: + td->urb->actual_length = requested - remaining; + goto finish_td; + case TRB_STATUS: + td->urb->actual_length = requested; + goto finish_td; + default: + xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", + trb_type); + goto finish_td; + } + case COMP_STOPPED_LENGTH_INVALID: + goto finish_td; + default: + if (!xhci_requires_manual_halt_cleanup(xhci, + ep_ctx, trb_comp_code)) + break; + xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", + trb_comp_code, ep->ep_index); + fallthrough; + case COMP_STALL_ERROR: + /* Did we transfer part of the data (middle) phase? */ + if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) + td->urb->actual_length = requested - remaining; + else if (!td->urb_length_set) + td->urb->actual_length = 0; + goto finish_td; + } + + /* stopped at setup stage, no data transferred */ + if (trb_type == TRB_SETUP) + goto finish_td; + + /* + * if on data stage then update the actual_length of the URB and flag it + * as set, so it won't be overwritten in the event for the last TRB. 
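[Editor's note - illustrative annotation, not part of the patch.] A control URB can generate two transfer events, one for the data stage and one for the status stage, which is why the length is latched and flagged as described above. A minimal sketch of that bookkeeping using simplified stand-in types, not the driver's structures:

#include <stdbool.h>
#include <stdint.h>

struct ctrl_xfer {
	uint32_t actual_length;
	bool	 length_set;
};

/* Illustrative sketch only: record the length seen at the data stage... */
static void ctrl_data_stage_event(struct ctrl_xfer *x, uint32_t requested, uint32_t remaining)
{
	x->actual_length = requested - remaining;	/* e.g. 512 - 112 = 400 bytes */
	x->length_set = true;
}

/* ...and do not clobber it when the status-stage event arrives later. */
static void ctrl_status_stage_event(struct ctrl_xfer *x, uint32_t requested)
{
	if (!x->length_set)
		x->actual_length = requested;
}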
+ */ + if (trb_type == TRB_DATA || + trb_type == TRB_NORMAL) { + td->urb_length_set = true; + td->urb->actual_length = requested - remaining; + xhci_dbg(xhci, "Waiting for status stage event\n"); + return 0; + } + + /* at status stage */ + if (!td->urb_length_set) + td->urb->actual_length = requested; + +finish_td: + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); +} + +/* + * Process isochronous tds, update urb packet status and actual_length. + */ +static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) +{ + struct urb_priv *urb_priv; + int idx; + struct usb_iso_packet_descriptor *frame; + u32 trb_comp_code; + bool sum_trbs_for_length = false; + u32 remaining, requested, ep_trb_len; + int short_framestatus; + + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + urb_priv = td->urb->hcpriv; + idx = urb_priv->num_tds_done; + frame = &td->urb->iso_frame_desc[idx]; + requested = frame->length; + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? + -EREMOTEIO : 0; + + /* handle completion code */ + switch (trb_comp_code) { + case COMP_SUCCESS: + if (remaining) { + frame->status = short_framestatus; + if (xhci->quirks & XHCI_TRUST_TX_LENGTH) + sum_trbs_for_length = true; + break; + } + frame->status = 0; + break; + case COMP_SHORT_PACKET: + frame->status = short_framestatus; + sum_trbs_for_length = true; + break; + case COMP_BANDWIDTH_OVERRUN_ERROR: + frame->status = -ECOMM; + break; + case COMP_ISOCH_BUFFER_OVERRUN: + case COMP_BABBLE_DETECTED_ERROR: + frame->status = -EOVERFLOW; + break; + case COMP_INCOMPATIBLE_DEVICE_ERROR: + case COMP_STALL_ERROR: + frame->status = -EPROTO; + break; + case COMP_USB_TRANSACTION_ERROR: + frame->status = -EPROTO; + if (ep_trb != td->last_trb) + return 0; + break; + case COMP_STOPPED: + sum_trbs_for_length = true; + break; + case COMP_STOPPED_SHORT_PACKET: + /* field normally containing residue now contains tranferred */ + frame->status = short_framestatus; + requested = remaining; + break; + case COMP_STOPPED_LENGTH_INVALID: + requested = 0; + remaining = 0; + break; + default: + sum_trbs_for_length = true; + frame->status = -1; + break; + } + + if (sum_trbs_for_length) + frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + + ep_trb_len - remaining; + else + frame->actual_length = requested; + + td->urb->actual_length += frame->actual_length; + + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); +} + +static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_virt_ep *ep, int status) +{ + struct urb_priv *urb_priv; + struct usb_iso_packet_descriptor *frame; + int idx; + + urb_priv = td->urb->hcpriv; + idx = urb_priv->num_tds_done; + frame = &td->urb->iso_frame_desc[idx]; + + /* The transfer is partly done. */ + frame->status = -EXDEV; + + /* calc actual length */ + frame->actual_length = 0; + + /* Update ring dequeue pointer */ + ep->ring->dequeue = td->last_trb; + ep->ring->deq_seg = td->last_trb_seg; + ep->ring->num_trbs_free += td->num_trbs - 1; + inc_deq(xhci, ep->ring); + + return xhci_td_cleanup(xhci, td, ep->ring, status); +} + +/* + * Process bulk and interrupt tds, update urb status and actual_length. 
+ */ +static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) +{ + struct xhci_slot_ctx *slot_ctx; + u32 trb_comp_code; + u32 remaining, requested, ep_trb_len; + + slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + requested = td->urb->transfer_buffer_length; + + switch (trb_comp_code) { + case COMP_SUCCESS: + ep_ring->err_count = 0; + /* handle success with untransferred data as short packet */ + if (ep_trb != td->last_trb || remaining) { + xhci_warn(xhci, "WARN Successful completion on short TX\n"); + xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", + td->urb->ep->desc.bEndpointAddress, + requested, remaining); + } + td->status = 0; + break; + case COMP_SHORT_PACKET: + xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", + td->urb->ep->desc.bEndpointAddress, + requested, remaining); + td->status = 0; + break; + case COMP_STOPPED_SHORT_PACKET: + td->urb->actual_length = remaining; + goto finish_td; + case COMP_STOPPED_LENGTH_INVALID: + /* stopped on ep trb with invalid length, exclude it */ + ep_trb_len = 0; + remaining = 0; + break; + case COMP_USB_TRANSACTION_ERROR: + if (xhci->quirks & XHCI_NO_SOFT_RETRY || + (ep_ring->err_count++ > MAX_SOFT_RETRY) || + le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) + break; + + td->status = 0; + + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_SOFT_RESET); + return 0; + default: + /* do nothing */ + break; + } + + if (ep_trb == td->last_trb) + td->urb->actual_length = requested - remaining; + else + td->urb->actual_length = + sum_trb_lengths(xhci, ep_ring, ep_trb) + + ep_trb_len - remaining; +finish_td: + if (remaining > requested) { + xhci_warn(xhci, "bad transfer trb length %d in event trb\n", + remaining); + td->urb->actual_length = 0; + } + + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); +} + +/* + * If this function returns an error condition, it means it got a Transfer + * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. + * At this point, the host controller is probably hosed and should be reset. 
+ */ +static int handle_tx_event(struct xhci_hcd *xhci, + struct xhci_transfer_event *event) +{ + struct xhci_virt_ep *ep; + struct xhci_ring *ep_ring; + unsigned int slot_id; + int ep_index; + struct xhci_td *td = NULL; + dma_addr_t ep_trb_dma; + struct xhci_segment *ep_seg; + union xhci_trb *ep_trb; + int status = -EINPROGRESS; + struct xhci_ep_ctx *ep_ctx; + struct list_head *tmp; + u32 trb_comp_code; + int td_num = 0; + bool handling_skipped_tds = false; + + slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); + ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + ep_trb_dma = le64_to_cpu(event->buffer); + + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) { + xhci_err(xhci, "ERROR Invalid Transfer event\n"); + goto err_out; + } + + ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); + + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { + xhci_err(xhci, + "ERROR Transfer event for disabled endpoint slot %u ep %u\n", + slot_id, ep_index); + goto err_out; + } + + /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ + if (!ep_ring) { + switch (trb_comp_code) { + case COMP_STALL_ERROR: + case COMP_USB_TRANSACTION_ERROR: + case COMP_INVALID_STREAM_TYPE_ERROR: + case COMP_INVALID_STREAM_ID_ERROR: + xhci_handle_halted_endpoint(xhci, ep, 0, NULL, + EP_SOFT_RESET); + goto cleanup; + case COMP_RING_UNDERRUN: + case COMP_RING_OVERRUN: + case COMP_STOPPED_LENGTH_INVALID: + goto cleanup; + default: + xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", + slot_id, ep_index); + goto err_out; + } + } + + /* Count current td numbers if ep->skip is set */ + if (ep->skip) { + list_for_each(tmp, &ep_ring->td_list) + td_num++; + } + + /* Look for common error cases */ + switch (trb_comp_code) { + /* Skip codes that require special handling depending on + * transfer type + */ + case COMP_SUCCESS: + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) + break; + if (xhci->quirks & XHCI_TRUST_TX_LENGTH || + ep_ring->last_td_was_short) + trb_comp_code = COMP_SHORT_PACKET; + else + xhci_warn_ratelimited(xhci, + "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n", + slot_id, ep_index); + case COMP_SHORT_PACKET: + break; + /* Completion codes for endpoint stopped state */ + case COMP_STOPPED: + xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n", + slot_id, ep_index); + break; + case COMP_STOPPED_LENGTH_INVALID: + xhci_dbg(xhci, + "Stopped on No-op or Link TRB for slot %u ep %u\n", + slot_id, ep_index); + break; + case COMP_STOPPED_SHORT_PACKET: + xhci_dbg(xhci, + "Stopped with short packet transfer detected for slot %u ep %u\n", + slot_id, ep_index); + break; + /* Completion codes for endpoint halted state */ + case COMP_STALL_ERROR: + xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, + ep_index); + status = -EPIPE; + break; + case COMP_SPLIT_TRANSACTION_ERROR: + xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n", + slot_id, ep_index); + status = -EPROTO; + break; + case COMP_USB_TRANSACTION_ERROR: + xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n", + slot_id, ep_index); + status = -EPROTO; + break; + case COMP_BABBLE_DETECTED_ERROR: + xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n", + slot_id, ep_index); + status = -EOVERFLOW; + break; + /* Completion codes for endpoint error state */ + case COMP_TRB_ERROR: + 
xhci_warn(xhci, + "WARN: TRB error for slot %u ep %u on endpoint\n", + slot_id, ep_index); + status = -EILSEQ; + break; + /* completion codes not indicating endpoint state change */ + case COMP_DATA_BUFFER_ERROR: + xhci_warn(xhci, + "WARN: HC couldn't access mem fast enough for slot %u ep %u\n", + slot_id, ep_index); + status = -ENOSR; + break; + case COMP_BANDWIDTH_OVERRUN_ERROR: + xhci_warn(xhci, + "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n", + slot_id, ep_index); + break; + case COMP_ISOCH_BUFFER_OVERRUN: + xhci_warn(xhci, + "WARN: buffer overrun event for slot %u ep %u on endpoint", + slot_id, ep_index); + break; + case COMP_RING_UNDERRUN: + /* + * When the Isoch ring is empty, the xHC will generate + * a Ring Overrun Event for IN Isoch endpoint or Ring + * Underrun Event for OUT Isoch endpoint. + */ + xhci_dbg(xhci, "underrun event on endpoint\n"); + if (!list_empty(&ep_ring->td_list)) + xhci_dbg(xhci, "Underrun Event for slot %d ep %d " + "still with TDs queued?\n", + TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), + ep_index); + goto cleanup; + case COMP_RING_OVERRUN: + xhci_dbg(xhci, "overrun event on endpoint\n"); + if (!list_empty(&ep_ring->td_list)) + xhci_dbg(xhci, "Overrun Event for slot %d ep %d " + "still with TDs queued?\n", + TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), + ep_index); + goto cleanup; + case COMP_MISSED_SERVICE_ERROR: + /* + * When encounter missed service error, one or more isoc tds + * may be missed by xHC. + * Set skip flag of the ep_ring; Complete the missed tds as + * short transfer when process the ep_ring next time. + */ + ep->skip = true; + xhci_dbg(xhci, + "Miss service interval error for slot %u ep %u, set skip flag\n", + slot_id, ep_index); + goto cleanup; + case COMP_NO_PING_RESPONSE_ERROR: + ep->skip = true; + xhci_dbg(xhci, + "No Ping response error for slot %u ep %u, Skip one Isoc TD\n", + slot_id, ep_index); + goto cleanup; + + case COMP_INCOMPATIBLE_DEVICE_ERROR: + /* needs disable slot command to recover */ + xhci_warn(xhci, + "WARN: detect an incompatible device for slot %u ep %u", + slot_id, ep_index); + status = -EPROTO; + break; + default: + if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { + status = 0; + break; + } + xhci_warn(xhci, + "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n", + trb_comp_code, slot_id, ep_index); + goto cleanup; + } + + do { + /* This TRB should be in the TD at the head of this ring's + * TD list. + */ + if (list_empty(&ep_ring->td_list)) { + /* + * Don't print wanings if it's due to a stopped endpoint + * generating an extra completion event if the device + * was suspended. Or, a event for the last TRB of a + * short TD we already got a short event for. + * The short TD is already removed from the TD list. + */ + + if (!(trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_LENGTH_INVALID || + ep_ring->last_td_was_short)) { + xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", + TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), + ep_index); + } + if (ep->skip) { + ep->skip = false; + xhci_dbg(xhci, "td_list is empty while skip flag set. 
Clear skip flag for slot %u ep %u.\n", + slot_id, ep_index); + } + if (trb_comp_code == COMP_STALL_ERROR || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + xhci_handle_halted_endpoint(xhci, ep, + ep_ring->stream_id, + NULL, + EP_HARD_RESET); + } + goto cleanup; + } + + /* We've skipped all the TDs on the ep ring when ep->skip set */ + if (ep->skip && td_num == 0) { + ep->skip = false; + xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n", + slot_id, ep_index); + goto cleanup; + } + + td = list_first_entry(&ep_ring->td_list, struct xhci_td, + td_list); + if (ep->skip) + td_num--; + + /* Is this a TRB in the currently executing TD? */ + ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, + td->last_trb, ep_trb_dma, false); + + /* + * Skip the Force Stopped Event. The event_trb(event_dma) of FSE + * is not in the current TD pointed by ep_ring->dequeue because + * that the hardware dequeue pointer still at the previous TRB + * of the current TD. The previous TRB maybe a Link TD or the + * last TRB of the previous TD. The command completion handle + * will take care the rest. + */ + if (!ep_seg && (trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { + goto cleanup; + } + + if (!ep_seg) { + if (!ep->skip || + !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + /* Some host controllers give a spurious + * successful event after a short transfer. + * Ignore it. + */ + if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && + ep_ring->last_td_was_short) { + ep_ring->last_td_was_short = false; + goto cleanup; + } + /* HC is busted, give up! */ + xhci_err(xhci, + "ERROR Transfer event TRB DMA ptr not " + "part of current TD ep_index %d " + "comp_code %u\n", ep_index, + trb_comp_code); + trb_in_td(xhci, ep_ring->deq_seg, + ep_ring->dequeue, td->last_trb, + ep_trb_dma, true); + return -ESHUTDOWN; + } + + skip_isoc_td(xhci, td, ep, status); + goto cleanup; + } + if (trb_comp_code == COMP_SHORT_PACKET) + ep_ring->last_td_was_short = true; + else + ep_ring->last_td_was_short = false; + + if (ep->skip) { + xhci_dbg(xhci, + "Found td. Clear skip flag for slot %u ep %u.\n", + slot_id, ep_index); + ep->skip = false; + } + + ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / + sizeof(*ep_trb)]; + + trace_xhci_handle_transfer(ep_ring, + (struct xhci_generic_trb *) ep_trb); + + /* + * No-op TRB could trigger interrupts in a case where + * a URB was killed and a STALL_ERROR happens right + * after the endpoint ring stopped. Reset the halted + * endpoint. Otherwise, the endpoint remains stalled + * indefinitely. + */ + + if (trb_is_noop(ep_trb)) { + if (trb_comp_code == COMP_STALL_ERROR || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) + xhci_handle_halted_endpoint(xhci, ep, + ep_ring->stream_id, + td, EP_HARD_RESET); + goto cleanup; + } + + td->status = status; + + /* update the urb's actual_length and give back to the core */ + if (usb_endpoint_xfer_control(&td->urb->ep->desc)) + process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); + else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) + process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); + else + process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); +cleanup: + handling_skipped_tds = ep->skip && + trb_comp_code != COMP_MISSED_SERVICE_ERROR && + trb_comp_code != COMP_NO_PING_RESPONSE_ERROR; + + /* + * Do not update event ring dequeue pointer if we're in a loop + * processing missed tds. 
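+ * A single transfer event can complete several skipped isoc TDs, so the
+ * event TRB must stay at the dequeue position until the last of them has
+ * been given back.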
+ */ + if (!handling_skipped_tds) + inc_deq(xhci, xhci->event_ring); + + /* + * If ep->skip is set, it means there are missed tds on the + * endpoint ring need to take care of. + * Process them as short transfer until reach the td pointed by + * the event. + */ + } while (handling_skipped_tds); + + return 0; + +err_out: + xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", + (unsigned long long) xhci_trb_virt_to_dma( + xhci->event_ring->deq_seg, + xhci->event_ring->dequeue), + lower_32_bits(le64_to_cpu(event->buffer)), + upper_32_bits(le64_to_cpu(event->buffer)), + le32_to_cpu(event->transfer_len), + le32_to_cpu(event->flags)); + return -ENODEV; +} + +/* + * This function handles all OS-owned events on the event ring. It may drop + * xhci->lock between event processing (e.g. to pass up port status changes). + * Returns >0 for "possibly more events to process" (caller should call again), + * otherwise 0 if done. In future, <0 returns should indicate error code. + */ +int xhci_handle_event(struct xhci_hcd *xhci) +{ + union xhci_trb *event; + int update_ptrs = 1; + u32 trb_type; + int ret; + + /* Event ring hasn't been allocated yet. */ + if (!xhci->event_ring || !xhci->event_ring->dequeue) { + xhci_err(xhci, "ERROR event ring not ready\n"); + return -ENOMEM; + } + + event = xhci->event_ring->dequeue; + /* Does the HC or OS own the TRB? */ + if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != + xhci->event_ring->cycle_state) + return 0; + + trace_xhci_handle_event(xhci->event_ring, &event->generic); + + /* + * Barrier between reading the TRB_CYCLE (valid) flag above and any + * speculative reads of the event's flags/data below. + */ + rmb(); + trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); + /* FIXME: Handle more event types. */ + + switch (trb_type) { + case TRB_COMPLETION: + handle_cmd_completion(xhci, &event->event_cmd); + break; + case TRB_PORT_STATUS: + handle_port_status(xhci, event); + update_ptrs = 0; + break; + case TRB_TRANSFER: + ret = handle_tx_event(xhci, &event->trans_event); + if (ret >= 0) + update_ptrs = 0; + break; + case TRB_DEV_NOTE: + handle_device_notification(xhci, event); + break; + default: + if (trb_type >= TRB_VENDOR_DEFINED_LOW) + handle_vendor_event(xhci, event, trb_type); + else + xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type); + } + /* Any of the above functions may drop and re-acquire the lock, so check + * to make sure a watchdog timer didn't mark the host as non-responsive. + */ + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "xHCI host dying, returning from " + "event handler.\n"); + return 0; + } + + if (update_ptrs) + /* Update SW event ring dequeue pointer */ + inc_deq(xhci, xhci->event_ring); + + /* Are there more items on the event ring? Caller will call us again to + * check. + */ + return 1; +} +EXPORT_SYMBOL_GPL(xhci_handle_event); + +/* + * Update Event Ring Dequeue Pointer: + * - When all events have finished + * - To avoid "Event Ring Full Error" condition + */ +void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + union xhci_trb *event_ring_deq) +{ + u64 temp_64; + dma_addr_t deq; + + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + /* If necessary, update the HW's version of the event ring deq ptr. 
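+ * The pointer bits are only changed when the software dequeue pointer has
+ * actually advanced; the final write also sets the Event Handler Busy bit,
+ * which is RW1C, in order to clear it.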
*/ + if (event_ring_deq != xhci->event_ring->dequeue) { + deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, + xhci->event_ring->dequeue); + if (deq == 0) + xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); + /* + * Per 4.9.4, Software writes to the ERDP register shall + * always advance the Event Ring Dequeue Pointer value. + */ + if ((temp_64 & (u64) ~ERST_PTR_MASK) == + ((u64) deq & (u64) ~ERST_PTR_MASK)) + return; + + /* Update HC event ring dequeue pointer */ + temp_64 &= ERST_PTR_MASK; + temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); + } + + /* Clear the event handler busy flag (RW1C) */ + temp_64 |= ERST_EHB; + xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); +} +EXPORT_SYMBOL_GPL(xhci_update_erst_dequeue); + +static irqreturn_t xhci_vendor_queue_irq_work(struct xhci_hcd *xhci) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->queue_irq_work) + return ops->queue_irq_work(xhci); + return IRQ_NONE; +} + +irqreturn_t xhci_ncr_irq(struct usb_hcd *hcd) +{ + u32 status; + + if (!hcd->ncr_regs) + return IRQ_NONE; + + status = readl(hcd->ncr_regs + 0x80); + if (status & 0x7f) { + writel(status & 0x7f, hcd->ncr_regs + 0x80); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* + * xHCI spec says we can get an interrupt, and if the HC has an error condition, + * we might get bad data out of the event ring. Section 4.10.2.7 has a list of + * indicators of an event TRB error, but we check the status *first* to be safe. + */ +irqreturn_t xhci_irq(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + union xhci_trb *event_ring_deq; + irqreturn_t ret = IRQ_NONE; + irqreturn_t ncr_ret = IRQ_HANDLED; + unsigned long flags; + u64 temp_64; + u32 status; + int event_loop = 0; + + spin_lock_irqsave(&xhci->lock, flags); + + if (!hcd->soc_taihang) + ncr_ret = xhci_ncr_irq(hcd); + /* Check if the xHC generated the interrupt, or the irq is shared */ + status = readl(&xhci->op_regs->status); + if (status == ~(u32)0) { + xhci_hc_died(xhci); + ret = IRQ_HANDLED; + goto out; + } + + if (!(status & STS_EINT)) + goto out; + + if (status & STS_FATAL) { + xhci_warn(xhci, "WARNING: Host System Error\n"); + xhci_halt(xhci); + ret = IRQ_HANDLED; + goto out; + } + + ret = xhci_vendor_queue_irq_work(xhci); + if (ret == IRQ_HANDLED) + goto out; + + /* + * Clear the op reg interrupt status first, + * so we can receive interrupts from other MSI-X interrupters. + * Write 1 to clear the interrupt status. + */ + status |= STS_EINT; + writel(status, &xhci->op_regs->status); + + if (!hcd->msi_enabled) { + u32 irq_pending; + irq_pending = readl(&xhci->ir_set->irq_pending); + irq_pending |= IMAN_IP; + writel(irq_pending, &xhci->ir_set->irq_pending); + } + + if (xhci->xhc_state & XHCI_STATE_DYING || + xhci->xhc_state & XHCI_STATE_HALTED) { + xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " + "Shouldn't IRQs be disabled?\n"); + /* Clear the event handler busy flag (RW1C); + * the event ring should be empty. + */ + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + xhci_write_64(xhci, temp_64 | ERST_EHB, + &xhci->ir_set->erst_dequeue); + ret = IRQ_HANDLED; + goto out; + } + + event_ring_deq = xhci->event_ring->dequeue; + /* FIXME this should be a delayed service routine + * that clears the EHB. 
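+ * The loop below also refreshes the ERST dequeue pointer every
+ * TRBS_PER_SEGMENT / 2 events so that a long burst of events cannot fill
+ * the event ring before the hardware sees any progress.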
+ */ + while (xhci_handle_event(xhci) > 0) { + if (event_loop++ < TRBS_PER_SEGMENT / 2) + continue; + xhci_update_erst_dequeue(xhci, event_ring_deq); + event_loop = 0; + } + + xhci_update_erst_dequeue(xhci, event_ring_deq); + ret = IRQ_HANDLED; + +out: + spin_unlock_irqrestore(&xhci->lock, flags); + + if (ret == IRQ_HANDLED || ncr_ret == IRQ_HANDLED) + return IRQ_HANDLED; + + return ret; +} + +irqreturn_t xhci_msi_irq(int irq, void *hcd) +{ + return xhci_irq(hcd); +} + +/**** Endpoint Ring Operations ****/ + +/* + * Generic function for queueing a TRB on a ring. + * The caller must have checked to make sure there's room on the ring. + * + * @more_trbs_coming: Will you enqueue more TRBs before calling + * prepare_transfer()? + */ +static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, + bool more_trbs_coming, + u32 field1, u32 field2, u32 field3, u32 field4) +{ + struct xhci_generic_trb *trb; + + trb = &ring->enqueue->generic; + trb->field[0] = cpu_to_le32(field1); + trb->field[1] = cpu_to_le32(field2); + trb->field[2] = cpu_to_le32(field3); + /* make sure TRB is fully written before giving it to the controller */ + wmb(); + trb->field[3] = cpu_to_le32(field4); + + trace_xhci_queue_trb(ring, trb); + + inc_enq(xhci, ring, more_trbs_coming); +} + +/* + * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. + * FIXME allocate segments if the ring is full. + */ +static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, + u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) +{ + unsigned int num_trbs_needed; + unsigned int link_trb_count = 0; + + /* Make sure the endpoint has been added to xHC schedule */ + switch (ep_state) { + case EP_STATE_DISABLED: + /* + * USB core changed config/interfaces without notifying us, + * or hardware is reporting the wrong state. + */ + xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); + return -ENOENT; + case EP_STATE_ERROR: + xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); + /* FIXME event handling code for error needs to clear it */ + /* XXX not sure if this should be -ENOENT or not */ + return -EINVAL; + case EP_STATE_HALTED: + xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); + case EP_STATE_STOPPED: + case EP_STATE_RUNNING: + break; + default: + xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); + /* + * FIXME issue Configure Endpoint command to try to get the HC + * back into a known state. + */ + return -EINVAL; + } + + while (1) { + if (room_on_ring(xhci, ep_ring, num_trbs)) + break; + + if (ep_ring == xhci->cmd_ring) { + xhci_err(xhci, "Do not support expand command ring\n"); + return -ENOMEM; + } + + xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, + "ERROR no room on ep ring, try ring expansion"); + num_trbs_needed = num_trbs - ep_ring->num_trbs_free; + if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, + mem_flags)) { + xhci_err(xhci, "Ring expansion failed\n"); + return -ENOMEM; + } + } + + while (trb_is_link(ep_ring->enqueue)) { + /* If we're not dealing with 0.95 hardware or isoc rings + * on AMD 0.96 host, clear the chain bit. + */ + if (!xhci_link_trb_quirk(xhci) && + !(ep_ring->type == TYPE_ISOC && + (xhci->quirks & XHCI_AMD_0x96_HOST))) + ep_ring->enqueue->link.control &= + cpu_to_le32(~TRB_CHAIN); + else + ep_ring->enqueue->link.control |= + cpu_to_le32(TRB_CHAIN); + + wmb(); + ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); + + /* Toggle the cycle bit after the last ring segment. 
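+ * Only a link TRB that has its Toggle Cycle flag set flips the producer
+ * cycle state; other link TRBs in a multi-segment ring leave it unchanged.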
*/ + if (link_trb_toggles_cycle(ep_ring->enqueue)) + ep_ring->cycle_state ^= 1; + + ep_ring->enq_seg = ep_ring->enq_seg->next; + ep_ring->enqueue = ep_ring->enq_seg->trbs; + + /* prevent infinite loop if all first trbs are link trbs */ + if (link_trb_count++ > ep_ring->num_segs) { + xhci_warn(xhci, "Ring is an endless link TRB loop\n"); + return -EINVAL; + } + } + + if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { + xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); + return -EINVAL; + } + + return 0; +} + +static int prepare_transfer(struct xhci_hcd *xhci, + struct xhci_virt_device *xdev, + unsigned int ep_index, + unsigned int stream_id, + unsigned int num_trbs, + struct urb *urb, + unsigned int td_index, + gfp_t mem_flags) +{ + int ret; + struct urb_priv *urb_priv; + struct xhci_td *td; + struct xhci_ring *ep_ring; + struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + + ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, + stream_id); + if (!ep_ring) { + xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", + stream_id); + return -EINVAL; + } + + ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), + num_trbs, mem_flags); + if (ret) + return ret; + + urb_priv = urb->hcpriv; + td = &urb_priv->td[td_index]; + + INIT_LIST_HEAD(&td->td_list); + INIT_LIST_HEAD(&td->cancelled_td_list); + + if (td_index == 0) { + ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); + if (unlikely(ret)) + return ret; + } + + td->urb = urb; + /* Add this TD to the tail of the endpoint ring's TD list */ + list_add_tail(&td->td_list, &ep_ring->td_list); + td->start_seg = ep_ring->enq_seg; + td->first_trb = ep_ring->enqueue; + + return 0; +} + +unsigned int count_trbs(u64 addr, u64 len) +{ + unsigned int num_trbs; + + num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), + TRB_MAX_BUFF_SIZE); + if (num_trbs == 0) + num_trbs++; + + return num_trbs; +} + +static inline unsigned int count_trbs_needed(struct urb *urb) +{ + return count_trbs(urb->transfer_dma, urb->transfer_buffer_length); +} + +static unsigned int count_sg_trbs_needed(struct urb *urb) +{ + struct scatterlist *sg; + unsigned int i, len, full_len, num_trbs = 0; + + full_len = urb->transfer_buffer_length; + + for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { + len = sg_dma_len(sg); + num_trbs += count_trbs(sg_dma_address(sg), len); + len = min_t(unsigned int, len, full_len); + full_len -= len; + if (full_len == 0) + break; + } + + return num_trbs; +} + +static unsigned int count_isoc_trbs_needed(struct urb *urb, int i) +{ + u64 addr, len; + + addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); + len = urb->iso_frame_desc[i].length; + + return count_trbs(addr, len); +} + +static void check_trb_math(struct urb *urb, int running_total) +{ + if (unlikely(running_total != urb->transfer_buffer_length)) + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " + "queued %#x (%d), asked for %#x (%d)\n", + __func__, + urb->ep->desc.bEndpointAddress, + running_total, running_total, + urb->transfer_buffer_length, + urb->transfer_buffer_length); +} + +static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, + unsigned int ep_index, unsigned int stream_id, int start_cycle, + struct xhci_generic_trb *start_trb) +{ + /* + * Pass all the TRBs to the hardware at once and make sure this write + * isn't reordered. 
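+ * The first TRB was queued with its cycle bit still inverted, so flipping
+ * it here, after the wmb(), is what hands the whole TD to the controller
+ * in one step.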
+ */ + wmb(); + if (start_cycle) + start_trb->field[3] |= cpu_to_le32(start_cycle); + else + start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); + xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); +} + +static void check_interval(struct xhci_hcd *xhci, struct urb *urb, + struct xhci_ep_ctx *ep_ctx) +{ + int xhci_interval; + int ep_interval; + + xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); + ep_interval = urb->interval; + + /* Convert to microframes */ + if (urb->dev->speed == USB_SPEED_LOW || + urb->dev->speed == USB_SPEED_FULL) + ep_interval *= 8; + + /* FIXME change this to a warning and a suggestion to use the new API + * to set the polling interval (once the API is added). + */ + if (xhci_interval != ep_interval) { + dev_dbg_ratelimited(&urb->dev->dev, + "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", + ep_interval, ep_interval == 1 ? "" : "s", + xhci_interval, xhci_interval == 1 ? "" : "s"); + urb->interval = xhci_interval; + /* Convert back to frames for LS/FS devices */ + if (urb->dev->speed == USB_SPEED_LOW || + urb->dev->speed == USB_SPEED_FULL) + urb->interval /= 8; + } +} + +/* + * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt + * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD + * (comprised of sg list entries) can take several service intervals to + * transmit. + */ +int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + struct urb *urb, int slot_id, unsigned int ep_index) +{ + struct xhci_ep_ctx *ep_ctx; + + ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); + check_interval(xhci, urb, ep_ctx); + + return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); +} + +/* + * For xHCI 1.0 host controllers, TD size is the number of max packet sized + * packets remaining in the TD (*not* including this TRB). + * + * Total TD packet count = total_packet_count = + * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize) + * + * Packets transferred up to and including this TRB = packets_transferred = + * rounddown(total bytes transferred including this TRB / wMaxPacketSize) + * + * TD size = total_packet_count - packets_transferred + * + * For xHCI 0.96 and older, TD size field should be the remaining bytes + * including this TRB, right shifted by 10 + * + * For all hosts it must fit in bits 21:17, so it can't be bigger than 31. + * This is taken care of in the TRB_TD_SIZE() macro + * + * The last TRB in a TD must have the TD size set to zero. + */ +static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, + int trb_buff_len, unsigned int td_total_len, + struct urb *urb, bool more_trbs_coming) +{ + u32 maxp, total_packet_count; + + /* MTK xHCI 0.96 contains some features from 1.0 */ + if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) + return ((td_total_len - transferred) >> 10); + + /* One TRB with a zero-length data packet. 
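+ * That TRB, the last TRB of a TD, and a TRB that carries the whole TD all
+ * get a TD size of zero.  Otherwise, as an illustrative example with
+ * hypothetical values: td_total_len = 3072, wMaxPacketSize = 512, 512
+ * bytes already queued and 512 more in this TRB give total_packet_count = 6
+ * and packets_transferred = 2, so 4 is returned.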
*/ + if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) || + trb_buff_len == td_total_len) + return 0; + + /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */ + if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) + trb_buff_len = 0; + + maxp = usb_endpoint_maxp(&urb->ep->desc); + total_packet_count = DIV_ROUND_UP(td_total_len, maxp); + + /* Queueing functions don't count the current TRB into transferred */ + return (total_packet_count - ((transferred + trb_buff_len) / maxp)); +} + + +static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, + u32 *trb_buff_len, struct xhci_segment *seg) +{ + struct device *dev = xhci_to_hcd(xhci)->self.controller; + unsigned int unalign; + unsigned int max_pkt; + u32 new_buff_len; + size_t len; + + max_pkt = usb_endpoint_maxp(&urb->ep->desc); + unalign = (enqd_len + *trb_buff_len) % max_pkt; + + /* we got lucky, last normal TRB data on segment is packet aligned */ + if (unalign == 0) + return 0; + + xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n", + unalign, *trb_buff_len); + + /* is the last nornal TRB alignable by splitting it */ + if (*trb_buff_len > unalign) { + *trb_buff_len -= unalign; + xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len); + return 0; + } + + /* + * We want enqd_len + trb_buff_len to sum up to a number aligned to + * number which is divisible by the endpoint's wMaxPacketSize. IOW: + * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0. + */ + new_buff_len = max_pkt - (enqd_len % max_pkt); + + if (new_buff_len > (urb->transfer_buffer_length - enqd_len)) + new_buff_len = (urb->transfer_buffer_length - enqd_len); + + /* create a max max_pkt sized bounce buffer pointed to by last trb */ + if (usb_urb_dir_out(urb)) { + if (urb->num_sgs) { + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, + seg->bounce_buf, new_buff_len, enqd_len); + if (len != new_buff_len) + xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", + len, new_buff_len); + } else { + memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); + } + + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, + max_pkt, DMA_TO_DEVICE); + } else { + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, + max_pkt, DMA_FROM_DEVICE); + } + + if (dma_mapping_error(dev, seg->bounce_dma)) { + /* try without aligning. Some host controllers survive */ + xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); + return 0; + } + *trb_buff_len = new_buff_len; + seg->bounce_len = new_buff_len; + seg->bounce_offs = enqd_len; + + xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len); + + return 1; +} + +/* This is very similar to what ehci-q.c qtd_fill() does */ +int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + struct urb *urb, int slot_id, unsigned int ep_index) +{ + struct xhci_ring *ring; + struct urb_priv *urb_priv; + struct xhci_td *td; + struct xhci_generic_trb *start_trb; + struct scatterlist *sg = NULL; + bool more_trbs_coming = true; + bool need_zero_pkt = false; + bool first_trb = true; + unsigned int num_trbs; + unsigned int start_cycle, num_sgs = 0; + unsigned int enqd_len, block_len, trb_buff_len, full_len; + int sent_len, ret; + u32 field, length_field, remainder; + u64 addr, send_addr; + + ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ring) + return -EINVAL; + + full_len = urb->transfer_buffer_length; + /* If we have scatter/gather list, we use it. 
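+ * num_trbs then counts one TRB per 64KB-bounded chunk of every mapped SG
+ * entry; a non-SG URB splits its single buffer the same way.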
*/ + if (urb->num_sgs) { + num_sgs = urb->num_mapped_sgs; + sg = urb->sg; + addr = (u64) sg_dma_address(sg); + block_len = sg_dma_len(sg); + num_trbs = count_sg_trbs_needed(urb); + } else { + num_trbs = count_trbs_needed(urb); + addr = (u64) urb->transfer_dma; + block_len = full_len; + } + ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, + num_trbs, urb, 0, mem_flags); + if (unlikely(ret < 0)) + return ret; + + urb_priv = urb->hcpriv; + + /* Deal with URB_ZERO_PACKET - need one more td/trb */ + if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) + need_zero_pkt = true; + + td = &urb_priv->td[0]; + + /* + * Don't give the first TRB to the hardware (by toggling the cycle bit) + * until we've finished creating all the other TRBs. The ring's cycle + * state may change as we enqueue the other TRBs, so save it too. + */ + start_trb = &ring->enqueue->generic; + start_cycle = ring->cycle_state; + send_addr = addr; + + /* Queue the TRBs, even if they are zero-length */ + for (enqd_len = 0; first_trb || enqd_len < full_len; + enqd_len += trb_buff_len) { + field = TRB_TYPE(TRB_NORMAL); + + /* TRB buffer should not cross 64KB boundaries */ + trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); + trb_buff_len = min_t(unsigned int, trb_buff_len, block_len); + + if (enqd_len + trb_buff_len > full_len) + trb_buff_len = full_len - enqd_len; + + /* Don't change the cycle bit of the first TRB until later */ + if (first_trb) { + first_trb = false; + if (start_cycle == 0) + field |= TRB_CYCLE; + } else + field |= ring->cycle_state; + + /* Chain all the TRBs together; clear the chain bit in the last + * TRB to indicate it's the last TRB in the chain. + */ + if (enqd_len + trb_buff_len < full_len) { + field |= TRB_CHAIN; + if (trb_is_link(ring->enqueue + 1)) { + if (xhci_align_td(xhci, urb, enqd_len, + &trb_buff_len, + ring->enq_seg)) { + send_addr = ring->enq_seg->bounce_dma; + /* assuming TD won't span 2 segs */ + td->bounce_seg = ring->enq_seg; + } + } + } + if (enqd_len + trb_buff_len >= full_len) { + field &= ~TRB_CHAIN; + field |= TRB_IOC; + more_trbs_coming = false; + td->last_trb = ring->enqueue; + td->last_trb_seg = ring->enq_seg; + if (xhci_urb_suitable_for_idt(urb)) { + memcpy(&send_addr, urb->transfer_buffer, + trb_buff_len); + le64_to_cpus(&send_addr); + field |= TRB_IDT; + } + } + + /* Only set interrupt on short packet for IN endpoints */ + if (usb_urb_dir_in(urb)) + field |= TRB_ISP; + + /* Set the TRB length, TD size, and interrupter fields. 
*/ + remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len, + full_len, urb, more_trbs_coming); + + length_field = TRB_LEN(trb_buff_len) | + TRB_TD_SIZE(remainder) | + TRB_INTR_TARGET(0); + + queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, + lower_32_bits(send_addr), + upper_32_bits(send_addr), + length_field, + field); + td->num_trbs++; + addr += trb_buff_len; + sent_len = trb_buff_len; + + while (sg && sent_len >= block_len) { + /* New sg entry */ + --num_sgs; + sent_len -= block_len; + sg = sg_next(sg); + if (num_sgs != 0 && sg) { + block_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + addr += sent_len; + } + } + block_len -= sent_len; + send_addr = addr; + } + + if (need_zero_pkt) { + ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, + 1, urb, 1, mem_flags); + urb_priv->td[1].last_trb = ring->enqueue; + urb_priv->td[1].last_trb_seg = ring->enq_seg; + field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; + queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); + urb_priv->td[1].num_trbs++; + } + + check_trb_math(urb, enqd_len); + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, + start_cycle, start_trb); + return 0; +} + +/* Caller must have locked xhci->lock */ +int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + struct urb *urb, int slot_id, unsigned int ep_index) +{ + struct xhci_ring *ep_ring; + int num_trbs; + int ret; + struct usb_ctrlrequest *setup; + struct xhci_generic_trb *start_trb; + int start_cycle; + u32 field; + struct urb_priv *urb_priv; + struct xhci_td *td; + + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ep_ring) + return -EINVAL; + + /* + * Need to copy setup packet into setup TRB, so we can't use the setup + * DMA address. + */ + if (!urb->setup_packet) + return -EINVAL; + + /* 1 TRB for setup, 1 for status */ + num_trbs = 2; + /* + * Don't need to check if we need additional event data and normal TRBs, + * since data in control transfers will never get bigger than 16MB + * XXX: can we get a buffer that crosses 64KB boundaries? + */ + if (urb->transfer_buffer_length > 0) + num_trbs++; + ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, + num_trbs, urb, 0, mem_flags); + if (ret < 0) + return ret; + + urb_priv = urb->hcpriv; + td = &urb_priv->td[0]; + td->num_trbs = num_trbs; + + /* + * Don't give the first TRB to the hardware (by toggling the cycle bit) + * until we've finished creating all the other TRBs. The ring's cycle + * state may change as we enqueue the other TRBs, so save it too. + */ + start_trb = &ep_ring->enqueue->generic; + start_cycle = ep_ring->cycle_state; + + /* Queue setup TRB - see section 6.4.1.2.1 */ + /* FIXME better way to translate setup_packet into two u32 fields? 
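+ * As queued below, the first TRB dword is bRequestType | bRequest << 8 |
+ * wValue << 16 and the second is wIndex | wLength << 16; e.g. a standard
+ * GET_DESCRIPTOR(DEVICE) setup of 80 06 00 01 00 00 12 00 becomes
+ * 0x01000680 and 0x00120000.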
*/ + setup = (struct usb_ctrlrequest *) urb->setup_packet; + field = 0; + field |= TRB_IDT | TRB_TYPE(TRB_SETUP); + if (start_cycle == 0) + field |= 0x1; + + /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ + if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { + if (urb->transfer_buffer_length > 0) { + if (setup->bRequestType & USB_DIR_IN) + field |= TRB_TX_TYPE(TRB_DATA_IN); + else + field |= TRB_TX_TYPE(TRB_DATA_OUT); + } + } + + queue_trb(xhci, ep_ring, true, + setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, + le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, + TRB_LEN(8) | TRB_INTR_TARGET(0), + /* Immediate data in pointer */ + field); + + /* If there's data, queue data TRBs */ + /* Only set interrupt on short packet for IN endpoints */ + if (usb_urb_dir_in(urb)) + field = TRB_ISP | TRB_TYPE(TRB_DATA); + else + field = TRB_TYPE(TRB_DATA); + + if (urb->transfer_buffer_length > 0) { + u32 length_field, remainder; + u64 addr; + + if (xhci_urb_suitable_for_idt(urb)) { + memcpy(&addr, urb->transfer_buffer, + urb->transfer_buffer_length); + le64_to_cpus(&addr); + field |= TRB_IDT; + } else { + addr = (u64) urb->transfer_dma; + } + + remainder = xhci_td_remainder(xhci, 0, + urb->transfer_buffer_length, + urb->transfer_buffer_length, + urb, 1); + length_field = TRB_LEN(urb->transfer_buffer_length) | + TRB_TD_SIZE(remainder) | + TRB_INTR_TARGET(0); + if (setup->bRequestType & USB_DIR_IN) + field |= TRB_DIR_IN; + queue_trb(xhci, ep_ring, true, + lower_32_bits(addr), + upper_32_bits(addr), + length_field, + field | ep_ring->cycle_state); + } + + /* Save the DMA address of the last TRB in the TD */ + td->last_trb = ep_ring->enqueue; + td->last_trb_seg = ep_ring->enq_seg; + + /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ + /* If the device sent data, the status stage is an OUT transfer */ + if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) + field = 0; + else + field = TRB_DIR_IN; + queue_trb(xhci, ep_ring, false, + 0, + 0, + TRB_INTR_TARGET(0), + /* Event on completion */ + field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); + + giveback_first_trb(xhci, slot_id, ep_index, 0, + start_cycle, start_trb); + return 0; +} + +/* + * The transfer burst count field of the isochronous TRB defines the number of + * bursts that are required to move all packets in this TD. Only SuperSpeed + * devices can burst up to bMaxBurst number of packets per service interval. + * This field is zero based, meaning a value of zero in the field means one + * burst. Basically, for everything but SuperSpeed devices, this field will be + * zero. Only xHCI 1.0 host controllers support this field. + */ +static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, + struct urb *urb, unsigned int total_packet_count) +{ + unsigned int max_burst; + + if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) + return 0; + + max_burst = urb->ep->ss_ep_comp.bMaxBurst; + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; +} + +/* + * Returns the number of packets in the last "burst" of packets. This field is + * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so + * the last burst packet count is equal to the total number of packets in the + * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst + * must contain (bMaxBurst + 1) number of packets, but the last burst can + * contain 1 to (bMaxBurst + 1) packets. 
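+ *
+ * Worked example with hypothetical values: an endpoint with bMaxBurst = 3
+ * moving 10 packets uses bursts of 4 + 4 + 2, so the last burst holds two
+ * packets and this function returns 1, the field being zero-based.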
+ */ +static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, + struct urb *urb, unsigned int total_packet_count) +{ + unsigned int max_burst; + unsigned int residue; + + if (xhci->hci_version < 0x100) + return 0; + + if (urb->dev->speed >= USB_SPEED_SUPER) { + /* bMaxBurst is zero based: 0 means 1 packet per burst */ + max_burst = urb->ep->ss_ep_comp.bMaxBurst; + residue = total_packet_count % (max_burst + 1); + /* If residue is zero, the last burst contains (max_burst + 1) + * number of packets, but the TLBPC field is zero-based. + */ + if (residue == 0) + return max_burst; + return residue - 1; + } + if (total_packet_count == 0) + return 0; + return total_packet_count - 1; +} + +/* + * Calculates Frame ID field of the isochronous TRB identifies the + * target frame that the Interval associated with this Isochronous + * Transfer Descriptor will start on. Refer to 4.11.2.5 in 1.1 spec. + * + * Returns actual frame id on success, negative value on error. + */ +static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, + struct urb *urb, int index) +{ + int start_frame, ist, ret = 0; + int start_frame_id, end_frame_id, current_frame_id; + + if (urb->dev->speed == USB_SPEED_LOW || + urb->dev->speed == USB_SPEED_FULL) + start_frame = urb->start_frame + index * urb->interval; + else + start_frame = (urb->start_frame + index * urb->interval) >> 3; + + /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2): + * + * If bit [3] of IST is cleared to '0', software can add a TRB no + * later than IST[2:0] Microframes before that TRB is scheduled to + * be executed. + * If bit [3] of IST is set to '1', software can add a TRB no later + * than IST[2:0] Frames before that TRB is scheduled to be executed. + */ + ist = HCS_IST(xhci->hcs_params2) & 0x7; + if (HCS_IST(xhci->hcs_params2) & (1 << 3)) + ist <<= 3; + + /* Software shall not schedule an Isoch TD with a Frame ID value that + * is less than the Start Frame ID or greater than the End Frame ID, + * where: + * + * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048 + * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048 + * + * Both the End Frame ID and Start Frame ID values are calculated + * in microframes. When software determines the valid Frame ID value; + * The End Frame ID value should be rounded down to the nearest Frame + * boundary, and the Start Frame ID value should be rounded up to the + * nearest Frame boundary. 
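+ *
+ * Illustrative example (hypothetical register value): with MFINDEX = 256
+ * microframes and IST = 2, start_frame_id = roundup(259, 8) = 264 (frame
+ * 33) and end_frame_id = rounddown(256 + 895 * 8, 8) = 7416 (frame 927),
+ * before both are masked into the 11-bit frame number space below.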
+ */ + current_frame_id = readl(&xhci->run_regs->microframe_index); + start_frame_id = roundup(current_frame_id + ist + 1, 8); + end_frame_id = rounddown(current_frame_id + 895 * 8, 8); + + start_frame &= 0x7ff; + start_frame_id = (start_frame_id >> 3) & 0x7ff; + end_frame_id = (end_frame_id >> 3) & 0x7ff; + + xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", + __func__, index, readl(&xhci->run_regs->microframe_index), + start_frame_id, end_frame_id, start_frame); + + if (start_frame_id < end_frame_id) { + if (start_frame > end_frame_id || + start_frame < start_frame_id) + ret = -EINVAL; + } else if (start_frame_id > end_frame_id) { + if ((start_frame > end_frame_id && + start_frame < start_frame_id)) + ret = -EINVAL; + } else { + ret = -EINVAL; + } + + if (index == 0) { + if (ret == -EINVAL || start_frame == start_frame_id) { + start_frame = start_frame_id + 1; + if (urb->dev->speed == USB_SPEED_LOW || + urb->dev->speed == USB_SPEED_FULL) + urb->start_frame = start_frame; + else + urb->start_frame = start_frame << 3; + ret = 0; + } + } + + if (ret) { + xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n", + start_frame, current_frame_id, index, + start_frame_id, end_frame_id); + xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n"); + return ret; + } + + return start_frame; +} + +/* Check if we should generate event interrupt for a TD in an isoc URB */ +static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i) +{ + if (xhci->hci_version < 0x100) + return false; + /* always generate an event interrupt for the last TD */ + if (i == num_tds - 1) + return false; + /* + * If AVOID_BEI is set the host handles full event rings poorly, + * generate an event at least every 8th TD to clear the event ring + */ + if (i && xhci->quirks & XHCI_AVOID_BEI) + return !!(i % 8); + + return true; +} + +/* This is for isoc transfer */ +static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + struct urb *urb, int slot_id, unsigned int ep_index) +{ + struct xhci_ring *ep_ring; + struct urb_priv *urb_priv; + struct xhci_td *td; + int num_tds, trbs_per_td; + struct xhci_generic_trb *start_trb; + bool first_trb; + int start_cycle; + u32 field, length_field; + int running_total, trb_buff_len, td_len, td_remain_len, ret; + u64 start_addr, addr; + int i, j; + bool more_trbs_coming; + struct xhci_virt_ep *xep; + int frame_id; + + xep = &xhci->devs[slot_id]->eps[ep_index]; + ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; + + num_tds = urb->number_of_packets; + if (num_tds < 1) { + xhci_dbg(xhci, "Isoc URB with zero packets?\n"); + return -EINVAL; + } + start_addr = (u64) urb->transfer_dma; + start_trb = &ep_ring->enqueue->generic; + start_cycle = ep_ring->cycle_state; + + urb_priv = urb->hcpriv; + /* Queue the TRBs for each TD, even if they are zero-length */ + for (i = 0; i < num_tds; i++) { + unsigned int total_pkt_count, max_pkt; + unsigned int burst_count, last_burst_pkt_count; + u32 sia_frame_id; + + first_trb = true; + running_total = 0; + addr = start_addr + urb->iso_frame_desc[i].offset; + td_len = urb->iso_frame_desc[i].length; + td_remain_len = td_len; + max_pkt = usb_endpoint_maxp(&urb->ep->desc); + total_pkt_count = DIV_ROUND_UP(td_len, max_pkt); + + /* A zero-length transfer still involves at least one packet. 
*/ + if (total_pkt_count == 0) + total_pkt_count++; + burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count); + last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci, + urb, total_pkt_count); + + trbs_per_td = count_isoc_trbs_needed(urb, i); + + ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, + urb->stream_id, trbs_per_td, urb, i, mem_flags); + if (ret < 0) { + if (i == 0) + return ret; + goto cleanup; + } + td = &urb_priv->td[i]; + td->num_trbs = trbs_per_td; + /* use SIA as default, if frame id is used overwrite it */ + sia_frame_id = TRB_SIA; + if (!(urb->transfer_flags & URB_ISO_ASAP) && + HCC_CFC(xhci->hcc_params)) { + frame_id = xhci_get_isoc_frame_id(xhci, urb, i); + if (frame_id >= 0) + sia_frame_id = TRB_FRAME_ID(frame_id); + } + /* + * Set isoc specific data for the first TRB in a TD. + * Prevent HW from getting the TRBs by keeping the cycle state + * inverted in the first TDs isoc TRB. + */ + field = TRB_TYPE(TRB_ISOC) | + TRB_TLBPC(last_burst_pkt_count) | + sia_frame_id | + (i ? ep_ring->cycle_state : !start_cycle); + + /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */ + if (!xep->use_extended_tbc) + field |= TRB_TBC(burst_count); + + /* fill the rest of the TRB fields, and remaining normal TRBs */ + for (j = 0; j < trbs_per_td; j++) { + u32 remainder = 0; + + /* only first TRB is isoc, overwrite otherwise */ + if (!first_trb) + field = TRB_TYPE(TRB_NORMAL) | + ep_ring->cycle_state; + + /* Only set interrupt on short packet for IN EPs */ + if (usb_urb_dir_in(urb)) + field |= TRB_ISP; + + /* Set the chain bit for all except the last TRB */ + if (j < trbs_per_td - 1) { + more_trbs_coming = true; + field |= TRB_CHAIN; + } else { + more_trbs_coming = false; + td->last_trb = ep_ring->enqueue; + td->last_trb_seg = ep_ring->enq_seg; + field |= TRB_IOC; + if (trb_block_event_intr(xhci, num_tds, i)) + field |= TRB_BEI; + } + /* Calculate TRB length */ + trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); + if (trb_buff_len > td_remain_len) + trb_buff_len = td_remain_len; + + /* Set the TRB length, TD size, & interrupter fields. */ + remainder = xhci_td_remainder(xhci, running_total, + trb_buff_len, td_len, + urb, more_trbs_coming); + + length_field = TRB_LEN(trb_buff_len) | + TRB_INTR_TARGET(0); + + /* xhci 1.1 with ETE uses TD Size field for TBC */ + if (first_trb && xep->use_extended_tbc) + length_field |= TRB_TD_SIZE_TBC(burst_count); + else + length_field |= TRB_TD_SIZE(remainder); + first_trb = false; + + queue_trb(xhci, ep_ring, more_trbs_coming, + lower_32_bits(addr), + upper_32_bits(addr), + length_field, + field); + running_total += trb_buff_len; + + addr += trb_buff_len; + td_remain_len -= trb_buff_len; + } + + /* Check TD length */ + if (running_total != td_len) { + xhci_err(xhci, "ISOC TD length unmatch\n"); + ret = -EINVAL; + goto cleanup; + } + } + + /* store the next frame id */ + if (HCC_CFC(xhci->hcc_params)) + xep->next_frame_id = urb->start_frame + num_tds * urb->interval; + + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_disable(); + } + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; + + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, + start_cycle, start_trb); + return 0; +cleanup: + /* Clean up a partially enqueued isoc transfer. */ + + for (i--; i >= 0; i--) + list_del_init(&urb_priv->td[i].td_list); + + /* Use the first TD as a temporary variable to turn the TDs we've queued + * into No-ops with a software-owned cycle bit. 
That way the hardware + * won't accidentally start executing bogus TDs when we partially + * overwrite them. td->first_trb and td->start_seg are already set. + */ + urb_priv->td[0].last_trb = ep_ring->enqueue; + /* Every TRB except the first & last will have its cycle bit flipped. */ + td_to_noop(xhci, ep_ring, &urb_priv->td[0], true); + + /* Reset the ring enqueue back to the first TRB and its cycle bit. */ + ep_ring->enqueue = urb_priv->td[0].first_trb; + ep_ring->enq_seg = urb_priv->td[0].start_seg; + ep_ring->cycle_state = start_cycle; + ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp; + usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); + return ret; +} + +/* + * Check transfer ring to guarantee there is enough room for the urb. + * Update ISO URB start_frame and interval. + * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to + * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or + * Contiguous Frame ID is not supported by HC. + */ +int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, + struct urb *urb, int slot_id, unsigned int ep_index) +{ + struct xhci_virt_device *xdev; + struct xhci_ring *ep_ring; + struct xhci_ep_ctx *ep_ctx; + int start_frame; + int num_tds, num_trbs, i; + int ret; + struct xhci_virt_ep *xep; + int ist; + + xdev = xhci->devs[slot_id]; + xep = &xhci->devs[slot_id]->eps[ep_index]; + ep_ring = xdev->eps[ep_index].ring; + ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + + num_trbs = 0; + num_tds = urb->number_of_packets; + for (i = 0; i < num_tds; i++) + num_trbs += count_isoc_trbs_needed(urb, i); + + /* Check the ring to guarantee there is enough room for the whole urb. + * Do not insert any td of the urb to the ring if the check failed. + */ + ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), + num_trbs, mem_flags); + if (ret) + return ret; + + /* + * Check interval value. This should be done before we start to + * calculate the start frame value. + */ + check_interval(xhci, urb, ep_ctx); + + /* Calculate the start frame and put it in urb->start_frame. */ + if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) { + urb->start_frame = xep->next_frame_id; + goto skip_start_over; + } + } + + start_frame = readl(&xhci->run_regs->microframe_index); + start_frame &= 0x3fff; + /* + * Round up to the next frame and consider the time before trb really + * gets scheduled by hardare. + */ + ist = HCS_IST(xhci->hcs_params2) & 0x7; + if (HCS_IST(xhci->hcs_params2) & (1 << 3)) + ist <<= 3; + start_frame += ist + XHCI_CFC_DELAY; + start_frame = roundup(start_frame, 8); + + /* + * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT + * is greate than 8 microframes. + */ + if (urb->dev->speed == USB_SPEED_LOW || + urb->dev->speed == USB_SPEED_FULL) { + start_frame = roundup(start_frame, urb->interval << 3); + urb->start_frame = start_frame >> 3; + } else { + start_frame = roundup(start_frame, urb->interval); + urb->start_frame = start_frame; + } + +skip_start_over: + ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free; + + return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); +} + +/**** Command Ring Operations ****/ + +/* Generic function for queueing a command TRB on the command ring. + * Check to make sure there's room on the command ring for one command TRB. + * Also check that there's room reserved for commands that must not fail. 
+ * If this is a command that must not fail, meaning command_must_succeed = TRUE, + * then only check for the number of reserved spots. + * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB + * because the command event handler may want to resubmit a failed command. + */ +static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 field1, u32 field2, + u32 field3, u32 field4, bool command_must_succeed) +{ + int reserved_trbs = xhci->cmd_ring_reserved_trbs; + int ret; + + if ((xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_HALTED)) { + xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); + return -ESHUTDOWN; + } + + if (!command_must_succeed) + reserved_trbs++; + + ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, + reserved_trbs, GFP_ATOMIC); + if (ret < 0) { + xhci_err(xhci, "ERR: No room for command on command ring\n"); + if (command_must_succeed) + xhci_err(xhci, "ERR: Reserved TRB counting for " + "unfailable commands failed.\n"); + return ret; + } + + cmd->command_trb = xhci->cmd_ring->enqueue; + + /* if there are no other commands queued we start the timeout timer */ + if (list_empty(&xhci->cmd_list)) { + xhci->current_cmd = cmd; + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); + } + + list_add_tail(&cmd->cmd_list, &xhci->cmd_list); + + queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, + field4 | xhci->cmd_ring->cycle_state); + return 0; +} + +/* Queue a slot enable or disable request on the command ring */ +int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 trb_type, u32 slot_id) +{ + return queue_command(xhci, cmd, 0, 0, 0, + TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); +} + +/* Queue an address device command TRB */ +int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, + dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup) +{ + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) + | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false); +} + +int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 field1, u32 field2, u32 field3, u32 field4) +{ + return queue_command(xhci, cmd, field1, field2, field3, field4, false); +} + +/* Queue a reset device command TRB */ +int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 slot_id) +{ + return queue_command(xhci, cmd, 0, 0, 0, + TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), + false); +} + +/* Queue a configure endpoint command TRB */ +int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, + u32 slot_id, bool command_must_succeed) +{ + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), + command_must_succeed); +} + +/* Queue an evaluate context command TRB */ +int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, + dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) +{ + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), + command_must_succeed); +} + +/* + * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop + * activity on an endpoint that is about to be suspended. 
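+ * The flag is folded into the Stop Endpoint TRB with SUSPEND_PORT_FOR_TRB()
+ * alongside the slot ID, endpoint ID and TRB_STOP_RING type fields.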
+ */ +int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, + int slot_id, unsigned int ep_index, int suspend) +{ + u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); + u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); + u32 type = TRB_TYPE(TRB_STOP_RING); + u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend); + + return queue_command(xhci, cmd, 0, 0, 0, + trb_slot_id | trb_ep_index | type | trb_suspend, false); +} +EXPORT_SYMBOL_GPL(xhci_queue_stop_endpoint); + +int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, + int slot_id, unsigned int ep_index, + enum xhci_ep_reset_type reset_type) +{ + u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); + u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); + u32 type = TRB_TYPE(TRB_RESET_EP); + + if (reset_type == EP_SOFT_RESET) + type |= TRB_TSP; + + return queue_command(xhci, cmd, 0, 0, 0, + trb_slot_id | trb_ep_index | type, false); +} diff --git a/usb/host/xhci.c b/usb/host/xhci.c new file mode 100644 index 0000000..cd2d7ca --- /dev/null +++ b/usb/host/xhci.c @@ -0,0 +1,5472 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "xhci.h" +#include "xhci-trace.h" +#include "xhci-debugfs.h" +#include "xhci-dbgcap.h" + +#define DRIVER_AUTHOR "Sarah Sharp" +#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" + +#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) + +/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ +static int link_quirk; +module_param(link_quirk, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); + +static unsigned long long quirks; +module_param(quirks, ullong, S_IRUGO); +MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); + +static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) +{ + struct xhci_segment *seg = ring->first_seg; + + if (!td || !td->start_seg) + return false; + do { + if (seg == td->start_seg) + return true; + seg = seg->next; + } while (seg && seg != ring->first_seg); + + return false; +} + +/* + * xhci_handshake - spin reading hc until handshake completes or fails + * @ptr: address of hc register to be read + * @mask: bits to look at in result of read + * @done: value of those bits when handshake succeeds + * @usec: timeout in microseconds + * + * Returns negative errno, or zero on success + * + * Success happens when the "mask" bits have the specified value (hardware + * handshake done). There are two failure modes: "usec" have passed (major + * hardware flakeout), or the register reads as all-ones (hardware removed). + */ +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) +{ + u32 result; + int ret; + + ret = readl_poll_timeout_atomic(ptr, result, + (result & mask) == done || + result == U32_MAX, + 1, usec); + if (result == U32_MAX) /* card removed */ + return -ENODEV; + + return ret; +} + +/* + * Disable interrupts and begin the xHCI halting process. + */ +void xhci_quiesce(struct xhci_hcd *xhci) +{ + u32 halted; + u32 cmd; + u32 mask; + + mask = ~(XHCI_IRQS); + halted = readl(&xhci->op_regs->status) & STS_HALT; + if (!halted) + mask &= ~CMD_RUN; + + cmd = readl(&xhci->op_regs->command); + cmd &= mask; + writel(cmd, &xhci->op_regs->command); +} + +/* + * Force HC into halt state. 
+ * + * Disable any IRQs and clear the run/stop bit. + * HC will complete any current and actively pipelined transactions, and + * should halt within 16 ms of the run/stop bit being cleared. + * Read HC Halted bit in the status register to see when the HC is finished. + */ +int xhci_halt(struct xhci_hcd *xhci) +{ + int ret; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); + xhci_quiesce(xhci); + + ret = xhci_handshake(&xhci->op_regs->status, + STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); + if (ret) { + xhci_warn(xhci, "Host halt failed, %d\n", ret); + return ret; + } + xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + return ret; +} + +/* + * Set the run bit and wait for the host to be running. + */ +int xhci_start(struct xhci_hcd *xhci) +{ + u32 temp; + int ret; + + temp = readl(&xhci->op_regs->command); + temp |= (CMD_RUN); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", + temp); + writel(temp, &xhci->op_regs->command); + + /* + * Wait for the HCHalted Status bit to be 0 to indicate the host is + * running. + */ + ret = xhci_handshake(&xhci->op_regs->status, + STS_HALT, 0, XHCI_MAX_HALT_USEC); + if (ret == -ETIMEDOUT) + xhci_err(xhci, "Host took too long to start, " + "waited %u microseconds.\n", + XHCI_MAX_HALT_USEC); + if (!ret) + /* clear state flags. Including dying, halted or removing */ + xhci->xhc_state = 0; + + return ret; +} + +/* + * Reset a halted HC. + * + * This resets pipelines, timers, counters, state machines, etc. + * Transactions will be terminated immediately, and operational registers + * will be set to their defaults. + */ +int xhci_reset(struct xhci_hcd *xhci) +{ + u32 command; + u32 state; + int ret; + + state = readl(&xhci->op_regs->status); + + if (state == ~(u32)0) { + xhci_warn(xhci, "Host not accessible, reset failed.\n"); + return -ENODEV; + } + + if ((state & STS_HALT) == 0) { + xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); + return 0; + } + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); + command = readl(&xhci->op_regs->command); + command |= CMD_RESET; + writel(command, &xhci->op_regs->command); + + /* Existing Intel xHCI controllers require a delay of 1 mS, + * after setting the CMD_RESET bit, and before accessing any + * HC registers. This allows the HC to complete the + * reset operation and be ready for HC register access. + * Without this delay, the subsequent HC register access, + * may result in a system hang very rarely. + */ + if (xhci->quirks & XHCI_INTEL_HOST) + udelay(1000); + + ret = xhci_handshake(&xhci->op_regs->command, + CMD_RESET, 0, 10 * 1000 * 1000); + if (ret) + return ret; + + if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) + usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Wait for controller to be ready for doorbell rings"); + /* + * xHCI cannot write to any doorbells or operational registers other + * than status until the "Controller Not Ready" flag is cleared. 
+ */ + ret = xhci_handshake(&xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); + + xhci->usb2_rhub.bus_state.port_c_suspend = 0; + xhci->usb2_rhub.bus_state.suspended_ports = 0; + xhci->usb2_rhub.bus_state.resuming_ports = 0; + xhci->usb3_rhub.bus_state.port_c_suspend = 0; + xhci->usb3_rhub.bus_state.suspended_ports = 0; + xhci->usb3_rhub.bus_state.resuming_ports = 0; + + return ret; +} + +static void xhci_zero_64b_regs(struct xhci_hcd *xhci) +{ + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + int err, i; + u64 val; + u32 intrs; + + /* + * Some Renesas controllers get into a weird state if they are + * reset while programmed with 64bit addresses (they will preserve + * the top half of the address in internal, non visible + * registers). You end up with half the address coming from the + * kernel, and the other half coming from the firmware. Also, + * changing the programming leads to extra accesses even if the + * controller is supposed to be halted. The controller ends up with + * a fatal fault, and is then ripe for being properly reset. + * + * Special care is taken to only apply this if the device is behind + * an iommu. Doing anything when there is no iommu is definitely + * unsafe... + */ + if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev)) + return; + + xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); + + /* Clear HSEIE so that faults do not get signaled */ + val = readl(&xhci->op_regs->command); + val &= ~CMD_HSEIE; + writel(val, &xhci->op_regs->command); + + /* Clear HSE (aka FATAL) */ + val = readl(&xhci->op_regs->status); + val |= STS_FATAL; + writel(val, &xhci->op_regs->status); + + /* Now zero the registers, and brace for impact */ + val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); + if (upper_32_bits(val)) + xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); + val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + if (upper_32_bits(val)) + xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); + + intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1), + ARRAY_SIZE(xhci->run_regs->ir_set)); + + for (i = 0; i < intrs; i++) { + struct xhci_intr_reg __iomem *ir; + + ir = &xhci->run_regs->ir_set[i]; + val = xhci_read_64(xhci, &ir->erst_base); + if (upper_32_bits(val)) + xhci_write_64(xhci, 0, &ir->erst_base); + val= xhci_read_64(xhci, &ir->erst_dequeue); + if (upper_32_bits(val)) + xhci_write_64(xhci, 0, &ir->erst_dequeue); + } + + /* Wait for the fault to appear. It will be cleared on reset */ + err = xhci_handshake(&xhci->op_regs->status, + STS_FATAL, STS_FATAL, + XHCI_MAX_HALT_USEC); + if (!err) + xhci_info(xhci, "Fault detected\n"); +} + +#ifdef CONFIG_USB_PCI +/* + * Set up MSI + */ +static int xhci_setup_msi(struct xhci_hcd *xhci) +{ + int ret; + /* + * TODO:Check with MSI Soc for sysdev + */ + struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "failed to allocate MSI entry"); + return ret; + } + + ret = request_irq(pdev->irq, xhci_msi_irq, + 0, "xhci_hcd", xhci_to_hcd(xhci)); + if (ret) { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "disable MSI interrupt"); + pci_free_irq_vectors(pdev); + } + + return ret; +} + +/* + * Set up MSI-X + */ +static int xhci_setup_msix(struct xhci_hcd *xhci) +{ + int i, ret = 0; + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + /* + * calculate number of msi-x vectors supported. 
+ * - HCS_MAX_INTRS: the max number of interrupts the host can handle, + * with max number of interrupters based on the xhci HCSPARAMS1. + * - num_online_cpus: maximum msi-x vectors per CPUs core. + * Add additional 1 vector to ensure always available interrupt. + */ + xhci->msix_count = min(num_online_cpus() + 1, + HCS_MAX_INTRS(xhci->hcs_params1)); + + ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count, + PCI_IRQ_MSIX); + if (ret < 0) { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Failed to enable MSI-X"); + return ret; + } + + for (i = 0; i < xhci->msix_count; i++) { + ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0, + "xhci_hcd", xhci_to_hcd(xhci)); + if (ret) + goto disable_msix; + } + + hcd->msix_enabled = 1; + return ret; + +disable_msix: + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); + while (--i >= 0) + free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); + pci_free_irq_vectors(pdev); + return ret; +} + +/* Free any IRQs and disable MSI-X */ +static void xhci_cleanup_msix(struct xhci_hcd *xhci) +{ + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + if (xhci->quirks & XHCI_PLAT) + return; + + /* return if using legacy interrupt */ + if (hcd->irq > 0) + return; + + if (hcd->msix_enabled) { + int i; + + for (i = 0; i < xhci->msix_count; i++) + free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); + } else { + free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci)); + } + + pci_free_irq_vectors(pdev); + hcd->msix_enabled = 0; +} + +static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) +{ + struct usb_hcd *hcd = xhci_to_hcd(xhci); + + if (hcd->msix_enabled) { + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + int i; + + for (i = 0; i < xhci->msix_count; i++) + synchronize_irq(pci_irq_vector(pdev, i)); + } +} + +static int xhci_try_enable_msi(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev; + int ret; + + /* The xhci platform device has set up IRQs through usb_add_hcd. */ + if (xhci->quirks & XHCI_PLAT) + return 0; + + pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + /* + * Some Fresco Logic host controllers advertise MSI, but fail to + * generate interrupts. Don't even try to enable MSI. 
+ */ + if (xhci->quirks & XHCI_BROKEN_MSI) + goto legacy_irq; + + /* unregister the legacy interrupt */ + if (hcd->irq) + free_irq(hcd->irq, hcd); + hcd->irq = 0; + + ret = xhci_setup_msix(xhci); + if (ret) + /* fall back to msi*/ + ret = xhci_setup_msi(xhci); + + if (!ret) { + hcd->msi_enabled = 1; + return 0; + } + + if (!pdev->irq) { + xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); + return -EINVAL; + } + + legacy_irq: + if (!strlen(hcd->irq_descr)) + snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", + hcd->driver->description, hcd->self.busnum); + + /* fall back to legacy interrupt*/ + ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, + hcd->irq_descr, hcd); + if (ret) { + xhci_err(xhci, "request interrupt %d failed\n", + pdev->irq); + return ret; + } + hcd->irq = pdev->irq; + return 0; +} + +#else + +static inline int xhci_try_enable_msi(struct usb_hcd *hcd) +{ + return 0; +} + +static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) +{ +} + +static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) +{ +} + +#endif + +static void compliance_mode_recovery(struct timer_list *t) +{ + struct xhci_hcd *xhci; + struct usb_hcd *hcd; + struct xhci_hub *rhub; + u32 temp; + int i; + + xhci = from_timer(xhci, t, comp_mode_recovery_timer); + rhub = &xhci->usb3_rhub; + + for (i = 0; i < rhub->num_ports; i++) { + temp = readl(rhub->ports[i]->addr); + if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) { + /* + * Compliance Mode Detected. Letting USB Core + * handle the Warm Reset + */ + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Compliance mode detected->port %d", + i + 1); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Attempting compliance mode recovery"); + hcd = xhci->shared_hcd; + + if (hcd->state == HC_STATE_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + + usb_hcd_poll_rh_status(hcd); + } + } + + if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1)) + mod_timer(&xhci->comp_mode_recovery_timer, + jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); +} + +/* + * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver + * that causes ports behind that hardware to enter compliance mode sometimes. + * The quirk creates a timer that polls every 2 seconds the link state of + * each host controller's port and recovers it by issuing a Warm reset + * if Compliance mode is detected, otherwise the port will become "dead" (no + * device connections or disconnections will be detected anymore). Becasue no + * status event is generated when entering compliance mode (per xhci spec), + * this quirk is needed on systems that have the failing hardware installed. + */ +static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) +{ + xhci->port_status_u0 = 0; + timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery, + 0); + xhci->comp_mode_recovery_timer.expires = jiffies + + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS); + + add_timer(&xhci->comp_mode_recovery_timer); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Compliance mode recovery timer initialized"); +} + +/* + * This function identifies the systems that have installed the SN65LVPE502CP + * USB3.0 re-driver and that need the Compliance Mode Quirk. 
+ * Systems: + * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 + */ +static bool xhci_compliance_mode_recovery_timer_quirk_check(void) +{ + const char *dmi_product_name, *dmi_sys_vendor; + + dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME); + dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); + if (!dmi_product_name || !dmi_sys_vendor) + return false; + + if (!(strstr(dmi_sys_vendor, "Hewlett-Packard"))) + return false; + + if (strstr(dmi_product_name, "Z420") || + strstr(dmi_product_name, "Z620") || + strstr(dmi_product_name, "Z820") || + strstr(dmi_product_name, "Z1 Workstation")) + return true; + + return false; +} + +static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) +{ + return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); +} + + +/* + * Initialize memory for HCD and xHC (one-time init). + * + * Program the PAGESIZE register, initialize the device context array, create + * device contexts (?), set up a command ring segment (or two?), create event + * ring (one for now). + */ +static int xhci_init(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int retval = 0; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); + spin_lock_init(&xhci->lock); + if (xhci->hci_version == 0x95 && link_quirk) { + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "QUIRK: Not clearing Link TRB chain bits."); + xhci->quirks |= XHCI_LINK_TRB_QUIRK; + } else { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xHCI doesn't need link TRB QUIRK"); + } + retval = xhci_mem_init(xhci, GFP_KERNEL); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); + + /* Initializing Compliance Mode Recovery Data If Needed */ + if (xhci_compliance_mode_recovery_timer_quirk_check()) { + xhci->quirks |= XHCI_COMP_MODE_QUIRK; + compliance_mode_recovery_timer_init(xhci); + } + + return retval; +} + +/*-------------------------------------------------------------------------*/ + + +static int xhci_run_finished(struct xhci_hcd *xhci) +{ + if (xhci_start(xhci)) { + xhci_halt(xhci); + return -ENODEV; + } + xhci->shared_hcd->state = HC_STATE_RUNNING; + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; + + if (xhci->quirks & XHCI_NEC_HOST) + xhci_ring_cmd_db(xhci); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Finished xhci_run for USB3 roothub"); + return 0; +} + +/* + * Start the HC after it was halted. + * + * This function is called by the USB core when the HC driver is added. + * Its opposite is xhci_stop(). + * + * xhci_init() must be called once before this function can be called. + * Reset the HC, enable device slot contexts, program DCBAAP, and + * set command ring pointer and event ring pointer. + * + * Setup MSI-X vectors and enable interrupts. + */ +int xhci_run(struct usb_hcd *hcd) +{ + u32 temp; + u64 temp_64; + int ret; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + /* Start the xHCI host controller running only after the USB 2.0 roothub + * is setup. 
+ */ + + hcd->uses_new_polling = 1; + if (!usb_hcd_is_primary_hcd(hcd)) + return xhci_run_finished(xhci); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); + + ret = xhci_try_enable_msi(hcd); + if (ret) + return ret; + + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 &= ~ERST_PTR_MASK; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "ERST deq = 64'h%0lx", (long unsigned int) temp_64); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Set the interrupt modulation register"); + temp = readl(&xhci->ir_set->irq_control); + temp &= ~ER_IRQ_INTERVAL_MASK; + temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; + writel(temp, &xhci->ir_set->irq_control); + + /* Set the HCD state before we enable the irqs */ + temp = readl(&xhci->op_regs->command); + temp |= (CMD_EIE); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Enable interrupts, cmd = 0x%x.", temp); + writel(temp, &xhci->op_regs->command); + + temp = readl(&xhci->ir_set->irq_pending); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", + xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); + writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); + + if (xhci->quirks & XHCI_NEC_HOST) { + struct xhci_command *command; + + command = xhci_alloc_command(xhci, false, GFP_KERNEL); + if (!command) + return -ENOMEM; + + ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, + TRB_TYPE(TRB_NEC_GET_FW)); + if (ret) + xhci_free_command(xhci, command); + } + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Finished xhci_run for USB2 roothub"); + + xhci_dbc_init(xhci); + + xhci_debugfs_init(xhci); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_run); + +/* + * Stop xHCI driver. + * + * This function is called by the USB core when the HC driver is removed. + * Its opposite is xhci_run(). + * + * Disable device contexts, disable IRQs, and quiesce the HC. + * Reset the HC, finish any completed transactions, and cleanup memory. + */ +static void xhci_stop(struct usb_hcd *hcd) +{ + u32 temp; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + mutex_lock(&xhci->mutex); + + /* Only halt host and free memory after both hcds are removed */ + if (!usb_hcd_is_primary_hcd(hcd)) { + mutex_unlock(&xhci->mutex); + return; + } + + xhci_dbc_exit(xhci); + + spin_lock_irq(&xhci->lock); + xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + xhci_halt(xhci); + xhci_reset(xhci); + spin_unlock_irq(&xhci->lock); + + xhci_cleanup_msix(xhci); + + /* Deleting Compliance Mode Recovery Timer */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (!(xhci_all_ports_seen_u0(xhci)))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "%s: compliance mode recovery timer deleted", + __func__); + } + + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_dev_put(); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Disabling event ring interrupts"); + temp = readl(&xhci->op_regs->status); + writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); + temp = readl(&xhci->ir_set->irq_pending); + writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); + xhci_mem_cleanup(xhci); + xhci_debugfs_exit(xhci); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xhci_stop completed - status = %x", + readl(&xhci->op_regs->status)); + mutex_unlock(&xhci->mutex); +} + +/* + * Shutdown HC (not bus-specific) + * + * This is called when the machine is rebooting or halting. 
We assume that the + * machine will be powered off, and the HC's internal state will be reset. + * Don't bother to free memory. + * + * This will only ever be called with the main usb_hcd (the USB3 roothub). + */ +void xhci_shutdown(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + if (xhci->quirks & XHCI_SPURIOUS_REBOOT) + usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); + + spin_lock_irq(&xhci->lock); + xhci_halt(xhci); + /* Workaround for spurious wakeups at shutdown with HSW */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + xhci_reset(xhci); + spin_unlock_irq(&xhci->lock); + + xhci_cleanup_msix(xhci); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xhci_shutdown completed - status = %x", + readl(&xhci->op_regs->status)); +} +EXPORT_SYMBOL_GPL(xhci_shutdown); + +#ifdef CONFIG_PM +static void xhci_save_registers(struct xhci_hcd *xhci) +{ + xhci->s3.command = readl(&xhci->op_regs->command); + xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); + xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); + xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); + xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); + xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); + xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); + xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); +} + +static void xhci_restore_registers(struct xhci_hcd *xhci) +{ + writel(xhci->s3.command, &xhci->op_regs->command); + writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); + xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); + writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); + writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); + xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); + xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); + writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); + writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); +} + +static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +{ + u64 val_64; + + /* step 2: initialize command ring buffer */ + val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | + (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, + xhci->cmd_ring->dequeue) & + (u64) ~CMD_RING_RSVD_BITS) | + xhci->cmd_ring->cycle_state; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Setting command ring address to 0x%llx", + (long unsigned long) val_64); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); +} + +/* + * The whole command ring must be cleared to zero when we suspend the host. + * + * The host doesn't save the command ring pointer in the suspend well, so we + * need to re-program it on resume. Unfortunately, the pointer must be 64-byte + * aligned, because of the reserved bits in the command ring dequeue pointer + * register. Therefore, we can't just set the dequeue pointer back in the + * middle of the ring (TRBs are 16-byte aligned). 
+ */ +static void xhci_clear_command_ring(struct xhci_hcd *xhci) +{ + struct xhci_ring *ring; + struct xhci_segment *seg; + + ring = xhci->cmd_ring; + seg = ring->deq_seg; + do { + memset(seg->trbs, 0, + sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); + seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= + cpu_to_le32(~TRB_CYCLE); + seg = seg->next; + } while (seg != ring->deq_seg); + + /* Reset the software enqueue and dequeue pointers */ + ring->deq_seg = ring->first_seg; + ring->dequeue = ring->first_seg->trbs; + ring->enq_seg = ring->deq_seg; + ring->enqueue = ring->dequeue; + + ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; + /* + * Ring is now zeroed, so the HW should look for change of ownership + * when the cycle bit is set to 1. + */ + ring->cycle_state = 1; + + /* + * Reset the hardware dequeue pointer. + * Yes, this will need to be re-written after resume, but we're paranoid + * and want to make sure the hardware doesn't access bogus memory + * because, say, the BIOS or an SMI started the host without changing + * the command ring pointers. + */ + xhci_set_cmd_ring_deq(xhci); +} + +/* + * Disable port wake bits if do_wakeup is not set. + * + * Also clear a possible internal port wake state left hanging for ports that + * detected termination but never successfully enumerated (trained to 0U). + * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done + * at enumeration clears this wake, force one here as well for unconnected ports + */ + +static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, + struct xhci_hub *rhub, + bool do_wakeup) +{ + unsigned long flags; + u32 t1, t2, portsc; + int i; + + spin_lock_irqsave(&xhci->lock, flags); + + for (i = 0; i < rhub->num_ports; i++) { + portsc = readl(rhub->ports[i]->addr); + t1 = xhci_port_state_to_neutral(portsc); + t2 = t1; + + /* clear wake bits if do_wake is not set */ + if (!do_wakeup) + t2 &= ~PORT_WAKE_BITS; + + /* Don't touch csc bit if connected or connect change is set */ + if (!(portsc & (PORT_CSC | PORT_CONNECT))) + t2 |= PORT_CSC; + + if (t1 != t2) { + writel(t2, rhub->ports[i]->addr); + xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n", + rhub->hcd->self.busnum, i + 1, portsc, t2); + } + } + spin_unlock_irqrestore(&xhci->lock, flags); +} + +static bool xhci_pending_portevent(struct xhci_hcd *xhci) +{ + struct xhci_port **ports; + int port_index; + u32 status; + u32 portsc; + + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) + return true; + /* + * Checking STS_EINT is not enough as there is a lag between a change + * bit being set and the Port Status Change Event that it generated + * being written to the Event Ring. See note in xhci 1.1 section 4.19.2. + */ + + port_index = xhci->usb2_rhub.num_ports; + ports = xhci->usb2_rhub.ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + port_index = xhci->usb3_rhub.num_ports; + ports = xhci->usb3_rhub.ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + return false; +} + +/* + * Stop HC (not bus-specific) + * + * This is called when the machine transition into S3/S4 mode. 
+ * + */ +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) +{ + int rc = 0; + unsigned int delay = XHCI_MAX_HALT_USEC * 2; + struct usb_hcd *hcd = xhci_to_hcd(xhci); + u32 command; + u32 res; + + if (!hcd->state) + return 0; + + if (hcd->state != HC_STATE_SUSPENDED || + xhci->shared_hcd->state != HC_STATE_SUSPENDED) + return -EINVAL; + + /* Clear root port wake on bits if wakeup not allowed. */ + xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); + xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); + + if (!HCD_HW_ACCESSIBLE(hcd)) + return 0; + + xhci_dbc_suspend(xhci); + + /* Don't poll the roothubs on bus suspend. */ + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + del_timer_sync(&hcd->rh_timer); + clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + del_timer_sync(&xhci->shared_hcd->rh_timer); + + if (xhci->quirks & XHCI_SUSPEND_DELAY) + usleep_range(1000, 1500); + + spin_lock_irq(&xhci->lock); + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); + /* step 1: stop endpoint */ + /* skipped assuming that port suspend has done */ + + /* step 2: clear Run/Stop bit */ + command = readl(&xhci->op_regs->command); + command &= ~CMD_RUN; + writel(command, &xhci->op_regs->command); + + /* Some chips from Fresco Logic need an extraordinary delay */ + delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1; + + if (xhci_handshake(&xhci->op_regs->status, + STS_HALT, STS_HALT, delay)) { + xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); + spin_unlock_irq(&xhci->lock); + return -ETIMEDOUT; + } + xhci_clear_command_ring(xhci); + + /* step 3: save registers */ + xhci_save_registers(xhci); + + /* step 4: set CSS flag */ + command = readl(&xhci->op_regs->command); + command |= CMD_CSS; + writel(command, &xhci->op_regs->command); + xhci->broken_suspend = 0; + if (xhci_handshake(&xhci->op_regs->status, + STS_SAVE, 0, 20 * 1000)) { + /* + * AMD SNPS xHC 3.0 occasionally does not clear the + * SSS bit of USBSTS and when driver tries to poll + * to see if the xHC clears BIT(8) which never happens + * and driver assumes that controller is not responding + * and times out. To workaround this, its good to check + * if SRE and HCE bits are not set (as per xhci + * Section 5.4.2) and bypass the timeout. + */ + res = readl(&xhci->op_regs->status); + if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) && + (((res & STS_SRE) == 0) && + ((res & STS_HCE) == 0))) { + xhci->broken_suspend = 1; + } else { + xhci_warn(xhci, "WARN: xHC save state timeout\n"); + spin_unlock_irq(&xhci->lock); + return -ETIMEDOUT; + } + } + spin_unlock_irq(&xhci->lock); + + /* + * Deleting Compliance Mode Recovery Timer because the xHCI Host + * is about to be suspended. + */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (!(xhci_all_ports_seen_u0(xhci)))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "%s: compliance mode recovery timer deleted", + __func__); + } + + /* step 5: remove core well power */ + /* synchronize irq when using MSI-X */ + xhci_msix_sync_irqs(xhci); + + return rc; +} +EXPORT_SYMBOL_GPL(xhci_suspend); + +/* + * start xHC (not bus-specific) + * + * This is called when the machine transition from S3/S4 mode. 
+ * + */ +int xhci_resume(struct xhci_hcd *xhci, bool hibernated) +{ + u32 command, temp = 0; + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct usb_hcd *secondary_hcd; + int retval = 0; + bool comp_timer_running = false; + bool pending_portevent = false; + + if (!hcd->state) + return 0; + + /* Wait a bit if either of the roothubs need to settle from the + * transition into bus suspend. + */ + + if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) || + time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange)) + msleep(100); + + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); + + spin_lock_irq(&xhci->lock); + if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) + hibernated = true; + + if (!hibernated) { + /* + * Some controllers might lose power during suspend, so wait + * for controller not ready bit to clear, just as in xHC init. + */ + retval = xhci_handshake(&xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); + if (retval) { + xhci_warn(xhci, "Controller not ready at resume %d\n", + retval); + spin_unlock_irq(&xhci->lock); + return retval; + } + /* step 1: restore register */ + xhci_restore_registers(xhci); + /* step 2: initialize command ring buffer */ + xhci_set_cmd_ring_deq(xhci); + /* step 3: restore state and start state*/ + /* step 3: set CRS flag */ + command = readl(&xhci->op_regs->command); + command |= CMD_CRS; + writel(command, &xhci->op_regs->command); + /* + * Some controllers take up to 55+ ms to complete the controller + * restore so setting the timeout to 100ms. Xhci specification + * doesn't mention any timeout value. + */ + if (xhci_handshake(&xhci->op_regs->status, + STS_RESTORE, 0, 100 * 1000)) { + xhci_warn(xhci, "WARN: xHC restore state timeout\n"); + spin_unlock_irq(&xhci->lock); + return -ETIMEDOUT; + } + temp = readl(&xhci->op_regs->status); + } + + /* If restore operation fails, re-initialize the HC during resume */ + if ((temp & STS_SRE) || hibernated) { + + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + !(xhci_all_ports_seen_u0(xhci))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Compliance Mode Recovery Timer deleted!"); + } + + /* Let the USB core know _both_ roothubs lost power. */ + usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); + usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); + + xhci_dbg(xhci, "Stop HCD\n"); + xhci_halt(xhci); + xhci_zero_64b_regs(xhci); + retval = xhci_reset(xhci); + spin_unlock_irq(&xhci->lock); + if (retval) + return retval; + xhci_cleanup_msix(xhci); + + xhci_dbg(xhci, "// Disabling event ring interrupts\n"); + temp = readl(&xhci->op_regs->status); + writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); + temp = readl(&xhci->ir_set->irq_pending); + writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); + + xhci_dbg(xhci, "cleaning up memory\n"); + xhci_mem_cleanup(xhci); + xhci_debugfs_exit(xhci); + xhci_dbg(xhci, "xhci_stop completed - status = %x\n", + readl(&xhci->op_regs->status)); + + /* USB core calls the PCI reinit and start functions twice: + * first with the primary HCD, and then with the secondary HCD. + * If we don't do the same, the host will never be started. 
+ */ + if (!usb_hcd_is_primary_hcd(hcd)) + secondary_hcd = hcd; + else + secondary_hcd = xhci->shared_hcd; + + xhci_dbg(xhci, "Initialize the xhci_hcd\n"); + retval = xhci_init(hcd->primary_hcd); + if (retval) + return retval; + comp_timer_running = true; + + xhci_dbg(xhci, "Start the primary HCD\n"); + retval = xhci_run(hcd->primary_hcd); + if (!retval) { + xhci_dbg(xhci, "Start the secondary HCD\n"); + retval = xhci_run(secondary_hcd); + } + hcd->state = HC_STATE_SUSPENDED; + xhci->shared_hcd->state = HC_STATE_SUSPENDED; + goto done; + } + + /* step 4: set Run/Stop bit */ + command = readl(&xhci->op_regs->command); + command |= CMD_RUN; + writel(command, &xhci->op_regs->command); + xhci_handshake(&xhci->op_regs->status, STS_HALT, + 0, 250 * 1000); + + /* step 5: walk topology and initialize portsc, + * portpmsc and portli + */ + /* this is done in bus_resume */ + + /* step 6: restart each of the previously + * Running endpoints by ringing their doorbells + */ + + spin_unlock_irq(&xhci->lock); + + xhci_dbc_resume(xhci); + + done: + if (retval == 0) { + /* + * Resume roothubs only if there are pending events. + * USB 3 devices resend U3 LFPS wake after a 100ms delay if + * the first wake signalling failed, give it that chance. + */ + pending_portevent = xhci_pending_portevent(xhci); + if (!pending_portevent) { + msleep(120); + pending_portevent = xhci_pending_portevent(xhci); + } + + if (pending_portevent) { + usb_hcd_resume_root_hub(xhci->shared_hcd); + usb_hcd_resume_root_hub(hcd); + } + } + /* + * If system is subject to the Quirk, Compliance Mode Timer needs to + * be re-initialized Always after a system resume. Ports are subject + * to suffer the Compliance Mode issue again. It doesn't matter if + * ports have entered previously to U0 before system's suspension. + */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) + compliance_mode_recovery_timer_init(xhci); + + if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) + usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller)); + + /* Re-enable port polling. */ + xhci_dbg(xhci, "%s: starting port polling.\n", __func__); + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + usb_hcd_poll_rh_status(xhci->shared_hcd); + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); + usb_hcd_poll_rh_status(hcd); + + return retval; +} +EXPORT_SYMBOL_GPL(xhci_resume); +#endif /* CONFIG_PM */ + +/*-------------------------------------------------------------------------*/ + +/* + * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT), + * we'll copy the actual data into the TRB address register. This is limited to + * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize + * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed. + */ +static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + if (xhci_urb_suitable_for_idt(urb)) + return 0; + + return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); +} + +/* + * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and + * HCDs. Find the index for an endpoint given its descriptor. Use the return + * value to right shift 1 for the bitmask. + * + * Index = (epnum * 2) + direction - 1, + * where direction = 0 for OUT, 1 for IN. 
+ * For control endpoints, the IN index is used (OUT index is unused), so + * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) + */ +unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) +{ + unsigned int index; + if (usb_endpoint_xfer_control(desc)) + index = (unsigned int) (usb_endpoint_num(desc)*2); + else + index = (unsigned int) (usb_endpoint_num(desc)*2) + + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; + return index; +} +EXPORT_SYMBOL_GPL(xhci_get_endpoint_index); + +/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint + * address from the XHCI endpoint index. + */ +unsigned int xhci_get_endpoint_address(unsigned int ep_index) +{ + unsigned int number = DIV_ROUND_UP(ep_index, 2); + unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN; + return direction | number; +} +EXPORT_SYMBOL_GPL(xhci_get_endpoint_address); + +/* Find the flag for this endpoint (for use in the control context). Use the + * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is + * bit 1, etc. + */ +static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) +{ + return 1 << (xhci_get_endpoint_index(desc) + 1); +} + +/* Compute the last valid endpoint context index. Basically, this is the + * endpoint index plus one. For slot contexts with more than valid endpoint, + * we find the most significant bit set in the added contexts flags. + * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 + * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. + */ +unsigned int xhci_last_valid_endpoint(u32 added_ctxs) +{ + return fls(added_ctxs) - 1; +} + +/* Returns 1 if the arguments are OK; + * returns 0 this is a root hub; returns -EINVAL for NULL pointers. + */ +static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, + const char *func) { + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; + + if (!hcd || (check_ep && !ep) || !udev) { + pr_debug("xHCI %s called with invalid args\n", func); + return -EINVAL; + } + if (!udev->parent) { + pr_debug("xHCI %s called for root hub\n", func); + return 0; + } + + xhci = hcd_to_xhci(hcd); + if (check_virt_dev) { + if (!udev->slot_id || !xhci->devs[udev->slot_id]) { + xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", + func); + return -EINVAL; + } + + virt_dev = xhci->devs[udev->slot_id]; + if (virt_dev->udev != udev) { + xhci_dbg(xhci, "xHCI %s called with udev and " + "virt_dev does not match\n", func); + return -EINVAL; + } + } + + if (xhci->xhc_state & XHCI_STATE_HALTED) + return -ENODEV; + + return 1; +} + +static int xhci_configure_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, struct xhci_command *command, + bool ctx_change, bool must_succeed); + +/* + * Full speed devices may have a max packet size greater than 8 bytes, but the + * USB core doesn't know that until it reads the first 8 bytes of the + * descriptor. If the usb_device's max packet size changes after that point, + * we need to issue an evaluate context command and wait on it. 
+ */ +static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, struct urb *urb, gfp_t mem_flags) +{ + struct xhci_container_ctx *out_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_ep_ctx *ep_ctx; + struct xhci_command *command; + int max_packet_size; + int hw_max_packet_size; + int ret = 0; + + out_ctx = xhci->devs[slot_id]->out_ctx; + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); + hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); + max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); + if (hw_max_packet_size != max_packet_size) { + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Max Packet Size for ep 0 changed."); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Max packet size in usb_device = %d", + max_packet_size); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Max packet size in xHCI HW = %d", + hw_max_packet_size); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Issuing evaluate context command."); + + /* Set up the input context flags for the command */ + /* FIXME: This won't work if a non-default control endpoint + * changes max packet sizes. + */ + + command = xhci_alloc_command(xhci, true, mem_flags); + if (!command) + return -ENOMEM; + + command->in_ctx = xhci->devs[slot_id]->in_ctx; + ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + ret = -ENOMEM; + goto command_cleanup; + } + /* Set up the modified control endpoint 0 */ + xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, + xhci->devs[slot_id]->out_ctx, ep_index); + + ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ + ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); + + ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); + ctrl_ctx->drop_flags = 0; + + ret = xhci_configure_endpoint(xhci, urb->dev, command, + true, false); + + /* Clean up the input context for later use by bandwidth + * functions. 
+ */ + ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); +command_cleanup: + kfree(command->completion); + kfree(command); + } + return ret; +} + +/* + * non-error returns are a promise to giveback() the urb later + * we drop ownership so next owner (or urb unlink) can get it + */ +static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned long flags; + int ret = 0; + unsigned int slot_id, ep_index; + unsigned int *ep_state; + struct urb_priv *urb_priv; + int num_tds; + + if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__) <= 0) + return -EINVAL; + + slot_id = urb->dev->slot_id; + ep_index = xhci_get_endpoint_index(&urb->ep->desc); + ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; + + if (!HCD_HW_ACCESSIBLE(hcd)) { + if (!in_interrupt()) + xhci_dbg(xhci, "urb submitted during PCI suspend\n"); + return -ESHUTDOWN; + } + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { + xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); + return -ENODEV; + } + + if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) { + xhci_dbg(xhci, "skip urb for usb offload\n"); + return -EOPNOTSUPP; + } + + if (usb_endpoint_xfer_isoc(&urb->ep->desc)) + num_tds = urb->number_of_packets; + else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && + urb->transfer_buffer_length > 0 && + urb->transfer_flags & URB_ZERO_PACKET && + !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) + num_tds = 2; + else + num_tds = 1; + + urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags); + if (!urb_priv) + return -ENOMEM; + + urb_priv->num_tds = num_tds; + urb_priv->num_tds_done = 0; + urb->hcpriv = urb_priv; + + trace_xhci_urb_enqueue(urb); + + if (usb_endpoint_xfer_control(&urb->ep->desc)) { + /* Check to see if the max packet size for the default control + * endpoint changed during FS device enumeration + */ + if (urb->dev->speed == USB_SPEED_FULL) { + ret = xhci_check_maxpacket(xhci, slot_id, + ep_index, urb, mem_flags); + if (ret < 0) { + xhci_urb_free_priv(urb_priv); + urb->hcpriv = NULL; + return ret; + } + } + } + + spin_lock_irqsave(&xhci->lock, flags); + + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + ret = -ESHUTDOWN; + goto free_priv; + } + if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { + xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", + *ep_state); + ret = -EINVAL; + goto free_priv; + } + if (*ep_state & EP_SOFT_CLEAR_TOGGLE) { + xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n"); + ret = -EINVAL; + goto free_priv; + } + + switch (usb_endpoint_type(&urb->ep->desc)) { + + case USB_ENDPOINT_XFER_CONTROL: + ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + break; + case USB_ENDPOINT_XFER_BULK: + ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + break; + case USB_ENDPOINT_XFER_INT: + ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + break; + case USB_ENDPOINT_XFER_ISOC: + ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + } + + if (ret) { +free_priv: + xhci_urb_free_priv(urb_priv); + urb->hcpriv = NULL; + } + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; +} + +/* + * Remove the URB's TD from the endpoint ring. 
This may cause the HC to stop + * USB transfers, potentially stopping in the middle of a TRB buffer. The HC + * should pick up where it left off in the TD, unless a Set Transfer Ring + * Dequeue Pointer is issued. + * + * The TRBs that make up the buffers for the canceled URB will be "removed" from + * the ring. Since the ring is a contiguous structure, they can't be physically + * removed. Instead, there are two options: + * + * 1) If the HC is in the middle of processing the URB to be canceled, we + * simply move the ring's dequeue pointer past those TRBs using the Set + * Transfer Ring Dequeue Pointer command. This will be the common case, + * when drivers timeout on the last submitted URB and attempt to cancel. + * + * 2) If the HC is in the middle of a different TD, we turn the TRBs into a + * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The + * HC will need to invalidate the any TRBs it has cached after the stop + * endpoint command, as noted in the xHCI 0.95 errata. + * + * 3) The TD may have completed by the time the Stop Endpoint Command + * completes, so software needs to handle that case too. + * + * This function should protect against the TD enqueueing code ringing the + * doorbell while this code is waiting for a Stop Endpoint command to complete. + * It also needs to account for multiple cancellations on happening at the same + * time for the same endpoint. + * + * Note that this function can be called in any context, or so says + * usb_hcd_unlink_urb() + */ +static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + unsigned long flags; + int ret, i; + u32 temp; + struct xhci_hcd *xhci; + struct urb_priv *urb_priv; + struct xhci_td *td; + unsigned int ep_index; + struct xhci_ring *ep_ring; + struct xhci_virt_ep *ep; + struct xhci_command *command; + struct xhci_virt_device *vdev; + + xhci = hcd_to_xhci(hcd); + spin_lock_irqsave(&xhci->lock, flags); + + trace_xhci_urb_dequeue(urb); + + /* Make sure the URB hasn't completed or been unlinked already */ + ret = usb_hcd_check_unlink_urb(hcd, urb, status); + if (ret) + goto done; + + /* give back URB now if we can't queue it for cancel */ + vdev = xhci->devs[urb->dev->slot_id]; + urb_priv = urb->hcpriv; + if (!vdev || !urb_priv) + goto err_giveback; + + ep_index = xhci_get_endpoint_index(&urb->ep->desc); + ep = &vdev->eps[ep_index]; + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ep || !ep_ring) + goto err_giveback; + + /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */ + temp = readl(&xhci->op_regs->status); + if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { + xhci_hc_died(xhci); + goto done; + } + + /* + * check ring is not re-allocated since URB was enqueued. 
If it is, then + * make sure none of the ring related pointers in this URB private data + * are touched, such as td_list, otherwise we overwrite freed data + */ + if (!td_on_ring(&urb_priv->td[0], ep_ring)) { + xhci_err(xhci, "Canceled URB td not found on endpoint ring"); + for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { + td = &urb_priv->td[i]; + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + } + goto err_giveback; + } + + if (xhci->xhc_state & XHCI_STATE_HALTED) { + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "HC halted, freeing TD manually."); + for (i = urb_priv->num_tds_done; + i < urb_priv->num_tds; + i++) { + td = &urb_priv->td[i]; + if (!list_empty(&td->td_list)) + list_del_init(&td->td_list); + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + } + goto err_giveback; + } + + i = urb_priv->num_tds_done; + if (i < urb_priv->num_tds) + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Cancel URB %p, dev %s, ep 0x%x, " + "starting at offset 0x%llx", + urb, urb->dev->devpath, + urb->ep->desc.bEndpointAddress, + (unsigned long long) xhci_trb_virt_to_dma( + urb_priv->td[i].start_seg, + urb_priv->td[i].first_trb)); + + for (; i < urb_priv->num_tds; i++) { + td = &urb_priv->td[i]; + /* TD can already be on cancelled list if ep halted on it */ + if (list_empty(&td->cancelled_td_list)) { + td->cancel_status = TD_DIRTY; + list_add_tail(&td->cancelled_td_list, + &ep->cancelled_td_list); + } + } + + /* Queue a stop endpoint command, but only if this is + * the first cancellation to be handled. + */ + if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!command) { + ret = -ENOMEM; + goto done; + } + ep->ep_state |= EP_STOP_CMD_PENDING; + ep->stop_cmd_timer.expires = jiffies + + XHCI_STOP_EP_CMD_TIMEOUT * HZ; + add_timer(&ep->stop_cmd_timer); + xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, + ep_index, 0); + xhci_ring_cmd_db(xhci); + } +done: + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; + +err_giveback: + if (urb_priv) + xhci_urb_free_priv(urb_priv); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&xhci->lock, flags); + usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); + return ret; +} + +/* Drop an endpoint from a new bandwidth configuration for this device. + * Only one call to this function is allowed per endpoint before + * check_bandwidth() or reset_bandwidth() must be called. + * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will + * add the endpoint to the schedule with possibly new parameters denoted by a + * different endpoint descriptor in usb_host_endpoint. + * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is + * not allowed. + * + * The USB core will not allow URBs to be queued to an endpoint that is being + * disabled, so there's no need for mutual exclusion to protect + * the xhci->devs[slot_id] structure. 
+ */ +int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct xhci_container_ctx *in_ctx, *out_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + unsigned int ep_index; + struct xhci_ep_ctx *ep_ctx; + u32 drop_flag; + u32 new_add_flags, new_drop_flags; + int ret; + + ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + if (xhci->xhc_state & XHCI_STATE_DYING) + return -ENODEV; + + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); + drop_flag = xhci_get_endpoint_flag(&ep->desc); + if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { + xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", + __func__, drop_flag); + return 0; + } + + in_ctx = xhci->devs[udev->slot_id]->in_ctx; + out_ctx = xhci->devs[udev->slot_id]->out_ctx; + ctrl_ctx = xhci_get_input_control_ctx(in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + return 0; + } + + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); + /* If the HC already knows the endpoint is disabled, + * or the HCD has noted it is disabled, ignore this request + */ + if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || + le32_to_cpu(ctrl_ctx->drop_flags) & + xhci_get_endpoint_flag(&ep->desc)) { + /* Do not warn when called after a usb_device_reset */ + if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) + xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", + __func__, ep); + return 0; + } + + ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); + new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); + + ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); + new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); + + xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); + + xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); + + xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", + (unsigned int) ep->desc.bEndpointAddress, + udev->slot_id, + (unsigned int) new_drop_flags, + (unsigned int) new_add_flags); + return 0; +} +EXPORT_SYMBOL_GPL(xhci_drop_endpoint); + +/* Add an endpoint to a new possible bandwidth configuration for this device. + * Only one call to this function is allowed per endpoint before + * check_bandwidth() or reset_bandwidth() must be called. + * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will + * add the endpoint to the schedule with possibly new parameters denoted by a + * different endpoint descriptor in usb_host_endpoint. + * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is + * not allowed. + * + * The USB core will not allow URBs to be queued to an endpoint until the + * configuration or alt setting is installed in the device, so there's no need + * for mutual exclusion to protect the xhci->devs[slot_id] structure. 
+ */ +int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct xhci_container_ctx *in_ctx; + unsigned int ep_index; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_ep_ctx *ep_ctx; + u32 added_ctxs; + u32 new_add_flags, new_drop_flags; + struct xhci_virt_device *virt_dev; + int ret = 0; + + ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); + if (ret <= 0) { + /* So we won't queue a reset ep command for a root hub */ + ep->hcpriv = NULL; + return ret; + } + xhci = hcd_to_xhci(hcd); + if (xhci->xhc_state & XHCI_STATE_DYING) + return -ENODEV; + + added_ctxs = xhci_get_endpoint_flag(&ep->desc); + if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { + /* FIXME when we have to issue an evaluate endpoint command to + * deal with ep0 max packet size changing once we get the + * descriptors + */ + xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", + __func__, added_ctxs); + return 0; + } + + virt_dev = xhci->devs[udev->slot_id]; + in_ctx = virt_dev->in_ctx; + ctrl_ctx = xhci_get_input_control_ctx(in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + return 0; + } + + ep_index = xhci_get_endpoint_index(&ep->desc); + /* If this endpoint is already in use, and the upper layers are trying + * to add it again without dropping it, reject the addition. + */ + if (virt_dev->eps[ep_index].ring && + !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { + xhci_warn(xhci, "Trying to add endpoint 0x%x " + "without dropping it.\n", + (unsigned int) ep->desc.bEndpointAddress); + return -EINVAL; + } + + /* If the HCD has already noted the endpoint is enabled, + * ignore this request. + */ + if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { + xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", + __func__, ep); + return 0; + } + + /* + * Configuration and alternate setting changes must be done in + * process context, not interrupt context (or so documenation + * for usb_set_interface() and usb_set_configuration() claim). + */ + if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { + dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", + __func__, ep->desc.bEndpointAddress); + return -ENOMEM; + } + + ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); + new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); + + /* If xhci_endpoint_disable() was called for this endpoint, but the + * xHC hasn't been notified yet through the check_bandwidth() call, + * this re-adds a new state for the endpoint from the new endpoint + * descriptors. We must drop and re-add this endpoint, so we leave the + * drop flags alone. 
+ */ + new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); + + /* Store the usb_device pointer for later use */ + ep->hcpriv = udev; + + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + trace_xhci_add_endpoint(ep_ctx); + + xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", + (unsigned int) ep->desc.bEndpointAddress, + udev->slot_id, + (unsigned int) new_drop_flags, + (unsigned int) new_add_flags); + return 0; +} +EXPORT_SYMBOL_GPL(xhci_add_endpoint); + +static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) +{ + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + int i; + + ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + return; + } + + /* When a device's add flag and drop flag are zero, any subsequent + * configure endpoint command will leave that endpoint's state + * untouched. Make sure we don't leave any old state in the input + * endpoint contexts. + */ + ctrl_ctx->drop_flags = 0; + ctrl_ctx->add_flags = 0; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + /* Endpoint 0 is always valid */ + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); + for (i = 1; i < 31; i++) { + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); + ep_ctx->ep_info = 0; + ep_ctx->ep_info2 = 0; + ep_ctx->deq = 0; + ep_ctx->tx_info = 0; + } +} + +static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, + struct usb_device *udev, u32 *cmd_status) +{ + int ret; + + switch (*cmd_status) { + case COMP_COMMAND_ABORTED: + case COMP_COMMAND_RING_STOPPED: + xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); + ret = -ETIME; + break; + case COMP_RESOURCE_ERROR: + dev_warn(&udev->dev, + "Not enough host controller resources for new device state.\n"); + ret = -ENOMEM; + /* FIXME: can we allocate more resources for the HC? */ + break; + case COMP_BANDWIDTH_ERROR: + case COMP_SECONDARY_BANDWIDTH_ERROR: + dev_warn(&udev->dev, + "Not enough bandwidth for new device state.\n"); + ret = -ENOSPC; + /* FIXME: can we go back to the old state? 
*/ + break; + case COMP_TRB_ERROR: + /* the HCD set up something wrong */ + dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " + "add flag = 1, " + "and endpoint is not disabled.\n"); + ret = -EINVAL; + break; + case COMP_INCOMPATIBLE_DEVICE_ERROR: + dev_warn(&udev->dev, + "ERROR: Incompatible device for endpoint configure command.\n"); + ret = -ENODEV; + break; + case COMP_SUCCESS: + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Successful Endpoint Configure command"); + ret = 0; + break; + default: + xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", + *cmd_status); + ret = -EINVAL; + break; + } + return ret; +} + +static int xhci_evaluate_context_result(struct xhci_hcd *xhci, + struct usb_device *udev, u32 *cmd_status) +{ + int ret; + + switch (*cmd_status) { + case COMP_COMMAND_ABORTED: + case COMP_COMMAND_RING_STOPPED: + xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); + ret = -ETIME; + break; + case COMP_PARAMETER_ERROR: + dev_warn(&udev->dev, + "WARN: xHCI driver setup invalid evaluate context command.\n"); + ret = -EINVAL; + break; + case COMP_SLOT_NOT_ENABLED_ERROR: + dev_warn(&udev->dev, + "WARN: slot not enabled for evaluate context command.\n"); + ret = -EINVAL; + break; + case COMP_CONTEXT_STATE_ERROR: + dev_warn(&udev->dev, + "WARN: invalid context state for evaluate context command.\n"); + ret = -EINVAL; + break; + case COMP_INCOMPATIBLE_DEVICE_ERROR: + dev_warn(&udev->dev, + "ERROR: Incompatible device for evaluate context command.\n"); + ret = -ENODEV; + break; + case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: + /* Max Exit Latency too large error */ + dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); + ret = -EINVAL; + break; + case COMP_SUCCESS: + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Successful evaluate context command"); + ret = 0; + break; + default: + xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", + *cmd_status); + ret = -EINVAL; + break; + } + return ret; +} + +static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, + struct xhci_input_control_ctx *ctrl_ctx) +{ + u32 valid_add_flags; + u32 valid_drop_flags; + + /* Ignore the slot flag (bit 0), and the default control endpoint flag + * (bit 1). The default control endpoint is added during the Address + * Device command and is never removed until the slot is disabled. + */ + valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; + valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; + + /* Use hweight32 to count the number of ones in the add flags, or + * number of endpoints added. Don't count endpoints that are changed + * (both added and dropped). + */ + return hweight32(valid_add_flags) - + hweight32(valid_add_flags & valid_drop_flags); +} + +static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, + struct xhci_input_control_ctx *ctrl_ctx) +{ + u32 valid_add_flags; + u32 valid_drop_flags; + + valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; + valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; + + return hweight32(valid_drop_flags) - + hweight32(valid_add_flags & valid_drop_flags); +} + +/* + * We need to reserve the new number of endpoints before the configure endpoint + * command completes. 
We can't subtract the dropped endpoints from the number + * of active endpoints until the command completes because we can oversubscribe + * the host in this case: + * + * - the first configure endpoint command drops more endpoints than it adds + * - a second configure endpoint command that adds more endpoints is queued + * - the first configure endpoint command fails, so the config is unchanged + * - the second command may succeed, even though there isn't enough resources + * + * Must be called with xhci->lock held. + */ +static int xhci_reserve_host_resources(struct xhci_hcd *xhci, + struct xhci_input_control_ctx *ctrl_ctx) +{ + u32 added_eps; + + added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); + if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Not enough ep ctxs: " + "%u active, need to add %u, limit is %u.", + xhci->num_active_eps, added_eps, + xhci->limit_active_eps); + return -ENOMEM; + } + xhci->num_active_eps += added_eps; + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Adding %u ep ctxs, %u now active.", added_eps, + xhci->num_active_eps); + return 0; +} + +/* + * The configure endpoint was failed by the xHC for some other reason, so we + * need to revert the resources that failed configuration would have used. + * + * Must be called with xhci->lock held. + */ +static void xhci_free_host_resources(struct xhci_hcd *xhci, + struct xhci_input_control_ctx *ctrl_ctx) +{ + u32 num_failed_eps; + + num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); + xhci->num_active_eps -= num_failed_eps; + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Removing %u failed ep ctxs, %u now active.", + num_failed_eps, + xhci->num_active_eps); +} + +/* + * Now that the command has completed, clean up the active endpoint count by + * subtracting out the endpoints that were dropped (but not changed). + * + * Must be called with xhci->lock held. + */ +static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, + struct xhci_input_control_ctx *ctrl_ctx) +{ + u32 num_dropped_eps; + + num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); + xhci->num_active_eps -= num_dropped_eps; + if (num_dropped_eps) + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Removing %u dropped ep ctxs, %u now active.", + num_dropped_eps, + xhci->num_active_eps); +} + +static unsigned int xhci_get_block_size(struct usb_device *udev) +{ + switch (udev->speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + return FS_BLOCK; + case USB_SPEED_HIGH: + return HS_BLOCK; + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + return SS_BLOCK; + case USB_SPEED_UNKNOWN: + case USB_SPEED_WIRELESS: + default: + /* Should never happen */ + return 1; + } +} + +static unsigned int +xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) +{ + if (interval_bw->overhead[LS_OVERHEAD_TYPE]) + return LS_OVERHEAD; + if (interval_bw->overhead[FS_OVERHEAD_TYPE]) + return FS_OVERHEAD; + return HS_OVERHEAD; +} + +/* If we are changing a LS/FS device under a HS hub, + * make sure (if we are activating a new TT) that the HS bus has enough + * bandwidth for this new TT. + */ +static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + int old_active_eps) +{ + struct xhci_interval_bw_table *bw_table; + struct xhci_tt_bw_info *tt_info; + + /* Find the bandwidth table for the root port this TT is attached to. 
*/ + bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; + tt_info = virt_dev->tt_info; + /* If this TT already had active endpoints, the bandwidth for this TT + * has already been added. Removing all periodic endpoints (and thus + * making the TT enactive) will only decrease the bandwidth used. + */ + if (old_active_eps) + return 0; + if (old_active_eps == 0 && tt_info->active_eps != 0) { + if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) + return -ENOMEM; + return 0; + } + /* Not sure why we would have no new active endpoints... + * + * Maybe because of an Evaluate Context change for a hub update or a + * control endpoint 0 max packet size change? + * FIXME: skip the bandwidth calculation in that case. + */ + return 0; +} + +static int xhci_check_ss_bw(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev) +{ + unsigned int bw_reserved; + + bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); + if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) + return -ENOMEM; + + bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); + if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) + return -ENOMEM; + + return 0; +} + +/* + * This algorithm is a very conservative estimate of the worst-case scheduling + * scenario for any one interval. The hardware dynamically schedules the + * packets, so we can't tell which microframe could be the limiting factor in + * the bandwidth scheduling. This only takes into account periodic endpoints. + * + * Obviously, we can't solve an NP complete problem to find the minimum worst + * case scenario. Instead, we come up with an estimate that is no less than + * the worst case bandwidth used for any one microframe, but may be an + * over-estimate. + * + * We walk the requirements for each endpoint by interval, starting with the + * smallest interval, and place packets in the schedule where there is only one + * possible way to schedule packets for that interval. In order to simplify + * this algorithm, we record the largest max packet size for each interval, and + * assume all packets will be that size. + * + * For interval 0, we obviously must schedule all packets for each interval. + * The bandwidth for interval 0 is just the amount of data to be transmitted + * (the sum of all max ESIT payload sizes, plus any overhead per packet times + * the number of packets). + * + * For interval 1, we have two possible microframes to schedule those packets + * in. For this algorithm, if we can schedule the same number of packets for + * each possible scheduling opportunity (each microframe), we will do so. The + * remaining number of packets will be saved to be transmitted in the gaps in + * the next interval's scheduling sequence. + * + * As we move those remaining packets to be scheduled with interval 2 packets, + * we have to double the number of remaining packets to transmit. This is + * because the intervals are actually powers of 2, and we would be transmitting + * the previous interval's packets twice in this interval. We also have to be + * sure that when we look at the largest max packet size for this interval, we + * also look at the largest max packet size for the remaining packets and take + * the greater of the two. + * + * The algorithm continues to evenly distribute packets in each scheduling + * opportunity, and push the remaining packets out, until we get to the last + * interval. Then those packets and their associated overhead are just added + * to the bandwidth used. 
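+ *
+ * A purely illustrative trace of the arithmetic below (made-up numbers, not
+ * derived from real hardware limits): suppose interval 1 carries 9 packets,
+ * each at most 4 blocks with 1 block of overhead.  At i == 1 there are
+ * 1 << 2 == 4 scheduling opportunities, so 9 >> 2 == 2 packets are charged
+ * per opportunity (2 * (4 + 1) == 10 blocks added to bw_used) and the
+ * 9 % 4 == 1 leftover packet is doubled and pushed into interval 2.  If it
+ * never finds a slot, it is finally charged once more as the leftover after
+ * interval 15.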
+ */ +static int xhci_check_bw_table(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + int old_active_eps) +{ + unsigned int bw_reserved; + unsigned int max_bandwidth; + unsigned int bw_used; + unsigned int block_size; + struct xhci_interval_bw_table *bw_table; + unsigned int packet_size = 0; + unsigned int overhead = 0; + unsigned int packets_transmitted = 0; + unsigned int packets_remaining = 0; + unsigned int i; + + if (virt_dev->udev->speed >= USB_SPEED_SUPER) + return xhci_check_ss_bw(xhci, virt_dev); + + if (virt_dev->udev->speed == USB_SPEED_HIGH) { + max_bandwidth = HS_BW_LIMIT; + /* Convert percent of bus BW reserved to blocks reserved */ + bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); + } else { + max_bandwidth = FS_BW_LIMIT; + bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); + } + + bw_table = virt_dev->bw_table; + /* We need to translate the max packet size and max ESIT payloads into + * the units the hardware uses. + */ + block_size = xhci_get_block_size(virt_dev->udev); + + /* If we are manipulating a LS/FS device under a HS hub, double check + * that the HS bus has enough bandwidth if we are activing a new TT. + */ + if (virt_dev->tt_info) { + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Recalculating BW for rootport %u", + virt_dev->real_port); + if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { + xhci_warn(xhci, "Not enough bandwidth on HS bus for " + "newly activated TT.\n"); + return -ENOMEM; + } + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Recalculating BW for TT slot %u port %u", + virt_dev->tt_info->slot_id, + virt_dev->tt_info->ttport); + } else { + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Recalculating BW for rootport %u", + virt_dev->real_port); + } + + /* Add in how much bandwidth will be used for interval zero, or the + * rounded max ESIT payload + number of packets * largest overhead. + */ + bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + + bw_table->interval_bw[0].num_packets * + xhci_get_largest_overhead(&bw_table->interval_bw[0]); + + for (i = 1; i < XHCI_MAX_INTERVAL; i++) { + unsigned int bw_added; + unsigned int largest_mps; + unsigned int interval_overhead; + + /* + * How many packets could we transmit in this interval? + * If packets didn't fit in the previous interval, we will need + * to transmit that many packets twice within this interval. + */ + packets_remaining = 2 * packets_remaining + + bw_table->interval_bw[i].num_packets; + + /* Find the largest max packet size of this or the previous + * interval. + */ + if (list_empty(&bw_table->interval_bw[i].endpoints)) + largest_mps = 0; + else { + struct xhci_virt_ep *virt_ep; + struct list_head *ep_entry; + + ep_entry = bw_table->interval_bw[i].endpoints.next; + virt_ep = list_entry(ep_entry, + struct xhci_virt_ep, bw_endpoint_list); + /* Convert to blocks, rounding up */ + largest_mps = DIV_ROUND_UP( + virt_ep->bw_info.max_packet_size, + block_size); + } + if (largest_mps > packet_size) + packet_size = largest_mps; + + /* Use the larger overhead of this or the previous interval. */ + interval_overhead = xhci_get_largest_overhead( + &bw_table->interval_bw[i]); + if (interval_overhead > overhead) + overhead = interval_overhead; + + /* How many packets can we evenly distribute across + * (1 << (i + 1)) possible scheduling opportunities? 
+ */ + packets_transmitted = packets_remaining >> (i + 1); + + /* Add in the bandwidth used for those scheduled packets */ + bw_added = packets_transmitted * (overhead + packet_size); + + /* How many packets do we have remaining to transmit? */ + packets_remaining = packets_remaining % (1 << (i + 1)); + + /* What largest max packet size should those packets have? */ + /* If we've transmitted all packets, don't carry over the + * largest packet size. + */ + if (packets_remaining == 0) { + packet_size = 0; + overhead = 0; + } else if (packets_transmitted > 0) { + /* Otherwise if we do have remaining packets, and we've + * scheduled some packets in this interval, take the + * largest max packet size from endpoints with this + * interval. + */ + packet_size = largest_mps; + overhead = interval_overhead; + } + /* Otherwise carry over packet_size and overhead from the last + * time we had a remainder. + */ + bw_used += bw_added; + if (bw_used > max_bandwidth) { + xhci_warn(xhci, "Not enough bandwidth. " + "Proposed: %u, Max: %u\n", + bw_used, max_bandwidth); + return -ENOMEM; + } + } + /* + * Ok, we know we have some packets left over after even-handedly + * scheduling interval 15. We don't know which microframes they will + * fit into, so we over-schedule and say they will be scheduled every + * microframe. + */ + if (packets_remaining > 0) + bw_used += overhead + packet_size; + + if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { + unsigned int port_index = virt_dev->real_port - 1; + + /* OK, we're manipulating a HS device attached to a + * root port bandwidth domain. Include the number of active TTs + * in the bandwidth used. + */ + bw_used += TT_HS_OVERHEAD * + xhci->rh_bw[port_index].num_active_tts; + } + + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Final bandwidth: %u, Limit: %u, Reserved: %u, " + "Available: %u " "percent", + bw_used, max_bandwidth, bw_reserved, + (max_bandwidth - bw_used - bw_reserved) * 100 / + max_bandwidth); + + bw_used += bw_reserved; + if (bw_used > max_bandwidth) { + xhci_warn(xhci, "Not enough bandwidth. 
Proposed: %u, Max: %u\n", + bw_used, max_bandwidth); + return -ENOMEM; + } + + bw_table->bw_used = bw_used; + return 0; +} + +static bool xhci_is_async_ep(unsigned int ep_type) +{ + return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && + ep_type != ISOC_IN_EP && + ep_type != INT_IN_EP); +} + +static bool xhci_is_sync_in_ep(unsigned int ep_type) +{ + return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); +} + +static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) +{ + unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); + + if (ep_bw->ep_interval == 0) + return SS_OVERHEAD_BURST + + (ep_bw->mult * ep_bw->num_packets * + (SS_OVERHEAD + mps)); + return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * + (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), + 1 << ep_bw->ep_interval); + +} + +static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, + struct xhci_bw_info *ep_bw, + struct xhci_interval_bw_table *bw_table, + struct usb_device *udev, + struct xhci_virt_ep *virt_ep, + struct xhci_tt_bw_info *tt_info) +{ + struct xhci_interval_bw *interval_bw; + int normalized_interval; + + if (xhci_is_async_ep(ep_bw->type)) + return; + + if (udev->speed >= USB_SPEED_SUPER) { + if (xhci_is_sync_in_ep(ep_bw->type)) + xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= + xhci_get_ss_bw_consumed(ep_bw); + else + xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= + xhci_get_ss_bw_consumed(ep_bw); + return; + } + + /* SuperSpeed endpoints never get added to intervals in the table, so + * this check is only valid for HS/FS/LS devices. + */ + if (list_empty(&virt_ep->bw_endpoint_list)) + return; + /* For LS/FS devices, we need to translate the interval expressed in + * microframes to frames. + */ + if (udev->speed == USB_SPEED_HIGH) + normalized_interval = ep_bw->ep_interval; + else + normalized_interval = ep_bw->ep_interval - 3; + + if (normalized_interval == 0) + bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; + interval_bw = &bw_table->interval_bw[normalized_interval]; + interval_bw->num_packets -= ep_bw->num_packets; + switch (udev->speed) { + case USB_SPEED_LOW: + interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; + break; + case USB_SPEED_FULL: + interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; + break; + case USB_SPEED_HIGH: + interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; + break; + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_UNKNOWN: + case USB_SPEED_WIRELESS: + /* Should never happen because only LS/FS/HS endpoints will get + * added to the endpoint list. + */ + return; + } + if (tt_info) + tt_info->active_eps -= 1; + list_del_init(&virt_ep->bw_endpoint_list); +} + +static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, + struct xhci_bw_info *ep_bw, + struct xhci_interval_bw_table *bw_table, + struct usb_device *udev, + struct xhci_virt_ep *virt_ep, + struct xhci_tt_bw_info *tt_info) +{ + struct xhci_interval_bw *interval_bw; + struct xhci_virt_ep *smaller_ep; + int normalized_interval; + + if (xhci_is_async_ep(ep_bw->type)) + return; + + if (udev->speed == USB_SPEED_SUPER) { + if (xhci_is_sync_in_ep(ep_bw->type)) + xhci->devs[udev->slot_id]->bw_table->ss_bw_in += + xhci_get_ss_bw_consumed(ep_bw); + else + xhci->devs[udev->slot_id]->bw_table->ss_bw_out += + xhci_get_ss_bw_consumed(ep_bw); + return; + } + + /* For LS/FS devices, we need to translate the interval expressed in + * microframes to frames. 
+ */ + if (udev->speed == USB_SPEED_HIGH) + normalized_interval = ep_bw->ep_interval; + else + normalized_interval = ep_bw->ep_interval - 3; + + if (normalized_interval == 0) + bw_table->interval0_esit_payload += ep_bw->max_esit_payload; + interval_bw = &bw_table->interval_bw[normalized_interval]; + interval_bw->num_packets += ep_bw->num_packets; + switch (udev->speed) { + case USB_SPEED_LOW: + interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; + break; + case USB_SPEED_FULL: + interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; + break; + case USB_SPEED_HIGH: + interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; + break; + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_UNKNOWN: + case USB_SPEED_WIRELESS: + /* Should never happen because only LS/FS/HS endpoints will get + * added to the endpoint list. + */ + return; + } + + if (tt_info) + tt_info->active_eps += 1; + /* Insert the endpoint into the list, largest max packet size first. */ + list_for_each_entry(smaller_ep, &interval_bw->endpoints, + bw_endpoint_list) { + if (ep_bw->max_packet_size >= + smaller_ep->bw_info.max_packet_size) { + /* Add the new ep before the smaller endpoint */ + list_add_tail(&virt_ep->bw_endpoint_list, + &smaller_ep->bw_endpoint_list); + return; + } + } + /* Add the new endpoint at the end of the list. */ + list_add_tail(&virt_ep->bw_endpoint_list, + &interval_bw->endpoints); +} + +void xhci_update_tt_active_eps(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + int old_active_eps) +{ + struct xhci_root_port_bw_info *rh_bw_info; + if (!virt_dev->tt_info) + return; + + rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; + if (old_active_eps == 0 && + virt_dev->tt_info->active_eps != 0) { + rh_bw_info->num_active_tts += 1; + rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; + } else if (old_active_eps != 0 && + virt_dev->tt_info->active_eps == 0) { + rh_bw_info->num_active_tts -= 1; + rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; + } +} + +static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + struct xhci_container_ctx *in_ctx) +{ + struct xhci_bw_info ep_bw_info[31]; + int i; + struct xhci_input_control_ctx *ctrl_ctx; + int old_active_eps = 0; + + if (virt_dev->tt_info) + old_active_eps = virt_dev->tt_info->active_eps; + + ctrl_ctx = xhci_get_input_control_ctx(in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + return -ENOMEM; + } + + for (i = 0; i < 31; i++) { + if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) + continue; + + /* Make a copy of the BW info in case we need to revert this */ + memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, + sizeof(ep_bw_info[i])); + /* Drop the endpoint from the interval table if the endpoint is + * being dropped or changed. + */ + if (EP_IS_DROPPED(ctrl_ctx, i)) + xhci_drop_ep_from_interval_table(xhci, + &virt_dev->eps[i].bw_info, + virt_dev->bw_table, + virt_dev->udev, + &virt_dev->eps[i], + virt_dev->tt_info); + } + /* Overwrite the information stored in the endpoints' bw_info */ + xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); + for (i = 0; i < 31; i++) { + /* Add any changed or added endpoints to the interval table */ + if (EP_IS_ADDED(ctrl_ctx, i)) + xhci_add_ep_to_interval_table(xhci, + &virt_dev->eps[i].bw_info, + virt_dev->bw_table, + virt_dev->udev, + &virt_dev->eps[i], + virt_dev->tt_info); + } + + if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { + /* Ok, this fits in the bandwidth we have. 
+ * Update the number of active TTs. + */ + xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); + return 0; + } + + /* We don't have enough bandwidth for this, revert the stored info. */ + for (i = 0; i < 31; i++) { + if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) + continue; + + /* Drop the new copies of any added or changed endpoints from + * the interval table. + */ + if (EP_IS_ADDED(ctrl_ctx, i)) { + xhci_drop_ep_from_interval_table(xhci, + &virt_dev->eps[i].bw_info, + virt_dev->bw_table, + virt_dev->udev, + &virt_dev->eps[i], + virt_dev->tt_info); + } + /* Revert the endpoint back to its old information */ + memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], + sizeof(ep_bw_info[i])); + /* Add any changed or dropped endpoints back into the table */ + if (EP_IS_DROPPED(ctrl_ctx, i)) + xhci_add_ep_to_interval_table(xhci, + &virt_dev->eps[i].bw_info, + virt_dev->bw_table, + virt_dev->udev, + &virt_dev->eps[i], + virt_dev->tt_info); + } + return -ENOMEM; +} + + +/* Issue a configure endpoint command or evaluate context command + * and wait for it to finish. + */ +static int xhci_configure_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, + struct xhci_command *command, + bool ctx_change, bool must_succeed) +{ + int ret; + unsigned long flags; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_virt_device *virt_dev; + struct xhci_slot_ctx *slot_ctx; + + if (!command) + return -EINVAL; + + spin_lock_irqsave(&xhci->lock, flags); + + if (xhci->xhc_state & XHCI_STATE_DYING) { + spin_unlock_irqrestore(&xhci->lock, flags); + return -ESHUTDOWN; + } + + virt_dev = xhci->devs[udev->slot_id]; + + ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); + if (!ctrl_ctx) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + return -ENOMEM; + } + + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && + xhci_reserve_host_resources(xhci, ctrl_ctx)) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_warn(xhci, "Not enough host resources, " + "active endpoint contexts = %u\n", + xhci->num_active_eps); + return -ENOMEM; + } + if ((xhci->quirks & XHCI_SW_BW_CHECKING) && + xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) + xhci_free_host_resources(xhci, ctrl_ctx); + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_warn(xhci, "Not enough bandwidth\n"); + return -ENOMEM; + } + + slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); + + trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); + trace_xhci_configure_endpoint(slot_ctx); + + if (!ctx_change) + ret = xhci_queue_configure_endpoint(xhci, command, + command->in_ctx->dma, + udev->slot_id, must_succeed); + else + ret = xhci_queue_evaluate_context(xhci, command, + command->in_ctx->dma, + udev->slot_id, must_succeed); + if (ret < 0) { + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) + xhci_free_host_resources(xhci, ctrl_ctx); + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "FIXME allocate a new ring segment"); + return -ENOMEM; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Wait for the configure endpoint command to complete */ + wait_for_completion(command->completion); + + if (!ctx_change) + ret = xhci_configure_endpoint_result(xhci, udev, + &command->status); + else + ret = xhci_evaluate_context_result(xhci, udev, + &command->status); + + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { + spin_lock_irqsave(&xhci->lock, 
flags); + /* If the command failed, remove the reserved resources. + * Otherwise, clean up the estimate to include dropped eps. + */ + if (ret) + xhci_free_host_resources(xhci, ctrl_ctx); + else + xhci_finish_resource_reservation(xhci, ctrl_ctx); + spin_unlock_irqrestore(&xhci->lock, flags); + } + if (ret) + goto failed; + + ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); + if (ret) + xhci_warn(xhci, "sync device context failed, ret=%d", ret); + +failed: + return ret; +} + +static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, + struct xhci_virt_device *vdev, int i) +{ + struct xhci_virt_ep *ep = &vdev->eps[i]; + + if (ep->ep_state & EP_HAS_STREAMS) { + xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", + xhci_get_endpoint_address(i)); + xhci_free_stream_info(xhci, ep->stream_info); + ep->stream_info = NULL; + ep->ep_state &= ~EP_HAS_STREAMS; + } +} + +/* Called after one or more calls to xhci_add_endpoint() or + * xhci_drop_endpoint(). If this call fails, the USB core is expected + * to call xhci_reset_bandwidth(). + * + * Since we are in the middle of changing either configuration or + * installing a new alt setting, the USB core won't allow URBs to be + * enqueued for any endpoint on the old config or interface. Nothing + * else should be touching the xhci->devs[slot_id] structure, so we + * don't need to take the xhci->lock for manipulating that. + */ +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + int i; + int ret = 0; + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; + struct xhci_command *command; + + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + if ((xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_REMOVING)) + return -ENODEV; + + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); + virt_dev = xhci->devs[udev->slot_id]; + + command = xhci_alloc_command(xhci, true, GFP_KERNEL); + if (!command) + return -ENOMEM; + + command->in_ctx = virt_dev->in_ctx; + + /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ + ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + ret = -ENOMEM; + goto command_cleanup; + } + ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); + ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); + ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); + + /* Don't issue the command if there's no endpoints to update. */ + if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && + ctrl_ctx->drop_flags == 0) { + ret = 0; + goto command_cleanup; + } + /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + for (i = 31; i >= 1; i--) { + __le32 le32 = cpu_to_le32(BIT(i)); + + if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) + || (ctrl_ctx->add_flags & le32) || i == 1) { + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); + break; + } + } + + ret = xhci_configure_endpoint(xhci, udev, command, + false, false); + if (ret) + /* Callee should call reset_bandwidth() */ + goto command_cleanup; + + /* Free any rings that were dropped, but not changed. 
*/ + for (i = 1; i < 31; i++) { + if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && + !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { + xhci_free_endpoint_ring(xhci, virt_dev, i); + xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); + } + } + xhci_zero_in_ctx(xhci, virt_dev); + /* + * Install any rings for completely new endpoints or changed endpoints, + * and free any old rings from changed endpoints. + */ + for (i = 1; i < 31; i++) { + if (!virt_dev->eps[i].new_ring) + continue; + /* Only free the old ring if it exists. + * It may not if this is the first add of an endpoint. + */ + if (virt_dev->eps[i].ring) { + xhci_free_endpoint_ring(xhci, virt_dev, i); + } + xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); + virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; + virt_dev->eps[i].new_ring = NULL; + xhci_debugfs_create_endpoint(xhci, virt_dev, i); + } +command_cleanup: + kfree(command->completion); + kfree(command); + + return ret; +} +EXPORT_SYMBOL_GPL(xhci_check_bandwidth); + +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; + int i, ret; + + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); + if (ret <= 0) + return; + xhci = hcd_to_xhci(hcd); + + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); + virt_dev = xhci->devs[udev->slot_id]; + /* Free any rings allocated for added endpoints */ + for (i = 0; i < 31; i++) { + if (virt_dev->eps[i].new_ring) { + xhci_debugfs_remove_endpoint(xhci, virt_dev, i); + if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i)) + xhci_vendor_free_transfer_ring(xhci, virt_dev, i); + else + xhci_ring_free(xhci, virt_dev->eps[i].new_ring); + + virt_dev->eps[i].new_ring = NULL; + } + } + xhci_zero_in_ctx(xhci, virt_dev); +} +EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); + +static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_container_ctx *out_ctx, + struct xhci_input_control_ctx *ctrl_ctx, + u32 add_flags, u32 drop_flags) +{ + ctrl_ctx->add_flags = cpu_to_le32(add_flags); + ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); + xhci_slot_copy(xhci, in_ctx, out_ctx); + ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); +} + +static void xhci_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *host_ep) +{ + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_virt_ep *ep; + struct usb_device *udev; + unsigned long flags; + unsigned int ep_index; + + xhci = hcd_to_xhci(hcd); +rescan: + spin_lock_irqsave(&xhci->lock, flags); + + udev = (struct usb_device *)host_ep->hcpriv; + if (!udev || !udev->slot_id) + goto done; + + vdev = xhci->devs[udev->slot_id]; + if (!vdev) + goto done; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = &vdev->eps[ep_index]; + if (!ep) + goto done; + + /* wait for hub_tt_work to finish clearing hub TT */ + if (ep->ep_state & EP_CLEARING_TT) { + spin_unlock_irqrestore(&xhci->lock, flags); + schedule_timeout_uninterruptible(1); + goto rescan; + } + + if (ep->ep_state) + xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", + ep->ep_state); +done: + host_ep->hcpriv = NULL; + spin_unlock_irqrestore(&xhci->lock, flags); +} + +/* + * Called after usb core issues a clear halt control message. + * The host side of the halt should already be cleared by a reset endpoint + * command issued when the STALL event was received. + * + * The reset endpoint command may only be issued to endpoints in the halted + * state. 
For software that wishes to reset the data toggle or sequence number
+ * of an endpoint that isn't in the halted state, this function will issue a
+ * configure endpoint command with the Drop and Add bits set for the target
+ * endpoint. Refer to the additional note in xhci specification section 4.6.8.
+ */
+
+static void xhci_endpoint_reset(struct usb_hcd *hcd,
+		struct usb_host_endpoint *host_ep)
+{
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	struct xhci_virt_device *vdev;
+	struct xhci_virt_ep *ep;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_command *stop_cmd, *cfg_cmd;
+	unsigned int ep_index;
+	unsigned long flags;
+	u32 ep_flag;
+	int err;
+
+	xhci = hcd_to_xhci(hcd);
+	if (!host_ep->hcpriv)
+		return;
+	udev = (struct usb_device *) host_ep->hcpriv;
+	vdev = xhci->devs[udev->slot_id];
+
+	/*
+	 * vdev may be lost due to xHC restore error and re-initialization
+	 * during S3/S4 resume. A new vdev will be allocated later by
+	 * xhci_discover_or_reset_device()
+	 */
+	if (!udev->slot_id || !vdev)
+		return;
+	ep_index = xhci_get_endpoint_index(&host_ep->desc);
+	ep = &vdev->eps[ep_index];
+	if (!ep)
+		return;
+
+	/* Bail out if toggle is already being cleared by an endpoint reset */
+	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
+		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
+		return;
+	}
+	/* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */
+	if (usb_endpoint_xfer_control(&host_ep->desc) ||
+	    usb_endpoint_xfer_isoc(&host_ep->desc))
+		return;
+
+	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
+
+	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
+		return;
+
+	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
+	if (!stop_cmd)
+		return;
+
+	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
+	if (!cfg_cmd)
+		goto cleanup;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* block queuing new trbs and ringing ep doorbell */
+	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
+
+	/*
+	 * Make sure endpoint ring is empty before resetting the toggle/seq.
+	 * Driver is required to synchronously cancel all transfer requests.
+ * Stop the endpoint to force xHC to update the output context + */ + + if (!list_empty(&ep->ring->td_list)) { + dev_err(&udev->dev, "EP not empty, refuse reset\n"); + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + goto cleanup; + } + + err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, + ep_index, 0); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", + __func__, err); + goto cleanup; + } + + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(stop_cmd->completion); + + err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); + if (err) { + xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", + __func__, err); + goto cleanup; + } + + spin_lock_irqsave(&xhci->lock, flags); + + /* config ep command clears toggle if add and drop ep flags are set */ + ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); + if (!ctrl_ctx) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + goto cleanup; + } + + xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, + ctrl_ctx, ep_flag, ep_flag); + xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); + + err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, + udev->slot_id, false); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", + __func__, err); + goto cleanup; + } + + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(cfg_cmd->completion); + + err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); + if (err) + xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", + __func__, err); + + xhci_free_command(xhci, cfg_cmd); +cleanup: + xhci_free_command(xhci, stop_cmd); + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; +} + +static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, struct usb_host_endpoint *ep, + unsigned int slot_id) +{ + int ret; + unsigned int ep_index; + unsigned int ep_state; + + if (!ep) + return -EINVAL; + ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); + if (ret <= 0) + return -EINVAL; + if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { + xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" + " descriptor for ep 0x%x does not support streams\n", + ep->desc.bEndpointAddress); + return -EINVAL; + } + + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; + if (ep_state & EP_HAS_STREAMS || + ep_state & EP_GETTING_STREAMS) { + xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " + "already has streams set up.\n", + ep->desc.bEndpointAddress); + xhci_warn(xhci, "Send email to xHCI maintainer and ask for " + "dynamic stream context array reallocation.\n"); + return -EINVAL; + } + if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { + xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " + "endpoint 0x%x; URBs are pending.\n", + ep->desc.bEndpointAddress); + return -EINVAL; + } + return 0; +} + +static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, + unsigned int *num_streams, unsigned int *num_stream_ctxs) +{ + unsigned int max_streams; + 
+ /* The stream context array size must be a power of two */ + *num_stream_ctxs = roundup_pow_of_two(*num_streams); + /* + * Find out how many primary stream array entries the host controller + * supports. Later we may use secondary stream arrays (similar to 2nd + * level page entries), but that's an optional feature for xHCI host + * controllers. xHCs must support at least 4 stream IDs. + */ + max_streams = HCC_MAX_PSA(xhci->hcc_params); + if (*num_stream_ctxs > max_streams) { + xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", + max_streams); + *num_stream_ctxs = max_streams; + *num_streams = max_streams; + } +} + +/* Returns an error code if one of the endpoint already has streams. + * This does not change any data structures, it only checks and gathers + * information. + */ +static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int *num_streams, u32 *changed_ep_bitmask) +{ + unsigned int max_streams; + unsigned int endpoint_flag; + int i; + int ret; + + for (i = 0; i < num_eps; i++) { + ret = xhci_check_streams_endpoint(xhci, udev, + eps[i], udev->slot_id); + if (ret < 0) + return ret; + + max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); + if (max_streams < (*num_streams - 1)) { + xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", + eps[i]->desc.bEndpointAddress, + max_streams); + *num_streams = max_streams+1; + } + + endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); + if (*changed_ep_bitmask & endpoint_flag) + return -EINVAL; + *changed_ep_bitmask |= endpoint_flag; + } + return 0; +} + +static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps) +{ + u32 changed_ep_bitmask = 0; + unsigned int slot_id; + unsigned int ep_index; + unsigned int ep_state; + int i; + + slot_id = udev->slot_id; + if (!xhci->devs[slot_id]) + return 0; + + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; + /* Are streams already being freed for the endpoint? */ + if (ep_state & EP_GETTING_NO_STREAMS) { + xhci_warn(xhci, "WARN Can't disable streams for " + "endpoint 0x%x, " + "streams are being disabled already\n", + eps[i]->desc.bEndpointAddress); + return 0; + } + /* Are there actually any streams to free? */ + if (!(ep_state & EP_HAS_STREAMS) && + !(ep_state & EP_GETTING_STREAMS)) { + xhci_warn(xhci, "WARN Can't disable streams for " + "endpoint 0x%x, " + "streams are already disabled!\n", + eps[i]->desc.bEndpointAddress); + xhci_warn(xhci, "WARN xhci_free_streams() called " + "with non-streams endpoint\n"); + return 0; + } + changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); + } + return changed_ep_bitmask; +} + +/* + * The USB device drivers use this function (through the HCD interface in USB + * core) to prepare a set of bulk endpoints to use streams. Streams are used to + * coordinate mass storage command queueing across multiple endpoints (basically + * a stream ID == a task ID). + * + * Setting up streams involves allocating the same size stream context array + * for each endpoint and issuing a configure endpoint command for all endpoints. + * + * Don't allow the call to succeed if one endpoint only supports one stream + * (which means it doesn't support streams at all). 
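+ *
+ * (Illustrative note, not a requirement of this file: class drivers normally
+ * reach this path through the USB core, e.g. a UAS-style driver calling
+ * usb_alloc_streams() on its bulk endpoints, rather than invoking the HCD
+ * op directly.)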
+ * + * Drivers may get less stream IDs than they asked for, if the host controller + * hardware or endpoints claim they can't support the number of requested + * stream IDs. + */ +static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int num_streams, gfp_t mem_flags) +{ + int i, ret; + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_command *config_cmd; + struct xhci_input_control_ctx *ctrl_ctx; + unsigned int ep_index; + unsigned int num_stream_ctxs; + unsigned int max_packet; + unsigned long flags; + u32 changed_ep_bitmask = 0; + + if (!eps) + return -EINVAL; + + /* Add one to the number of streams requested to account for + * stream 0 that is reserved for xHCI usage. + */ + num_streams += 1; + xhci = hcd_to_xhci(hcd); + xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", + num_streams); + + /* MaxPSASize value 0 (2 streams) means streams are not supported */ + if ((xhci->quirks & XHCI_BROKEN_STREAMS) || + HCC_MAX_PSA(xhci->hcc_params) < 4) { + xhci_dbg(xhci, "xHCI controller does not support streams.\n"); + return -ENOSYS; + } + + config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); + if (!config_cmd) + return -ENOMEM; + + ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + xhci_free_command(xhci, config_cmd); + return -ENOMEM; + } + + /* Check to make sure all endpoints are not already configured for + * streams. While we're at it, find the maximum number of streams that + * all the endpoints will support and check for duplicate endpoints. + */ + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, + num_eps, &num_streams, &changed_ep_bitmask); + if (ret < 0) { + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; + } + if (num_streams <= 1) { + xhci_warn(xhci, "WARN: endpoints can't handle " + "more than one stream.\n"); + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + return -EINVAL; + } + vdev = xhci->devs[udev->slot_id]; + /* Mark each endpoint as being in transition, so + * xhci_urb_enqueue() will reject all URBs. + */ + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; + } + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Setup internal data structures and allocate HW data structures for + * streams (but don't install the HW structures in the input context + * until we're sure all memory allocation succeeded). + */ + xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); + xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", + num_stream_ctxs, num_streams); + + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + max_packet = usb_endpoint_maxp(&eps[i]->desc); + vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, + num_stream_ctxs, + num_streams, + max_packet, mem_flags); + if (!vdev->eps[ep_index].stream_info) + goto cleanup; + /* Set maxPstreams in endpoint context and update deq ptr to + * point to stream context array. FIXME + */ + } + + /* Set up the input context for a configure endpoint command. 
*/ + for (i = 0; i < num_eps; i++) { + struct xhci_ep_ctx *ep_ctx; + + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); + + xhci_endpoint_copy(xhci, config_cmd->in_ctx, + vdev->out_ctx, ep_index); + xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, + vdev->eps[ep_index].stream_info); + } + /* Tell the HW to drop its old copy of the endpoint context info + * and add the updated copy from the input context. + */ + xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, + vdev->out_ctx, ctrl_ctx, + changed_ep_bitmask, changed_ep_bitmask); + + /* Issue and wait for the configure endpoint command */ + ret = xhci_configure_endpoint(xhci, udev, config_cmd, + false, false); + + /* xHC rejected the configure endpoint command for some reason, so we + * leave the old ring intact and free our internal streams data + * structure. + */ + if (ret < 0) + goto cleanup; + + spin_lock_irqsave(&xhci->lock, flags); + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; + xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", + udev->slot_id, ep_index); + vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; + } + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + xhci_debugfs_create_stream_files(xhci, vdev, ep_index); + } + /* Subtract 1 for stream 0, which drivers can't use */ + return num_streams - 1; + +cleanup: + /* If it didn't work, free the streams! */ + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); + vdev->eps[ep_index].stream_info = NULL; + /* FIXME Unset maxPstreams in endpoint context and + * update deq ptr to point to normal string ring. + */ + vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; + vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; + xhci_endpoint_zero(xhci, vdev, eps[i]); + } + xhci_free_command(xhci, config_cmd); + return -ENOMEM; +} + +/* Transition the endpoint from using streams to being a "normal" endpoint + * without streams. + * + * Modify the endpoint context state, submit a configure endpoint command, + * and free all endpoint rings for streams if that completes successfully. + */ +static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + gfp_t mem_flags) +{ + int i, ret; + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_command *command; + struct xhci_input_control_ctx *ctrl_ctx; + unsigned int ep_index; + unsigned long flags; + u32 changed_ep_bitmask; + + xhci = hcd_to_xhci(hcd); + vdev = xhci->devs[udev->slot_id]; + + /* Set up a configure endpoint command to remove the streams rings */ + spin_lock_irqsave(&xhci->lock, flags); + changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, + udev, eps, num_eps); + if (changed_ep_bitmask == 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + return -EINVAL; + } + + /* Use the xhci_command structure from the first endpoint. We may have + * allocated too many, but the driver may call xhci_free_streams() for + * each endpoint it grouped into one call to xhci_alloc_streams(). 
+ */ + ep_index = xhci_get_endpoint_index(&eps[0]->desc); + command = vdev->eps[ep_index].stream_info->free_streams_command; + ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); + if (!ctrl_ctx) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + return -EINVAL; + } + + for (i = 0; i < num_eps; i++) { + struct xhci_ep_ctx *ep_ctx; + + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= + EP_GETTING_NO_STREAMS; + + xhci_endpoint_copy(xhci, command->in_ctx, + vdev->out_ctx, ep_index); + xhci_setup_no_streams_ep_input_ctx(ep_ctx, + &vdev->eps[ep_index]); + } + xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, + vdev->out_ctx, ctrl_ctx, + changed_ep_bitmask, changed_ep_bitmask); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Issue and wait for the configure endpoint command, + * which must succeed. + */ + ret = xhci_configure_endpoint(xhci, udev, command, + false, true); + + /* xHC rejected the configure endpoint command for some reason, so we + * leave the streams rings intact. + */ + if (ret < 0) + return ret; + + spin_lock_irqsave(&xhci->lock, flags); + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); + vdev->eps[ep_index].stream_info = NULL; + /* FIXME Unset maxPstreams in endpoint context and + * update deq ptr to point to normal string ring. + */ + vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; + vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; + } + spin_unlock_irqrestore(&xhci->lock, flags); + + return 0; +} + +/* + * Deletes endpoint resources for endpoints that were active before a Reset + * Device command, or a Disable Slot command. The Reset Device command leaves + * the control endpoint intact, whereas the Disable Slot command deletes it. + * + * Must be called with xhci->lock held. + */ +void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, bool drop_control_ep) +{ + int i; + unsigned int num_dropped_eps = 0; + unsigned int drop_flags = 0; + + for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { + if (virt_dev->eps[i].ring) { + drop_flags |= 1 << i; + num_dropped_eps++; + } + } + xhci->num_active_eps -= num_dropped_eps; + if (num_dropped_eps) + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Dropped %u ep ctxs, flags = 0x%x, " + "%u now active.", + num_dropped_eps, drop_flags, + xhci->num_active_eps); +} + +/* + * This submits a Reset Device Command, which will set the device state to 0, + * set the device address to 0, and disable all the endpoints except the default + * control endpoint. The USB core should come back and call + * xhci_address_device(), and then re-set up the configuration. If this is + * called because of a usb_reset_and_verify_device(), then the old alternate + * settings will be re-installed through the normal bandwidth allocation + * functions. + * + * Wait for the Reset Device command to finish. Remove all structures + * associated with the endpoints that were disabled. Clear the input device + * structure? Reset the control endpoint 0 max packet size? + * + * If the virt_dev to be reset does not exist or does not match the udev, + * it means the device is lost, possibly due to the xHC restore error and + * re-initialization during S3/S4. 
In this case, call xhci_alloc_dev() to + * re-allocate the device. + */ +static int xhci_discover_or_reset_device(struct usb_hcd *hcd, + struct usb_device *udev) +{ + int ret, i; + unsigned long flags; + struct xhci_hcd *xhci; + unsigned int slot_id; + struct xhci_virt_device *virt_dev; + struct xhci_command *reset_device_cmd; + struct xhci_slot_ctx *slot_ctx; + int old_active_eps = 0; + + ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + slot_id = udev->slot_id; + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) { + xhci_dbg(xhci, "The device to be reset with slot ID %u does " + "not exist. Re-allocate the device\n", slot_id); + ret = xhci_alloc_dev(hcd, udev); + if (ret == 1) + return 0; + else + return -EINVAL; + } + + if (virt_dev->tt_info) + old_active_eps = virt_dev->tt_info->active_eps; + + if (virt_dev->udev != udev) { + /* If the virt_dev and the udev does not match, this virt_dev + * may belong to another udev. + * Re-allocate the device. + */ + xhci_dbg(xhci, "The device to be reset with slot ID %u does " + "not match the udev. Re-allocate the device\n", + slot_id); + ret = xhci_alloc_dev(hcd, udev); + if (ret == 1) + return 0; + else + return -EINVAL; + } + + /* If device is not setup, there is no point in resetting it */ + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == + SLOT_STATE_DISABLED) + return 0; + + trace_xhci_discover_or_reset_device(slot_ctx); + + xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); + /* Allocate the command structure that holds the struct completion. + * Assume we're in process context, since the normal device reset + * process has to wait for the device anyway. Storage devices are + * reset as part of error handling, so use GFP_NOIO instead of + * GFP_KERNEL. + */ + reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); + if (!reset_device_cmd) { + xhci_dbg(xhci, "Couldn't allocate command structure.\n"); + return -ENOMEM; + } + + /* Attempt to submit the Reset Device command to the command ring */ + spin_lock_irqsave(&xhci->lock, flags); + + ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); + if (ret) { + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + spin_unlock_irqrestore(&xhci->lock, flags); + goto command_cleanup; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Wait for the Reset Device command to finish */ + wait_for_completion(reset_device_cmd->completion); + + ret = xhci_vendor_sync_dev_ctx(xhci, slot_id); + if (ret) { + xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", + __func__, ret); + goto command_cleanup; + } + + /* The Reset Device command can't fail, according to the 0.95/0.96 spec, + * unless we tried to reset a slot ID that wasn't enabled, + * or the device wasn't in the addressed or configured state. + */ + ret = reset_device_cmd->status; + switch (ret) { + case COMP_COMMAND_ABORTED: + case COMP_COMMAND_RING_STOPPED: + xhci_warn(xhci, "Timeout waiting for reset device command\n"); + ret = -ETIME; + goto command_cleanup; + case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ + case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ + xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", + slot_id, + xhci_get_slot_state(xhci, virt_dev->out_ctx)); + xhci_dbg(xhci, "Not freeing device rings.\n"); + /* Don't treat this as an error. May change my mind later. 
*/ + ret = 0; + goto command_cleanup; + case COMP_SUCCESS: + xhci_dbg(xhci, "Successful reset device command.\n"); + break; + default: + if (xhci_is_vendor_info_code(xhci, ret)) + break; + xhci_warn(xhci, "Unknown completion code %u for " + "reset device command.\n", ret); + ret = -EINVAL; + goto command_cleanup; + } + + /* Free up host controller endpoint resources */ + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { + spin_lock_irqsave(&xhci->lock, flags); + /* Don't delete the default control endpoint resources */ + xhci_free_device_endpoint_resources(xhci, virt_dev, false); + spin_unlock_irqrestore(&xhci->lock, flags); + } + + /* Everything but endpoint 0 is disabled, so free the rings. */ + for (i = 1; i < 31; i++) { + struct xhci_virt_ep *ep = &virt_dev->eps[i]; + + if (ep->ep_state & EP_HAS_STREAMS) { + xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", + xhci_get_endpoint_address(i)); + xhci_free_stream_info(xhci, ep->stream_info); + ep->stream_info = NULL; + ep->ep_state &= ~EP_HAS_STREAMS; + } + + if (ep->ring) { + xhci_debugfs_remove_endpoint(xhci, virt_dev, i); + xhci_free_endpoint_ring(xhci, virt_dev, i); + } + if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) + xhci_drop_ep_from_interval_table(xhci, + &virt_dev->eps[i].bw_info, + virt_dev->bw_table, + udev, + &virt_dev->eps[i], + virt_dev->tt_info); + xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); + } + /* If necessary, update the number of active TTs on this root port */ + xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); + virt_dev->flags = 0; + ret = 0; + +command_cleanup: + xhci_free_command(xhci, reset_device_cmd); + return ret; +} + +/* + * At this point, the struct usb_device is about to go away, the device has + * disconnected, and all traffic has been stopped and the endpoints have been + * disabled. Free any HC data structures associated with that device. + */ +static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *virt_dev; + struct xhci_slot_ctx *slot_ctx; + int i, ret; + +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * We called pm_runtime_get_noresume when the device was attached. + * Decrement the counter here to allow controller to runtime suspend + * if no devices remain. + */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_put_noidle(hcd->self.controller); +#endif + + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); + /* If the host is halted due to driver unload, we still need to free the + * device. + */ + if (ret <= 0 && ret != -ENODEV) + return; + + virt_dev = xhci->devs[udev->slot_id]; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + trace_xhci_free_dev(slot_ctx); + + /* Stop any wayward timer functions (which may grab the lock) */ + for (i = 0; i < 31; i++) { + virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; + del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); + } + virt_dev->udev = NULL; + ret = xhci_disable_slot(xhci, udev->slot_id); + if (ret) + xhci_free_virt_device(xhci, udev->slot_id); +} + +int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) +{ + struct xhci_command *command; + unsigned long flags; + u32 state; + int ret = 0; + + command = xhci_alloc_command(xhci, false, GFP_KERNEL); + if (!command) + return -ENOMEM; + + xhci_debugfs_remove_slot(xhci, slot_id); + + spin_lock_irqsave(&xhci->lock, flags); + /* Don't disable the slot if the host controller is dead. 
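+ * A status register read of all ones means the controller is gone
+ * (e.g. hot-removed); XHCI_STATE_DYING or XHCI_STATE_HALTED likewise
+ * mean it can no longer accept commands.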
*/ + state = readl(&xhci->op_regs->status); + if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_HALTED)) { + spin_unlock_irqrestore(&xhci->lock, flags); + kfree(command); + return -ENODEV; + } + + ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, + slot_id); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + kfree(command); + return ret; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; +} + +/* + * Checks if we have enough host controller resources for the default control + * endpoint. + * + * Must be called with xhci->lock held. + */ +static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) +{ + if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Not enough ep ctxs: " + "%u active, need to add 1, limit is %u.", + xhci->num_active_eps, xhci->limit_active_eps); + return -ENOMEM; + } + xhci->num_active_eps += 1; + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Adding 1 ep ctx, %u now active.", + xhci->num_active_eps); + return 0; +} + + +/* + * Returns 0 if the xHC ran out of device slots, the Enable Slot command + * timed out, or allocating memory failed. Returns 1 on success. + */ +int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *vdev; + struct xhci_slot_ctx *slot_ctx; + unsigned long flags; + int ret, slot_id; + struct xhci_command *command; + + command = xhci_alloc_command(xhci, true, GFP_KERNEL); + if (!command) + return 0; + + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + xhci_free_command(xhci, command); + return 0; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(command->completion); + slot_id = command->slot_id; + + if (!slot_id || command->status != COMP_SUCCESS) { + xhci_err(xhci, "Error while assigning device slot ID\n"); + xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", + HCS_MAX_SLOTS( + readl(&xhci->cap_regs->hcs_params1))); + xhci_free_command(xhci, command); + return 0; + } + + xhci_free_command(xhci, command); + + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_reserve_host_control_ep_resources(xhci); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_warn(xhci, "Not enough host resources, " + "active endpoint contexts = %u\n", + xhci->num_active_eps); + goto disable_slot; + } + spin_unlock_irqrestore(&xhci->lock, flags); + } + /* Use GFP_NOIO, since this function can be called from + * xhci_discover_or_reset_device(), which may be called as part of + * mass storage driver error handling. 
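+ * GFP_NOIO keeps this allocation from starting new I/O, which could
+ * deadlock against the very device being recovered.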
+ */ + if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { + xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); + goto disable_slot; + } + + ret = xhci_vendor_sync_dev_ctx(xhci, slot_id); + if (ret) { + xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", + __func__, ret); + goto disable_slot; + } + + vdev = xhci->devs[slot_id]; + slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); + trace_xhci_alloc_dev(slot_ctx); + + udev->slot_id = slot_id; + + xhci_debugfs_create_slot(xhci, slot_id); + +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * If resetting upon resume, we can't put the controller into runtime + * suspend if there is a device attached. + */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_get_noresume(hcd->self.controller); +#endif + + /* Is this a LS or FS device under a HS hub? */ + /* Hub or peripherial? */ + return 1; + +disable_slot: + ret = xhci_disable_slot(xhci, udev->slot_id); + if (ret) + xhci_free_virt_device(xhci, udev->slot_id); + + return 0; +} + +/* + * Issue an Address Device command and optionally send a corresponding + * SetAddress request to the device. + */ +static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, + enum xhci_setup_dev setup) +{ + const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; + unsigned long flags; + struct xhci_virt_device *virt_dev; + int ret = 0; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_slot_ctx *slot_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + u64 temp_64; + struct xhci_command *command = NULL; + + mutex_lock(&xhci->mutex); + + if (xhci->xhc_state) { /* dying, removing or halted */ + ret = -ESHUTDOWN; + goto out; + } + + if (!udev->slot_id) { + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Bad Slot ID %d", udev->slot_id); + ret = -EINVAL; + goto out; + } + + virt_dev = xhci->devs[udev->slot_id]; + + if (WARN_ON(!virt_dev)) { + /* + * In plug/unplug torture test with an NEC controller, + * a zero-dereference was observed once due to virt_dev = 0. + * Print useful debug rather than crash if it is observed again! + */ + xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", + udev->slot_id); + ret = -EINVAL; + goto out; + } + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + trace_xhci_setup_device_slot(slot_ctx); + + if (setup == SETUP_CONTEXT_ONLY) { + if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == + SLOT_STATE_DEFAULT) { + xhci_dbg(xhci, "Slot already in default state\n"); + goto out; + } + } + + command = xhci_alloc_command(xhci, true, GFP_KERNEL); + if (!command) { + ret = -ENOMEM; + goto out; + } + + command->in_ctx = virt_dev->in_ctx; + + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + ret = -EINVAL; + goto out; + } + /* + * If this is the first Set Address since device plug-in or + * virt_device realloaction after a resume with an xHCI power loss, + * then set up the slot context. + */ + if (!slot_ctx->dev_info) + xhci_setup_addressable_virt_dev(xhci, udev); + /* Otherwise, update the control endpoint ring enqueue pointer. 
*/ + else + xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); + ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); + ctrl_ctx->drop_flags = 0; + + trace_xhci_address_ctx(xhci, virt_dev->in_ctx, + le32_to_cpu(slot_ctx->dev_info) >> 27); + + trace_xhci_address_ctrl_ctx(ctrl_ctx); + spin_lock_irqsave(&xhci->lock, flags); + trace_xhci_setup_device(virt_dev); + ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, + udev->slot_id, setup); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "FIXME: allocate a command ring segment"); + goto out; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ + wait_for_completion(command->completion); + + ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); + if (ret) { + xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", + __func__, ret); + goto out; + } + + /* FIXME: From section 4.3.4: "Software shall be responsible for timing + * the SetAddress() "recovery interval" required by USB and aborting the + * command on a timeout. + */ + switch (command->status) { + case COMP_COMMAND_ABORTED: + case COMP_COMMAND_RING_STOPPED: + xhci_warn(xhci, "Timeout while waiting for setup device command\n"); + ret = -ETIME; + break; + case COMP_CONTEXT_STATE_ERROR: + case COMP_SLOT_NOT_ENABLED_ERROR: + xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", + act, udev->slot_id); + ret = -EINVAL; + break; + case COMP_USB_TRANSACTION_ERROR: + dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); + + mutex_unlock(&xhci->mutex); + ret = xhci_disable_slot(xhci, udev->slot_id); + if (!ret) + xhci_alloc_dev(hcd, udev); + kfree(command->completion); + kfree(command); + return -EPROTO; + case COMP_INCOMPATIBLE_DEVICE_ERROR: + dev_warn(&udev->dev, + "ERROR: Incompatible device for setup %s command\n", act); + ret = -ENODEV; + break; + case COMP_SUCCESS: + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Successful setup %s command", act); + break; + default: + xhci_err(xhci, + "ERROR: unexpected setup %s command completion code 0x%x.\n", + act, command->status); + trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); + ret = -EINVAL; + break; + } + if (ret) + goto out; + temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Op regs DCBAA ptr = %#016llx", temp_64); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Slot ID %d dcbaa entry @%p = %#016llx", + udev->slot_id, + &xhci->dcbaa->dev_context_ptrs[udev->slot_id], + (unsigned long long) + le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Output Context DMA address = %#08llx", + (unsigned long long)virt_dev->out_ctx->dma); + trace_xhci_address_ctx(xhci, virt_dev->in_ctx, + le32_to_cpu(slot_ctx->dev_info) >> 27); + /* + * USB core uses address 1 for the roothubs, so we add one to the + * address given back to us by the HC. 
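+ * The raw xHC-assigned address is read back below from the output
+ * slot context (dev_state & DEV_ADDR_MASK) and cached in udev->devaddr.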
+ */ + trace_xhci_address_ctx(xhci, virt_dev->out_ctx, + le32_to_cpu(slot_ctx->dev_info) >> 27); + /* Zero the input context control for later use */ + ctrl_ctx->add_flags = 0; + ctrl_ctx->drop_flags = 0; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); + + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Internal device address = %d", + le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); +out: + mutex_unlock(&xhci->mutex); + if (command) { + kfree(command->completion); + kfree(command); + } + return ret; +} + +int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); +} +EXPORT_SYMBOL_GPL(xhci_address_device); + +static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); +} + +/* + * Transfer the port index into real index in the HW port status + * registers. Caculate offset between the port's PORTSC register + * and port status base. Divide the number of per port register + * to get the real index. The raw port number bases 1. + */ +int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) +{ + struct xhci_hub *rhub; + + rhub = xhci_get_rhub(hcd); + return rhub->ports[port1 - 1]->hw_portnum + 1; +} + +/* + * Issue an Evaluate Context command to change the Maximum Exit Latency in the + * slot context. If that succeeds, store the new MEL in the xhci_virt_device. + */ +static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, + struct usb_device *udev, u16 max_exit_latency) +{ + struct xhci_virt_device *virt_dev; + struct xhci_command *command; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; + unsigned long flags; + int ret; + + spin_lock_irqsave(&xhci->lock, flags); + + virt_dev = xhci->devs[udev->slot_id]; + + /* + * virt_dev might not exists yet if xHC resumed from hibernate (S4) and + * xHC was re-initialized. Exit latency will be set later after + * hub_port_finish_reset() is done and xhci->devs[] are re-allocated + */ + + if (!virt_dev || max_exit_latency == virt_dev->current_mel) { + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; + } + + /* Attempt to issue an Evaluate Context command to change the MEL. */ + command = xhci->lpm_command; + ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); + if (!ctrl_ctx) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + return -ENOMEM; + } + + ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", + __func__, ret); + return ret; + } + + xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); + spin_unlock_irqrestore(&xhci->lock, flags); + + ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); + slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); + slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); + slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); + slot_ctx->dev_state = 0; + + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Set up evaluate context for LPM MEL change."); + + /* Issue and wait for the evaluate context command. 
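+ * Passing ctx_change = true below makes xhci_configure_endpoint()
+ * queue an Evaluate Context command rather than a Configure Endpoint
+ * command.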
*/ + ret = xhci_configure_endpoint(xhci, udev, command, + true, true); + + if (!ret) { + spin_lock_irqsave(&xhci->lock, flags); + virt_dev->current_mel = max_exit_latency; + spin_unlock_irqrestore(&xhci->lock, flags); + } + return ret; +} + +struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci) +{ + return xhci->vendor_ops; +} +EXPORT_SYMBOL_GPL(xhci_vendor_get_ops); + +int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->sync_dev_ctx) + return ops->sync_dev_ctx(xhci, slot_id); + return 0; +} + +bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb) +{ + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + + if (ops && ops->usb_offload_skip_urb) + return ops->usb_offload_skip_urb(xhci, urb); + return false; +} + +#ifdef CONFIG_PM + +/* BESL to HIRD Encoding array for USB2 LPM */ +static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, + 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; + +/* Calculate HIRD/BESL for USB2 PORTPMSC*/ +static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, + struct usb_device *udev) +{ + int u2del, besl, besl_host; + int besl_device = 0; + u32 field; + + u2del = HCS_U2_LATENCY(xhci->hcs_params3); + field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); + + if (field & USB_BESL_SUPPORT) { + for (besl_host = 0; besl_host < 16; besl_host++) { + if (xhci_besl_encoding[besl_host] >= u2del) + break; + } + /* Use baseline BESL value as default */ + if (field & USB_BESL_BASELINE_VALID) + besl_device = USB_GET_BESL_BASELINE(field); + else if (field & USB_BESL_DEEP_VALID) + besl_device = USB_GET_BESL_DEEP(field); + } else { + if (u2del <= 50) + besl_host = 0; + else + besl_host = (u2del - 51) / 75 + 1; + } + + besl = besl_host + besl_device; + if (besl > 15) + besl = 15; + + return besl; +} + +/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ +static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) +{ + u32 field; + int l1; + int besld = 0; + int hirdm = 0; + + field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); + + /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ + l1 = udev->l1_params.timeout / 256; + + /* device has preferred BESLD */ + if (field & USB_BESL_DEEP_VALID) { + besld = USB_GET_BESL_DEEP(field); + hirdm = 1; + } + + return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); +} + +static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, + struct usb_device *udev, int enable) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_port **ports; + __le32 __iomem *pm_addr, *hlpm_addr; + u32 pm_val, hlpm_val, field; + unsigned int port_num; + unsigned long flags; + int hird, exit_latency; + int ret; + + if (xhci->quirks & XHCI_HW_LPM_DISABLE) + return -EPERM; + + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || + !udev->lpm_capable) + return -EPERM; + + if (!udev->parent || udev->parent->parent || + udev->descriptor.bDeviceClass == USB_CLASS_HUB) + return -EPERM; + + if (udev->usb2_hw_lpm_capable != 1) + return -EPERM; + + spin_lock_irqsave(&xhci->lock, flags); + + ports = xhci->usb2_rhub.ports; + port_num = udev->portnum - 1; + pm_addr = ports[port_num]->addr + PORTPMSC; + pm_val = readl(pm_addr); + hlpm_addr = ports[port_num]->addr + PORTHLPMC; + + xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", + enable ? 
"enable" : "disable", port_num + 1); + + if (enable) { + /* Host supports BESL timeout instead of HIRD */ + if (udev->usb2_hw_lpm_besl_capable) { + /* if device doesn't have a preferred BESL value use a + * default one which works with mixed HIRD and BESL + * systems. See XHCI_DEFAULT_BESL definition in xhci.h + */ + field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); + if ((field & USB_BESL_SUPPORT) && + (field & USB_BESL_BASELINE_VALID)) + hird = USB_GET_BESL_BASELINE(field); + else + hird = udev->l1_params.besl; + + exit_latency = xhci_besl_encoding[hird]; + spin_unlock_irqrestore(&xhci->lock, flags); + + /* USB 3.0 code dedicate one xhci->lpm_command->in_ctx + * input context for link powermanagement evaluate + * context commands. It is protected by hcd->bandwidth + * mutex and is shared by all devices. We need to set + * the max ext latency in USB 2 BESL LPM as well, so + * use the same mutex and xhci_change_max_exit_latency() + */ + mutex_lock(hcd->bandwidth_mutex); + ret = xhci_change_max_exit_latency(xhci, udev, + exit_latency); + mutex_unlock(hcd->bandwidth_mutex); + + if (ret < 0) + return ret; + spin_lock_irqsave(&xhci->lock, flags); + + hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); + writel(hlpm_val, hlpm_addr); + /* flush write */ + readl(hlpm_addr); + } else { + hird = xhci_calculate_hird_besl(xhci, udev); + } + + pm_val &= ~PORT_HIRD_MASK; + pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); + writel(pm_val, pm_addr); + pm_val = readl(pm_addr); + pm_val |= PORT_HLE; + writel(pm_val, pm_addr); + /* flush write */ + readl(pm_addr); + } else { + pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); + writel(pm_val, pm_addr); + /* flush write */ + readl(pm_addr); + if (udev->usb2_hw_lpm_besl_capable) { + spin_unlock_irqrestore(&xhci->lock, flags); + mutex_lock(hcd->bandwidth_mutex); + xhci_change_max_exit_latency(xhci, udev, 0); + mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(ports[port_num]->addr, pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); + return 0; + } + } + + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; +} + +/* check if a usb2 port supports a given extened capability protocol + * only USB2 ports extended protocol capability values are cached. 
+ * Return 1 if capability is supported + */ +static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, + unsigned capability) +{ + u32 port_offset, port_count; + int i; + + for (i = 0; i < xhci->num_ext_caps; i++) { + if (xhci->ext_caps[i] & capability) { + /* port offsets starts at 1 */ + port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; + port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); + if (port >= port_offset && + port < port_offset + port_count) + return 1; + } + } + return 0; +} + +static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int portnum = udev->portnum - 1; + + if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) + return 0; + + /* we only support lpm for non-hub device connected to root hub yet */ + if (!udev->parent || udev->parent->parent || + udev->descriptor.bDeviceClass == USB_CLASS_HUB) + return 0; + + if (xhci->hw_lpm_support == 1 && + xhci_check_usb2_port_capability( + xhci, portnum, XHCI_HLC)) { + udev->usb2_hw_lpm_capable = 1; + udev->l1_params.timeout = XHCI_L1_TIMEOUT; + udev->l1_params.besl = XHCI_DEFAULT_BESL; + if (xhci_check_usb2_port_capability(xhci, portnum, + XHCI_BLC)) + udev->usb2_hw_lpm_besl_capable = 1; + } + + return 0; +} + +/*---------------------- USB 3.0 Link PM functions ------------------------*/ + +/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ +static unsigned long long xhci_service_interval_to_ns( + struct usb_endpoint_descriptor *desc) +{ + return (1ULL << (desc->bInterval - 1)) * 125 * 1000; +} + +static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, + enum usb3_link_state state) +{ + unsigned long long sel; + unsigned long long pel; + unsigned int max_sel_pel; + char *state_name; + + switch (state) { + case USB3_LPM_U1: + /* Convert SEL and PEL stored in nanoseconds to microseconds */ + sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); + pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); + max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; + state_name = "U1"; + break; + case USB3_LPM_U2: + sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); + pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); + max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; + state_name = "U2"; + break; + default: + dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", + __func__); + return USB3_LPM_DISABLED; + } + + if (sel <= max_sel_pel && pel <= max_sel_pel) + return USB3_LPM_DEVICE_INITIATED; + + if (sel > max_sel_pel) + dev_dbg(&udev->dev, "Device-initiated %s disabled " + "due to long SEL %llu ms\n", + state_name, sel); + else + dev_dbg(&udev->dev, "Device-initiated %s disabled " + "due to long PEL %llu ms\n", + state_name, pel); + return USB3_LPM_DISABLED; +} + +/* The U1 timeout should be the maximum of the following values: + * - For control endpoints, U1 system exit latency (SEL) * 3 + * - For bulk endpoints, U1 SEL * 5 + * - For interrupt endpoints: + * - Notification EPs, U1 SEL * 3 + * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) + * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) + */ +static unsigned long long xhci_calculate_intel_u1_timeout( + struct usb_device *udev, + struct usb_endpoint_descriptor *desc) +{ + unsigned long long timeout_ns; + int ep_type; + int intr_type; + + ep_type = usb_endpoint_type(desc); + switch (ep_type) { + case USB_ENDPOINT_XFER_CONTROL: + timeout_ns = udev->u1_params.sel * 3; + break; + case USB_ENDPOINT_XFER_BULK: + timeout_ns = udev->u1_params.sel * 5; + break; + case 
USB_ENDPOINT_XFER_INT: + intr_type = usb_endpoint_interrupt_type(desc); + if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { + timeout_ns = udev->u1_params.sel * 3; + break; + } + /* Otherwise the calculation is the same as isoc eps */ + fallthrough; + case USB_ENDPOINT_XFER_ISOC: + timeout_ns = xhci_service_interval_to_ns(desc); + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); + if (timeout_ns < udev->u1_params.sel * 2) + timeout_ns = udev->u1_params.sel * 2; + break; + default: + return 0; + } + + return timeout_ns; +} + +/* Returns the hub-encoded U1 timeout value. */ +static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_endpoint_descriptor *desc) +{ + unsigned long long timeout_ns; + + if (xhci->quirks & XHCI_INTEL_HOST) + timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); + else + timeout_ns = udev->u1_params.sel; + + /* Prevent U1 if service interval is shorter than U1 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { + if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + + /* The U1 timeout is encoded in 1us intervals. + * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. + */ + if (timeout_ns == USB3_LPM_DISABLED) + timeout_ns = 1; + else + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); + + /* If the necessary timeout value is bigger than what we can set in the + * USB 3.0 hub, we have to disable hub-initiated U1. + */ + if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) + return timeout_ns; + dev_dbg(&udev->dev, "Hub-initiated U1 disabled " + "due to long timeout %llu ms\n", timeout_ns); + return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); +} + +/* The U2 timeout should be the maximum of: + * - 10 ms (to avoid the bandwidth impact on the scheduler) + * - largest bInterval of any active periodic endpoint (to avoid going + * into lower power link states between intervals). + * - the U2 Exit Latency of the device + */ +static unsigned long long xhci_calculate_intel_u2_timeout( + struct usb_device *udev, + struct usb_endpoint_descriptor *desc) +{ + unsigned long long timeout_ns; + unsigned long long u2_del_ns; + + timeout_ns = 10 * 1000 * 1000; + + if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && + (xhci_service_interval_to_ns(desc) > timeout_ns)) + timeout_ns = xhci_service_interval_to_ns(desc); + + u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; + if (u2_del_ns > timeout_ns) + timeout_ns = u2_del_ns; + + return timeout_ns; +} + +/* Returns the hub-encoded U2 timeout value. */ +static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_endpoint_descriptor *desc) +{ + unsigned long long timeout_ns; + + if (xhci->quirks & XHCI_INTEL_HOST) + timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); + else + timeout_ns = udev->u2_params.sel; + + /* Prevent U2 if service interval is shorter than U2 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { + if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + + /* The U2 timeout is encoded in 256us intervals */ + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); + /* If the necessary timeout value is bigger than what we can set in the + * USB 3.0 hub, we have to disable hub-initiated U2. 
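+ * USB3_LPM_U2_MAX_TIMEOUT is the largest U2 timeout the parent hub can
+ * be programmed with; anything larger falls back to
+ * xhci_get_timeout_no_hub_lpm(), i.e. device-initiated LPM or none.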
+ */ + if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) + return timeout_ns; + dev_dbg(&udev->dev, "Hub-initiated U2 disabled " + "due to long timeout %llu ms\n", timeout_ns); + return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); +} + +static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_endpoint_descriptor *desc, + enum usb3_link_state state, + u16 *timeout) +{ + if (state == USB3_LPM_U1) + return xhci_calculate_u1_timeout(xhci, udev, desc); + else if (state == USB3_LPM_U2) + return xhci_calculate_u2_timeout(xhci, udev, desc); + + return USB3_LPM_DISABLED; +} + +static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_endpoint_descriptor *desc, + enum usb3_link_state state, + u16 *timeout) +{ + u16 alt_timeout; + + alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, + desc, state, timeout); + + /* If we found we can't enable hub-initiated LPM, and + * the U1 or U2 exit latency was too high to allow + * device-initiated LPM as well, then we will disable LPM + * for this device, so stop searching any further. + */ + if (alt_timeout == USB3_LPM_DISABLED) { + *timeout = alt_timeout; + return -E2BIG; + } + if (alt_timeout > *timeout) + *timeout = alt_timeout; + return 0; +} + +static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_host_interface *alt, + enum usb3_link_state state, + u16 *timeout) +{ + int j; + + for (j = 0; j < alt->desc.bNumEndpoints; j++) { + if (xhci_update_timeout_for_endpoint(xhci, udev, + &alt->endpoint[j].desc, state, timeout)) + return -E2BIG; + continue; + } + return 0; +} + +static int xhci_check_intel_tier_policy(struct usb_device *udev, + enum usb3_link_state state) +{ + struct usb_device *parent; + unsigned int num_hubs; + + if (state == USB3_LPM_U2) + return 0; + + /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ + for (parent = udev->parent, num_hubs = 0; parent->parent; + parent = parent->parent) + num_hubs++; + + if (num_hubs < 2) + return 0; + + dev_dbg(&udev->dev, "Disabling U1 link state for device" + " below second-tier hub.\n"); + dev_dbg(&udev->dev, "Plug device into first-tier hub " + "to decrease power consumption.\n"); + return -E2BIG; +} + +static int xhci_check_tier_policy(struct xhci_hcd *xhci, + struct usb_device *udev, + enum usb3_link_state state) +{ + if (xhci->quirks & XHCI_INTEL_HOST) + return xhci_check_intel_tier_policy(udev, state); + else + return 0; +} + +/* Returns the U1 or U2 timeout that should be enabled. + * If the tier check or timeout setting functions return with a non-zero exit + * code, that means the timeout value has been finalized and we shouldn't look + * at any more endpoints. + */ +static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, + struct usb_device *udev, enum usb3_link_state state) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct usb_host_config *config; + char *state_name; + int i; + u16 timeout = USB3_LPM_DISABLED; + + if (state == USB3_LPM_U1) + state_name = "U1"; + else if (state == USB3_LPM_U2) + state_name = "U2"; + else { + dev_warn(&udev->dev, "Can't enable unknown link state %i\n", + state); + return timeout; + } + + if (xhci_check_tier_policy(xhci, udev, state) < 0) + return timeout; + + /* Gather some information about the currently installed configuration + * and alternate interface settings. 
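+ * Endpoint 0 is checked first, then every endpoint of each bound
+ * interface's current altsetting; any endpoint may raise the timeout
+ * or disable hub-initiated LPM entirely.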
+ */ + if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, + state, &timeout)) + return timeout; + + config = udev->actconfig; + if (!config) + return timeout; + + for (i = 0; i < config->desc.bNumInterfaces; i++) { + struct usb_driver *driver; + struct usb_interface *intf = config->interface[i]; + + if (!intf) + continue; + + /* Check if any currently bound drivers want hub-initiated LPM + * disabled. + */ + if (intf->dev.driver) { + driver = to_usb_driver(intf->dev.driver); + if (driver && driver->disable_hub_initiated_lpm) { + dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", + state_name, driver->name); + timeout = xhci_get_timeout_no_hub_lpm(udev, + state); + if (timeout == USB3_LPM_DISABLED) + return timeout; + } + } + + /* Not sure how this could happen... */ + if (!intf->cur_altsetting) + continue; + + if (xhci_update_timeout_for_interface(xhci, udev, + intf->cur_altsetting, + state, &timeout)) + return timeout; + } + return timeout; +} + +static int calculate_max_exit_latency(struct usb_device *udev, + enum usb3_link_state state_changed, + u16 hub_encoded_timeout) +{ + unsigned long long u1_mel_us = 0; + unsigned long long u2_mel_us = 0; + unsigned long long mel_us = 0; + bool disabling_u1; + bool disabling_u2; + bool enabling_u1; + bool enabling_u2; + + disabling_u1 = (state_changed == USB3_LPM_U1 && + hub_encoded_timeout == USB3_LPM_DISABLED); + disabling_u2 = (state_changed == USB3_LPM_U2 && + hub_encoded_timeout == USB3_LPM_DISABLED); + + enabling_u1 = (state_changed == USB3_LPM_U1 && + hub_encoded_timeout != USB3_LPM_DISABLED); + enabling_u2 = (state_changed == USB3_LPM_U2 && + hub_encoded_timeout != USB3_LPM_DISABLED); + + /* If U1 was already enabled and we're not disabling it, + * or we're going to enable U1, account for the U1 max exit latency. + */ + if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || + enabling_u1) + u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); + if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || + enabling_u2) + u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); + + if (u1_mel_us > u2_mel_us) + mel_us = u1_mel_us; + else + mel_us = u2_mel_us; + /* xHCI host controller max exit latency field is only 16 bits wide. */ + if (mel_us > MAX_EXIT) { + dev_warn(&udev->dev, "Link PM max exit latency of %lluus " + "is too big.\n", mel_us); + return -E2BIG; + } + return mel_us; +} + +/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ +static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, + struct usb_device *udev, enum usb3_link_state state) +{ + struct xhci_hcd *xhci; + u16 hub_encoded_timeout; + int mel; + int ret; + + xhci = hcd_to_xhci(hcd); + /* The LPM timeout values are pretty host-controller specific, so don't + * enable hub-initiated timeouts unless the vendor has provided + * information about their timeout algorithm. + */ + if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || + !xhci->devs[udev->slot_id]) + return USB3_LPM_DISABLED; + + hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); + mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); + if (mel < 0) { + /* Max Exit Latency is too big, disable LPM. 
*/ + hub_encoded_timeout = USB3_LPM_DISABLED; + mel = 0; + } + + ret = xhci_change_max_exit_latency(xhci, udev, mel); + if (ret) + return ret; + return hub_encoded_timeout; +} + +static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, + struct usb_device *udev, enum usb3_link_state state) +{ + struct xhci_hcd *xhci; + u16 mel; + + xhci = hcd_to_xhci(hcd); + if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || + !xhci->devs[udev->slot_id]) + return 0; + + mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); + return xhci_change_max_exit_latency(xhci, udev, mel); +} +#else /* CONFIG_PM */ + +static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, + struct usb_device *udev, int enable) +{ + return 0; +} + +static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + return 0; +} + +static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, + struct usb_device *udev, enum usb3_link_state state) +{ + return USB3_LPM_DISABLED; +} + +static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, + struct usb_device *udev, enum usb3_link_state state) +{ + return 0; +} +#endif /* CONFIG_PM */ + +/*-------------------------------------------------------------------------*/ + +/* Once a hub descriptor is fetched for a device, we need to update the xHC's + * internal data structures for the device. + */ +static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *vdev; + struct xhci_command *config_cmd; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; + unsigned long flags; + unsigned think_time; + int ret; + + /* Ignore root hubs */ + if (!hdev->parent) + return 0; + + vdev = xhci->devs[hdev->slot_id]; + if (!vdev) { + xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); + return -EINVAL; + } + + config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); + if (!config_cmd) + return -ENOMEM; + + ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + xhci_free_command(xhci, config_cmd); + return -ENOMEM; + } + + spin_lock_irqsave(&xhci->lock, flags); + if (hdev->speed == USB_SPEED_HIGH && + xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { + xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + return -ENOMEM; + } + + ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id); + if (ret) { + xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", + __func__, ret); + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; + } + + xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); + ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); + slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); + slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); + /* + * refer to section 6.2.2: MTT should be 0 for full speed hub, + * but it may be already set to 1 when setup an xHCI virtual + * device, so clear it anyway. 
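+ * tt->multi is set by the USB core when the high-speed hub operates
+ * with one transaction translator per port.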
+ */ + if (tt->multi) + slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); + else if (hdev->speed == USB_SPEED_FULL) + slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); + + if (xhci->hci_version > 0x95) { + xhci_dbg(xhci, "xHCI version %x needs hub " + "TT think time and number of ports\n", + (unsigned int) xhci->hci_version); + slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); + /* Set TT think time - convert from ns to FS bit times. + * 0 = 8 FS bit times, 1 = 16 FS bit times, + * 2 = 24 FS bit times, 3 = 32 FS bit times. + * + * xHCI 1.0: this field shall be 0 if the device is not a + * High-spped hub. + */ + think_time = tt->think_time; + if (think_time != 0) + think_time = (think_time / 666) - 1; + if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) + slot_ctx->tt_info |= + cpu_to_le32(TT_THINK_TIME(think_time)); + } else { + xhci_dbg(xhci, "xHCI version %x doesn't need hub " + "TT think time or number of ports\n", + (unsigned int) xhci->hci_version); + } + slot_ctx->dev_state = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + + xhci_dbg(xhci, "Set up %s for hub device.\n", + (xhci->hci_version > 0x95) ? + "configure endpoint" : "evaluate context"); + + /* Issue and wait for the configure endpoint or + * evaluate context command. + */ + if (xhci->hci_version > 0x95) + ret = xhci_configure_endpoint(xhci, hdev, config_cmd, + false, false); + else + ret = xhci_configure_endpoint(xhci, hdev, config_cmd, + true, false); + + xhci_free_command(xhci, config_cmd); + return ret; +} + +static int xhci_get_frame(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + /* EHCI mods by the periodic size. Why? */ + return readl(&xhci->run_regs->microframe_index) >> 3; +} + +int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) +{ + struct xhci_hcd *xhci; + /* + * TODO: Check with DWC3 clients for sysdev according to + * quirks + */ + struct device *dev = hcd->self.sysdev; + unsigned int minor_rev; + int retval; + + /* Accept arbitrarily long scatter-gather lists */ + hcd->self.sg_tablesize = ~0; + + /* support to build packet from discontinuous buffers */ + hcd->self.no_sg_constraint = 1; + + /* XHCI controllers don't stop the ep queue on short packets :| */ + hcd->self.no_stop_on_short = 1; + + xhci = hcd_to_xhci(hcd); + + if (usb_hcd_is_primary_hcd(hcd)) { + xhci->main_hcd = hcd; + xhci->usb2_rhub.hcd = hcd; + /* Mark the first roothub as being USB 2.0. + * The xHCI driver will register the USB 3.0 roothub. + */ + hcd->speed = HCD_USB2; + hcd->self.root_hub->speed = USB_SPEED_HIGH; + /* + * USB 2.0 roothub under xHCI has an integrated TT, + * (rate matching hub) as opposed to having an OHCI/UHCI + * companion controller. + */ + hcd->has_tt = 1; + } else { + /* + * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts + * should return 0x31 for sbrn, or that the minor revision + * is a two digit BCD containig minor and sub-minor numbers. + * This was later clarified in xHCI 1.2. + * + * Some USB 3.1 capable hosts therefore have sbrn 0x30, and + * minor revision set to 0x1 instead of 0x10. 
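+ * For example, a USB 3.1 host normally reports min_rev 0x10 (handled
+ * by the division below), while the quirky ones report 0x1 (handled by
+ * the special case).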
+ */ + if (xhci->usb3_rhub.min_rev == 0x1) + minor_rev = 1; + else + minor_rev = xhci->usb3_rhub.min_rev / 0x10; + + switch (minor_rev) { + case 2: + hcd->speed = HCD_USB32; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + hcd->self.root_hub->rx_lanes = 2; + hcd->self.root_hub->tx_lanes = 2; + break; + case 1: + hcd->speed = HCD_USB31; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + break; + } + xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", + minor_rev, + minor_rev ? "Enhanced " : ""); + + xhci->usb3_rhub.hcd = hcd; + /* xHCI private pointer was set in xhci_pci_probe for the second + * registered roothub. + */ + return 0; + } + + mutex_init(&xhci->mutex); + xhci->cap_regs = hcd->regs; + xhci->op_regs = hcd->regs + + HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); + xhci->run_regs = hcd->regs + + (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); + /* Cache read-only capability registers */ + xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); + xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); + xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); + xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); + xhci->hci_version = HC_VERSION(xhci->hcc_params); + xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); + if (xhci->hci_version > 0x100) + xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); + + xhci->quirks |= quirks; + + get_quirks(dev, xhci); + + /* In xhci controllers which follow xhci 1.0 spec gives a spurious + * success event after a short transfer. This quirk will ignore such + * spurious event. + */ + if (xhci->hci_version > 0x96) + xhci->quirks |= XHCI_SPURIOUS_SUCCESS; + + /* Make sure the HC is halted. */ + retval = xhci_halt(xhci); + if (retval) + return retval; + + xhci_zero_64b_regs(xhci); + + xhci_dbg(xhci, "Resetting HCD\n"); + /* Reset the internal HC memory state and registers. */ + retval = xhci_reset(xhci); + if (retval) + return retval; + xhci_dbg(xhci, "Reset complete\n"); + + /* + * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) + * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit + * address memory pointers actually. So, this driver clears the AC64 + * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, + * DMA_BIT_MASK(32)) in this xhci_gen_setup(). + */ + if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) + xhci->hcc_params &= ~BIT(0); + + /* Set dma_mask and coherent_dma_mask to 64-bits, + * if xHC supports 64-bit addressing */ + if (HCC_64BIT_ADDR(xhci->hcc_params) && + !dma_set_mask(dev, DMA_BIT_MASK(64))) { + xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + } else { + /* + * This is to avoid error in cases where a 32-bit USB + * controller is used on a 64-bit capable system. + */ + retval = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (retval) + return retval; + xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); + dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + } + + xhci_dbg(xhci, "Calling HCD init\n"); + /* Initialize HCD and host controller data structures. 
*/ + retval = xhci_init(hcd); + if (retval) + return retval; + xhci_dbg(xhci, "Called HCD init\n"); + + xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", + xhci->hcc_params, xhci->hci_version, xhci->quirks); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_gen_setup); + +static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct usb_device *udev; + unsigned int slot_id; + unsigned int ep_index; + unsigned long flags; + + xhci = hcd_to_xhci(hcd); + + spin_lock_irqsave(&xhci->lock, flags); + udev = (struct usb_device *)ep->hcpriv; + slot_id = udev->slot_id; + ep_index = xhci_get_endpoint_index(&ep->desc); + + xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; + xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + spin_unlock_irqrestore(&xhci->lock, flags); +} + +static const struct hc_driver xhci_hc_driver = { + .description = "xhci-hcd", + .product_desc = "xHCI Host Controller", + .hcd_priv_size = sizeof(struct xhci_hcd), + + /* + * generic hardware linkage + */ + .irq = xhci_irq, + .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | + HCD_BH, + + /* + * basic lifecycle operations + */ + .reset = NULL, /* set in xhci_init_driver() */ + .start = xhci_run, + .stop = xhci_stop, + .shutdown = xhci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .map_urb_for_dma = xhci_map_urb_for_dma, + .urb_enqueue = xhci_urb_enqueue, + .urb_dequeue = xhci_urb_dequeue, + .alloc_dev = xhci_alloc_dev, + .free_dev = xhci_free_dev, + .alloc_streams = xhci_alloc_streams, + .free_streams = xhci_free_streams, + .add_endpoint = xhci_add_endpoint, + .drop_endpoint = xhci_drop_endpoint, + .endpoint_disable = xhci_endpoint_disable, + .endpoint_reset = xhci_endpoint_reset, + .check_bandwidth = xhci_check_bandwidth, + .reset_bandwidth = xhci_reset_bandwidth, + .address_device = xhci_address_device, + .enable_device = xhci_enable_device, + .update_hub_device = xhci_update_hub_device, + .reset_device = xhci_discover_or_reset_device, + + /* + * scheduling support + */ + .get_frame_number = xhci_get_frame, + + /* + * root hub support + */ + .hub_control = xhci_hub_control, + .hub_status_data = xhci_hub_status_data, + .bus_suspend = xhci_bus_suspend, + .bus_resume = xhci_bus_resume, + .get_resuming_ports = xhci_get_resuming_ports, + + /* + * call back when device connected and addressed + */ + .update_device = xhci_update_device, + .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, + .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, + .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, + .find_raw_port_number = xhci_find_raw_port_number, + .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, +}; + +void xhci_init_driver(struct hc_driver *drv, + const struct xhci_driver_overrides *over) +{ + BUG_ON(!over); + + /* Copy the generic table to drv then apply the overrides */ + *drv = xhci_hc_driver; + + if (over) { + drv->hcd_priv_size += over->extra_priv_size; + if (over->reset) + drv->reset = over->reset; + if (over->start) + drv->start = over->start; + if (over->add_endpoint) + drv->add_endpoint = over->add_endpoint; + if (over->drop_endpoint) + drv->drop_endpoint = over->drop_endpoint; + if (over->check_bandwidth) + drv->check_bandwidth = over->check_bandwidth; + if (over->reset_bandwidth) + drv->reset_bandwidth = over->reset_bandwidth; + if (over->address_device) + drv->address_device = over->address_device; + if (over->bus_suspend) + drv->bus_suspend = 
over->bus_suspend; + if (over->bus_resume) + drv->bus_resume = over->bus_resume; + } +} +EXPORT_SYMBOL_GPL(xhci_init_driver); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_LICENSE("GPL"); + +static int __init xhci_hcd_init(void) +{ + /* + * Check the compiler generated sizes of structures that must be laid + * out in specific ways for hardware access. + */ + BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); + BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); + BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); + /* xhci_device_control has eight fields, and also + * embeds one xhci_slot_ctx and 31 xhci_ep_ctx + */ + BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); + BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); + BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); + BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); + BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); + /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ + BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); + + if (usb_disabled()) + return -ENODEV; + + xhci_debugfs_create_root(); + + return 0; +} + +/* + * If an init function is provided, an exit function must also be provided + * to allow module unload. + */ +static void __exit xhci_hcd_fini(void) +{ + xhci_debugfs_remove_root(); +} + +module_init(xhci_hcd_init); +module_exit(xhci_hcd_fini); diff --git a/usb/host/xhci.h b/usb/host/xhci.h new file mode 100644 index 0000000..71c51de --- /dev/null +++ b/usb/host/xhci.h @@ -0,0 +1,2833 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + */ + +#ifndef __LINUX_XHCI_HCD_H +#define __LINUX_XHCI_HCD_H + +#include +#include +#include +#include +#include +#include + +/* Code sharing between pci-quirks and xhci hcd */ +#include "xhci-ext-caps.h" +#include "pci-quirks.h" + +/* max buffer size for trace and debug messages */ +#define XHCI_MSG_MAX 500 + +/* xHCI PCI Configuration Registers */ +#define XHCI_SBRN_OFFSET (0x60) + +/* Max number of USB devices for any host controller - limit in section 6.1 */ +#define MAX_HC_SLOTS 256 +/* Section 5.3.3 - MaxPorts */ +#define MAX_HC_PORTS 127 + +/* + * xHCI register interface. + * This corresponds to the eXtensible Host Controller Interface (xHCI) + * Revision 0.95 specification + */ + +/** + * struct xhci_cap_regs - xHCI Host Controller Capability Registers. 
+ * @hc_capbase: length of the capabilities register and HC version number + * @hcs_params1: HCSPARAMS1 - Structural Parameters 1 + * @hcs_params2: HCSPARAMS2 - Structural Parameters 2 + * @hcs_params3: HCSPARAMS3 - Structural Parameters 3 + * @hcc_params: HCCPARAMS - Capability Parameters + * @db_off: DBOFF - Doorbell array offset + * @run_regs_off: RTSOFF - Runtime register space offset + * @hcc_params2: HCCPARAMS2 Capability Parameters 2, xhci 1.1 only + */ +struct xhci_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; /* xhci 1.1 */ + /* Reserved up to (CAPLENGTH - 0x1C) */ +}; + +/* hc_capbase bitmasks */ +/* bits 7:0 - how long is the Capabilities register */ +#define HC_LENGTH(p) XHCI_HC_LENGTH(p) +/* bits 31:16 */ +#define HC_VERSION(p) (((p) >> 16) & 0xffff) + +/* HCSPARAMS1 - hcs_params1 - bitmasks */ +/* bits 0:7, Max Device Slots */ +#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff) +#define HCS_SLOTS_MASK 0xff +/* bits 8:18, Max Interrupters */ +#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff) +/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */ +#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f) + +/* HCSPARAMS2 - hcs_params2 - bitmasks */ +/* bits 0:3, frames or uframes that SW needs to queue transactions + * ahead of the HW to meet periodic deadlines */ +#define HCS_IST(p) (((p) >> 0) & 0xf) +/* bits 4:7, max number of Event Ring segments */ +#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) +/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */ +/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ +/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */ +#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f)) + +/* HCSPARAMS3 - hcs_params3 - bitmasks */ +/* bits 0:7, Max U1 to U0 latency for the roothub ports */ +#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff) +/* bits 16:31, Max U2 to U0 latency for the roothub ports */ +#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff) + +/* HCCPARAMS - hcc_params - bitmasks */ +/* true: HC can use 64-bit address pointers */ +#define HCC_64BIT_ADDR(p) ((p) & (1 << 0)) +/* true: HC can do bandwidth negotiation */ +#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1)) +/* true: HC uses 64-byte Device Context structures + * FIXME 64-byte context structures aren't supported yet. + */ +#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2)) +/* true: HC has port power switches */ +#define HCC_PPC(p) ((p) & (1 << 3)) +/* true: HC has port indicators */ +#define HCS_INDICATOR(p) ((p) & (1 << 4)) +/* true: HC has Light HC Reset Capability */ +#define HCC_LIGHT_RESET(p) ((p) & (1 << 5)) +/* true: HC supports latency tolerance messaging */ +#define HCC_LTC(p) ((p) & (1 << 6)) +/* true: no secondary Stream ID Support */ +#define HCC_NSS(p) ((p) & (1 << 7)) +/* true: HC supports Stopped - Short Packet */ +#define HCC_SPC(p) ((p) & (1 << 9)) +/* true: HC has Contiguous Frame ID Capability */ +#define HCC_CFC(p) ((p) & (1 << 11)) +/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */ +#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1)) +/* Extended Capabilities pointer from PCI base - section 5.3.6 */ +#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p) + +#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 
64 : 32) + +/* db_off bitmask - bits 0:1 reserved */ +#define DBOFF_MASK (~0x3) + +/* run_regs_off bitmask - bits 0:4 reserved */ +#define RTSOFF_MASK (~0x1f) + +/* HCCPARAMS2 - hcc_params2 - bitmasks */ +/* true: HC supports U3 entry Capability */ +#define HCC2_U3C(p) ((p) & (1 << 0)) +/* true: HC supports Configure endpoint command Max exit latency too large */ +#define HCC2_CMC(p) ((p) & (1 << 1)) +/* true: HC supports Force Save context Capability */ +#define HCC2_FSC(p) ((p) & (1 << 2)) +/* true: HC supports Compliance Transition Capability */ +#define HCC2_CTC(p) ((p) & (1 << 3)) +/* true: HC support Large ESIT payload Capability > 48k */ +#define HCC2_LEC(p) ((p) & (1 << 4)) +/* true: HC support Configuration Information Capability */ +#define HCC2_CIC(p) ((p) & (1 << 5)) +/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */ +#define HCC2_ETC(p) ((p) & (1 << 6)) + +/* Number of registers per port */ +#define NUM_PORT_REGS 4 + +#define PORTSC 0 +#define PORTPMSC 1 +#define PORTLI 2 +#define PORTHLPMC 3 + +/** + * struct xhci_op_regs - xHCI Host Controller Operational Registers. + * @command: USBCMD - xHC command register + * @status: USBSTS - xHC status register + * @page_size: This indicates the page size that the host controller + * supports. If bit n is set, the HC supports a page size + * of 2^(n+12), up to a 128MB page size. + * 4K is the minimum page size. + * @cmd_ring: CRP - 64-bit Command Ring Pointer + * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer + * @config_reg: CONFIG - Configure Register + * @port_status_base: PORTSCn - base address for Port Status and Control + * Each port has a Port Status and Control register, + * followed by a Port Power Management Status and Control + * register, a Port Link Info register, and a reserved + * register. + * @port_power_base: PORTPMSCn - base address for + * Port Power Management Status and Control + * @port_link_base: PORTLIn - base address for Port Link Info (current + * Link PM state and control) for USB 2.1 and USB 3.0 + * devices. + */ +struct xhci_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dev_notification; + __le64 cmd_ring; + /* rsvd: offset 0x20-2F */ + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + /* rsvd: offset 0x3C-3FF */ + __le32 reserved4[241]; + /* port 1 registers, which serve as a base address for other ports */ + __le32 port_status_base; + __le32 port_power_base; + __le32 port_link_base; + __le32 reserved5; + /* registers for ports 2-255 */ + __le32 reserved6[NUM_PORT_REGS*254]; +}; + +/* USBCMD - USB command - command bitmasks */ +/* start/stop HC execution - do not write unless HC is halted*/ +#define CMD_RUN XHCI_CMD_RUN +/* Reset HC - resets internal HC state machine and all registers (except + * PCI config regs). HC does NOT drive a USB reset on the downstream ports. + * The xHCI driver must reinitialize the xHC after setting this bit. + */ +#define CMD_RESET (1 << 1) +/* Event Interrupt Enable - a '1' allows interrupts from the host controller */ +#define CMD_EIE XHCI_CMD_EIE +/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */ +#define CMD_HSEIE XHCI_CMD_HSEIE +/* bits 4:6 are reserved (and should be preserved on writes). */ +/* light reset (port status stays unchanged) - reset completed when this is 0 */ +#define CMD_LRESET (1 << 7) +/* host controller save/restore state. 
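+ * CMD_CSS requests a controller state save and CMD_CRS a restore;
+ * they are used around system suspend/resume.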
*/ +#define CMD_CSS (1 << 8) +#define CMD_CRS (1 << 9) +/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */ +#define CMD_EWE XHCI_CMD_EWE +/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root + * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off. + * '0' means the xHC can power it off if all ports are in the disconnect, + * disabled, or powered-off state. + */ +#define CMD_PM_INDEX (1 << 11) +/* bit 14 Extended TBC Enable, changes Isoc TRB fields to support larger TBC */ +#define CMD_ETE (1 << 14) +/* bits 15:31 are reserved (and should be preserved on writes). */ + +/* IMAN - Interrupt Management Register */ +#define IMAN_IE (1 << 1) +#define IMAN_IP (1 << 0) + +/* USBSTS - USB status - status bitmasks */ +/* HC not running - set to 1 when run/stop bit is cleared. */ +#define STS_HALT XHCI_STS_HALT +/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */ +#define STS_FATAL (1 << 2) +/* event interrupt - clear this prior to clearing any IP flags in IR set*/ +#define STS_EINT (1 << 3) +/* port change detect */ +#define STS_PORT (1 << 4) +/* bits 5:7 reserved and zeroed */ +/* save state status - '1' means xHC is saving state */ +#define STS_SAVE (1 << 8) +/* restore state status - '1' means xHC is restoring state */ +#define STS_RESTORE (1 << 9) +/* true: save or restore error */ +#define STS_SRE (1 << 10) +/* true: Controller Not Ready to accept doorbell or op reg writes after reset */ +#define STS_CNR XHCI_STS_CNR +/* true: internal Host Controller Error - SW needs to reset and reinitialize */ +#define STS_HCE (1 << 12) +/* bits 13:31 reserved and should be preserved */ + +/* + * DNCTRL - Device Notification Control Register - dev_notification bitmasks + * Generate a device notification event when the HC sees a transaction with a + * notification type that matches a bit set in this bit field. + */ +#define DEV_NOTE_MASK (0xffff) +#define ENABLE_DEV_NOTE(x) (1 << (x)) +/* Most of the device notification types should only be used for debug. + * SW does need to pay attention to function wake notifications. + */ +#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1) + +/* CRCR - Command Ring Control Register - cmd_ring bitmasks */ +/* bit 0 is the command ring cycle state */ +/* stop ring operation after completion of the currently executing command */ +#define CMD_RING_PAUSE (1 << 1) +/* stop ring immediately - abort the currently executing command */ +#define CMD_RING_ABORT (1 << 2) +/* true: command ring is running */ +#define CMD_RING_RUNNING (1 << 3) +/* bits 4:5 reserved and should be preserved */ +/* Command Ring pointer - bit mask for the lower 32 bits. 
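+ * Bits 5:0 of CRCR carry the cycle state and the pause/abort/running flags
+ * above plus two reserved bits, so the command ring must be at least 64-byte
+ * aligned and those low bits are masked off when the ring pointer is written.
+ * A hedged sketch of the usual update (variable names are illustrative):
+ *
+ *	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ *	val = (val & (u64)CMD_RING_RSVD_BITS) |
+ *	      (deq_dma & ~(u64)CMD_RING_RSVD_BITS) | ring->cycle_state;
+ *	xhci_write_64(xhci, val, &xhci->op_regs->cmd_ring);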
*/ +#define CMD_RING_RSVD_BITS (0x3f) + +/* CONFIG - Configure Register - config_reg bitmasks */ +/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ +#define MAX_DEVS(p) ((p) & 0xff) +/* bit 8: U3 Entry Enabled, assert PLC when root port enters U3, xhci 1.1 */ +#define CONFIG_U3E (1 << 8) +/* bit 9: Configuration Information Enable, xhci 1.1 */ +#define CONFIG_CIE (1 << 9) +/* bits 10:31 - reserved and should be preserved */ + +/* PORTSC - Port Status and Control Register - port_status_base bitmasks */ +/* true: device connected */ +#define PORT_CONNECT (1 << 0) +/* true: port enabled */ +#define PORT_PE (1 << 1) +/* bit 2 reserved and zeroed */ +/* true: port has an over-current condition */ +#define PORT_OC (1 << 3) +/* true: port reset signaling asserted */ +#define PORT_RESET (1 << 4) +/* Port Link State - bits 5:8 + * A read gives the current link PM state of the port, + * a write with Link State Write Strobe set sets the link state. + */ +#define PORT_PLS_MASK (0xf << 5) +#define XDEV_U0 (0x0 << 5) +#define XDEV_U1 (0x1 << 5) +#define XDEV_U2 (0x2 << 5) +#define XDEV_U3 (0x3 << 5) +#define XDEV_DISABLED (0x4 << 5) +#define XDEV_RXDETECT (0x5 << 5) +#define XDEV_INACTIVE (0x6 << 5) +#define XDEV_POLLING (0x7 << 5) +#define XDEV_RECOVERY (0x8 << 5) +#define XDEV_HOT_RESET (0x9 << 5) +#define XDEV_COMP_MODE (0xa << 5) +#define XDEV_TEST_MODE (0xb << 5) +#define XDEV_RESUME (0xf << 5) + +/* true: port has power (see HCC_PPC) */ +#define PORT_POWER (1 << 9) +/* bits 10:13 indicate device speed: + * 0 - undefined speed - port hasn't be initialized by a reset yet + * 1 - full speed + * 2 - low speed + * 3 - high speed + * 4 - super speed + * 5-15 reserved + */ +#define DEV_SPEED_MASK (0xf << 10) +#define XDEV_FS (0x1 << 10) +#define XDEV_LS (0x2 << 10) +#define XDEV_HS (0x3 << 10) +#define XDEV_SS (0x4 << 10) +#define XDEV_SSP (0x5 << 10) +#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10)) +#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS) +#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS) +#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS) +#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS) +#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP) +#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS) +#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f) + +/* Bits 20:23 in the Slot Context are the speed for the device */ +#define SLOT_SPEED_FS (XDEV_FS << 10) +#define SLOT_SPEED_LS (XDEV_LS << 10) +#define SLOT_SPEED_HS (XDEV_HS << 10) +#define SLOT_SPEED_SS (XDEV_SS << 10) +#define SLOT_SPEED_SSP (XDEV_SSP << 10) +/* Port Indicator Control */ +#define PORT_LED_OFF (0 << 14) +#define PORT_LED_AMBER (1 << 14) +#define PORT_LED_GREEN (2 << 14) +#define PORT_LED_MASK (3 << 14) +/* Port Link State Write Strobe - set this when changing link state */ +#define PORT_LINK_STROBE (1 << 16) +/* true: connect status change */ +#define PORT_CSC (1 << 17) +/* true: port enable change */ +#define PORT_PEC (1 << 18) +/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port + * into an enabled state, and the device into the default state. A "warm" reset + * also resets the link, forcing the device through the link training sequence. + * SW can also look at the Port Reset register to see when warm reset is done. 
+ */ +#define PORT_WRC (1 << 19) +/* true: over-current change */ +#define PORT_OCC (1 << 20) +/* true: reset change - 1 to 0 transition of PORT_RESET */ +#define PORT_RC (1 << 21) +/* port link status change - set on some port link state transitions (transition: reason): + * - U3 to Resume: Wakeup signaling from a device + * - Resume to Recovery to U0: USB 3.0 device resume + * - Resume to U0: USB 2.0 device resume + * - U3 to Recovery to U0: Software resume of USB 3.0 device complete + * - U3 to U0: Software resume of USB 2.0 device complete + * - U2 to U0: L1 resume of USB 2.1 device complete + * - U0 to U0 (???): L1 entry rejection by USB 2.1 device + * - U0 to disabled: L1 entry error with USB 2.1 device + * - Any state to inactive: Error on USB 3.0 port + */ +#define PORT_PLC (1 << 22) +/* port configure error change - port failed to configure its link partner */ +#define PORT_CEC (1 << 23) +#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ + PORT_RC | PORT_PLC | PORT_CEC) + + +/* Cold Attach Status - xHC can set this bit to report device attached during + * Sx state. Warm port reset should be performed to clear this bit and move the + * port to the connected state. + */ +#define PORT_CAS (1 << 24) +/* wake on connect (enable) */ +#define PORT_WKCONN_E (1 << 25) +/* wake on disconnect (enable) */ +#define PORT_WKDISC_E (1 << 26) +/* wake on over-current (enable) */ +#define PORT_WKOC_E (1 << 27) +/* bits 28:29 reserved */ +/* true: device is non-removable - for USB 3.0 roothub emulation */ +#define PORT_DEV_REMOVE (1 << 30) +/* Initiate a warm port reset - complete when PORT_WRC is '1' */ +#define PORT_WR (1 << 31) + +/* We mark duplicate entries with -1 */ +#define DUPLICATE_ENTRY ((u8)(-1)) + +/* Port Power Management Status and Control - port_power_base bitmasks */ +/* Inactivity timer value for transitions into U1, in microseconds. + * Timeout can be up to 127us. 0xFF means an infinite timeout. + */ +#define PORT_U1_TIMEOUT(p) ((p) & 0xff) +#define PORT_U1_TIMEOUT_MASK 0xff +/* Inactivity timer value for transitions into U2 */ +#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8) +#define PORT_U2_TIMEOUT_MASK (0xff << 8) +/* Bits 24:31 for port testing */ + +/* USB2 Protocol PORTSPMSC */ +#define PORT_L1S_MASK 7 +#define PORT_L1S_SUCCESS 1 +#define PORT_RWE (1 << 3) +#define PORT_HIRD(p) (((p) & 0xf) << 4) +#define PORT_HIRD_MASK (0xf << 4) +#define PORT_L1DS_MASK (0xff << 8) +#define PORT_L1DS(p) (((p) & 0xff) << 8) +#define PORT_HLE (1 << 16) +#define PORT_TEST_MODE_SHIFT 28 + +/* USB3 Protocol PORTLI Port Link Information */ +#define PORT_RX_LANES(p) (((p) >> 16) & 0xf) +#define PORT_TX_LANES(p) (((p) >> 20) & 0xf) + +/* USB2 Protocol PORTHLPMC */ +#define PORT_HIRDM(p)((p) & 3) +#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2) +#define PORT_BESLD(p)(((p) & 0xf) << 10) + +/* use 512 microseconds as USB2 LPM L1 default timeout. */ +#define XHCI_L1_TIMEOUT 512 + +/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency. + * Safe to use with mixed HIRD and BESL systems (host and device) and is used + * by other operating systems. + * + * XHCI 1.0 errata 8/14/12 Table 13 notes: + * "Software should choose xHC BESL/BESLD field values that do not violate a + * device's resume latency requirements, + * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached, + * or not program values < '4' if BLC = '0' and a BESL device is attached."
+ */ +#define XHCI_DEFAULT_BESL 4 + +/* + * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports + * to complete link training. Usually link training completes much faster, + * so check the status 10 times with a 36ms sleep in places where we need to + * wait for polling to complete. + */ +#define XHCI_PORT_POLLING_LFPS_TIME 36 + +/** + * struct xhci_intr_reg - Interrupt Register Set + * @irq_pending: IMAN - Interrupt Management Register. Used to enable + * interrupts and check for pending interrupts. + * @irq_control: IMOD - Interrupt Moderation Register. + * Used to throttle interrupts. + * @erst_size: Number of segments in the Event Ring Segment Table (ERST). + * @erst_base: ERST base address. + * @erst_dequeue: Event ring dequeue pointer. + * + * Each interrupter (defined by an MSI-X vector) has an event ring and an Event + * Ring Segment Table (ERST) associated with it. The event ring is comprised of + * multiple segments of the same size. The HC places events on the ring and + * "updates the Cycle bit in the TRBs to indicate to software the current + * position of the Enqueue Pointer." The HCD (Linux) processes those events and + * updates the dequeue pointer. + */ +struct xhci_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +/* irq_pending bitmasks */ +#define ER_IRQ_PENDING(p) ((p) & 0x1) +/* bits 2:31 need to be preserved */ +/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */ +#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe) +#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2) +#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2)) + +/* irq_control bitmasks */ +/* Minimum interval between interrupts (in 250ns intervals). The interval + * between interrupts will be longer if there are no events on the event ring. + * Default is 4000 (1 ms). + */ +#define ER_IRQ_INTERVAL_MASK (0xffff) +/* Counter used to count down the time to the next interrupt - HW use only */ +#define ER_IRQ_COUNTER_MASK (0xffff << 16) + +/* erst_size bitmasks */ +/* Preserve bits 16:31 of erst_size */ +#define ERST_SIZE_MASK (0xffff << 16) + +/* erst_dequeue bitmasks */ +/* Dequeue ERST Segment Index (DESI) - Segment number (or alias) + * where the current dequeue pointer lies. This is an optional HW hint. + */ +#define ERST_DESI_MASK (0x7) +/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by + * a work queue (or delayed service routine)? + */ +#define ERST_EHB (1 << 3) +#define ERST_PTR_MASK (0xf) + +/** + * struct xhci_run_regs + * @microframe_index: + * MFINDEX - current microframe number + * + * Section 5.5 Host Controller Runtime Registers: + * "Software should read and write these registers using only Dword (32 bit) + * or larger accesses" + */ +struct xhci_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct xhci_intr_reg ir_set[128]; +}; + +/** + * struct xhci_doorbell_array + * + * Bits 0 - 7: Endpoint target + * Bits 8 - 15: RsvdZ + * Bits 16 - 31: Stream ID + * + * Section 5.6 + */ +struct xhci_doorbell_array { + __le32 doorbell[256]; +}; + +#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16)) +#define DB_VALUE_HOST 0x00000000 + +/** + * struct xhci_protocol_caps + * @revision: major revision, minor revision, capability ID, + * and next capability pointer. + * @name_string: Four ASCII characters to say which spec this xHC + * follows, typically "USB ". + * @port_info: Port offset, count, and protocol-defined information.
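+ *
+ * As an illustration (example values, not taken from any specific controller):
+ * a USB 3.1 capability typically reports @name_string "USB " with
+ * XHCI_EXT_PORT_MAJOR(revision) == 0x03 and XHCI_EXT_PORT_MINOR(revision) ==
+ * 0x10, while XHCI_EXT_PORT_OFF(port_info) and XHCI_EXT_PORT_COUNT(port_info)
+ * (both defined below) give the first root hub port number and the number of
+ * consecutive ports that speak this protocol.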
+ */ +struct xhci_protocol_caps { + u32 revision; + u32 name_string; + u32 port_info; +}; + +#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) +#define XHCI_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff) +#define XHCI_EXT_PORT_PSIC(x) (((x) >> 28) & 0x0f) +#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff) +#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) + +#define XHCI_EXT_PORT_PSIV(x) (((x) >> 0) & 0x0f) +#define XHCI_EXT_PORT_PSIE(x) (((x) >> 4) & 0x03) +#define XHCI_EXT_PORT_PLT(x) (((x) >> 6) & 0x03) +#define XHCI_EXT_PORT_PFD(x) (((x) >> 8) & 0x01) +#define XHCI_EXT_PORT_LP(x) (((x) >> 14) & 0x03) +#define XHCI_EXT_PORT_PSIM(x) (((x) >> 16) & 0xffff) + +#define PLT_MASK (0x03 << 6) +#define PLT_SYM (0x00 << 6) +#define PLT_ASYM_RX (0x02 << 6) +#define PLT_ASYM_TX (0x03 << 6) + +/** + * struct xhci_container_ctx + * @type: Type of context. Used to calculated offsets to contained contexts. + * @size: Size of the context data + * @bytes: The raw context data given to HW + * @dma: dma address of the bytes + * + * Represents either a Device or Input context. Holds a pointer to the raw + * memory used for the context (bytes) and dma address of it (dma). + */ +struct xhci_container_ctx { + unsigned type; +#define XHCI_CTX_TYPE_DEVICE 0x1 +#define XHCI_CTX_TYPE_INPUT 0x2 + + int size; + + u8 *bytes; + dma_addr_t dma; +}; + +/** + * struct xhci_slot_ctx + * @dev_info: Route string, device speed, hub info, and last valid endpoint + * @dev_info2: Max exit latency for device number, root hub port number + * @tt_info: tt_info is used to construct split transaction tokens + * @dev_state: slot state and device address + * + * Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context + * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes + * reserved at the end of the slot context for HC internal use. + */ +struct xhci_slot_ctx { + __le32 dev_info; + __le32 dev_info2; + __le32 tt_info; + __le32 dev_state; + /* offset 0x10 to 0x1f reserved for HC internal use */ + __le32 reserved[4]; +}; + +/* dev_info bitmasks */ +/* Route String - 0:19 */ +#define ROUTE_STRING_MASK (0xfffff) +/* Device speed - values defined by PORTSC Device Speed field - 20:23 */ +#define DEV_SPEED (0xf << 20) +#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20) +/* bit 24 reserved */ +/* Is this LS/FS device connected through a HS hub? - bit 25 */ +#define DEV_MTT (0x1 << 25) +/* Set if the device is a hub - bit 26 */ +#define DEV_HUB (0x1 << 26) +/* Index of the last valid endpoint context in this device context - 27:31 */ +#define LAST_CTX_MASK (0x1f << 27) +#define LAST_CTX(p) ((p) << 27) +#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1) +#define SLOT_FLAG (1 << 0) +#define EP0_FLAG (1 << 1) + +/* dev_info2 bitmasks */ +/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */ +#define MAX_EXIT (0xffff) +/* Root hub port number that is needed to access the USB device */ +#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16) +#define DEVINFO_TO_ROOT_HUB_PORT(p) (((p) >> 16) & 0xff) +/* Maximum number of ports under a hub device */ +#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24) +#define DEVINFO_TO_MAX_PORTS(p) (((p) & (0xff << 24)) >> 24) + +/* tt_info bitmasks */ +/* + * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub + * The Slot ID of the hub that isolates the high speed signaling from + * this low or full-speed device. '0' if attached to root hub port. 
+ */ +#define TT_SLOT (0xff) +/* + * The number of the downstream facing port of the high-speed hub + * '0' if the device is not low or full speed. + */ +#define TT_PORT (0xff << 8) +#define TT_THINK_TIME(p) (((p) & 0x3) << 16) +#define GET_TT_THINK_TIME(p) (((p) & (0x3 << 16)) >> 16) + +/* dev_state bitmasks */ +/* USB device address - assigned by the HC */ +#define DEV_ADDR_MASK (0xff) +/* bits 8:26 reserved */ +/* Slot state */ +#define SLOT_STATE (0x1f << 27) +#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27) + +#define SLOT_STATE_DISABLED 0 +#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED +#define SLOT_STATE_DEFAULT 1 +#define SLOT_STATE_ADDRESSED 2 +#define SLOT_STATE_CONFIGURED 3 + +/** + * struct xhci_ep_ctx + * @ep_info: endpoint state, streams, mult, and interval information. + * @ep_info2: information on endpoint type, max packet size, max burst size, + * error count, and whether the HC will force an event for all + * transactions. + * @deq: 64-bit ring dequeue pointer address. If the endpoint only + * defines one stream, this points to the endpoint transfer ring. + * Otherwise, it points to a stream context array, which has a + * ring pointer for each flow. + * @tx_info: + * Average TRB lengths for the endpoint ring and + * max payload within an Endpoint Service Interval Time (ESIT). + * + * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context + * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes + * reserved at the end of the endpoint context for HC internal use. + */ +struct xhci_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + /* offset 0x14 - 0x1f reserved for HC internal use */ + __le32 reserved[3]; +}; + +/* ep_info bitmasks */ +/* + * Endpoint State - bits 0:2 + * 0 - disabled + * 1 - running + * 2 - halted due to halt condition - ok to manipulate endpoint ring + * 3 - stopped + * 4 - TRB error + * 5-7 - reserved + */ +#define EP_STATE_MASK (0x7) +#define EP_STATE_DISABLED 0 +#define EP_STATE_RUNNING 1 +#define EP_STATE_HALTED 2 +#define EP_STATE_STOPPED 3 +#define EP_STATE_ERROR 4 +#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK) + +/* Mult - Max number of burtst within an interval, in EP companion desc. */ +#define EP_MULT(p) (((p) & 0x3) << 8) +#define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3) +/* bits 10:14 are Max Primary Streams */ +/* bit 15 is Linear Stream Array */ +/* Interval - period between requests to an endpoint - 125u increments. */ +#define EP_INTERVAL(p) (((p) & 0xff) << 16) +#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) +#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) +#define EP_MAXPSTREAMS_MASK (0x1f << 10) +#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) +#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10) +/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ +#define EP_HAS_LSA (1 << 15) +/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ +#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) >> 24) & 0xff) + +/* ep_info2 bitmasks */ +/* + * Force Event - generate transfer events for all TRBs for this endpoint + * This will tell the HC to ignore the IOC and ISP flags (for debugging only). 
+ */ +#define FORCE_EVENT (0x1) +#define ERROR_COUNT(p) (((p) & 0x3) << 1) +#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7) +#define EP_TYPE(p) ((p) << 3) +#define ISOC_OUT_EP 1 +#define BULK_OUT_EP 2 +#define INT_OUT_EP 3 +#define CTRL_EP 4 +#define ISOC_IN_EP 5 +#define BULK_IN_EP 6 +#define INT_IN_EP 7 +/* bit 6 reserved */ +/* bit 7 is Host Initiate Disable - for disabling stream selection */ +#define MAX_BURST(p) (((p)&0xff) << 8) +#define CTX_TO_MAX_BURST(p) (((p) >> 8) & 0xff) +#define MAX_PACKET(p) (((p)&0xffff) << 16) +#define MAX_PACKET_MASK (0xffff << 16) +#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) + +/* tx_info bitmasks */ +#define EP_AVG_TRB_LENGTH(p) ((p) & 0xffff) +#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) & 0xffff) << 16) +#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) >> 16) & 0xff) << 24) +#define CTX_TO_MAX_ESIT_PAYLOAD(p) (((p) >> 16) & 0xffff) + +/* deq bitmasks */ +#define EP_CTX_CYCLE_MASK (1 << 0) +#define SCTX_DEQ_MASK (~0xfL) + + +/** + * struct xhci_input_control_context + * Input control context; see section 6.2.5. + * + * @drop_context: set the bit of the endpoint context you want to disable + * @add_context: set the bit of the endpoint context you want to enable + */ +struct xhci_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +#define EP_IS_ADDED(ctrl_ctx, i) \ + (le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))) +#define EP_IS_DROPPED(ctrl_ctx, i) \ + (le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) + +/* Represents everything that is needed to issue a command on the command ring. + * It's useful to pre-allocate these for commands that cannot fail due to + * out-of-memory errors, like freeing streams. + */ +struct xhci_command { + /* Input context for changing device state */ + struct xhci_container_ctx *in_ctx; + u32 status; + int slot_id; + /* If completion is null, no one is waiting on this command + * and the structure can be freed after the command completes. 
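+	 * A typical usage sketch: a caller that must wait allocates the
+	 * command with xhci_alloc_command(xhci, true, GFP_KERNEL) (declared
+	 * later in this header) and sleeps on this completion until the
+	 * command-completion event arrives; callers that pass false treat the
+	 * command as fire-and-forget.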
+ */ + struct completion *completion; + union xhci_trb *command_trb; + struct list_head cmd_list; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); +}; + +/* drop context bitmasks */ +#define DROP_EP(x) (0x1 << x) +/* add context bitmasks */ +#define ADD_EP(x) (0x1 << x) + +struct xhci_stream_ctx { + /* 64-bit stream ring address, cycle state, and stream type */ + __le64 stream_ring; + /* offset 0x14 - 0x1f reserved for HC internal use */ + __le32 reserved[2]; +}; + +/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */ +#define SCT_FOR_CTX(p) (((p) & 0x7) << 1) +/* Secondary stream array type, dequeue pointer is to a transfer ring */ +#define SCT_SEC_TR 0 +/* Primary stream array type, dequeue pointer is to a transfer ring */ +#define SCT_PRI_TR 1 +/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */ +#define SCT_SSA_8 2 +#define SCT_SSA_16 3 +#define SCT_SSA_32 4 +#define SCT_SSA_64 5 +#define SCT_SSA_128 6 +#define SCT_SSA_256 7 + +/* Assume no secondary streams for now */ +struct xhci_stream_info { + struct xhci_ring **stream_rings; + /* Number of streams, including stream 0 (which drivers can't use) */ + unsigned int num_streams; + /* The stream context array may be bigger than + * the number of streams the driver asked for + */ + struct xhci_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + /* For mapping physical TRB addresses to segments in stream rings */ + struct radix_tree_root trb_address_map; + struct xhci_command *free_streams_command; +}; + +#define SMALL_STREAM_ARRAY_SIZE 256 +#define MEDIUM_STREAM_ARRAY_SIZE 1024 + +/* Some Intel xHCI host controllers need software to keep track of the bus + * bandwidth. Keep track of endpoint info here. Each root port is allocated + * the full bus bandwidth. We must also treat TTs (including each port under a + * multi-TT hub) as a separate bandwidth domain. The direct memory interface + * (DMI) also limits the total bandwidth (across all domains) that can be used. + */ +struct xhci_bw_info { + /* ep_interval is zero-based */ + unsigned int ep_interval; + /* mult and num_packets are one-based */ + unsigned int mult; + unsigned int num_packets; + unsigned int max_packet_size; + unsigned int max_esit_payload; + unsigned int type; +}; + +/* "Block" sizes in bytes the hardware uses for different device speeds. + * The logic in this part of the hardware limits the number of bits the hardware + * can use, so must represent bandwidth in a less precise manner to mimic what + * the scheduler hardware computes. + */ +#define FS_BLOCK 1 +#define HS_BLOCK 4 +#define SS_BLOCK 16 +#define DMI_BLOCK 32 + +/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated + * with each byte transferred. SuperSpeed devices have an initial overhead to + * set up bursts. These are in blocks, see above. LS overhead has already been + * translated into FS blocks. + */ +#define DMI_OVERHEAD 8 +#define DMI_OVERHEAD_BURST 4 +#define SS_OVERHEAD 8 +#define SS_OVERHEAD_BURST 32 +#define HS_OVERHEAD 26 +#define FS_OVERHEAD 20 +#define LS_OVERHEAD 128 +/* The TTs need to claim roughly twice as much bandwidth (94 bytes per + * microframe ~= 24Mbps) of the HS bus as the devices can actually use because + * of overhead associated with split transfers crossing microframe boundaries. + * 31 blocks is pure protocol overhead. 
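+ * (Reading the 94 above as HS blocks rather than bytes makes the arithmetic
+ * line up: 94 blocks * HS_BLOCK (4 bytes) = 376 bytes per 125us microframe,
+ * and 376 * 8 bits * 8000 microframes/s is roughly 24 Mbps.)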
+ */ +#define TT_HS_OVERHEAD (31 + 94) +#define TT_DMI_OVERHEAD (25 + 12) + +/* Bandwidth limits in blocks */ +#define FS_BW_LIMIT 1285 +#define TT_BW_LIMIT 1320 +#define HS_BW_LIMIT 1607 +#define SS_BW_LIMIT_IN 3906 +#define DMI_BW_LIMIT_IN 3906 +#define SS_BW_LIMIT_OUT 3906 +#define DMI_BW_LIMIT_OUT 3906 + +/* Percentage of bus bandwidth reserved for non-periodic transfers */ +#define FS_BW_RESERVED 10 +#define HS_BW_RESERVED 20 +#define SS_BW_RESERVED 10 + +struct xhci_virt_ep { + struct xhci_virt_device *vdev; /* parent */ + unsigned int ep_index; + struct xhci_ring *ring; + /* Related to endpoints that are configured to use stream IDs only */ + struct xhci_stream_info *stream_info; + /* Temporary storage in case the configure endpoint command fails and we + * have to restore the device state to the previous state + */ + struct xhci_ring *new_ring; + unsigned int ep_state; +#define SET_DEQ_PENDING (1 << 0) +#define EP_HALTED (1 << 1) /* For stall handling */ +#define EP_STOP_CMD_PENDING (1 << 2) /* For URB cancellation */ +/* Transitioning the endpoint to using streams, don't enqueue URBs */ +#define EP_GETTING_STREAMS (1 << 3) +#define EP_HAS_STREAMS (1 << 4) +/* Transitioning the endpoint to not using streams, don't enqueue URBs */ +#define EP_GETTING_NO_STREAMS (1 << 5) +#define EP_HARD_CLEAR_TOGGLE (1 << 6) +#define EP_SOFT_CLEAR_TOGGLE (1 << 7) +/* usb_hub_clear_tt_buffer is in progress */ +#define EP_CLEARING_TT (1 << 8) + /* ---- Related to URB cancellation ---- */ + struct list_head cancelled_td_list; + /* Watchdog timer for stop endpoint command to cancel URBs */ + struct timer_list stop_cmd_timer; + struct xhci_hcd *xhci; + /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue + * command. We'll need to update the ring's dequeue segment and dequeue + * pointer after the command completes. + */ + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; + /* + * Sometimes the xHC can not process isochronous endpoint ring quickly + * enough, and it will miss some isoc tds on the ring and generate + * a Missed Service Error Event. + * Set skip flag when receive a Missed Service Error Event and + * process the missed tds on the endpoint ring. + */ + bool skip; + /* Bandwidth checking storage */ + struct xhci_bw_info bw_info; + struct list_head bw_endpoint_list; + /* Isoch Frame ID checking storage */ + int next_frame_id; + /* Use new Isoch TRB layout needed for extended TBC support */ + bool use_extended_tbc; +}; + +enum xhci_overhead_type { + LS_OVERHEAD_TYPE = 0, + FS_OVERHEAD_TYPE, + HS_OVERHEAD_TYPE, +}; + +struct xhci_interval_bw { + unsigned int num_packets; + /* Sorted by max packet size. + * Head of the list is the greatest max packet size. + */ + struct list_head endpoints; + /* How many endpoints of each speed are present. */ + unsigned int overhead[3]; +}; + +#define XHCI_MAX_INTERVAL 16 + +struct xhci_interval_bw_table { + unsigned int interval0_esit_payload; + struct xhci_interval_bw interval_bw[XHCI_MAX_INTERVAL]; + /* Includes reserved bandwidth for async endpoints */ + unsigned int bw_used; + unsigned int ss_bw_in; + unsigned int ss_bw_out; +}; + +#define EP_CTX_PER_DEV 31 + +struct xhci_virt_device { + int slot_id; + struct usb_device *udev; + /* + * Commands to the hardware are passed an "input context" that + * tells the hardware what to change in its data structures. + * The hardware will return changes in an "output context" that + * software must allocate for the hardware. 
We need to keep + * track of input and output contexts separately because + * these commands might fail and we don't trust the hardware. + */ + struct xhci_container_ctx *out_ctx; + /* Used for addressing devices and configuration changes */ + struct xhci_container_ctx *in_ctx; + struct xhci_virt_ep eps[EP_CTX_PER_DEV]; + u8 fake_port; + u8 real_port; + struct xhci_interval_bw_table *bw_table; + struct xhci_tt_bw_info *tt_info; + /* + * flags for state tracking based on events and issued commands. + * Software can not rely on states from output contexts because of + * latency between events and xHC updating output context values. + * See xhci 1.1 section 4.8.3 for more details + */ + unsigned long flags; +#define VDEV_PORT_ERROR BIT(0) /* Port error, link inactive */ + + /* The current max exit latency for the enabled USB3 link states. */ + u16 current_mel; + /* Used for the debugfs interfaces. */ + void *debugfs_private; +}; + +/* + * For each roothub, keep track of the bandwidth information for each periodic + * interval. + * + * If a high speed hub is attached to the roothub, each TT associated with that + * hub is a separate bandwidth domain. The interval information for the + * endpoints on the devices under that TT will appear in the TT structure. + */ +struct xhci_root_port_bw_info { + struct list_head tts; + unsigned int num_active_tts; + struct xhci_interval_bw_table bw_table; +}; + +struct xhci_tt_bw_info { + struct list_head tt_list; + int slot_id; + int ttport; + struct xhci_interval_bw_table bw_table; + int active_eps; +}; + + +/** + * struct xhci_device_context_array + * @dev_context_ptr array of 64-bit DMA addresses for device contexts + */ +struct xhci_device_context_array { + /* 64-bit device addresses; we only write 32-bit addresses */ + __le64 dev_context_ptrs[MAX_HC_SLOTS]; + /* private xHCD pointers */ + dma_addr_t dma; +}; +/* TODO: write function to set the 64-bit device DMA address */ +/* + * TODO: change this to be dynamically sized at HC mem init time since the HC + * might not be able to handle the maximum number of devices possible. 
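+ * (For reference: the slot count the hardware actually supports is
+ * HCS_MAX_SLOTS(hcs_params1), and the driver typically enables only that many
+ * through the NumSlotsEn field of the CONFIG register (see MAX_DEVS() above);
+ * dev_context_ptrs[] is simply sized for the architectural maximum,
+ * MAX_HC_SLOTS.)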
+ */ + + +struct xhci_transfer_event { + /* 64-bit buffer address, or immediate data */ + __le64 buffer; + __le32 transfer_len; + /* This field is interpreted differently based on the type of TRB */ + __le32 flags; +}; + +/* Transfer event TRB length bit mask */ +/* bits 0:23 */ +#define EVENT_TRB_LEN(p) ((p) & 0xffffff) + +/** Transfer Event bit fields **/ +#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) + +/* Completion Code - only applicable for some types of TRBs */ +#define COMP_CODE_MASK (0xff << 24) +#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24) +#define COMP_INVALID 0 +#define COMP_SUCCESS 1 +#define COMP_DATA_BUFFER_ERROR 2 +#define COMP_BABBLE_DETECTED_ERROR 3 +#define COMP_USB_TRANSACTION_ERROR 4 +#define COMP_TRB_ERROR 5 +#define COMP_STALL_ERROR 6 +#define COMP_RESOURCE_ERROR 7 +#define COMP_BANDWIDTH_ERROR 8 +#define COMP_NO_SLOTS_AVAILABLE_ERROR 9 +#define COMP_INVALID_STREAM_TYPE_ERROR 10 +#define COMP_SLOT_NOT_ENABLED_ERROR 11 +#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12 +#define COMP_SHORT_PACKET 13 +#define COMP_RING_UNDERRUN 14 +#define COMP_RING_OVERRUN 15 +#define COMP_VF_EVENT_RING_FULL_ERROR 16 +#define COMP_PARAMETER_ERROR 17 +#define COMP_BANDWIDTH_OVERRUN_ERROR 18 +#define COMP_CONTEXT_STATE_ERROR 19 +#define COMP_NO_PING_RESPONSE_ERROR 20 +#define COMP_EVENT_RING_FULL_ERROR 21 +#define COMP_INCOMPATIBLE_DEVICE_ERROR 22 +#define COMP_MISSED_SERVICE_ERROR 23 +#define COMP_COMMAND_RING_STOPPED 24 +#define COMP_COMMAND_ABORTED 25 +#define COMP_STOPPED 26 +#define COMP_STOPPED_LENGTH_INVALID 27 +#define COMP_STOPPED_SHORT_PACKET 28 +#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29 +#define COMP_ISOCH_BUFFER_OVERRUN 31 +#define COMP_EVENT_LOST_ERROR 32 +#define COMP_UNDEFINED_ERROR 33 +#define COMP_INVALID_STREAM_ID_ERROR 34 +#define COMP_SECONDARY_BANDWIDTH_ERROR 35 +#define COMP_SPLIT_TRANSACTION_ERROR 36 + +static inline const char *xhci_trb_comp_code_string(u8 status) +{ + switch (status) { + case COMP_INVALID: + return "Invalid"; + case COMP_SUCCESS: + return "Success"; + case COMP_DATA_BUFFER_ERROR: + return "Data Buffer Error"; + case COMP_BABBLE_DETECTED_ERROR: + return "Babble Detected"; + case COMP_USB_TRANSACTION_ERROR: + return "USB Transaction Error"; + case COMP_TRB_ERROR: + return "TRB Error"; + case COMP_STALL_ERROR: + return "Stall Error"; + case COMP_RESOURCE_ERROR: + return "Resource Error"; + case COMP_BANDWIDTH_ERROR: + return "Bandwidth Error"; + case COMP_NO_SLOTS_AVAILABLE_ERROR: + return "No Slots Available Error"; + case COMP_INVALID_STREAM_TYPE_ERROR: + return "Invalid Stream Type Error"; + case COMP_SLOT_NOT_ENABLED_ERROR: + return "Slot Not Enabled Error"; + case COMP_ENDPOINT_NOT_ENABLED_ERROR: + return "Endpoint Not Enabled Error"; + case COMP_SHORT_PACKET: + return "Short Packet"; + case COMP_RING_UNDERRUN: + return "Ring Underrun"; + case COMP_RING_OVERRUN: + return "Ring Overrun"; + case COMP_VF_EVENT_RING_FULL_ERROR: + return "VF Event Ring Full Error"; + case COMP_PARAMETER_ERROR: + return "Parameter Error"; + case COMP_BANDWIDTH_OVERRUN_ERROR: + return "Bandwidth Overrun Error"; + case COMP_CONTEXT_STATE_ERROR: + return "Context State Error"; + case COMP_NO_PING_RESPONSE_ERROR: + return "No Ping Response Error"; + case COMP_EVENT_RING_FULL_ERROR: + return "Event Ring Full Error"; + case COMP_INCOMPATIBLE_DEVICE_ERROR: + return "Incompatible Device Error"; + case COMP_MISSED_SERVICE_ERROR: + return "Missed Service Error"; + case COMP_COMMAND_RING_STOPPED: + return "Command Ring Stopped"; + case COMP_COMMAND_ABORTED: + return 
"Command Aborted"; + case COMP_STOPPED: + return "Stopped"; + case COMP_STOPPED_LENGTH_INVALID: + return "Stopped - Length Invalid"; + case COMP_STOPPED_SHORT_PACKET: + return "Stopped - Short Packet"; + case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: + return "Max Exit Latency Too Large Error"; + case COMP_ISOCH_BUFFER_OVERRUN: + return "Isoch Buffer Overrun"; + case COMP_EVENT_LOST_ERROR: + return "Event Lost Error"; + case COMP_UNDEFINED_ERROR: + return "Undefined Error"; + case COMP_INVALID_STREAM_ID_ERROR: + return "Invalid Stream ID Error"; + case COMP_SECONDARY_BANDWIDTH_ERROR: + return "Secondary Bandwidth Error"; + case COMP_SPLIT_TRANSACTION_ERROR: + return "Split Transaction Error"; + default: + return "Unknown!!"; + } +} + +struct xhci_link_trb { + /* 64-bit segment pointer*/ + __le64 segment_ptr; + __le32 intr_target; + __le32 control; +}; + +/* control bitfields */ +#define LINK_TOGGLE (0x1<<1) + +/* Command completion event TRB */ +struct xhci_event_cmd { + /* Pointer to command TRB, or the value passed by the event data trb */ + __le64 cmd_trb; + __le32 status; + __le32 flags; +}; + +/* flags bitmasks */ + +/* Address device - disable SetAddress */ +#define TRB_BSR (1<<9) + +/* Configure Endpoint - Deconfigure */ +#define TRB_DC (1<<9) + +/* Stop Ring - Transfer State Preserve */ +#define TRB_TSP (1<<9) + +enum xhci_ep_reset_type { + EP_HARD_RESET, + EP_SOFT_RESET, +}; + +/* Force Event */ +#define TRB_TO_VF_INTR_TARGET(p) (((p) & (0x3ff << 22)) >> 22) +#define TRB_TO_VF_ID(p) (((p) & (0xff << 16)) >> 16) + +/* Set Latency Tolerance Value */ +#define TRB_TO_BELT(p) (((p) & (0xfff << 16)) >> 16) + +/* Get Port Bandwidth */ +#define TRB_TO_DEV_SPEED(p) (((p) & (0xf << 16)) >> 16) + +/* Force Header */ +#define TRB_TO_PACKET_TYPE(p) ((p) & 0x1f) +#define TRB_TO_ROOTHUB_PORT(p) (((p) & (0xff << 24)) >> 24) + +enum xhci_setup_dev { + SETUP_CONTEXT_ONLY, + SETUP_CONTEXT_ADDRESS, +}; + +/* bits 16:23 are the virtual function ID */ +/* bits 24:31 are the slot ID */ +#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24) +#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24) + +/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */ +#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1) +#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16) + +#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23) +#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23) +#define LAST_EP_INDEX 30 + +/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */ +#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16)) +#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16) +#define SCT_FOR_TRB(p) (((p) << 1) & 0x7) + +/* Link TRB specific fields */ +#define TRB_TC (1<<1) + +/* Port Status Change Event TRB fields */ +/* Port ID - bits 31:24 */ +#define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24) + +#define EVENT_DATA (1 << 2) + +/* Normal TRB fields */ +/* transfer_len bitmasks - bits 0:16 */ +#define TRB_LEN(p) ((p) & 0x1ffff) +/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */ +#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17) +#define GET_TD_SIZE(p) (((p) & 0x3e0000) >> 17) +/* xhci 1.1 uses the TD_SIZE field for TBC if Extended TBC is enabled (ETE) */ +#define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17) +/* Interrupter Target - which MSI-X vector to target the completion event at */ +#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) +#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) +/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */ +#define 
TRB_TBC(p) (((p) & 0x3) << 7) +#define TRB_TLBPC(p) (((p) & 0xf) << 16) + +/* Cycle bit - indicates TRB ownership by HC or HCD */ +#define TRB_CYCLE (1<<0) +/* + * Force next event data TRB to be evaluated before task switch. + * Used to pass OS data back after a TD completes. + */ +#define TRB_ENT (1<<1) +/* Interrupt on short packet */ +#define TRB_ISP (1<<2) +/* Set PCIe no snoop attribute */ +#define TRB_NO_SNOOP (1<<3) +/* Chain multiple TRBs into a TD */ +#define TRB_CHAIN (1<<4) +/* Interrupt on completion */ +#define TRB_IOC (1<<5) +/* The buffer pointer contains immediate data */ +#define TRB_IDT (1<<6) +/* TDs smaller than this might use IDT */ +#define TRB_IDT_MAX_SIZE 8 + +/* Block Event Interrupt */ +#define TRB_BEI (1<<9) + +/* Control transfer TRB specific fields */ +#define TRB_DIR_IN (1<<16) +#define TRB_TX_TYPE(p) ((p) << 16) +#define TRB_DATA_OUT 2 +#define TRB_DATA_IN 3 + +/* Isochronous TRB specific fields */ +#define TRB_SIA (1<<31) +#define TRB_FRAME_ID(p) (((p) & 0x7ff) << 20) + +struct xhci_generic_trb { + __le32 field[4]; +}; + +union xhci_trb { + struct xhci_link_trb link; + struct xhci_transfer_event trans_event; + struct xhci_event_cmd event_cmd; + struct xhci_generic_trb generic; +}; + +/* TRB bit mask */ +#define TRB_TYPE_BITMASK (0xfc00) +#define TRB_TYPE(p) ((p) << 10) +#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10) +/* TRB type IDs */ +/* bulk, interrupt, isoc scatter/gather, and control data stage */ +#define TRB_NORMAL 1 +/* setup stage for control transfers */ +#define TRB_SETUP 2 +/* data stage for control transfers */ +#define TRB_DATA 3 +/* status stage for control transfers */ +#define TRB_STATUS 4 +/* isoc transfers */ +#define TRB_ISOC 5 +/* TRB for linking ring segments */ +#define TRB_LINK 6 +#define TRB_EVENT_DATA 7 +/* Transfer Ring No-op (not for the command ring) */ +#define TRB_TR_NOOP 8 +/* Command TRBs */ +/* Enable Slot Command */ +#define TRB_ENABLE_SLOT 9 +/* Disable Slot Command */ +#define TRB_DISABLE_SLOT 10 +/* Address Device Command */ +#define TRB_ADDR_DEV 11 +/* Configure Endpoint Command */ +#define TRB_CONFIG_EP 12 +/* Evaluate Context Command */ +#define TRB_EVAL_CONTEXT 13 +/* Reset Endpoint Command */ +#define TRB_RESET_EP 14 +/* Stop Transfer Ring Command */ +#define TRB_STOP_RING 15 +/* Set Transfer Ring Dequeue Pointer Command */ +#define TRB_SET_DEQ 16 +/* Reset Device Command */ +#define TRB_RESET_DEV 17 +/* Force Event Command (opt) */ +#define TRB_FORCE_EVENT 18 +/* Negotiate Bandwidth Command (opt) */ +#define TRB_NEG_BANDWIDTH 19 +/* Set Latency Tolerance Value Command (opt) */ +#define TRB_SET_LT 20 +/* Get port bandwidth Command */ +#define TRB_GET_BW 21 +/* Force Header Command - generate a transaction or link management packet */ +#define TRB_FORCE_HEADER 22 +/* No-op Command - not for transfer rings */ +#define TRB_CMD_NOOP 23 +/* TRB IDs 24-31 reserved */ +/* Event TRBS */ +/* Transfer Event */ +#define TRB_TRANSFER 32 +/* Command Completion Event */ +#define TRB_COMPLETION 33 +/* Port Status Change Event */ +#define TRB_PORT_STATUS 34 +/* Bandwidth Request Event (opt) */ +#define TRB_BANDWIDTH_EVENT 35 +/* Doorbell Event (opt) */ +#define TRB_DOORBELL 36 +/* Host Controller Event */ +#define TRB_HC_EVENT 37 +/* Device Notification Event - device sent function wake notification */ +#define TRB_DEV_NOTE 38 +/* MFINDEX Wrap Event - microframe counter wrapped */ +#define TRB_MFINDEX_WRAP 39 +/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */ +#define TRB_VENDOR_DEFINED_LOW 48 +/* Nec 
vendor-specific command completion event. */ +#define TRB_NEC_CMD_COMP 48 +/* Get NEC firmware revision. */ +#define TRB_NEC_GET_FW 49 + +static inline const char *xhci_trb_type_string(u8 type) +{ + switch (type) { + case TRB_NORMAL: + return "Normal"; + case TRB_SETUP: + return "Setup Stage"; + case TRB_DATA: + return "Data Stage"; + case TRB_STATUS: + return "Status Stage"; + case TRB_ISOC: + return "Isoch"; + case TRB_LINK: + return "Link"; + case TRB_EVENT_DATA: + return "Event Data"; + case TRB_TR_NOOP: + return "No-Op"; + case TRB_ENABLE_SLOT: + return "Enable Slot Command"; + case TRB_DISABLE_SLOT: + return "Disable Slot Command"; + case TRB_ADDR_DEV: + return "Address Device Command"; + case TRB_CONFIG_EP: + return "Configure Endpoint Command"; + case TRB_EVAL_CONTEXT: + return "Evaluate Context Command"; + case TRB_RESET_EP: + return "Reset Endpoint Command"; + case TRB_STOP_RING: + return "Stop Ring Command"; + case TRB_SET_DEQ: + return "Set TR Dequeue Pointer Command"; + case TRB_RESET_DEV: + return "Reset Device Command"; + case TRB_FORCE_EVENT: + return "Force Event Command"; + case TRB_NEG_BANDWIDTH: + return "Negotiate Bandwidth Command"; + case TRB_SET_LT: + return "Set Latency Tolerance Value Command"; + case TRB_GET_BW: + return "Get Port Bandwidth Command"; + case TRB_FORCE_HEADER: + return "Force Header Command"; + case TRB_CMD_NOOP: + return "No-Op Command"; + case TRB_TRANSFER: + return "Transfer Event"; + case TRB_COMPLETION: + return "Command Completion Event"; + case TRB_PORT_STATUS: + return "Port Status Change Event"; + case TRB_BANDWIDTH_EVENT: + return "Bandwidth Request Event"; + case TRB_DOORBELL: + return "Doorbell Event"; + case TRB_HC_EVENT: + return "Host Controller Event"; + case TRB_DEV_NOTE: + return "Device Notification Event"; + case TRB_MFINDEX_WRAP: + return "MFINDEX Wrap Event"; + case TRB_NEC_CMD_COMP: + return "NEC Command Completion Event"; + case TRB_NEC_GET_FW: + return "NET Get Firmware Revision Command"; + default: + return "UNKNOWN"; + } +} + +#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) +/* Above, but for __le32 types -- can avoid work by swapping constants: */ +#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_LINK))) +#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_TR_NOOP))) + +#define NEC_FW_MINOR(p) (((p) >> 0) & 0xff) +#define NEC_FW_MAJOR(p) (((p) >> 8) & 0xff) + +/* + * TRBS_PER_SEGMENT must be a multiple of 4, + * since the command ring is 64-byte aligned. + * It must also be greater than 16. + */ +#define TRBS_PER_SEGMENT 256 +/* Allow two commands + a link TRB, along with any reserved command TRBs */ +#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) +#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) +#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE)) +/* TRB buffer pointers can't cross 64KB boundaries */ +#define TRB_MAX_BUFF_SHIFT 16 +#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT) +/* How much data is left before the 64KB boundary? 
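+ * For example, a buffer fragment starting at DMA address 0x12345F00 sits
+ * 0x5F00 bytes into its 64KB region, so at most 0x10000 - 0x5F00 = 0xA100
+ * bytes of it fit in one TRB before a new TRB must be started.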
*/ +#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \ + (addr & (TRB_MAX_BUFF_SIZE - 1))) +#define MAX_SOFT_RETRY 3 + +struct xhci_segment { + union xhci_trb *trbs; + /* private to HCD */ + struct xhci_segment *next; + dma_addr_t dma; + /* Max packet sized bounce buffer for td-fragmant alignment */ + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; + + ANDROID_KABI_RESERVE(1); +}; + +enum xhci_cancelled_td_status { + TD_DIRTY = 0, + TD_HALTED, + TD_CLEARING_CACHE, + TD_CLEARED, +}; + +struct xhci_td { + struct list_head td_list; + struct list_head cancelled_td_list; + int status; + enum xhci_cancelled_td_status cancel_status; + struct urb *urb; + struct xhci_segment *start_seg; + union xhci_trb *first_trb; + union xhci_trb *last_trb; + struct xhci_segment *last_trb_seg; + struct xhci_segment *bounce_seg; + /* actual_length of the URB has already been set */ + bool urb_length_set; + unsigned int num_trbs; +}; + +/* xHCI command default timeout value */ +#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ) + +/* command descriptor */ +struct xhci_cd { + struct xhci_command *command; + union xhci_trb *cmd_trb; +}; + +enum xhci_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC, + TYPE_BULK, + TYPE_INTR, + TYPE_STREAM, + TYPE_COMMAND, + TYPE_EVENT, +}; + +static inline const char *xhci_ring_type_string(enum xhci_ring_type type) +{ + switch (type) { + case TYPE_CTRL: + return "CTRL"; + case TYPE_ISOC: + return "ISOC"; + case TYPE_BULK: + return "BULK"; + case TYPE_INTR: + return "INTR"; + case TYPE_STREAM: + return "STREAM"; + case TYPE_COMMAND: + return "CMD"; + case TYPE_EVENT: + return "EVENT"; + } + + return "UNKNOWN"; +} + +struct xhci_ring { + struct xhci_segment *first_seg; + struct xhci_segment *last_seg; + union xhci_trb *enqueue; + struct xhci_segment *enq_seg; + union xhci_trb *dequeue; + struct xhci_segment *deq_seg; + struct list_head td_list; + /* + * Write the cycle state into the TRB cycle field to give ownership of + * the TRB to the host controller (if we are the producer), or to check + * if we own the TRB (if we are the consumer). See section 4.9.1. + */ + u32 cycle_state; + unsigned int err_count; + unsigned int stream_id; + unsigned int num_segs; + unsigned int num_trbs_free; + unsigned int num_trbs_free_temp; + unsigned int bounce_buf_len; + enum xhci_ring_type type; + bool last_td_was_short; + struct radix_tree_root *trb_address_map; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); +}; + +struct xhci_erst_entry { + /* 64-bit event ring segment address */ + __le64 seg_addr; + __le32 seg_size; + /* Set to zero */ + __le32 rsvd; +}; + +struct xhci_erst { + struct xhci_erst_entry *entries; + unsigned int num_entries; + /* xhci->event_ring keeps track of segment dma addresses */ + dma_addr_t erst_dma_addr; + /* Num entries the ERST can contain */ + unsigned int erst_size; + + ANDROID_KABI_RESERVE(1); +}; + +struct xhci_scratchpad { + u64 *sp_array; + dma_addr_t sp_dma; + void **sp_buffers; +}; + +struct urb_priv { + int num_tds; + int num_tds_done; + struct xhci_td td[]; +}; + +/* + * Each segment table entry is 4*32bits long. 1K seems like an ok size: + * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table, + * meaning 64 ring segments. 
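+ * (Put differently: each struct xhci_erst_entry above is four 32-bit words,
+ * i.e. 16 bytes, so a 1KB table holds 1024 / 16 = 64 entries.)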
+ * Initial allocated size of the ERST, in number of entries */ +#define ERST_NUM_SEGS 1 +/* Initial allocated size of the ERST, in number of entries */ +#define ERST_SIZE 64 +/* Initial number of event segment rings allocated */ +#define ERST_ENTRIES 1 +/* Poll every 60 seconds */ +#define POLL_TIMEOUT 60 +/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */ +#define XHCI_STOP_EP_CMD_TIMEOUT 5 +/* XXX: Make these module parameters */ + +struct s3_save { + u32 command; + u32 dev_nt; + u64 dcbaa_ptr; + u32 config_reg; + u32 irq_pending; + u32 irq_control; + u32 erst_size; + u64 erst_base; + u64 erst_dequeue; +}; + +/* Use for lpm */ +struct dev_info { + u32 dev_id; + struct list_head list; +}; + +struct xhci_bus_state { + unsigned long bus_suspended; + unsigned long next_statechange; + + /* Port suspend arrays are indexed by the portnum of the fake roothub */ + /* ports suspend status arrays - max 31 ports for USB2, 15 for USB3 */ + u32 port_c_suspend; + u32 suspended_ports; + u32 port_remote_wakeup; + unsigned long resume_done[USB_MAXCHILDREN]; + /* which ports have started to resume */ + unsigned long resuming_ports; + /* Which ports are waiting on RExit to U0 transition. */ + unsigned long rexit_ports; + struct completion rexit_done[USB_MAXCHILDREN]; + struct completion u3exit_done[USB_MAXCHILDREN]; +}; + + +/* + * It can take up to 20 ms to transition from RExit to U0 on the + * Intel Lynx Point LP xHCI host. + */ +#define XHCI_MAX_REXIT_TIMEOUT_MS 20 +struct xhci_port_cap { + u32 *psi; /* array of protocol speed ID entries */ + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; +}; + +struct xhci_port { + __le32 __iomem *addr; + int hw_portnum; + int hcd_portnum; + struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; +}; + +struct xhci_hub { + struct xhci_port **ports; + unsigned int num_ports; + struct usb_hcd *hcd; + /* keep track of bus suspend info */ + struct xhci_bus_state bus_state; + /* supported prococol extended capabiliy values */ + u8 maj_rev; + u8 min_rev; +}; + +/* There is one xhci_hcd structure per controller */ +struct xhci_hcd { + struct usb_hcd *main_hcd; + struct usb_hcd *shared_hcd; + /* glue to PCI and HCD framework */ + struct xhci_cap_regs __iomem *cap_regs; + struct xhci_op_regs __iomem *op_regs; + struct xhci_run_regs __iomem *run_regs; + struct xhci_doorbell_array __iomem *dba; + /* Our HCD's current interrupter register set */ + struct xhci_intr_reg __iomem *ir_set; + + /* Cached register copies of read-only HC data */ + __u32 hcs_params1; + __u32 hcs_params2; + __u32 hcs_params3; + __u32 hcc_params; + __u32 hcc_params2; + + spinlock_t lock; + + /* packed release number */ + u8 sbrn; + u16 hci_version; + u8 max_slots; + u8 max_interrupters; + u8 max_ports; + u8 isoc_threshold; + /* imod_interval in ns (I * 250ns) */ + u32 imod_interval; + int event_ring_max; + /* 4KB min, 128MB max */ + int page_size; + /* Valid values are 12 to 20, inclusive */ + int page_shift; + /* msi-x vectors */ + int msix_count; + /* optional clocks */ + struct clk *clk; + struct clk *reg_clk; + /* optional reset controller */ + struct reset_control *reset; + /* data structures */ + struct xhci_device_context_array *dcbaa; + struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; +#define CMD_RING_STATE_RUNNING (1 << 0) +#define CMD_RING_STATE_ABORTED (1 << 1) +#define CMD_RING_STATE_STOPPED (1 << 2) + struct list_head cmd_list; + unsigned int cmd_ring_reserved_trbs; + struct delayed_work cmd_timer; + struct completion 
cmd_ring_stop_completion; + struct xhci_command *current_cmd; + struct xhci_ring *event_ring; + struct xhci_erst erst; + /* Scratchpad */ + struct xhci_scratchpad *scratchpad; + /* Store LPM test failed devices' information */ + struct list_head lpm_failed_devs; + + /* slot enabling and address device helpers */ + /* these are not thread safe so use mutex */ + struct mutex mutex; + /* For USB 3.0 LPM enable/disable. */ + struct xhci_command *lpm_command; + /* Internal mirror of the HW's dcbaa */ + struct xhci_virt_device *devs[MAX_HC_SLOTS]; + /* For keeping track of bandwidth domains per roothub. */ + struct xhci_root_port_bw_info *rh_bw; + + /* DMA pools */ + struct dma_pool *device_pool; + struct dma_pool *segment_pool; + struct dma_pool *small_streams_pool; + struct dma_pool *medium_streams_pool; + + /* Host controller watchdog timer structures */ + unsigned int xhc_state; + + u32 command; + struct s3_save s3; +/* Host controller is dying - not responding to commands. "I'm not dead yet!" + * + * xHC interrupts have been disabled and a watchdog timer will (or has already) + * halt the xHCI host, and complete all URBs with an -ESHUTDOWN code. Any code + * that sees this status (other than the timer that set it) should stop touching + * hardware immediately. Interrupt handlers should return immediately when + * they see this status (any time they drop and re-acquire xhci->lock). + * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without + * putting the TD on the canceled list, etc. + * + * There are no reports of xHCI host controllers that display this issue. + */ +#define XHCI_STATE_DYING (1 << 0) +#define XHCI_STATE_HALTED (1 << 1) +#define XHCI_STATE_REMOVING (1 << 2) + unsigned long long quirks; +#define XHCI_LINK_TRB_QUIRK BIT_ULL(0) +#define XHCI_RESET_EP_QUIRK BIT_ULL(1) +#define XHCI_NEC_HOST BIT_ULL(2) +#define XHCI_AMD_PLL_FIX BIT_ULL(3) +#define XHCI_SPURIOUS_SUCCESS BIT_ULL(4) +/* + * Certain Intel host controllers have a limit to the number of endpoint + * contexts they can handle. Ideally, they would signal that they can't handle + * anymore endpoint contexts by returning a Resource Error for the Configure + * Endpoint command, but they don't. Instead they expect software to keep track + * of the number of active endpoints for them, across configure endpoint + * commands, reset device commands, disable slot commands, and address device + * commands. 
+ */ +#define XHCI_EP_LIMIT_QUIRK BIT_ULL(5) +#define XHCI_BROKEN_MSI BIT_ULL(6) +#define XHCI_RESET_ON_RESUME BIT_ULL(7) +#define XHCI_SW_BW_CHECKING BIT_ULL(8) +#define XHCI_AMD_0x96_HOST BIT_ULL(9) +#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) +#define XHCI_LPM_SUPPORT BIT_ULL(11) +#define XHCI_INTEL_HOST BIT_ULL(12) +#define XHCI_SPURIOUS_REBOOT BIT_ULL(13) +#define XHCI_COMP_MODE_QUIRK BIT_ULL(14) +#define XHCI_AVOID_BEI BIT_ULL(15) +#define XHCI_PLAT BIT_ULL(16) +#define XHCI_SLOW_SUSPEND BIT_ULL(17) +#define XHCI_SPURIOUS_WAKEUP BIT_ULL(18) +/* For controllers with a broken beyond repair streams implementation */ +#define XHCI_BROKEN_STREAMS BIT_ULL(19) +#define XHCI_PME_STUCK_QUIRK BIT_ULL(20) +#define XHCI_MTK_HOST BIT_ULL(21) +#define XHCI_SSIC_PORT_UNUSED BIT_ULL(22) +#define XHCI_NO_64BIT_SUPPORT BIT_ULL(23) +#define XHCI_MISSING_CAS BIT_ULL(24) +/* For controller with a broken Port Disable implementation */ +#define XHCI_BROKEN_PORT_PED BIT_ULL(25) +#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 BIT_ULL(26) +#define XHCI_U2_DISABLE_WAKE BIT_ULL(27) +#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL BIT_ULL(28) +#define XHCI_HW_LPM_DISABLE BIT_ULL(29) +#define XHCI_SUSPEND_DELAY BIT_ULL(30) +#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) +#define XHCI_ZERO_64B_REGS BIT_ULL(32) +#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) +#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) +#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) +#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36) +#define XHCI_SKIP_PHY_INIT BIT_ULL(37) +#define XHCI_DISABLE_SPARSE BIT_ULL(38) +#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) +#define XHCI_NO_SOFT_RETRY BIT_ULL(40) + + unsigned int num_active_eps; + unsigned int limit_active_eps; + struct xhci_port *hw_ports; + struct xhci_hub usb2_rhub; + struct xhci_hub usb3_rhub; + /* support xHCI 1.0 spec USB2 hardware LPM */ + unsigned hw_lpm_support:1; + /* Broken Suspend flag for SNPS Suspend resume issue */ + unsigned broken_suspend:1; + /* cached usb2 extened protocol capabilites */ + u32 *ext_caps; + unsigned int num_ext_caps; + /* cached extended protocol port capabilities */ + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; + /* Compliance Mode Recovery Data */ + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; + u16 test_mode; +/* Compliance Mode Timer Triggered every 2 seconds */ +#define COMP_MODE_RCVRY_MSECS 2000 + + struct dentry *debugfs_root; + struct dentry *debugfs_slots; + struct list_head regset_list; + + void *dbc; + + /* Used for bug 194461020 */ + ANDROID_KABI_USE(1, struct xhci_vendor_ops *vendor_ops); + + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + + /* platform-specific data -- must come last */ + unsigned long priv[] __aligned(sizeof(s64)); +}; + +/* Platform specific overrides to generic XHCI hc_driver ops */ +struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *hcd); + int (*start)(struct usb_hcd *hcd); + int (*add_endpoint)(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); + int (*drop_endpoint)(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*address_device)(struct usb_hcd *hcd, struct usb_device *udev); + int (*bus_suspend)(struct usb_hcd *hcd); + int (*bus_resume)(struct usb_hcd *hcd); +}; + +#define XHCI_CFC_DELAY 10 + +/* convert between an HCD pointer and the 
corresponding EHCI_HCD */ +static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd) +{ + struct usb_hcd *primary_hcd; + + if (usb_hcd_is_primary_hcd(hcd)) + primary_hcd = hcd; + else + primary_hcd = hcd->primary_hcd; + + return (struct xhci_hcd *) (primary_hcd->hcd_priv); +} + +static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) +{ + return xhci->main_hcd; +} + +#define xhci_dbg(xhci, fmt, args...) \ + dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_err(xhci, fmt, args...) \ + dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_warn(xhci, fmt, args...) \ + dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_warn_ratelimited(xhci, fmt, args...) \ + dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_info(xhci, fmt, args...) \ + dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args) + +/* + * Registers should always be accessed with double word or quad word accesses. + * + * Some xHCI implementations may support 64-bit address pointers. Registers + * with 64-bit address pointers should be written to with dword accesses by + * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. + * xHCI implementations that do not support 64-bit address pointers will ignore + * the high dword, and write order is irrelevant. + */ +static inline u64 xhci_read_64(const struct xhci_hcd *xhci, + __le64 __iomem *regs) +{ + return lo_hi_readq(regs); +} +static inline void xhci_write_64(struct xhci_hcd *xhci, + const u64 val, __le64 __iomem *regs) +{ + lo_hi_writeq(val, regs); +} + +static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) +{ + return xhci->quirks & XHCI_LINK_TRB_QUIRK; +} + +/* xHCI debugging */ +char *xhci_get_slot_state(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx); +void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), + const char *fmt, ...); + +/* xHCI memory management */ +void xhci_mem_cleanup(struct xhci_hcd *xhci); +int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); +void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); +int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); +int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); +void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, + struct usb_device *udev); +unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); +unsigned int xhci_get_endpoint_address(unsigned int ep_index); +unsigned int xhci_last_valid_endpoint(u32 added_ctxs); +void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep); +void xhci_update_tt_active_eps(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + int old_active_eps); +void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info); +void xhci_update_bw_info(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_input_control_ctx *ctrl_ctx, + struct xhci_virt_device *virt_dev); +void xhci_endpoint_copy(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_container_ctx *out_ctx, + unsigned int ep_index); +void xhci_slot_copy(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_container_ctx *out_ctx); +int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, + struct usb_device *udev, struct usb_host_endpoint *ep, + gfp_t mem_flags); +struct xhci_ring 
*xhci_ring_alloc(struct xhci_hcd *xhci, + unsigned int num_segs, unsigned int cycle_state, + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags); +void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); +int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, + unsigned int num_trbs, gfp_t flags); +int xhci_alloc_erst(struct xhci_hcd *xhci, + struct xhci_ring *evt_ring, + struct xhci_erst *erst, + gfp_t flags); +void xhci_initialize_ring_info(struct xhci_ring *ring, + unsigned int cycle_state); +void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); +void xhci_free_endpoint_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + unsigned int ep_index); +struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, + unsigned int num_streams, + unsigned int max_packet, gfp_t flags); +void xhci_free_stream_info(struct xhci_hcd *xhci, + struct xhci_stream_info *stream_info); +void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + struct xhci_stream_info *stream_info); +void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx, + struct xhci_virt_ep *ep); +void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, bool drop_control_ep); +struct xhci_ring *xhci_dma_to_transfer_ring( + struct xhci_virt_ep *ep, + u64 address); +struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, + bool allocate_completion, gfp_t mem_flags); +struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, + bool allocate_completion, gfp_t mem_flags); +void xhci_urb_free_priv(struct urb_priv *urb_priv); +void xhci_free_command(struct xhci_hcd *xhci, + struct xhci_command *command); +struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, + int type, gfp_t flags); +void xhci_free_container_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx); + +/* xHCI host controller glue */ +typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec); +void xhci_quiesce(struct xhci_hcd *xhci); +int xhci_halt(struct xhci_hcd *xhci); +int xhci_start(struct xhci_hcd *xhci); +int xhci_reset(struct xhci_hcd *xhci); +int xhci_run(struct usb_hcd *hcd); +int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); +void xhci_shutdown(struct usb_hcd *hcd); +void xhci_init_driver(struct hc_driver *drv, + const struct xhci_driver_overrides *over); +int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); +int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); +int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); +int xhci_ext_cap_init(struct xhci_hcd *xhci); + +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); +int xhci_resume(struct xhci_hcd *xhci, bool hibernated); + +irqreturn_t xhci_irq(struct usb_hcd *hcd); +irqreturn_t xhci_msi_irq(int irq, void *hcd); +int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); +int xhci_alloc_tt_info(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); + +/* xHCI ring, segment, TRB, 
and TD functions */ +dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); +struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, + struct xhci_segment *start_seg, union xhci_trb *start_trb, + union xhci_trb *end_trb, dma_addr_t suspect_dma, bool debug); +int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code); +void xhci_ring_cmd_db(struct xhci_hcd *xhci); +int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 trb_type, u32 slot_id); +int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, + dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev); +int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 field1, u32 field2, u32 field3, u32 field4); +int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, + int slot_id, unsigned int ep_index, int suspend); +int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, + int slot_id, unsigned int ep_index); +int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, + int slot_id, unsigned int ep_index); +int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, + int slot_id, unsigned int ep_index); +int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, + struct urb *urb, int slot_id, unsigned int ep_index); +int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, + bool command_must_succeed); +int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, + dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed); +int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, + int slot_id, unsigned int ep_index, + enum xhci_ep_reset_type reset_type); +int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 slot_id); +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td); +void xhci_stop_endpoint_command_watchdog(struct timer_list *t); +void xhci_handle_command_timeout(struct work_struct *work); + +void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id); +void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index); +void xhci_cleanup_command_queue(struct xhci_hcd *xhci); +void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring); +unsigned int count_trbs(u64 addr, u64 len); + +/* xHCI roothub code */ +void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, + u32 link_state); +void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port, + u32 port_bit); +int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, + char *buf, u16 wLength); +int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); +int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); +struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); + +void xhci_hc_died(struct xhci_hcd *xhci); + +#ifdef CONFIG_PM +int xhci_bus_suspend(struct usb_hcd *hcd); +int xhci_bus_resume(struct usb_hcd *hcd); +unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd); +#else +#define xhci_bus_suspend NULL +#define xhci_bus_resume NULL +#define xhci_get_resuming_ports NULL +#endif /* CONFIG_PM */ + +u32 xhci_port_state_to_neutral(u32 state); +int 
xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, + u16 port); +void xhci_ring_device(struct xhci_hcd *xhci, int slot_id); + +/* xHCI contexts */ +struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx); +struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); + +struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id); + +static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, + struct urb *urb) +{ + return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, + xhci_get_endpoint_index(&urb->ep->desc), + urb->stream_id); +} + +/** + * struct xhci_vendor_ops - function callbacks for vendor specific operations + * @vendor_init: called for vendor init process + * @vendor_cleanup: called for vendor cleanup process + * @is_usb_offload_enabled: called to check if usb offload enabled + * @queue_irq_work: called to queue vendor specific irq work + * @alloc_dcbaa: called when allocating vendor specific dcbaa + * @free_dcbaa: called to free vendor specific dcbaa + * @alloc_transfer_ring: called when remote transfer ring allocation is required + * @free_transfer_ring: called to free vendor specific transfer ring + * @sync_dev_ctx: called when synchronization for device context is required + * @alloc_container_ctx: called when allocating vendor specific container context + * @free_container_ctx: called to free vendor specific container context + */ +struct xhci_vendor_ops { + int (*vendor_init)(struct xhci_hcd *xhci); + void (*vendor_cleanup)(struct xhci_hcd *xhci); + bool (*is_usb_offload_enabled)(struct xhci_hcd *xhci, + struct xhci_virt_device *vdev, + unsigned int ep_index); + irqreturn_t (*queue_irq_work)(struct xhci_hcd *xhci); + + struct xhci_device_context_array *(*alloc_dcbaa)(struct xhci_hcd *xhci, + gfp_t flags); + void (*free_dcbaa)(struct xhci_hcd *xhci); + + struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci, + u32 endpoint_type, enum xhci_ring_type ring_type, + unsigned int max_packet, gfp_t mem_flags); + void (*free_transfer_ring)(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, unsigned int ep_index); + int (*sync_dev_ctx)(struct xhci_hcd *xhci, unsigned int slot_id); + bool (*usb_offload_skip_urb)(struct xhci_hcd *xhci, struct urb *urb); + void (*alloc_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + int type, gfp_t flags); + void (*free_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +}; + +struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci); + +int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id); +bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb); +void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, unsigned int ep_index); +bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, unsigned int ep_index); + +/* + * TODO: As per spec Isochronous IDT transmissions are supported. We bypass + * them anyways as we where unable to find a device that matches the + * constraints. 
+ */ +static inline bool xhci_urb_suitable_for_idt(struct urb *urb) +{ + if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) && + usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE && + urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE && + !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) && + !urb->num_sgs) + return true; + + return false; +} + +static inline char *xhci_slot_state_string(u32 state) +{ + switch (state) { + case SLOT_STATE_ENABLED: + return "enabled/disabled"; + case SLOT_STATE_DEFAULT: + return "default"; + case SLOT_STATE_ADDRESSED: + return "addressed"; + case SLOT_STATE_CONFIGURED: + return "configured"; + default: + return "reserved"; + } +} + +static inline const char *xhci_decode_trb(char *str, size_t size, + u32 field0, u32 field1, u32 field2, u32 field3) +{ + int type = TRB_FIELD_TO_TYPE(field3); + + switch (type) { + case TRB_LINK: + snprintf(str, size, + "LINK %08x%08x intr %d type '%s' flags %c:%c:%c:%c", + field1, field0, GET_INTR_TARGET(field2), + xhci_trb_type_string(type), + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_TC ? 'T' : 't', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_TRANSFER: + case TRB_COMPLETION: + case TRB_PORT_STATUS: + case TRB_BANDWIDTH_EVENT: + case TRB_DOORBELL: + case TRB_HC_EVENT: + case TRB_DEV_NOTE: + case TRB_MFINDEX_WRAP: + snprintf(str, size, + "TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c", + field1, field0, + xhci_trb_comp_code_string(GET_COMP_CODE(field2)), + EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + xhci_trb_type_string(type), + field3 & EVENT_DATA ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + + break; + case TRB_SETUP: + snprintf(str, size, + "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c", + field0 & 0xff, + (field0 & 0xff00) >> 8, + (field0 & 0xff000000) >> 24, + (field0 & 0xff0000) >> 16, + (field1 & 0xff00) >> 8, + field1 & 0xff, + (field1 & 0xff000000) >> 16 | + (field1 & 0xff0000) >> 16, + TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + xhci_trb_type_string(type), + field3 & TRB_IDT ? 'I' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_DATA: + snprintf(str, size, + "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c", + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + xhci_trb_type_string(type), + field3 & TRB_IDT ? 'I' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_STATUS: + snprintf(str, size, + "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c", + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + xhci_trb_type_string(type), + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_NORMAL: + case TRB_ISOC: + case TRB_EVENT_DATA: + case TRB_TR_NOOP: + snprintf(str, size, + "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c", + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + xhci_trb_type_string(type), + field3 & TRB_BEI ? 
'B' : 'b', + field3 & TRB_IDT ? 'I' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + + case TRB_CMD_NOOP: + case TRB_ENABLE_SLOT: + snprintf(str, size, + "%s: flags %c", + xhci_trb_type_string(type), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_DISABLE_SLOT: + case TRB_NEG_BANDWIDTH: + snprintf(str, size, + "%s: slot %d flags %c", + xhci_trb_type_string(type), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_ADDR_DEV: + snprintf(str, size, + "%s: ctx %08x%08x slot %d flags %c:%c", + xhci_trb_type_string(type), + field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_BSR ? 'B' : 'b', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_CONFIG_EP: + snprintf(str, size, + "%s: ctx %08x%08x slot %d flags %c:%c", + xhci_trb_type_string(type), + field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_DC ? 'D' : 'd', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_EVAL_CONTEXT: + snprintf(str, size, + "%s: ctx %08x%08x slot %d flags %c", + xhci_trb_type_string(type), + field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_EP: + snprintf(str, size, + "%s: ctx %08x%08x slot %d ep %d flags %c:%c", + xhci_trb_type_string(type), + field1, field0, + TRB_TO_SLOT_ID(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + field3 & TRB_TSP ? 'T' : 't', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_STOP_RING: + sprintf(str, + "%s: slot %d sp %d ep %d flags %c", + xhci_trb_type_string(type), + TRB_TO_SLOT_ID(field3), + TRB_TO_SUSPEND_PORT(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SET_DEQ: + snprintf(str, size, + "%s: deq %08x%08x stream %d slot %d ep %d flags %c", + xhci_trb_type_string(type), + field1, field0, + TRB_TO_STREAM_ID(field2), + TRB_TO_SLOT_ID(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_DEV: + snprintf(str, size, + "%s: slot %d flags %c", + xhci_trb_type_string(type), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_FORCE_EVENT: + snprintf(str, size, + "%s: event %08x%08x vf intr %d vf id %d flags %c", + xhci_trb_type_string(type), + field1, field0, + TRB_TO_VF_INTR_TARGET(field2), + TRB_TO_VF_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SET_LT: + snprintf(str, size, + "%s: belt %d flags %c", + xhci_trb_type_string(type), + TRB_TO_BELT(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_GET_BW: + snprintf(str, size, + "%s: ctx %08x%08x slot %d speed %d flags %c", + xhci_trb_type_string(type), + field1, field0, + TRB_TO_SLOT_ID(field3), + TRB_TO_DEV_SPEED(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_FORCE_HEADER: + snprintf(str, size, + "%s: info %08x%08x%08x pkt type %d roothub port %d flags %c", + xhci_trb_type_string(type), + field2, field1, field0 & 0xffffffe0, + TRB_TO_PACKET_TYPE(field0), + TRB_TO_ROOTHUB_PORT(field3), + field3 & TRB_CYCLE ? 
'C' : 'c'); + break; + default: + snprintf(str, size, + "type '%s' -> raw %08x %08x %08x %08x", + xhci_trb_type_string(type), + field0, field1, field2, field3); + } + + return str; +} + +static inline const char *xhci_decode_ctrl_ctx(char *str, + unsigned long drop, unsigned long add) +{ + unsigned int bit; + int ret = 0; + + if (drop) { + ret = sprintf(str, "Drop:"); + for_each_set_bit(bit, &drop, 32) + ret += sprintf(str + ret, " %d%s", + bit / 2, + bit % 2 ? "in":"out"); + ret += sprintf(str + ret, ", "); + } + + if (add) { + ret += sprintf(str + ret, "Add:%s%s", + (add & SLOT_FLAG) ? " slot":"", + (add & EP0_FLAG) ? " ep0":""); + add &= ~(SLOT_FLAG | EP0_FLAG); + for_each_set_bit(bit, &add, 32) + ret += sprintf(str + ret, " %d%s", + bit / 2, + bit % 2 ? "in":"out"); + } + return str; +} + +static inline const char *xhci_decode_slot_context(char *str, + u32 info, u32 info2, u32 tt_info, u32 state) +{ + u32 speed; + u32 hub; + u32 mtt; + int ret = 0; + + speed = info & DEV_SPEED; + hub = info & DEV_HUB; + mtt = info & DEV_MTT; + + ret = sprintf(str, "RS %05x %s%s%s Ctx Entries %d MEL %d us Port# %d/%d", + info & ROUTE_STRING_MASK, + ({ char *s; + switch (speed) { + case SLOT_SPEED_FS: + s = "full-speed"; + break; + case SLOT_SPEED_LS: + s = "low-speed"; + break; + case SLOT_SPEED_HS: + s = "high-speed"; + break; + case SLOT_SPEED_SS: + s = "super-speed"; + break; + case SLOT_SPEED_SSP: + s = "super-speed plus"; + break; + default: + s = "UNKNOWN speed"; + } s; }), + mtt ? " multi-TT" : "", + hub ? " Hub" : "", + (info & LAST_CTX_MASK) >> 27, + info2 & MAX_EXIT, + DEVINFO_TO_ROOT_HUB_PORT(info2), + DEVINFO_TO_MAX_PORTS(info2)); + + ret += sprintf(str + ret, " [TT Slot %d Port# %d TTT %d Intr %d] Addr %d State %s", + tt_info & TT_SLOT, (tt_info & TT_PORT) >> 8, + GET_TT_THINK_TIME(tt_info), GET_INTR_TARGET(tt_info), + state & DEV_ADDR_MASK, + xhci_slot_state_string(GET_SLOT_STATE(state))); + + return str; +} + + +static inline const char *xhci_portsc_link_state_string(u32 portsc) +{ + switch (portsc & PORT_PLS_MASK) { + case XDEV_U0: + return "U0"; + case XDEV_U1: + return "U1"; + case XDEV_U2: + return "U2"; + case XDEV_U3: + return "U3"; + case XDEV_DISABLED: + return "Disabled"; + case XDEV_RXDETECT: + return "RxDetect"; + case XDEV_INACTIVE: + return "Inactive"; + case XDEV_POLLING: + return "Polling"; + case XDEV_RECOVERY: + return "Recovery"; + case XDEV_HOT_RESET: + return "Hot Reset"; + case XDEV_COMP_MODE: + return "Compliance mode"; + case XDEV_TEST_MODE: + return "Test mode"; + case XDEV_RESUME: + return "Resume"; + default: + break; + } + return "Unknown"; +} + +static inline const char *xhci_decode_portsc(char *str, u32 portsc) +{ + int ret; + + ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ", + portsc & PORT_POWER ? "Powered" : "Powered-off", + portsc & PORT_CONNECT ? "Connected" : "Not-connected", + portsc & PORT_PE ? 
"Enabled" : "Disabled", + xhci_portsc_link_state_string(portsc), + DEV_PORT_SPEED(portsc)); + + if (portsc & PORT_OC) + ret += sprintf(str + ret, "OverCurrent "); + if (portsc & PORT_RESET) + ret += sprintf(str + ret, "In-Reset "); + + ret += sprintf(str + ret, "Change: "); + if (portsc & PORT_CSC) + ret += sprintf(str + ret, "CSC "); + if (portsc & PORT_PEC) + ret += sprintf(str + ret, "PEC "); + if (portsc & PORT_WRC) + ret += sprintf(str + ret, "WRC "); + if (portsc & PORT_OCC) + ret += sprintf(str + ret, "OCC "); + if (portsc & PORT_RC) + ret += sprintf(str + ret, "PRC "); + if (portsc & PORT_PLC) + ret += sprintf(str + ret, "PLC "); + if (portsc & PORT_CEC) + ret += sprintf(str + ret, "CEC "); + if (portsc & PORT_CAS) + ret += sprintf(str + ret, "CAS "); + + ret += sprintf(str + ret, "Wake: "); + if (portsc & PORT_WKCONN_E) + ret += sprintf(str + ret, "WCE "); + if (portsc & PORT_WKDISC_E) + ret += sprintf(str + ret, "WDE "); + if (portsc & PORT_WKOC_E) + ret += sprintf(str + ret, "WOE "); + + return str; +} + +static inline const char *xhci_decode_usbsts(char *str, u32 usbsts) +{ + int ret = 0; + + if (usbsts == ~(u32)0) + return " 0xffffffff"; + if (usbsts & STS_HALT) + ret += sprintf(str + ret, " HCHalted"); + if (usbsts & STS_FATAL) + ret += sprintf(str + ret, " HSE"); + if (usbsts & STS_EINT) + ret += sprintf(str + ret, " EINT"); + if (usbsts & STS_PORT) + ret += sprintf(str + ret, " PCD"); + if (usbsts & STS_SAVE) + ret += sprintf(str + ret, " SSS"); + if (usbsts & STS_RESTORE) + ret += sprintf(str + ret, " RSS"); + if (usbsts & STS_SRE) + ret += sprintf(str + ret, " SRE"); + if (usbsts & STS_CNR) + ret += sprintf(str + ret, " CNR"); + if (usbsts & STS_HCE) + ret += sprintf(str + ret, " HCE"); + + return str; +} + +static inline const char *xhci_decode_doorbell(char *str, u32 slot, u32 doorbell) +{ + u8 ep; + u16 stream; + int ret; + + ep = (doorbell & 0xff); + stream = doorbell >> 16; + + if (slot == 0) { + sprintf(str, "Command Ring %d", doorbell); + return str; + } + ret = sprintf(str, "Slot %d ", slot); + if (ep > 0 && ep < 32) + ret = sprintf(str + ret, "ep%d%s", + ep / 2, + ep % 2 ? 
"in" : "out"); + else if (ep == 0 || ep < 248) + ret = sprintf(str + ret, "Reserved %d", ep); + else + ret = sprintf(str + ret, "Vendor Defined %d", ep); + if (stream) + ret = sprintf(str + ret, " Stream %d", stream); + + return str; +} + +static inline const char *xhci_ep_state_string(u8 state) +{ + switch (state) { + case EP_STATE_DISABLED: + return "disabled"; + case EP_STATE_RUNNING: + return "running"; + case EP_STATE_HALTED: + return "halted"; + case EP_STATE_STOPPED: + return "stopped"; + case EP_STATE_ERROR: + return "error"; + default: + return "INVALID"; + } +} + +static inline const char *xhci_ep_type_string(u8 type) +{ + switch (type) { + case ISOC_OUT_EP: + return "Isoc OUT"; + case BULK_OUT_EP: + return "Bulk OUT"; + case INT_OUT_EP: + return "Int OUT"; + case CTRL_EP: + return "Ctrl"; + case ISOC_IN_EP: + return "Isoc IN"; + case BULK_IN_EP: + return "Bulk IN"; + case INT_IN_EP: + return "Int IN"; + default: + return "INVALID"; + } +} + +static inline const char *xhci_decode_ep_context(char *str, u32 info, + u32 info2, u64 deq, u32 tx_info) +{ + int ret; + + u32 esit; + u16 maxp; + u16 avg; + + u8 max_pstr; + u8 ep_state; + u8 interval; + u8 ep_type; + u8 burst; + u8 cerr; + u8 mult; + + bool lsa; + bool hid; + + esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | + CTX_TO_MAX_ESIT_PAYLOAD(tx_info); + + ep_state = info & EP_STATE_MASK; + max_pstr = CTX_TO_EP_MAXPSTREAMS(info); + interval = CTX_TO_EP_INTERVAL(info); + mult = CTX_TO_EP_MULT(info) + 1; + lsa = !!(info & EP_HAS_LSA); + + cerr = (info2 & (3 << 1)) >> 1; + ep_type = CTX_TO_EP_TYPE(info2); + hid = !!(info2 & (1 << 7)); + burst = CTX_TO_MAX_BURST(info2); + maxp = MAX_PACKET_DECODED(info2); + + avg = EP_AVG_TRB_LENGTH(tx_info); + + ret = sprintf(str, "State %s mult %d max P. Streams %d %s", + xhci_ep_state_string(ep_state), mult, + max_pstr, lsa ? "LSA " : ""); + + ret += sprintf(str + ret, "interval %d us max ESIT payload %d CErr %d ", + (1 << interval) * 125, esit, cerr); + + ret += sprintf(str + ret, "Type %s %sburst %d maxp %d deq %016llx ", + xhci_ep_type_string(ep_type), hid ? 
"HID" : "", + burst, maxp, deq); + + ret += sprintf(str + ret, "avg trb len %d", avg); + + return str; +} + +#endif /* __LINUX_XHCI_HCD_H */ diff --git a/usb/usb_platform.c b/usb/usb_platform.c new file mode 100644 index 0000000..d0760f2 --- /dev/null +++ b/usb/usb_platform.c @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * Copyright 2024 The TenonOS Authors + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static __u32 usb_platform_count = USB_PLATFORM_ID; + +static int usb_platform_probe(struct pf_device *pfdev) +{ + int ret = -ENODEV; + + ret = dwc3_probe(pfdev); + if (ret) { + uk_pr_err("fail to probe dwc3\n"); + return ret; + } + + ret = xhci_plat_probe(pfdev); + if (ret) { + uk_pr_err("fail to probe xhci\n"); + return ret; + } + + return 0; +} + +static int usb_platform_init(struct uk_alloc *drv_allocator) +{ + return 0; +} + +static struct pf_device_id usb_platform_id = { + .device_id = USB_PLATFORM_ID +}; + +int usb_platform_add_dev(struct pf_device *pfdev) +{ + return 0; +} + +static const struct device_match_table usb_platform_match_table[] = { + { .compatible = "snps,dwc3", + .id = &usb_platform_id }, + { .compatible = "synopsys,dwc3", + .id = &usb_platform_id }, + {NULL} +}; + +static int usb_platform_match_compatible(const char *compatible) +{ + for (int i = 0; usb_platform_match_table[i].compatible != NULL; i++) + if (strcmp(usb_platform_match_table[i].compatible, compatible) == 0) + return usb_platform_count++; + + return -1; +} + +static struct pf_driver usb_platform_driver = { + .device_ids = &usb_platform_id, + .probe = usb_platform_probe, + .init = usb_platform_init, + .add_dev = usb_platform_add_dev, + .match = usb_platform_match_compatible +}; + +PF_REGISTER_DRIVER(&usb_platform_driver); \ No newline at end of file -- Gitee From 215375ef73a4eb62b96679069dbf252f37fd7b88 Mon Sep 17 00:00:00 2001 From: caicheng Date: Wed, 15 Jan 2025 11:06:55 +0800 Subject: [PATCH 2/5] modify dwc3_driver --- usb/dwc3/core.c | 957 ++++++++++++++++++++++++++------------------- usb/dwc3/core.h | 636 ++++++++++++++++-------------- usb/dwc3/host.c | 24 +- usb/dwc3/io.h | 23 +- usb/usb_platform.c | 3 +- 5 files changed, 923 insertions(+), 720 deletions(-) diff --git a/usb/dwc3/core.c b/usb/dwc3/core.c index 6c5d2bb..01b24ca 100644 --- a/usb/dwc3/core.c +++ b/usb/dwc3/core.c @@ -8,46 +8,124 @@ * Sebastian Andrzej Siewior */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include #include "core.h" -#include "gadget.h" +// #include "gadget.h" #include "io.h" -#include "debug.h" +// #include "debug.h" +#include +#include +#include +#include +#include +#include +#include +#include + +// #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */ + +static const char *const speed_names[] = { + [USB_SPEED_UNKNOWN] = "UNKNOWN", + [USB_SPEED_LOW] = "low-speed", + [USB_SPEED_FULL] = "full-speed", + [USB_SPEED_HIGH] = "high-speed", + [USB_SPEED_WIRELESS] = "wireless", + [USB_SPEED_SUPER] = "super-speed", + [USB_SPEED_SUPER_PLUS] = "super-speed-plus", +}; + +static const char *const 
usb_dr_modes[] = { + [USB_DR_MODE_UNKNOWN] = "", + [USB_DR_MODE_HOST] = "host", + [USB_DR_MODE_PERIPHERAL] = "peripheral", + [USB_DR_MODE_OTG] = "otg", +}; + +/* + * Crude calibrated busy-wait stubs. The loop counters are volatile so the + * compiler cannot optimize the delays away; the iterations-per-unit scaling + * is approximate and platform-dependent. + */ +static void udelay(u8 usec) +{ + volatile u64 count = 0; + u64 limit = usec * 100; + while (count < limit) { + count++; + } +} + +static void msleep(u8 msec) +{ + volatile u64 count = 0; + u64 limit = msec * 100000; + while (count < limit) { + count++; + } +} -#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */ +static enum usb_device_speed usb_get_maximum_speed(const void *fdt, int start_offset) +{ + const char *maximum_speed = NULL; + int i; + + fdt_getprop_string_by_offset(fdt, start_offset, "maximum-speed", &maximum_speed); + if (!maximum_speed) + return USB_SPEED_UNKNOWN; + + for (i = 0; i < (int)ARRAY_SIZE(speed_names); i++) { + if (speed_names[i] && strcmp(speed_names[i], maximum_speed) == 0) { + return i; + } + } + + return USB_SPEED_UNKNOWN; +} + +static enum usb_dr_mode usb_get_dr_mode(const void *fdt, int start_offset) +{ + const char *dr_mode = NULL; + int i; + + fdt_getprop_string_by_offset(fdt, start_offset, "dr_mode", &dr_mode); + if (!dr_mode) + return USB_DR_MODE_UNKNOWN; + + for (i = 0; i < (int)ARRAY_SIZE(usb_dr_modes); i++) { + if (usb_dr_modes[i] && strcmp(usb_dr_modes[i], dr_mode) == 0) { + return i; + } + } + + return USB_DR_MODE_UNKNOWN; +} /** * dwc3_get_dr_mode - Validates and sets dr_mode * @dwc: pointer to our context structure */ -static int dwc3_get_dr_mode(struct dwc3 *dwc) -{ +static int dwc3_get_dr_mode(struct dwc3 *dwc) { enum usb_dr_mode mode; - struct device *dev = dwc->dev; + // struct device *dev = dwc->dev; unsigned int hw_mode; if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) @@ -58,43 +136,45 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc) switch (hw_mode) { case DWC3_GHWPARAMS0_MODE_GADGET: - if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) { - dev_err(dev, - "Controller does not support host mode.\n"); - return -EINVAL; - } + // if (CONFIG_USB_DWC3_HOST) { + uk_pr_err("Controller does not support host mode.\n"); + return -EINVAL; + // } mode = USB_DR_MODE_PERIPHERAL; break; case DWC3_GHWPARAMS0_MODE_HOST: - if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) { - dev_err(dev, - "Controller does not support device mode.\n"); - return -EINVAL; - } + // if (CONFIG_USB_DWC3_GADGET) { + // uk_pr_err("Controller does not support device mode.\n"); + // return -EINVAL; + // } mode = USB_DR_MODE_HOST; break; default: - if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) +#if 0 + if (CONFIG_USB_DWC3_HOST) mode = USB_DR_MODE_HOST; - else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) + else if (CONFIG_USB_DWC3_GADGET) mode = USB_DR_MODE_PERIPHERAL; /* - * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG - * mode. If the controller supports DRD but the dr_mode is not - * specified or set to OTG, then set the mode to peripheral. 
+ */ if (mode == USB_DR_MODE_OTG && - (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) || - !device_property_read_bool(dwc->dev, "usb-role-switch")) && - !DWC3_VER_IS_PRIOR(DWC3, 330A)) - mode = USB_DR_MODE_PERIPHERAL; + (!CONFIG_USB_ROLE_SWITCH || + !fdt_prop_read_bool(fdt, start_offset, "usb-role-switch")) && + !DWC3_VER_IS_PRIOR(DWC3, 330A)) + mode = USB_DR_MODE_PERIPHERAL; +#endif + mode = USB_DR_MODE_HOST; + // uk_pr_err("Controller does not support OTG mode.\n"); + // return -EINVAL; } if (mode != dwc->dr_mode) { - dev_warn(dev, - "Configuration mismatch. dr_mode forced to %s\n", - mode == USB_DR_MODE_HOST ? "host" : "gadget"); + uk_pr_warn("Configuration mismatch. dr_mode forced to %s\n", + mode == USB_DR_MODE_HOST ? "host" : "gadget"); dwc->dr_mode = mode; } @@ -116,6 +196,7 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) static int dwc3_core_soft_reset(struct dwc3 *dwc); +#if 0 static void __dwc3_set_mode(struct work_struct *work) { struct dwc3 *dwc = work_to_dwc(work); @@ -228,6 +309,7 @@ out: mutex_unlock(&dwc->mutex); } + void dwc3_set_mode(struct dwc3 *dwc, u32 mode) { unsigned long flags; @@ -255,6 +337,7 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg); } +#endif /** * dwc3_core_soft_reset - Issues core soft reset and PHY reset @@ -335,6 +418,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc) } } +#if 0 /** * dwc3_free_one_event_buffer - Frees one event buffer * @dwc: Pointer to our controller context structure @@ -533,6 +617,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc) DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); kfree(dwc->scratchbuf); } +#endif static void dwc3_core_num_eps(struct dwc3 *dwc) { @@ -559,6 +644,7 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc) parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9); } +#if 0 static int dwc3_core_ulpi_init(struct dwc3 *dwc) { int intf; @@ -574,6 +660,7 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc) return ret; } +#endif /** * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core @@ -665,7 +752,8 @@ static int dwc3_phy_setup(struct dwc3 *dwc) if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI)) break; } - fallthrough; + // fallthrough; + __fallthrough; case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: default: break; @@ -721,6 +809,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc) return 0; } +#if 0 static void dwc3_core_exit(struct dwc3 *dwc) { dwc3_event_buffers_cleanup(dwc); @@ -737,6 +826,7 @@ static void dwc3_core_exit(struct dwc3 *dwc) clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); reset_control_assert(dwc->reset); } +#endif static bool dwc3_core_is_valid(struct dwc3 *dwc) { @@ -804,14 +894,13 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc) /* check if current dwc3 is on simulation board */ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) { - dev_info(dwc->dev, "Running with FPGA optimizations\n"); - dwc->is_fpga = true; + uk_pr_info("Running with FPGA optimizations\n"); + dwc->is_fpga = true; } - WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga, - "disable_scramble cannot be used on non-FPGA builds\n"); + uk_pr_warn("disable_scramble cannot be used on non-FPGA builds\n"); - if (dwc->disable_scramble_quirk && dwc->is_fpga) + if (dwc->disable_scramble_quirk && dwc->is_fpga) reg |= DWC3_GCTL_DISSCRAMBLE; else reg &= ~DWC3_GCTL_DISSCRAMBLE; @@ -831,6 +920,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GCTL, reg); } +#if 0 static int dwc3_core_get_phy(struct dwc3 *dwc); static int 
dwc3_core_ulpi_init(struct dwc3 *dwc); @@ -927,6 +1017,7 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg); } +#endif /** * dwc3_core_init - Low-level initialization of DWC3 Core @@ -942,16 +1033,17 @@ static int dwc3_core_init(struct dwc3 *dwc) hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); - /* - * Write Linux Version Code to our GUID register so it's easy to figure - * out which kernel version a bug was found. - */ - dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); + // /* + // * Write Linux Version Code to our GUID register so it's easy to figure + // * out which kernel version a bug was found. + // */ + // dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); ret = dwc3_phy_setup(dwc); if (ret) goto err0; - +/* phy sub-system not supported yet */ +#if 0 if (!dwc->ulpi_ready) { ret = dwc3_core_ulpi_init(dwc); if (ret) @@ -977,59 +1069,64 @@ static int dwc3_core_init(struct dwc3 *dwc) phy_exit(dwc->usb2_generic_phy); goto err0a; } +#endif ret = dwc3_core_soft_reset(dwc); if (ret) - goto err1; + // goto err1; + return -1; if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && - !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) { + !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) { if (!dwc->dis_u3_susphy_quirk) { - reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); - reg |= DWC3_GUSB3PIPECTL_SUSPHY; - dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); + reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + reg |= DWC3_GUSB3PIPECTL_SUSPHY; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); } if (!dwc->dis_u2_susphy_quirk) { - reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - reg |= DWC3_GUSB2PHYCFG_SUSPHY; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + reg |= DWC3_GUSB2PHYCFG_SUSPHY; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } } dwc3_core_setup_global_control(dwc); dwc3_core_num_eps(dwc); - ret = dwc3_setup_scratch_buffers(dwc); - if (ret) - goto err1; + // ret = dwc3_setup_scratch_buffers(dwc); + // if (ret) + // goto err1; /* Adjust Frame Length */ dwc3_frame_length_adjustment(dwc); - dwc3_set_incr_burst_type(dwc); + /* burst mode not used in D9 */ + #if 0 + dwc3_set_incr_burst_type(dwc); - usb_phy_set_suspend(dwc->usb2_phy, 0); - usb_phy_set_suspend(dwc->usb3_phy, 0); - ret = phy_power_on(dwc->usb2_generic_phy); - if (ret < 0) - goto err2; + usb_phy_set_suspend(dwc->usb2_phy, 0); + usb_phy_set_suspend(dwc->usb3_phy, 0); + ret = phy_power_on(dwc->usb2_generic_phy); + if (ret < 0) + goto err2; - ret = phy_power_on(dwc->usb3_generic_phy); - if (ret < 0) - goto err3; + ret = phy_power_on(dwc->usb3_generic_phy); + if (ret < 0) + goto err3; - ret = dwc3_event_buffers_setup(dwc); - if (ret) { - dev_err(dwc->dev, "failed to setup event buffers\n"); - goto err4; - } + ret = dwc3_event_buffers_setup(dwc); + if (ret) { + dev_err(dwc->dev, "failed to setup event buffers\n"); + goto err4; + } + #endif /* - * ENDXFER polling is available on version 3.10a and later of - * the DWC_usb3 controller. It is NOT available in the - * DWC_usb31 controller. - */ + * ENDXFER polling is available on version 3.10a and later of + * the DWC_usb3 controller. It is NOT available in the + * DWC_usb31 controller. 
+ */ if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) { reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); reg |= DWC3_GUCTL2_RST_ACTBITLATER; @@ -1040,50 +1137,49 @@ static int dwc3_core_init(struct dwc3 *dwc) reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); /* - * Enable hardware control of sending remote wakeup - * in HS when the device is in the L1 state. - */ + * Enable hardware control of sending remote wakeup + * in HS when the device is in the L1 state. + */ if (!DWC3_VER_IS_PRIOR(DWC3, 290A)) - reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW; + reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW; /* - * Decouple USB 2.0 L1 & L2 events which will allow for - * gadget driver to only receive U3/L2 suspend & wakeup - * events and prevent the more frequent L1 LPM transitions - * from interrupting the driver. - */ + * Decouple USB 2.0 L1 & L2 events which will allow for + * gadget driver to only receive U3/L2 suspend & wakeup + * events and prevent the more frequent L1 LPM transitions + * from interrupting the driver. + */ if (!DWC3_VER_IS_PRIOR(DWC3, 300A)) - reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT; + reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT; if (dwc->dis_tx_ipgap_linecheck_quirk) - reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; + reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; if (dwc->parkmode_disable_ss_quirk) - reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; + reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); } - if (dwc->dr_mode == USB_DR_MODE_HOST || - dwc->dr_mode == USB_DR_MODE_OTG) { + if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->dr_mode == USB_DR_MODE_OTG) { reg = dwc3_readl(dwc->regs, DWC3_GUCTL); /* - * Enable Auto retry Feature to make the controller operating in - * Host mode on seeing transaction errors(CRC errors or internal - * overrun scenerios) on IN transfers to reply to the device - * with a non-terminating retry ACK (i.e, an ACK transcation - * packet with Retry=1 & Nump != 0) - */ + * Enable Auto retry Feature to make the controller operating in + * Host mode on seeing transaction errors(CRC errors or internal + * overrun scenerios) on IN transfers to reply to the device + * with a non-terminating retry ACK (i.e, an ACK transcation + * packet with Retry=1 & Nump != 0) + */ reg |= DWC3_GUCTL_HSTINAUTORETRY; dwc3_writel(dwc->regs, DWC3_GUCTL, reg); } /* - * Must config both number of packets and max burst settings to enable - * RX and/or TX threshold. - */ + * Must config both number of packets and max burst settings to enable + * RX and/or TX threshold. 
+ */ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) { u8 rx_thr_num = dwc->rx_thr_num_pkt_prd; u8 rx_maxburst = dwc->rx_max_burst_prd; @@ -1091,57 +1187,58 @@ static int dwc3_core_init(struct dwc3 *dwc) u8 tx_maxburst = dwc->tx_max_burst_prd; if (rx_thr_num && rx_maxburst) { - reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); - reg |= DWC31_RXTHRNUMPKTSEL_PRD; + reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); + reg |= DWC31_RXTHRNUMPKTSEL_PRD; - reg &= ~DWC31_RXTHRNUMPKT_PRD(~0); - reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num); + reg &= ~DWC31_RXTHRNUMPKT_PRD(~0); + reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num); - reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0); - reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst); + reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0); + reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst); - dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); + dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); } if (tx_thr_num && tx_maxburst) { - reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); - reg |= DWC31_TXTHRNUMPKTSEL_PRD; + reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); + reg |= DWC31_TXTHRNUMPKTSEL_PRD; - reg &= ~DWC31_TXTHRNUMPKT_PRD(~0); - reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num); + reg &= ~DWC31_TXTHRNUMPKT_PRD(~0); + reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num); - reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0); - reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst); + reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0); + reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst); - dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); + dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); } } return 0; -err4: - phy_power_off(dwc->usb3_generic_phy); + // err4: + // phy_power_off(dwc->usb3_generic_phy); -err3: - phy_power_off(dwc->usb2_generic_phy); + // err3: + // phy_power_off(dwc->usb2_generic_phy); -err2: - usb_phy_set_suspend(dwc->usb2_phy, 1); - usb_phy_set_suspend(dwc->usb3_phy, 1); + // err2: + // usb_phy_set_suspend(dwc->usb2_phy, 1); + // usb_phy_set_suspend(dwc->usb3_phy, 1); -err1: - usb_phy_shutdown(dwc->usb2_phy); - usb_phy_shutdown(dwc->usb3_phy); - phy_exit(dwc->usb2_generic_phy); - phy_exit(dwc->usb3_generic_phy); + // err1: + // usb_phy_shutdown(dwc->usb2_phy); + // usb_phy_shutdown(dwc->usb3_phy); + // phy_exit(dwc->usb2_generic_phy); + // phy_exit(dwc->usb3_generic_phy); -err0a: - dwc3_ulpi_exit(dwc); + // err0a: + // dwc3_ulpi_exit(dwc); -err0: - return ret; + err0: + return ret; } +#if 0 static int dwc3_core_get_phy(struct dwc3 *dwc) { struct device *dev = dwc->dev; @@ -1196,14 +1293,16 @@ static int dwc3_core_get_phy(struct dwc3 *dwc) return 0; } +#endif static int dwc3_core_init_mode(struct dwc3 *dwc) { - struct device *dev = dwc->dev; + // struct device *dev = dwc->dev; int ret; switch (dwc->dr_mode) { case USB_DR_MODE_PERIPHERAL: +#if 0 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); if (dwc->usb2_phy) @@ -1214,45 +1313,55 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) ret = dwc3_gadget_init(dwc); if (ret) return dev_err_probe(dev, ret, "failed to initialize gadget\n"); +#endif + uk_pr_err("gadget not supported\n"); + ret = -1; + return ret; break; case USB_DR_MODE_HOST: dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); - +#if 0 if (dwc->usb2_phy) otg_set_vbus(dwc->usb2_phy->otg, true); phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST); phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST); - +#endif ret = dwc3_host_init(dwc); if (ret) - return dev_err_probe(dev, ret, "failed to initialize host\n"); + uk_pr_err("failed to initialize host\n"); break; case USB_DR_MODE_OTG: +#if 0 INIT_WORK(&dwc->drd_work, __dwc3_set_mode); ret = dwc3_drd_init(dwc); if (ret) 
return dev_err_probe(dev, ret, "failed to initialize dual-role\n"); +#endif + uk_pr_err("dual-role not supported\n"); + ret = -1; + return ret; break; default: - dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode); + uk_pr_err("Unsupported mode of operation %d\n", dwc->dr_mode); return -EINVAL; } return 0; } +#if 0 static void dwc3_core_exit_mode(struct dwc3 *dwc) { switch (dwc->dr_mode) { - case USB_DR_MODE_PERIPHERAL: - dwc3_gadget_exit(dwc); - break; + // case USB_DR_MODE_PERIPHERAL: + // dwc3_gadget_exit(dwc); + // break; case USB_DR_MODE_HOST: dwc3_host_exit(dwc); break; - case USB_DR_MODE_OTG: - dwc3_drd_exit(dwc); - break; + // case USB_DR_MODE_OTG: + // dwc3_drd_exit(dwc); + // break; default: /* do nothing */ break; @@ -1261,20 +1370,22 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc) /* de-assert DRVVBUS for HOST and OTG mode */ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); } +#endif -static void dwc3_get_properties(struct dwc3 *dwc) -{ - struct device *dev = dwc->dev; - u8 lpm_nyet_threshold; - u8 tx_de_emphasis; - u8 hird_threshold; - u8 rx_thr_num_pkt_prd; - u8 rx_max_burst_prd; - u8 tx_thr_num_pkt_prd; - u8 tx_max_burst_prd; - u8 tx_fifo_resize_max_num; - const char *usb_psy_name; - int ret; +static int dwc3_get_properties(const void *fdt, int start_offset, + struct dwc3 *dwc) { + // struct device *dev = dwc->dev; + u8 lpm_nyet_threshold; + u8 tx_de_emphasis; + u8 hird_threshold; + u8 rx_thr_num_pkt_prd; + u8 rx_max_burst_prd; + u8 tx_thr_num_pkt_prd; + u8 tx_max_burst_prd; + u8 tx_fifo_resize_max_num; + // u32 maximum_speed; + const char *usb_psy_name; + int ret; /* default to highest possible threshold */ lpm_nyet_threshold = 0xf; @@ -1283,118 +1394,113 @@ static void dwc3_get_properties(struct dwc3 *dwc) tx_de_emphasis = 1; /* - * default to assert utmi_sleep_n and use maximum allowed HIRD - * threshold value of 0b1100 - */ + * default to assert utmi_sleep_n and use maximum allowed HIRD + * threshold value of 0b1100 + */ hird_threshold = 12; /* - * default to a TXFIFO size large enough to fit 6 max packets. This - * allows for systems with larger bus latencies to have some headroom - * for endpoints that have a large bMaxBurst value. - */ + * default to a TXFIFO size large enough to fit 6 max packets. This + * allows for systems with larger bus latencies to have some headroom + * for endpoints that have a large bMaxBurst value. 
+ */ tx_fifo_resize_max_num = 6; - dwc->maximum_speed = usb_get_maximum_speed(dev); - dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev); - dwc->dr_mode = usb_get_dr_mode(dev); - dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); + dwc->maximum_speed = usb_get_maximum_speed(fdt, start_offset); + // dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev); + dwc->max_ssp_rate = USB_SSP_GEN_UNKNOWN; + dwc->dr_mode = usb_get_dr_mode(fdt, start_offset); - dwc->sysdev_is_parent = device_property_read_bool(dev, - "linux,sysdev_is_parent"); - if (dwc->sysdev_is_parent) - dwc->sysdev = dwc->dev->parent; - else - dwc->sysdev = dwc->dev; + // dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); + dwc->hsphy_mode = USBPHY_INTERFACE_MODE_UTMIW; + + // dwc->sysdev_is_parent = device_property_read_bool(fdt, start_offset, + // "linux,sysdev_is_parent"); + // if (dwc->sysdev_is_parent) + // dwc->sysdev = dwc->dev->parent; + // else + // dwc->sysdev = dwc->dev; - ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name); + ret = fdt_getprop_string_by_offset(fdt, start_offset, "usb-psy-name", &usb_psy_name); if (ret >= 0) { - dwc->usb_psy = power_supply_get_by_name(usb_psy_name); - if (!dwc->usb_psy) - dev_err(dev, "couldn't get usb power supply\n"); + // dwc->usb_psy = power_supply_get_by_name(usb_psy_name); + // if (!dwc->usb_psy) + // uk_pr_err("couldn't get usb power supply\n"); + uk_pr_info("usb power supply not supported\n"); } - dwc->has_lpm_erratum = device_property_read_bool(dev, - "snps,has-lpm-erratum"); - device_property_read_u8(dev, "snps,lpm-nyet-threshold", - &lpm_nyet_threshold); - dwc->is_utmi_l1_suspend = device_property_read_bool(dev, - "snps,is-utmi-l1-suspend"); - device_property_read_u8(dev, "snps,hird-threshold", - &hird_threshold); - dwc->dis_start_transfer_quirk = device_property_read_bool(dev, - "snps,dis-start-transfer-quirk"); - dwc->usb3_lpm_capable = device_property_read_bool(dev, - "snps,usb3_lpm_capable"); - dwc->usb2_lpm_disable = device_property_read_bool(dev, - "snps,usb2-lpm-disable"); - dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev, - "snps,usb2-gadget-lpm-disable"); - device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd", - &rx_thr_num_pkt_prd); - device_property_read_u8(dev, "snps,rx-max-burst-prd", - &rx_max_burst_prd); - device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd", - &tx_thr_num_pkt_prd); - device_property_read_u8(dev, "snps,tx-max-burst-prd", - &tx_max_burst_prd); - dwc->do_fifo_resize = device_property_read_bool(dev, - "tx-fifo-resize"); + dwc->has_lpm_erratum = fdt_prop_read_bool(fdt, start_offset, "snps,has-lpm-erratum"); + fdt_getprop_u8_by_offset(fdt, start_offset, "snps,lpm-nyet-threshold", &lpm_nyet_threshold); + dwc->is_utmi_l1_suspend = + fdt_prop_read_bool(fdt, start_offset, "snps,is-utmi-l1-suspend"); + fdt_getprop_u8_by_offset(fdt, start_offset, "snps,hird-threshold", &hird_threshold); + dwc->dis_start_transfer_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis-start-transfer-quirk"); + dwc->usb3_lpm_capable = + fdt_prop_read_bool(fdt, start_offset, "snps,usb3_lpm_capable"); + dwc->usb2_lpm_disable = + fdt_prop_read_bool(fdt, start_offset, "snps,usb2-lpm-disable"); + dwc->usb2_gadget_lpm_disable = + fdt_prop_read_bool(fdt, start_offset, "snps,usb2-gadget-lpm-disable"); + fdt_getprop_u8_by_offset(fdt, start_offset, "snps,rx-thr-num-pkt-prd", &rx_thr_num_pkt_prd); + fdt_getprop_u8_by_offset(fdt, start_offset, "snps,rx-max-burst-prd", &rx_max_burst_prd); + fdt_getprop_u8_by_offset(fdt, start_offset, 
"snps,tx-thr-num-pkt-prd", &tx_thr_num_pkt_prd); + fdt_getprop_u8_by_offset(fdt, start_offset, "snps,tx-max-burst-prd", &tx_max_burst_prd); + dwc->do_fifo_resize = fdt_prop_read_bool(fdt, start_offset, "tx-fifo-resize"); if (dwc->do_fifo_resize) - device_property_read_u8(dev, "tx-fifo-max-num", - &tx_fifo_resize_max_num); - - dwc->disable_scramble_quirk = device_property_read_bool(dev, - "snps,disable_scramble_quirk"); - dwc->u2exit_lfps_quirk = device_property_read_bool(dev, - "snps,u2exit_lfps_quirk"); - dwc->u2ss_inp3_quirk = device_property_read_bool(dev, - "snps,u2ss_inp3_quirk"); - dwc->req_p1p2p3_quirk = device_property_read_bool(dev, - "snps,req_p1p2p3_quirk"); - dwc->del_p1p2p3_quirk = device_property_read_bool(dev, - "snps,del_p1p2p3_quirk"); - dwc->del_phy_power_chg_quirk = device_property_read_bool(dev, - "snps,del_phy_power_chg_quirk"); - dwc->lfps_filter_quirk = device_property_read_bool(dev, - "snps,lfps_filter_quirk"); - dwc->rx_detect_poll_quirk = device_property_read_bool(dev, - "snps,rx_detect_poll_quirk"); - dwc->dis_u3_susphy_quirk = device_property_read_bool(dev, - "snps,dis_u3_susphy_quirk"); - dwc->dis_u2_susphy_quirk = device_property_read_bool(dev, - "snps,dis_u2_susphy_quirk"); - dwc->dis_enblslpm_quirk = device_property_read_bool(dev, - "snps,dis_enblslpm_quirk"); - dwc->dis_u1_entry_quirk = device_property_read_bool(dev, - "snps,dis-u1-entry-quirk"); - dwc->dis_u2_entry_quirk = device_property_read_bool(dev, - "snps,dis-u2-entry-quirk"); - dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev, - "snps,dis_rxdet_inp3_quirk"); - dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev, - "snps,dis-u2-freeclk-exists-quirk"); - dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev, - "snps,dis-del-phy-power-chg-quirk"); - dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, - "snps,dis-tx-ipgap-linecheck-quirk"); - dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, - "snps,parkmode-disable-ss-quirk"); - - dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, - "snps,tx_de_emphasis_quirk"); - device_property_read_u8(dev, "snps,tx_de_emphasis", - &tx_de_emphasis); - device_property_read_string(dev, "snps,hsphy_interface", - &dwc->hsphy_interface); - device_property_read_u32(dev, "snps,quirk-frame-length-adjustment", - &dwc->fladj); - - dwc->dis_metastability_quirk = device_property_read_bool(dev, - "snps,dis_metastability_quirk"); - - dwc->dis_split_quirk = device_property_read_bool(dev, - "snps,dis-split-quirk"); + fdt_getprop_u8_by_offset(fdt, start_offset, "tx-fifo-max-num", &tx_fifo_resize_max_num); + + dwc->disable_scramble_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,disable_scramble_quirk"); + dwc->u2exit_lfps_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,u2exit_lfps_quirk"); + dwc->u2ss_inp3_quirk = fdt_prop_read_bool(fdt, start_offset, "snps,u2ss_inp3_quirk"); + dwc->req_p1p2p3_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,req_p1p2p3_quirk"); + dwc->del_p1p2p3_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,del_p1p2p3_quirk"); + dwc->del_phy_power_chg_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,del_phy_power_chg_quirk"); + dwc->lfps_filter_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,lfps_filter_quirk"); + dwc->rx_detect_poll_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,rx_detect_poll_quirk"); + dwc->dis_u3_susphy_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis_u3_susphy_quirk"); + + dwc->dis_u3_susphy_quirk = + fdt_prop_read_bool(fdt, 
start_offset, "snps,dis_u3_susphy_quirk"); + dwc->dis_u2_susphy_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis_u2_susphy_quirk"); + dwc->dis_enblslpm_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis_enblslpm_quirk"); + dwc->dis_u1_entry_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis-u1-entry-quirk"); + dwc->dis_u2_entry_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis-u2-entry-quirk"); + dwc->dis_rxdet_inp3_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis_rxdet_inp3_quirk"); + dwc->dis_u2_freeclk_exists_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis-u2-freeclk-exists-quirk"); + dwc->dis_del_phy_power_chg_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis-del-phy-power-chg-quirk"); + dwc->dis_tx_ipgap_linecheck_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis-tx-ipgap-linecheck-quirk"); + dwc->parkmode_disable_ss_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,parkmode-disable-ss-quirk"); + dwc->tx_de_emphasis_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,tx_de_emphasis_quirk"); + fdt_getprop_u8_by_offset(fdt, start_offset, "snps,tx_de_emphasis", &tx_de_emphasis); + fdt_getprop_string_by_offset(fdt, start_offset, "snps,hsphy_interface", + &dwc->hsphy_interface); + fdt_getprop_u32_by_offset(fdt, start_offset, "snps,quirk-frame-length-adjustment", + &dwc->fladj); + + dwc->dis_metastability_quirk = + fdt_prop_read_bool(fdt, start_offset, "snps,dis_metastability_quirk"); + + dwc->dis_split_quirk = fdt_prop_read_bool(fdt, start_offset, "snps,dis-split-quirk"); + dwc->dis_split_quirk = fdt_prop_read_bool(fdt, start_offset, "snps,dis-split-quirk"); dwc->lpm_nyet_threshold = lpm_nyet_threshold; dwc->tx_de_emphasis = tx_de_emphasis; @@ -1422,14 +1528,14 @@ bool dwc3_has_imod(struct dwc3 *dwc) static void dwc3_check_params(struct dwc3 *dwc) { - struct device *dev = dwc->dev; + // struct device *dev = dwc->dev; unsigned int hwparam_gen = DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); /* Check for proper value of imod_interval */ if (dwc->imod_interval && !dwc3_has_imod(dwc)) { - dev_warn(dwc->dev, "Interrupt moderation not supported\n"); - dwc->imod_interval = 0; + uk_pr_warn("Interrupt moderation not supported\n"); + dwc->imod_interval = 0; } /* @@ -1451,19 +1557,20 @@ static void dwc3_check_params(struct dwc3 *dwc) break; case USB_SPEED_SUPER: if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) - dev_warn(dev, "UDC doesn't support Gen 1\n"); + uk_pr_warn("UDC doesn't support Gen 1\n"); break; case USB_SPEED_SUPER_PLUS: if ((DWC3_IP_IS(DWC32) && hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) || (!DWC3_IP_IS(DWC32) && hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) - dev_warn(dev, "UDC doesn't support SSP\n"); + uk_pr_warn("UDC doesn't support SSP\n"); break; default: - dev_err(dev, "invalid maximum_speed parameter %d\n", + uk_pr_err("invalid maximum_speed parameter %d\n", dwc->maximum_speed); - fallthrough; + // fallthrough; + __fallthrough; case USB_SPEED_UNKNOWN: switch (hwparam_gen) { case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: @@ -1496,12 +1603,12 @@ static void dwc3_check_params(struct dwc3 *dwc) switch (dwc->max_ssp_rate) { case USB_SSP_GEN_2x1: if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1) - dev_warn(dev, "UDC only supports Gen 1\n"); + uk_pr_warn("UDC only supports Gen 1\n"); break; case USB_SSP_GEN_1x2: case USB_SSP_GEN_2x2: if (DWC3_IP_IS(DWC31)) - dev_warn(dev, "UDC only supports single lane\n"); + uk_pr_warn("UDC only supports single lane\n"); break; case USB_SSP_GEN_UNKNOWN: default: @@ -1522,191 
+1629,220 @@ static void dwc3_check_params(struct dwc3 *dwc) } } -static void dwc3_resume_quirk(struct dwc3 *dwc); - -static void dwc3_set_ncr(struct dwc3 *dwc) +// static void dwc3_resume_quirk(struct dwc3 *dwc); +static void dwc3_set_ncr(const void *fdt, int start_offset, + struct dwc3 *dwc) { - if (device_property_read_bool(dwc->dev, "SOC_Thang")) { + if (fdt_prop_read_bool(fdt, start_offset, "SOC_Thang")) { dwc->soc_taihang = true; - if (device_property_read_bool(dwc->dev, "usb-role-switch")) - sd_otg_enable(dwc); + // if (device_property_read_bool(dwc->dev, "usb-role-switch")) + // sd_otg_enable(dwc); } - dwc3_resume_quirk(dwc); + // dwc3_resume_quirk(dwc); } -static int dwc3_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct resource *res, dwc_res; - struct dwc3_vendor *vdwc; - struct dwc3 *dwc; - - int ret; - - void __iomem *regs; +int dwc3_probe(struct pf_device *pdev) { + // struct device *dev = &pdev->dev; + // struct resource *res, dwc_res; + // struct dwc3_vendor *vdwc; + // struct device *dev; + struct dwc3 *dwc; + int ret, offs; + // void __iomem *regs; + void *dtb; + struct uk_intctlr_irq irq; + __u64 reg_base, reg_size; + + dtb = (void *)ukplat_bootinfo_get()->dtb; + offs = pdev->fdt_offset; + + /* dwc3 offset should already be caught */ + if (unlikely(offs < 0)) + return -EINVAL; - vdwc = devm_kzalloc(dev, sizeof(*vdwc), GFP_KERNEL); - if (!vdwc) + // vdwc = devm_kzalloc(dev, sizeof(*vdwc), GFP_KERNEL); + dwc = (struct dwc3 *)uk_calloc(uk_alloc_get_default(), 1, sizeof(*dwc)); + if (!dwc) return -ENOMEM; - dwc = &vdwc->dwc; - - dwc->dev = dev; + // dwc = &vdwc->dwc; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing memory resource\n"); - return -ENODEV; - } + // dwc->dev = dev; - dwc->xhci_resources[0].start = res->start; - dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + - DWC3_XHCI_REGS_END; - dwc->xhci_resources[0].flags = res->flags; - dwc->xhci_resources[0].name = res->name; - - /* - * Request memory region but exclude xHCI regs, - * since it will be requested by the xhci-plat driver. - */ - dwc_res = *res; - dwc_res.start += DWC3_GLOBALS_REGS_START; - - regs = devm_ioremap_resource(dev, &dwc_res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + ret = uk_intctlr_irq_fdt_xlat(dtb, offs, 0, &irq); + if (unlikely(ret < 0)) + return -EINVAL; - dwc->regs = regs; - dwc->regs_size = resource_size(&dwc_res); + uk_intctlr_irq_set_trigger(&irq); - dwc3_get_properties(dwc); + fdt_get_address(dtb, offs, 0, ®_base, ®_size); - dwc->reset = devm_reset_control_array_get_optional_shared(dev); - if (IS_ERR(dwc->reset)) - return PTR_ERR(dwc->reset); + pdev->base = reg_base; + pdev->size = reg_size; + pdev->irq = irq.id; - if (dev->of_node) { - ret = devm_clk_bulk_get_all(dev, &dwc->clks); - if (ret == -EPROBE_DEFER) - return ret; - /* - * Clocks are optional, but new DT platforms should support all - * clocks as required by the DT-binding. 
- */ - if (ret < 0) - dwc->num_clks = 0; - else - dwc->num_clks = ret; + // res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + // if (!res) { + // uk_pr_err(dev, "missing memory resource\n"); + // return -ENODEV; + // } - } + // dwc->xhci_resources[0].start = res->start; + // dwc->xhci_resources[0].end = + // dwc->xhci_resources[0].start + DWC3_XHCI_REGS_END; + // dwc->xhci_resources[0].flags = res->flags; + // dwc->xhci_resources[0].name = res->name; - ret = reset_control_deassert(dwc->reset); - if (ret) - return ret; - - ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks); - if (ret) - goto assert_reset; + /* + * Request memory region but exclude xHCI regs, + * since it will be requested by the xhci-plat driver. + */ + // dwc_res = *res; + // dwc_res.start += DWC3_GLOBALS_REGS_START; + + // regs = devm_ioremap_resource(dev, &dwc_res); + dwc->regs = reg_base + DWC3_GLOBALS_REGS_START; + dwc->regs_size = reg_size; + // if (IS_ERR(regs)) + // return PTR_ERR(regs); + + // dwc->regs = regs; + // dwc->regs_size = resource_size(&dwc_res); + + dwc3_get_properties(dtb, offs, dwc); + + /* skip reset/clk sub-system */ + // dwc->reset = devm_reset_control_array_get_optional_shared(dev); + // if (IS_ERR(dwc->reset)) + // return PTR_ERR(dwc->reset); + + // if (dev->of_node) { + // ret = devm_clk_bulk_get_all(dev, &dwc->clks); + // if (ret == -EPROBE_DEFER) + // return ret; + // /* + // * Clocks are optional, but new DT platforms should support all + // * clocks as required by the DT-binding. + // */ + // if (ret < 0) + // dwc->num_clks = 0; + // else + // dwc->num_clks = ret; + + // } + + // ret = reset_control_deassert(dwc->reset); + // if (ret) + // return ret; + + // ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks); + // if (ret) + // goto assert_reset; if (!dwc3_core_is_valid(dwc)) { - dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); + uk_pr_err("this is not a DesignWare USB3 DRD Core\n"); ret = -ENODEV; - goto disable_clks; + // goto disable_clks; + return ret; } - platform_set_drvdata(pdev, dwc); + // platform_set_drvdata(pdev, dwc); dwc3_cache_hwparams(dwc); - spin_lock_init(&dwc->lock); - mutex_init(&dwc->mutex); + uk_spin_init(&dwc->lock); + uk_mutex_init(&dwc->mutex); - pm_runtime_set_active(dev); - pm_runtime_use_autosuspend(dev); - pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto err1; + /* skip pm sub-system */ + // pm_runtime_set_active(dev); + // pm_runtime_use_autosuspend(dev); + // pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); + // pm_runtime_enable(dev); + // ret = pm_runtime_get_sync(dev); + // if (ret < 0) + // goto err1; - pm_runtime_forbid(dev); + // pm_runtime_forbid(dev); - ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE); - if (ret) { - dev_err(dwc->dev, "failed to allocate event buffers\n"); - ret = -ENOMEM; - goto err2; - } + // ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE); + // if (ret) { + // dev_err(dwc->dev, "failed to allocate event buffers\n"); + // ret = -ENOMEM; + // goto err2; + // } ret = dwc3_get_dr_mode(dwc); if (ret) - goto err3; + return -1; + // goto err3; - ret = dwc3_alloc_scratch_buffers(dwc); - if (ret) - goto err3; + // ret = dwc3_alloc_scratch_buffers(dwc); + // if (ret) + // goto err3; ret = dwc3_core_init(dwc); if (ret) { - dev_err_probe(dev, ret, "failed to initialize core\n"); - goto err4; + uk_pr_err("failed to initialize core\n"); + // goto err4; + return ret; } 
dwc3_check_params(dwc); - dwc3_debugfs_init(dwc); + // dwc3_debugfs_init(dwc); ret = dwc3_core_init_mode(dwc); if (ret) - goto err5; + // goto err5; + return ret; - dwc3_set_ncr(dwc); + dwc3_set_ncr(dtb, offs, dwc); - pm_runtime_put(dev); + // pm_runtime_put(dev); return 0; -err5: - dwc3_debugfs_exit(dwc); - dwc3_event_buffers_cleanup(dwc); + // err5: + // dwc3_debugfs_exit(dwc); + // dwc3_event_buffers_cleanup(dwc); - usb_phy_shutdown(dwc->usb2_phy); - usb_phy_shutdown(dwc->usb3_phy); - phy_exit(dwc->usb2_generic_phy); - phy_exit(dwc->usb3_generic_phy); + // usb_phy_shutdown(dwc->usb2_phy); + // usb_phy_shutdown(dwc->usb3_phy); + // phy_exit(dwc->usb2_generic_phy); + // phy_exit(dwc->usb3_generic_phy); - usb_phy_set_suspend(dwc->usb2_phy, 1); - usb_phy_set_suspend(dwc->usb3_phy, 1); - phy_power_off(dwc->usb2_generic_phy); - phy_power_off(dwc->usb3_generic_phy); + // usb_phy_set_suspend(dwc->usb2_phy, 1); + // usb_phy_set_suspend(dwc->usb3_phy, 1); + // phy_power_off(dwc->usb2_generic_phy); + // phy_power_off(dwc->usb3_generic_phy); - dwc3_ulpi_exit(dwc); + // dwc3_ulpi_exit(dwc); -err4: - dwc3_free_scratch_buffers(dwc); + // err4: + // dwc3_free_scratch_buffers(dwc); -err3: - dwc3_free_event_buffers(dwc); + // err3: + // dwc3_free_event_buffers(dwc); -err2: - pm_runtime_allow(&pdev->dev); + // err2: + // pm_runtime_allow(&pdev->dev); -err1: - pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); + // err1: + // pm_runtime_put_sync(&pdev->dev); + // pm_runtime_disable(&pdev->dev); -disable_clks: - clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); -assert_reset: - reset_control_assert(dwc->reset); + // disable_clks: + // clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); + // assert_reset: + // reset_control_assert(dwc->reset); - if (dwc->usb_psy) - power_supply_put(dwc->usb_psy); + // if (dwc->usb_psy) + // power_supply_put(dwc->usb_psy); - return ret; + // return ret; } -static int dwc3_remove(struct platform_device *pdev) -{ - struct dwc3 *dwc = platform_get_drvdata(pdev); +#if 0 +static int dwc3_remove(struct pf_device *pdev) { + struct dwc3 *dwc = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); @@ -1726,7 +1862,7 @@ static int dwc3_remove(struct platform_device *pdev) if (dwc->usb_psy) power_supply_put(dwc->usb_psy); - return 0; + return 0; } #ifdef CONFIG_PM @@ -2080,3 +2216,4 @@ MODULE_ALIAS("platform:dwc3"); MODULE_AUTHOR("Felipe Balbi "); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver"); +#endif \ No newline at end of file diff --git a/usb/dwc3/core.h b/usb/dwc3/core.h index 9ce3fa0..344c028 100644 --- a/usb/dwc3/core.h +++ b/usb/dwc3/core.h @@ -11,30 +11,47 @@ #ifndef __DRIVERS_USB_DWC3_CORE_H #define __DRIVERS_USB_DWC3_CORE_H -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include - -#include - -#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include + +// #include + +// #include + +// #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long u64; +typedef u64 dma_addr_t; +# define __iomem __attribute__((noderef, address_space(__iomem))) #define 
DWC3_MSG_MAX 500 @@ -72,7 +89,7 @@ #define DWC3_OTG_ROLE_DEVICE 2 #define DWC3_GEVNTCOUNT_MASK 0xfffc -#define DWC3_GEVNTCOUNT_EHB BIT(31) +#define DWC3_GEVNTCOUNT_EHB UK_BIT(31) #define DWC3_GSNPSID_MASK 0xffff0000 #define DWC3_GSNPSREV_MASK 0xffff #define DWC3_GSNPS_ID(p) (((p) & DWC3_GSNPSID_MASK) >> 16) @@ -180,14 +197,14 @@ #define DWC3_THANG_NCR_INTR 0xE080 #define DWC3_THANG_NCR_CTRL_2 0xE018 #define DWC3_THANG_NCR_CTRL_5 0xE024 -#define U3DRD_NCR_INTR_IDCHG (BIT(7)) -#define U3DRD_NCR_CTRL_2_SW_RESET_WAKEUP (BIT(31)) -#define U3DRD_NCR_CTRL_2_UTMI_SUSPENDM_OVRD_EN (BIT(0)) -#define U3DRD_NCR_CTRL_5_IDCHGINTCTL (BIT(12) | BIT(13)) +#define U3DRD_NCR_INTR_IDCHG (UK_BIT(7)) +#define U3DRD_NCR_CTRL_2_SW_RESET_WAKEUP (UK_BIT(31)) +#define U3DRD_NCR_CTRL_2_UTMI_SUSPENDM_OVRD_EN (UK_BIT(0)) +#define U3DRD_NCR_CTRL_5_IDCHGINTCTL (UK_BIT(12) | UK_BIT(13)) /* Phy NCR Registers */ #define USB_PHY_NCR_STS0 0x10080 -#define USB_PHY_NCR_STS0_IDDIG (BIT(8)) +#define USB_PHY_NCR_STS0_IDDIG (UK_BIT(8)) /* Bit fields */ @@ -203,7 +220,7 @@ #define DWC3_GSBUSCFG0_INCRBRST_MASK 0xff /* Global Debug LSP MUX Select */ -#define DWC3_GDBGLSPMUX_ENDBC BIT(15) /* Host only */ +#define DWC3_GDBGLSPMUX_ENDBC UK_BIT(15) /* Host only */ #define DWC3_GDBGLSPMUX_HOSTSELECT(n) ((n) & 0x3fff) #define DWC3_GDBGLSPMUX_DEVSELECT(n) (((n) & 0xf) << 4) #define DWC3_GDBGLSPMUX_EPSELECT(n) ((n) & 0xf) @@ -226,31 +243,31 @@ /* Global RX Threshold Configuration Register */ #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24) -#define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29) +#define DWC3_GRXTHRCFG_PKTCNTSEL UK_BIT(29) /* Global RX Threshold Configuration Register for DWC_usb31 only */ #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16) #define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21) -#define DWC31_GRXTHRCFG_PKTCNTSEL BIT(26) -#define DWC31_RXTHRNUMPKTSEL_HS_PRD BIT(15) +#define DWC31_GRXTHRCFG_PKTCNTSEL UK_BIT(26) +#define DWC31_RXTHRNUMPKTSEL_HS_PRD UK_BIT(15) #define DWC31_RXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13) -#define DWC31_RXTHRNUMPKTSEL_PRD BIT(10) +#define DWC31_RXTHRNUMPKTSEL_PRD UK_BIT(10) #define DWC31_RXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5) #define DWC31_MAXRXBURSTSIZE_PRD(n) ((n) & 0x1f) /* Global TX Threshold Configuration Register for DWC_usb31 only */ #define DWC31_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0x1f) << 16) #define DWC31_GTXTHRCFG_TXPKTCNT(n) (((n) & 0x1f) << 21) -#define DWC31_GTXTHRCFG_PKTCNTSEL BIT(26) -#define DWC31_TXTHRNUMPKTSEL_HS_PRD BIT(15) +#define DWC31_GTXTHRCFG_PKTCNTSEL UK_BIT(26) +#define DWC31_TXTHRNUMPKTSEL_HS_PRD UK_BIT(15) #define DWC31_TXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13) -#define DWC31_TXTHRNUMPKTSEL_PRD BIT(10) +#define DWC31_TXTHRNUMPKTSEL_PRD UK_BIT(10) #define DWC31_TXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5) #define DWC31_MAXTXBURSTSIZE_PRD(n) ((n) & 0x1f) /* Global Configuration Register */ #define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19) -#define DWC3_GCTL_U2RSTECN BIT(16) +#define DWC3_GCTL_U2RSTECN UK_BIT(16) #define DWC3_GCTL_RAMCLKSEL(x) (((x) & DWC3_GCTL_CLK_MASK) << 6) #define DWC3_GCTL_CLK_BUS (0) #define DWC3_GCTL_CLK_PIPE (1) @@ -263,42 +280,42 @@ #define DWC3_GCTL_PRTCAP_DEVICE 2 #define DWC3_GCTL_PRTCAP_OTG 3 -#define DWC3_GCTL_CORESOFTRESET BIT(11) -#define DWC3_GCTL_SOFITPSYNC BIT(10) +#define DWC3_GCTL_CORESOFTRESET UK_BIT(11) +#define DWC3_GCTL_SOFITPSYNC UK_BIT(10) #define DWC3_GCTL_SCALEDOWN(n) ((n) << 4) #define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3) -#define 
DWC3_GCTL_DISSCRAMBLE BIT(3) -#define DWC3_GCTL_U2EXIT_LFPS BIT(2) -#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1) -#define DWC3_GCTL_DSBLCLKGTNG BIT(0) +#define DWC3_GCTL_DISSCRAMBLE UK_BIT(3) +#define DWC3_GCTL_U2EXIT_LFPS UK_BIT(2) +#define DWC3_GCTL_GBLHIBERNATIONEN UK_BIT(1) +#define DWC3_GCTL_DSBLCLKGTNG UK_BIT(0) /* Global User Control Register */ -#define DWC3_GUCTL_HSTINAUTORETRY BIT(14) +#define DWC3_GUCTL_HSTINAUTORETRY UK_BIT(14) /* Global User Control 1 Register */ -#define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT BIT(31) -#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) -#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24) -#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) +#define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT UK_BIT(31) +#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS UK_BIT(28) +#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW UK_BIT(24) +#define DWC3_GUCTL1_PARKMODE_DISABLE_SS UK_BIT(17) /* Global Status Register */ -#define DWC3_GSTS_OTG_IP BIT(10) -#define DWC3_GSTS_BC_IP BIT(9) -#define DWC3_GSTS_ADP_IP BIT(8) -#define DWC3_GSTS_HOST_IP BIT(7) -#define DWC3_GSTS_DEVICE_IP BIT(6) -#define DWC3_GSTS_CSR_TIMEOUT BIT(5) -#define DWC3_GSTS_BUS_ERR_ADDR_VLD BIT(4) +#define DWC3_GSTS_OTG_IP UK_BIT(10) +#define DWC3_GSTS_BC_IP UK_BIT(9) +#define DWC3_GSTS_ADP_IP UK_BIT(8) +#define DWC3_GSTS_HOST_IP UK_BIT(7) +#define DWC3_GSTS_DEVICE_IP UK_BIT(6) +#define DWC3_GSTS_CSR_TIMEOUT UK_BIT(5) +#define DWC3_GSTS_BUS_ERR_ADDR_VLD UK_BIT(4) #define DWC3_GSTS_CURMOD(n) ((n) & 0x3) #define DWC3_GSTS_CURMOD_DEVICE 0 #define DWC3_GSTS_CURMOD_HOST 1 /* Global USB2 PHY Configuration Register */ -#define DWC3_GUSB2PHYCFG_PHYSOFTRST BIT(31) -#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS BIT(30) -#define DWC3_GUSB2PHYCFG_SUSPHY BIT(6) -#define DWC3_GUSB2PHYCFG_ULPI_UTMI BIT(4) -#define DWC3_GUSB2PHYCFG_ENBLSLPM BIT(8) +#define DWC3_GUSB2PHYCFG_PHYSOFTRST UK_BIT(31) +#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS UK_BIT(30) +#define DWC3_GUSB2PHYCFG_SUSPHY UK_BIT(6) +#define DWC3_GUSB2PHYCFG_ULPI_UTMI UK_BIT(4) +#define DWC3_GUSB2PHYCFG_ENBLSLPM UK_BIT(8) #define DWC3_GUSB2PHYCFG_PHYIF(n) (n << 3) #define DWC3_GUSB2PHYCFG_PHYIF_MASK DWC3_GUSB2PHYCFG_PHYIF(1) #define DWC3_GUSB2PHYCFG_USBTRDTIM(n) (n << 10) @@ -309,32 +326,32 @@ #define UTMI_PHYIF_8_BIT 0 /* Global USB2 PHY Vendor Control Register */ -#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25) -#define DWC3_GUSB2PHYACC_DONE BIT(24) -#define DWC3_GUSB2PHYACC_BUSY BIT(23) -#define DWC3_GUSB2PHYACC_WRITE BIT(22) +#define DWC3_GUSB2PHYACC_NEWREGREQ UK_BIT(25) +#define DWC3_GUSB2PHYACC_DONE UK_BIT(24) +#define DWC3_GUSB2PHYACC_BUSY UK_BIT(23) +#define DWC3_GUSB2PHYACC_WRITE UK_BIT(22) #define DWC3_GUSB2PHYACC_ADDR(n) (n << 16) #define DWC3_GUSB2PHYACC_EXTEND_ADDR(n) (n << 8) #define DWC3_GUSB2PHYACC_DATA(n) (n & 0xff) /* Global USB3 PIPE Control Register */ -#define DWC3_GUSB3PIPECTL_PHYSOFTRST BIT(31) -#define DWC3_GUSB3PIPECTL_U2SSINP3OK BIT(29) -#define DWC3_GUSB3PIPECTL_DISRXDETINP3 BIT(28) -#define DWC3_GUSB3PIPECTL_UX_EXIT_PX BIT(27) -#define DWC3_GUSB3PIPECTL_REQP1P2P3 BIT(24) +#define DWC3_GUSB3PIPECTL_PHYSOFTRST UK_BIT(31) +#define DWC3_GUSB3PIPECTL_U2SSINP3OK UK_BIT(29) +#define DWC3_GUSB3PIPECTL_DISRXDETINP3 UK_BIT(28) +#define DWC3_GUSB3PIPECTL_UX_EXIT_PX UK_BIT(27) +#define DWC3_GUSB3PIPECTL_REQP1P2P3 UK_BIT(24) #define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19) #define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7) #define DWC3_GUSB3PIPECTL_DEP1P2P3_EN DWC3_GUSB3PIPECTL_DEP1P2P3(1) -#define DWC3_GUSB3PIPECTL_DEPOCHANGE BIT(18) -#define DWC3_GUSB3PIPECTL_SUSPHY 
BIT(17) -#define DWC3_GUSB3PIPECTL_LFPSFILT BIT(9) -#define DWC3_GUSB3PIPECTL_RX_DETOPOLL BIT(8) +#define DWC3_GUSB3PIPECTL_DEPOCHANGE UK_BIT(18) +#define DWC3_GUSB3PIPECTL_SUSPHY UK_BIT(17) +#define DWC3_GUSB3PIPECTL_LFPSFILT UK_BIT(9) +#define DWC3_GUSB3PIPECTL_RX_DETOPOLL UK_BIT(8) #define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK DWC3_GUSB3PIPECTL_TX_DEEPH(3) #define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1) /* Global TX Fifo Size Register */ -#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */ +#define DWC31_GTXFIFOSIZ_TXFRAMNUM UK_BIT(15) /* DWC_usb31 only */ #define DWC31_GTXFIFOSIZ_TXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ #define DWC3_GTXFIFOSIZ_TXFDEP(n) ((n) & 0xffff) #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) @@ -344,7 +361,7 @@ #define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff) /* Global Event Size Registers */ -#define DWC3_GEVNTSIZ_INTMASK BIT(31) +#define DWC3_GEVNTSIZ_INTMASK UK_BIT(31) #define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff) /* Global HWPARAMS0 Register */ @@ -365,7 +382,7 @@ #define DWC3_GHWPARAMS1_EN_PWROPT_HIB 2 #define DWC3_GHWPARAMS1_PWROPT(n) ((n) << 24) #define DWC3_GHWPARAMS1_PWROPT_MASK DWC3_GHWPARAMS1_PWROPT(3) -#define DWC3_GHWPARAMS1_ENDBC BIT(31) +#define DWC3_GHWPARAMS1_ENDBC UK_BIT(31) /* Global HWPARAMS3 Register */ #define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3) @@ -386,12 +403,12 @@ #define DWC3_MAX_HIBER_SCRATCHBUFS 15 /* Global HWPARAMS6 Register */ -#define DWC3_GHWPARAMS6_BCSUPPORT BIT(14) -#define DWC3_GHWPARAMS6_OTG3SUPPORT BIT(13) -#define DWC3_GHWPARAMS6_ADPSUPPORT BIT(12) -#define DWC3_GHWPARAMS6_HNPSUPPORT BIT(11) -#define DWC3_GHWPARAMS6_SRPSUPPORT BIT(10) -#define DWC3_GHWPARAMS6_EN_FPGA BIT(7) +#define DWC3_GHWPARAMS6_BCSUPPORT UK_BIT(14) +#define DWC3_GHWPARAMS6_OTG3SUPPORT UK_BIT(13) +#define DWC3_GHWPARAMS6_ADPSUPPORT UK_BIT(12) +#define DWC3_GHWPARAMS6_HNPSUPPORT UK_BIT(11) +#define DWC3_GHWPARAMS6_SRPSUPPORT UK_BIT(10) +#define DWC3_GHWPARAMS6_EN_FPGA UK_BIT(7) /* DWC_usb32 only */ #define DWC3_GHWPARAMS6_MDWIDTH(n) ((n) & (0x3 << 8)) @@ -401,17 +418,17 @@ #define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff) /* Global HWPARAMS9 Register */ -#define DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS BIT(0) +#define DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS UK_BIT(0) /* Global Frame Length Adjustment Register */ -#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7) +#define DWC3_GFLADJ_30MHZ_SDBND_SEL UK_BIT(7) #define DWC3_GFLADJ_30MHZ_MASK 0x3f /* Global User Control Register 2 */ -#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) +#define DWC3_GUCTL2_RST_ACTBITLATER UK_BIT(14) /* Global User Control Register 3 */ -#define DWC3_GUCTL3_SPLITDISABLE BIT(14) +#define DWC3_GUCTL3_SPLITDISABLE UK_BIT(14) /* Device Configuration Register */ #define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */ @@ -423,24 +440,24 @@ #define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ #define DWC3_DCFG_SUPERSPEED (4 << 0) #define DWC3_DCFG_HIGHSPEED (0 << 0) -#define DWC3_DCFG_FULLSPEED BIT(0) +#define DWC3_DCFG_FULLSPEED UK_BIT(0) #define DWC3_DCFG_LOWSPEED (2 << 0) #define DWC3_DCFG_NUMP_SHIFT 17 #define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f) #define DWC3_DCFG_NUMP_MASK (0x1f << DWC3_DCFG_NUMP_SHIFT) -#define DWC3_DCFG_LPM_CAP BIT(22) -#define DWC3_DCFG_IGNSTRMPP BIT(23) +#define DWC3_DCFG_LPM_CAP UK_BIT(22) +#define DWC3_DCFG_IGNSTRMPP UK_BIT(23) /* Device Control Register */ -#define DWC3_DCTL_RUN_STOP BIT(31) -#define DWC3_DCTL_CSFTRST BIT(30) -#define DWC3_DCTL_LSFTRST BIT(29) +#define DWC3_DCTL_RUN_STOP UK_BIT(31) 
+#define DWC3_DCTL_CSFTRST UK_BIT(30) +#define DWC3_DCTL_LSFTRST UK_BIT(29) #define DWC3_DCTL_HIRD_THRES_MASK (0x1f << 24) #define DWC3_DCTL_HIRD_THRES(n) ((n) << 24) -#define DWC3_DCTL_APPL1RES BIT(23) +#define DWC3_DCTL_APPL1RES UK_BIT(23) /* These apply for core versions 1.87a and earlier */ #define DWC3_DCTL_TRGTULST_MASK (0x0f << 17) @@ -454,15 +471,15 @@ /* These apply for core versions 1.94a and later */ #define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20) -#define DWC3_DCTL_KEEP_CONNECT BIT(19) -#define DWC3_DCTL_L1_HIBER_EN BIT(18) -#define DWC3_DCTL_CRS BIT(17) -#define DWC3_DCTL_CSS BIT(16) +#define DWC3_DCTL_KEEP_CONNECT UK_BIT(19) +#define DWC3_DCTL_L1_HIBER_EN UK_BIT(18) +#define DWC3_DCTL_CRS UK_BIT(17) +#define DWC3_DCTL_CSS UK_BIT(16) -#define DWC3_DCTL_INITU2ENA BIT(12) -#define DWC3_DCTL_ACCEPTU2ENA BIT(11) -#define DWC3_DCTL_INITU1ENA BIT(10) -#define DWC3_DCTL_ACCEPTU1ENA BIT(9) +#define DWC3_DCTL_INITU2ENA UK_BIT(12) +#define DWC3_DCTL_ACCEPTU2ENA UK_BIT(11) +#define DWC3_DCTL_INITU1ENA UK_BIT(10) +#define DWC3_DCTL_ACCEPTU1ENA UK_BIT(9) #define DWC3_DCTL_TSTCTRL_MASK (0xf << 1) #define DWC3_DCTL_ULSTCHNGREQ_MASK (0x0f << 5) @@ -477,38 +494,38 @@ #define DWC3_DCTL_ULSTCHNG_LOOPBACK (DWC3_DCTL_ULSTCHNGREQ(11)) /* Device Event Enable Register */ -#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN BIT(12) -#define DWC3_DEVTEN_EVNTOVERFLOWEN BIT(11) -#define DWC3_DEVTEN_CMDCMPLTEN BIT(10) -#define DWC3_DEVTEN_ERRTICERREN BIT(9) -#define DWC3_DEVTEN_SOFEN BIT(7) -#define DWC3_DEVTEN_U3L2L1SUSPEN BIT(6) -#define DWC3_DEVTEN_HIBERNATIONREQEVTEN BIT(5) -#define DWC3_DEVTEN_WKUPEVTEN BIT(4) -#define DWC3_DEVTEN_ULSTCNGEN BIT(3) -#define DWC3_DEVTEN_CONNECTDONEEN BIT(2) -#define DWC3_DEVTEN_USBRSTEN BIT(1) -#define DWC3_DEVTEN_DISCONNEVTEN BIT(0) +#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN UK_BIT(12) +#define DWC3_DEVTEN_EVNTOVERFLOWEN UK_BIT(11) +#define DWC3_DEVTEN_CMDCMPLTEN UK_BIT(10) +#define DWC3_DEVTEN_ERRTICERREN UK_BIT(9) +#define DWC3_DEVTEN_SOFEN UK_BIT(7) +#define DWC3_DEVTEN_U3L2L1SUSPEN UK_BIT(6) +#define DWC3_DEVTEN_HIBERNATIONREQEVTEN UK_BIT(5) +#define DWC3_DEVTEN_WKUPEVTEN UK_BIT(4) +#define DWC3_DEVTEN_ULSTCNGEN UK_BIT(3) +#define DWC3_DEVTEN_CONNECTDONEEN UK_BIT(2) +#define DWC3_DEVTEN_USBRSTEN UK_BIT(1) +#define DWC3_DEVTEN_DISCONNEVTEN UK_BIT(0) #define DWC3_DSTS_CONNLANES(n) (((n) >> 30) & 0x3) /* DWC_usb32 only */ /* Device Status Register */ -#define DWC3_DSTS_DCNRD BIT(29) +#define DWC3_DSTS_DCNRD UK_BIT(29) /* This applies for core versions 1.87a and earlier */ -#define DWC3_DSTS_PWRUPREQ BIT(24) +#define DWC3_DSTS_PWRUPREQ UK_BIT(24) /* These apply for core versions 1.94a and later */ -#define DWC3_DSTS_RSS BIT(25) -#define DWC3_DSTS_SSS BIT(24) +#define DWC3_DSTS_RSS UK_BIT(25) +#define DWC3_DSTS_SSS UK_BIT(24) -#define DWC3_DSTS_COREIDLE BIT(23) -#define DWC3_DSTS_DEVCTRLHLT BIT(22) +#define DWC3_DSTS_COREIDLE UK_BIT(23) +#define DWC3_DSTS_DEVCTRLHLT UK_BIT(22) #define DWC3_DSTS_USBLNKST_MASK (0x0f << 18) #define DWC3_DSTS_USBLNKST(n) (((n) & DWC3_DSTS_USBLNKST_MASK) >> 18) -#define DWC3_DSTS_RXFIFOEMPTY BIT(17) +#define DWC3_DSTS_RXFIFOEMPTY UK_BIT(17) #define DWC3_DSTS_SOFFN_MASK (0x3fff << 3) #define DWC3_DSTS_SOFFN(n) (((n) & DWC3_DSTS_SOFFN_MASK) >> 3) @@ -518,7 +535,7 @@ #define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ #define DWC3_DSTS_SUPERSPEED (4 << 0) #define DWC3_DSTS_HIGHSPEED (0 << 0) -#define DWC3_DSTS_FULLSPEED BIT(0) +#define DWC3_DSTS_FULLSPEED UK_BIT(0) #define DWC3_DSTS_LOWSPEED (2 << 0) /* Device Generic Command Register */ @@ 
-537,26 +554,26 @@ #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10 #define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F) -#define DWC3_DGCMD_CMDACT BIT(10) -#define DWC3_DGCMD_CMDIOC BIT(8) +#define DWC3_DGCMD_CMDACT UK_BIT(10) +#define DWC3_DGCMD_CMDIOC UK_BIT(8) /* Device Generic Command Parameter Register */ -#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT BIT(0) +#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT UK_BIT(0) #define DWC3_DGCMDPAR_FIFO_NUM(n) ((n) << 0) #define DWC3_DGCMDPAR_RX_FIFO (0 << 5) -#define DWC3_DGCMDPAR_TX_FIFO BIT(5) +#define DWC3_DGCMDPAR_TX_FIFO UK_BIT(5) #define DWC3_DGCMDPAR_LOOPBACK_DIS (0 << 0) -#define DWC3_DGCMDPAR_LOOPBACK_ENA BIT(0) +#define DWC3_DGCMDPAR_LOOPBACK_ENA UK_BIT(0) /* Device Endpoint Command Register */ #define DWC3_DEPCMD_PARAM_SHIFT 16 #define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT) #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) #define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) -#define DWC3_DEPCMD_HIPRI_FORCERM BIT(11) -#define DWC3_DEPCMD_CLEARPENDIN BIT(11) -#define DWC3_DEPCMD_CMDACT BIT(10) -#define DWC3_DEPCMD_CMDIOC BIT(8) +#define DWC3_DEPCMD_HIPRI_FORCERM UK_BIT(11) +#define DWC3_DEPCMD_CLEARPENDIN UK_BIT(11) +#define DWC3_DEPCMD_CMDACT UK_BIT(10) +#define DWC3_DEPCMD_CMDIOC UK_BIT(8) #define DWC3_DEPCMD_DEPSTARTCFG (0x09 << 0) #define DWC3_DEPCMD_ENDTRANSFER (0x08 << 0) @@ -574,7 +591,7 @@ #define DWC3_DEPCMD_CMD(x) ((x) & 0xf) /* The EP number goes 0..31 so ep0 is always out and ep1 is always in */ -#define DWC3_DALEPENA_EP(n) BIT(n) +#define DWC3_DALEPENA_EP(n) UK_BIT(n) #define DWC3_DEPCMD_TYPE_CONTROL 0 #define DWC3_DEPCMD_TYPE_ISOC 1 @@ -587,72 +604,72 @@ #define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0) /* OTG Configuration Register */ -#define DWC3_OCFG_DISPWRCUTTOFF BIT(5) -#define DWC3_OCFG_HIBDISMASK BIT(4) -#define DWC3_OCFG_SFTRSTMASK BIT(3) -#define DWC3_OCFG_OTGVERSION BIT(2) -#define DWC3_OCFG_HNPCAP BIT(1) -#define DWC3_OCFG_SRPCAP BIT(0) +#define DWC3_OCFG_DISPWRCUTTOFF UK_BIT(5) +#define DWC3_OCFG_HIBDISMASK UK_BIT(4) +#define DWC3_OCFG_SFTRSTMASK UK_BIT(3) +#define DWC3_OCFG_OTGVERSION UK_BIT(2) +#define DWC3_OCFG_HNPCAP UK_BIT(1) +#define DWC3_OCFG_SRPCAP UK_BIT(0) /* OTG CTL Register */ -#define DWC3_OCTL_OTG3GOERR BIT(7) -#define DWC3_OCTL_PERIMODE BIT(6) -#define DWC3_OCTL_PRTPWRCTL BIT(5) -#define DWC3_OCTL_HNPREQ BIT(4) -#define DWC3_OCTL_SESREQ BIT(3) -#define DWC3_OCTL_TERMSELIDPULSE BIT(2) -#define DWC3_OCTL_DEVSETHNPEN BIT(1) -#define DWC3_OCTL_HSTSETHNPEN BIT(0) +#define DWC3_OCTL_OTG3GOERR UK_BIT(7) +#define DWC3_OCTL_PERIMODE UK_BIT(6) +#define DWC3_OCTL_PRTPWRCTL UK_BIT(5) +#define DWC3_OCTL_HNPREQ UK_BIT(4) +#define DWC3_OCTL_SESREQ UK_BIT(3) +#define DWC3_OCTL_TERMSELIDPULSE UK_BIT(2) +#define DWC3_OCTL_DEVSETHNPEN UK_BIT(1) +#define DWC3_OCTL_HSTSETHNPEN UK_BIT(0) /* OTG Event Register */ -#define DWC3_OEVT_DEVICEMODE BIT(31) -#define DWC3_OEVT_XHCIRUNSTPSET BIT(27) -#define DWC3_OEVT_DEVRUNSTPSET BIT(26) -#define DWC3_OEVT_HIBENTRY BIT(25) -#define DWC3_OEVT_CONIDSTSCHNG BIT(24) -#define DWC3_OEVT_HRRCONFNOTIF BIT(23) -#define DWC3_OEVT_HRRINITNOTIF BIT(22) -#define DWC3_OEVT_ADEVIDLE BIT(21) -#define DWC3_OEVT_ADEVBHOSTEND BIT(20) -#define DWC3_OEVT_ADEVHOST BIT(19) -#define DWC3_OEVT_ADEVHNPCHNG BIT(18) -#define DWC3_OEVT_ADEVSRPDET BIT(17) -#define DWC3_OEVT_ADEVSESSENDDET BIT(16) -#define DWC3_OEVT_BDEVBHOSTEND BIT(11) -#define DWC3_OEVT_BDEVHNPCHNG BIT(10) -#define DWC3_OEVT_BDEVSESSVLDDET BIT(9) -#define DWC3_OEVT_BDEVVBUSCHNG BIT(8) -#define DWC3_OEVT_BSESSVLD 
BIT(3) -#define DWC3_OEVT_HSTNEGSTS BIT(2) -#define DWC3_OEVT_SESREQSTS BIT(1) -#define DWC3_OEVT_ERROR BIT(0) +#define DWC3_OEVT_DEVICEMODE UK_BIT(31) +#define DWC3_OEVT_XHCIRUNSTPSET UK_BIT(27) +#define DWC3_OEVT_DEVRUNSTPSET UK_BIT(26) +#define DWC3_OEVT_HIBENTRY UK_BIT(25) +#define DWC3_OEVT_CONIDSTSCHNG UK_BIT(24) +#define DWC3_OEVT_HRRCONFNOTIF UK_BIT(23) +#define DWC3_OEVT_HRRINITNOTIF UK_BIT(22) +#define DWC3_OEVT_ADEVIDLE UK_BIT(21) +#define DWC3_OEVT_ADEVBHOSTEND UK_BIT(20) +#define DWC3_OEVT_ADEVHOST UK_BIT(19) +#define DWC3_OEVT_ADEVHNPCHNG UK_BIT(18) +#define DWC3_OEVT_ADEVSRPDET UK_BIT(17) +#define DWC3_OEVT_ADEVSESSENDDET UK_BIT(16) +#define DWC3_OEVT_BDEVBHOSTEND UK_BIT(11) +#define DWC3_OEVT_BDEVHNPCHNG UK_BIT(10) +#define DWC3_OEVT_BDEVSESSVLDDET UK_BIT(9) +#define DWC3_OEVT_BDEVVBUSCHNG UK_BIT(8) +#define DWC3_OEVT_BSESSVLD UK_BIT(3) +#define DWC3_OEVT_HSTNEGSTS UK_BIT(2) +#define DWC3_OEVT_SESREQSTS UK_BIT(1) +#define DWC3_OEVT_ERROR UK_BIT(0) /* OTG Event Enable Register */ -#define DWC3_OEVTEN_XHCIRUNSTPSETEN BIT(27) -#define DWC3_OEVTEN_DEVRUNSTPSETEN BIT(26) -#define DWC3_OEVTEN_HIBENTRYEN BIT(25) -#define DWC3_OEVTEN_CONIDSTSCHNGEN BIT(24) -#define DWC3_OEVTEN_HRRCONFNOTIFEN BIT(23) -#define DWC3_OEVTEN_HRRINITNOTIFEN BIT(22) -#define DWC3_OEVTEN_ADEVIDLEEN BIT(21) -#define DWC3_OEVTEN_ADEVBHOSTENDEN BIT(20) -#define DWC3_OEVTEN_ADEVHOSTEN BIT(19) -#define DWC3_OEVTEN_ADEVHNPCHNGEN BIT(18) -#define DWC3_OEVTEN_ADEVSRPDETEN BIT(17) -#define DWC3_OEVTEN_ADEVSESSENDDETEN BIT(16) -#define DWC3_OEVTEN_BDEVBHOSTENDEN BIT(11) -#define DWC3_OEVTEN_BDEVHNPCHNGEN BIT(10) -#define DWC3_OEVTEN_BDEVSESSVLDDETEN BIT(9) -#define DWC3_OEVTEN_BDEVVBUSCHNGEN BIT(8) +#define DWC3_OEVTEN_XHCIRUNSTPSETEN UK_BIT(27) +#define DWC3_OEVTEN_DEVRUNSTPSETEN UK_BIT(26) +#define DWC3_OEVTEN_HIBENTRYEN UK_BIT(25) +#define DWC3_OEVTEN_CONIDSTSCHNGEN UK_BIT(24) +#define DWC3_OEVTEN_HRRCONFNOTIFEN UK_BIT(23) +#define DWC3_OEVTEN_HRRINITNOTIFEN UK_BIT(22) +#define DWC3_OEVTEN_ADEVIDLEEN UK_BIT(21) +#define DWC3_OEVTEN_ADEVBHOSTENDEN UK_BIT(20) +#define DWC3_OEVTEN_ADEVHOSTEN UK_BIT(19) +#define DWC3_OEVTEN_ADEVHNPCHNGEN UK_BIT(18) +#define DWC3_OEVTEN_ADEVSRPDETEN UK_BIT(17) +#define DWC3_OEVTEN_ADEVSESSENDDETEN UK_BIT(16) +#define DWC3_OEVTEN_BDEVBHOSTENDEN UK_BIT(11) +#define DWC3_OEVTEN_BDEVHNPCHNGEN UK_BIT(10) +#define DWC3_OEVTEN_BDEVSESSVLDDETEN UK_BIT(9) +#define DWC3_OEVTEN_BDEVVBUSCHNGEN UK_BIT(8) /* OTG Status Register */ -#define DWC3_OSTS_DEVRUNSTP BIT(13) -#define DWC3_OSTS_XHCIRUNSTP BIT(12) -#define DWC3_OSTS_PERIPHERALSTATE BIT(4) -#define DWC3_OSTS_XHCIPRTPOWER BIT(3) -#define DWC3_OSTS_BSESVLD BIT(2) -#define DWC3_OSTS_VBUSVLD BIT(1) -#define DWC3_OSTS_CONIDSTS BIT(0) +#define DWC3_OSTS_DEVRUNSTP UK_BIT(13) +#define DWC3_OSTS_XHCIRUNSTP UK_BIT(12) +#define DWC3_OSTS_PERIPHERALSTATE UK_BIT(4) +#define DWC3_OSTS_XHCIPRTPOWER UK_BIT(3) +#define DWC3_OSTS_BSESVLD UK_BIT(2) +#define DWC3_OSTS_VBUSVLD UK_BIT(1) +#define DWC3_OSTS_CONIDSTS UK_BIT(0) /* Structures */ @@ -677,23 +694,24 @@ struct dwc3_event_buffer { unsigned int count; unsigned int flags; -#define DWC3_EVENT_PENDING BIT(0) +#define DWC3_EVENT_PENDING UK_BIT(0) dma_addr_t dma; struct dwc3 *dwc; - ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(1); }; -#define DWC3_EP_FLAG_STALLED BIT(0) -#define DWC3_EP_FLAG_WEDGED BIT(1) +#define DWC3_EP_FLAG_STALLED UK_BIT(0) +#define DWC3_EP_FLAG_WEDGED UK_BIT(1) #define DWC3_EP_DIRECTION_TX true #define DWC3_EP_DIRECTION_RX false #define DWC3_TRB_NUM 256 +#if 0 /** * struct dwc3_ep - 
device side endpoint representation * @endpoint: usb endpoint @@ -723,9 +741,9 @@ struct dwc3_event_buffer { */ struct dwc3_ep { struct usb_ep endpoint; - struct list_head cancelled_list; - struct list_head pending_list; - struct list_head started_list; + struct uk_list_head cancelled_list; + struct uk_list_head pending_list; + struct uk_list_head started_list; void __iomem *regs; @@ -735,22 +753,22 @@ struct dwc3_ep { u32 saved_state; unsigned int flags; -#define DWC3_EP_ENABLED BIT(0) -#define DWC3_EP_STALL BIT(1) -#define DWC3_EP_WEDGE BIT(2) -#define DWC3_EP_TRANSFER_STARTED BIT(3) -#define DWC3_EP_END_TRANSFER_PENDING BIT(4) -#define DWC3_EP_PENDING_REQUEST BIT(5) -#define DWC3_EP_DELAY_START BIT(6) -#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7) -#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8) -#define DWC3_EP_FORCE_RESTART_STREAM BIT(9) -#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10) -#define DWC3_EP_PENDING_CLEAR_STALL BIT(11) -#define DWC3_EP_TXFIFO_RESIZED BIT(12) +#define DWC3_EP_ENABLED UK_BIT(0) +#define DWC3_EP_STALL UK_BIT(1) +#define DWC3_EP_WEDGE UK_BIT(2) +#define DWC3_EP_TRANSFER_STARTED UK_BIT(3) +#define DWC3_EP_END_TRANSFER_PENDING UK_BIT(4) +#define DWC3_EP_PENDING_REQUEST UK_BIT(5) +#define DWC3_EP_DELAY_START UK_BIT(6) +#define DWC3_EP_WAIT_TRANSFER_COMPLETE UK_BIT(7) +#define DWC3_EP_IGNORE_NEXT_NOSTREAM UK_BIT(8) +#define DWC3_EP_FORCE_RESTART_STREAM UK_BIT(9) +#define DWC3_EP_FIRST_STREAM_PRIMED UK_BIT(10) +#define DWC3_EP_PENDING_CLEAR_STALL UK_BIT(11) +#define DWC3_EP_TXFIFO_RESIZED UK_BIT(12) /* This last one is specific to EP0 */ -#define DWC3_EP0_DIR_IN BIT(31) +#define DWC3_EP0_DIR_IN UK_BIT(31) /* * IMPORTANT: we *know* we have 256 TRBs in our @trb_pool, so we will @@ -779,10 +797,11 @@ struct dwc3_ep { u8 combo_num; int start_cmd_status; - ANDROID_KABI_RESERVE(1); - ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(2); }; + enum dwc3_phy { DWC3_PHY_UNKNOWN = 0, DWC3_PHY_USB3, @@ -821,7 +840,7 @@ enum dwc3_link_state { DWC3_LINK_STATE_RESUME = 0x0f, DWC3_LINK_STATE_MASK = 0x0f, }; - +#endif /* TRB Length, PCM and Status */ #define DWC3_TRB_SIZE_MASK (0x00ffffff) #define DWC3_TRB_SIZE_LENGTH(n) ((n) & DWC3_TRB_SIZE_MASK) @@ -834,13 +853,13 @@ enum dwc3_link_state { #define DWC3_TRB_STS_XFER_IN_PROG 4 /* TRB Control */ -#define DWC3_TRB_CTRL_HWO BIT(0) -#define DWC3_TRB_CTRL_LST BIT(1) -#define DWC3_TRB_CTRL_CHN BIT(2) -#define DWC3_TRB_CTRL_CSP BIT(3) +#define DWC3_TRB_CTRL_HWO UK_BIT(0) +#define DWC3_TRB_CTRL_LST UK_BIT(1) +#define DWC3_TRB_CTRL_CHN UK_BIT(2) +#define DWC3_TRB_CTRL_CSP UK_BIT(3) #define DWC3_TRB_CTRL_TRBCTL(n) (((n) & 0x3f) << 4) -#define DWC3_TRB_CTRL_ISP_IMI BIT(10) -#define DWC3_TRB_CTRL_IOC BIT(11) +#define DWC3_TRB_CTRL_ISP_IMI UK_BIT(10) +#define DWC3_TRB_CTRL_IOC UK_BIT(11) #define DWC3_TRB_CTRL_SID_SOFN(n) (((n) & 0xffff) << 14) #define DWC3_TRB_CTRL_GET_SID_SOFN(n) (((n) & (0xffff << 14)) >> 14) @@ -893,8 +912,8 @@ struct dwc3_hwparams { u32 hwparams8; u32 hwparams9; - ANDROID_KABI_RESERVE(1); - ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(2); }; /* HWPARAMS0 */ @@ -914,6 +933,7 @@ struct dwc3_hwparams { /* HWPARAMS7 */ #define DWC3_RAM1_DEPTH(n) ((n) & 0xffff) +#if 0 /** * struct dwc3_request - representation of a transfer request * @request: struct usb_request to be transferred @@ -936,7 +956,7 @@ struct dwc3_hwparams { */ struct dwc3_request { struct usb_request request; - struct list_head list; + struct uk_list_head list; struct dwc3_ep *dep; struct scatterlist *sg; 
struct scatterlist *start_sg; @@ -964,8 +984,8 @@ struct dwc3_request { unsigned int direction:1; unsigned int mapped:1; - ANDROID_KABI_RESERVE(1); - ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(2); }; /* @@ -975,6 +995,39 @@ struct dwc3_request { struct dwc3_scratchpad_array { __le64 dma_adr[DWC3_MAX_HIBER_SCRATCHBUFS]; }; +#endif + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN, + USB_DR_MODE_HOST, + USB_DR_MODE_PERIPHERAL, + USB_DR_MODE_OTG, +}; + +enum usb_phy_interface { + USBPHY_INTERFACE_MODE_UNKNOWN, + USBPHY_INTERFACE_MODE_UTMI, + USBPHY_INTERFACE_MODE_UTMIW, + USBPHY_INTERFACE_MODE_ULPI, + USBPHY_INTERFACE_MODE_SERIAL, + USBPHY_INTERFACE_MODE_HSIC, +}; + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, /* enumerating */ + USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */ + USB_SPEED_HIGH, /* usb 2.0 */ + USB_SPEED_WIRELESS, /* wireless (usb 2.5) */ + USB_SPEED_SUPER, /* usb 3.0 */ + USB_SPEED_SUPER_PLUS, /* usb 3.1 */ +}; + +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1, + USB_SSP_GEN_1x2, + USB_SSP_GEN_2x2, +}; /** * struct dwc3 - representation of our controller @@ -1124,50 +1177,51 @@ struct dwc3_scratchpad_array { * fifo resized. */ struct dwc3 { - struct work_struct drd_work; - struct dwc3_trb *ep0_trb; - void *bounce; - void *scratchbuf; - u8 *setup_buf; - dma_addr_t ep0_trb_addr; - dma_addr_t bounce_addr; - dma_addr_t scratch_addr; - struct dwc3_request ep0_usb_req; - struct completion ep0_in_setup; + // struct work_struct drd_work; + // struct dwc3_trb *ep0_trb; + // void *bounce; + // void *scratchbuf; + // u8 *setup_buf; + // dma_addr_t ep0_trb_addr; + // dma_addr_t bounce_addr; + // dma_addr_t scratch_addr; + // struct dwc3_request ep0_usb_req; + // struct completion ep0_in_setup; /* device lock */ - spinlock_t lock; + uk_spinlock lock; /* mode switching lock */ - struct mutex mutex; + struct uk_mutex mutex; + + // struct device *dev; + // struct device *sysdev; - struct device *dev; - struct device *sysdev; + struct pf_device *xhci; - struct platform_device *xhci; - struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM]; + // struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM]; - struct dwc3_event_buffer *ev_buf; - struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM]; + // struct dwc3_event_buffer *ev_buf; + // struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM]; - struct usb_gadget *gadget; - struct usb_gadget_driver *gadget_driver; + // struct usb_gadget *gadget; + // struct usb_gadget_driver *gadget_driver; - struct clk_bulk_data *clks; - int num_clks; + // struct clk_bulk_data *clks; + // int num_clks; - struct reset_control *reset; + // struct reset_control *reset; - struct usb_phy *usb2_phy; - struct usb_phy *usb3_phy; + // struct usb_phy *usb2_phy; + // struct usb_phy *usb3_phy; - struct phy *usb2_generic_phy; - struct phy *usb3_generic_phy; + // struct phy *usb2_generic_phy; + // struct phy *usb3_generic_phy; bool phys_ready; - struct ulpi *ulpi; - bool ulpi_ready; + // struct ulpi *ulpi; + // bool ulpi_ready; void __iomem *regs; size_t regs_size; @@ -1177,13 +1231,13 @@ struct dwc3 { enum usb_dr_mode dr_mode; u32 current_dr_role; u32 desired_dr_role; - struct extcon_dev *edev; - struct notifier_block edev_nb; + // struct extcon_dev *edev; + // struct notifier_block edev_nb; enum usb_phy_interface hsphy_mode; - struct usb_role_switch *role_sw; + // struct usb_role_switch *role_sw; enum usb_dr_mode role_switch_default_mode; - struct power_supply *usb_psy; + // struct power_supply *usb_psy; u32 fladj; u32 irq_gadget; @@ -1196,7 +1250,7 
@@ struct dwc3 { u32 maximum_speed; u32 gadget_max_speed; enum usb_ssp_rate max_ssp_rate; - enum usb_ssp_rate gadget_ssp_rate; + // enum usb_ssp_rate gadget_ssp_rate; u32 ip; @@ -1252,9 +1306,9 @@ struct dwc3 { #define DWC31_VERSIONTYPE_EA05 0x65613035 #define DWC31_VERSIONTYPE_EA06 0x65613036 - enum dwc3_ep0_next ep0_next_event; - enum dwc3_ep0_state ep0state; - enum dwc3_link_state link_state; + // enum dwc3_ep0_next ep0_next_event; + // enum dwc3_ep0_state ep0state; + // enum dwc3_link_state link_state; u16 u2sel; u16 u2pel; @@ -1266,8 +1320,8 @@ struct dwc3 { u8 num_eps; struct dwc3_hwparams hwparams; - struct dentry *root; - struct debugfs_regset32 *regset; + // struct dentry *root; + // struct debugfs_regset32 *regset; u32 dbg_lsp_select; @@ -1335,10 +1389,10 @@ struct dwc3 { int last_fifo_depth; int num_ep_resized; - ANDROID_KABI_RESERVE(1); - ANDROID_KABI_RESERVE(2); - ANDROID_KABI_RESERVE(3); - ANDROID_KABI_RESERVE(4); + // ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(3); + // ANDROID_KABI_RESERVE(4); }; /** @@ -1354,7 +1408,7 @@ struct dwc3_vendor { #define INCRX_BURST_MODE 0 #define INCRX_UNDEF_LENGTH_BURST_MODE 1 -#define work_to_dwc(w) (container_of((w), struct dwc3, drd_work)) +// #define work_to_dwc(w) (container_of((w), struct dwc3, drd_work)) /* -------------------------------------------------------------------------- */ @@ -1398,14 +1452,14 @@ struct dwc3_event_depevt { u32 status:4; /* Within XferNotReady */ -#define DEPEVT_STATUS_TRANSFER_ACTIVE BIT(3) +#define DEPEVT_STATUS_TRANSFER_ACTIVE UK_BIT(3) /* Within XferComplete or XferInProgress */ -#define DEPEVT_STATUS_BUSERR BIT(0) -#define DEPEVT_STATUS_SHORT BIT(1) -#define DEPEVT_STATUS_IOC BIT(2) -#define DEPEVT_STATUS_LST BIT(3) /* XferComplete */ -#define DEPEVT_STATUS_MISSED_ISOC BIT(3) /* XferInProgress */ +#define DEPEVT_STATUS_BUSERR UK_BIT(0) +#define DEPEVT_STATUS_SHORT UK_BIT(1) +#define DEPEVT_STATUS_IOC UK_BIT(2) +#define DEPEVT_STATUS_LST UK_BIT(3) /* XferComplete */ +#define DEPEVT_STATUS_MISSED_ISOC UK_BIT(3) /* XferInProgress */ /* Stream event only */ #define DEPEVT_STREAMEVT_FOUND 1 @@ -1463,7 +1517,7 @@ struct dwc3_event_devt { /** * struct dwc3_event_gevt - Other Core Events - * @one_bit: indicates this is a non-endpoint event (not used) + * @one_UK_BIT: indicates this is a non-endpoint event (not used) * @device_event: indicates it's (0x03) Carkit or (0x04) I2C event. * @phy_port_number: self-explanatory * @reserved31_12: Reserved, not used. 
@@ -1508,14 +1562,14 @@ struct dwc3_gadget_ep_cmd_params { * DWC3 Features to be used as Driver Data */ -#define DWC3_HAS_PERIPHERAL BIT(0) -#define DWC3_HAS_XHCI BIT(1) -#define DWC3_HAS_OTG BIT(3) +#define DWC3_HAS_PERIPHERAL UK_BIT(0) +#define DWC3_HAS_XHCI UK_BIT(1) +#define DWC3_HAS_OTG UK_BIT(3) /* prototypes */ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode); void dwc3_set_mode(struct dwc3 *dwc, u32 mode); -u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); +// u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); #define DWC3_IP_IS(_ip) \ (dwc->ip == _ip##_IP) @@ -1557,18 +1611,19 @@ static inline u32 dwc3_mdwidth(struct dwc3 *dwc) bool dwc3_has_imod(struct dwc3 *dwc); -int dwc3_event_buffers_setup(struct dwc3 *dwc); -void dwc3_event_buffers_cleanup(struct dwc3 *dwc); +// int dwc3_event_buffers_setup(struct dwc3 *dwc); +// void dwc3_event_buffers_cleanup(struct dwc3 *dwc); -#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) -int dwc3_host_init(struct dwc3 *dwc); -void dwc3_host_exit(struct dwc3 *dwc); -#else +// #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) +int dwc3_host_init(struct dwc3 *dwc __unused); +void dwc3_host_exit(struct dwc3 *dwc __unused); +#if 0 +//#else static inline int dwc3_host_init(struct dwc3 *dwc) { return 0; } static inline void dwc3_host_exit(struct dwc3 *dwc) { } -#endif +//#endif #if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_gadget_init(struct dwc3 *dwc); @@ -1654,7 +1709,8 @@ static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc) } #endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */ -#if IS_ENABLED(CONFIG_USB_DWC3_ULPI) +// #if IS_ENABLED(CONFIG_USB_DWC3_ULPI) +#ifdef CONFIG_USB_DWC3_ULPI int dwc3_ulpi_init(struct dwc3 *dwc); void dwc3_ulpi_exit(struct dwc3 *dwc); #else @@ -1663,5 +1719,7 @@ static inline int dwc3_ulpi_init(struct dwc3 *dwc) static inline void dwc3_ulpi_exit(struct dwc3 *dwc) { } #endif +#endif +int dwc3_probe(struct pf_device *pdev); #endif /* __DRIVERS_USB_DWC3_CORE_H */ diff --git a/usb/dwc3/host.c b/usb/dwc3/host.c index 4cbcf87..5d1efb7 100644 --- a/usb/dwc3/host.c +++ b/usb/dwc3/host.c @@ -7,11 +7,12 @@ * Authors: Felipe Balbi , */ -#include -#include +// #include +// #include #include "core.h" +#if 0 static int dwc3_host_get_irq(struct dwc3 *dwc) { struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); @@ -41,9 +42,11 @@ static int dwc3_host_get_irq(struct dwc3 *dwc) out: return irq; } +#endif -int dwc3_host_init(struct dwc3 *dwc) +int dwc3_host_init(struct dwc3 *dwc __unused) { +#if 0 struct property_entry props[4]; struct platform_device *xhci; int ret, irq; @@ -123,14 +126,17 @@ int dwc3_host_init(struct dwc3 *dwc) dev_err(dwc->dev, "failed to register xHCI device\n"); goto err; } - +#endif return 0; -err: - platform_device_put(xhci); - return ret; +// err: +// platform_device_put(xhci); +// return ret; } -void dwc3_host_exit(struct dwc3 *dwc) +#if 0 +void dwc3_host_exit(struct dwc3 *dwc __unused) { - platform_device_unregister(dwc->xhci); + // platform_device_unregister(dwc->xhci); + return; } +#endif diff --git a/usb/dwc3/io.h b/usb/dwc3/io.h index 76b73b1..201a123 100644 --- a/usb/dwc3/io.h +++ b/usb/dwc3/io.h @@ -11,12 +11,14 @@ #ifndef __DRIVERS_USB_DWC3_IO_H #define __DRIVERS_USB_DWC3_IO_H -#include -#include "trace.h" -#include "debug.h" +// #include +// #include "trace.h" +// #include "debug.h" + #include "core.h" +#include -static inline u32 dwc3_readl(void __iomem *base, u32 offset) 
+static inline u32 dwc3_readl(void *base, u32 offset) { u32 value; @@ -25,33 +27,34 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset) * space, see dwc3_probe in core.c. * However, the offsets are given starting from xHCI address space. */ - value = readl(base + offset - DWC3_GLOBALS_REGS_START); - + value = ioreg_read32(base + offset - DWC3_GLOBALS_REGS_START); +#if 0 /* * When tracing we want to make it easy to find the correct address on * documentation, so we revert it back to the proper addresses, the * same way they are described on SNPS documentation */ trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value); - +#endif return value; } -static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value) +static inline void dwc3_writel(void *base, u32 offset, u32 value) { /* * We requested the mem region starting from the Globals address * space, see dwc3_probe in core.c. * However, the offsets are given starting from xHCI address space. */ - writel(value, base + offset - DWC3_GLOBALS_REGS_START); - + ioreg_write32(base + offset - DWC3_GLOBALS_REGS_START, value); +#if 0 /* * When tracing we want to make it easy to find the correct address on * documentation, so we revert it back to the proper addresses, the * same way they are described on SNPS documentation */ trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value); +#endif } #endif /* __DRIVERS_USB_DWC3_IO_H */ diff --git a/usb/usb_platform.c b/usb/usb_platform.c index d0760f2..ee7e5ec 100644 --- a/usb/usb_platform.c +++ b/usb/usb_platform.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -36,7 +35,7 @@ static int usb_platform_probe(struct pf_device *pfdev) return 0; } -static int usb_platform_init(struct uk_alloc *drv_allocator) +static int usb_platform_init(struct uk_alloc *drv_allocator __unused) { return 0; } -- Gitee From 501359f255d293185e3b2936558aa55541956589 Mon Sep 17 00:00:00 2001 From: caicheng Date: Wed, 15 Jan 2025 15:21:59 +0800 Subject: [PATCH 3/5] modify xhci driver --- usb/host/hcd.c | 3 +- usb/host/hcd.h | 397 +++++++++++++++++++++++++++++++++++++++ usb/host/xhci-ext-caps.h | 3 +- usb/host/xhci-hub.c | 9 +- usb/host/xhci-mem.c | 286 ++++++++++++++++------------ usb/host/xhci-plat.c | 214 +++++++++++---------- usb/host/xhci-plat.h | 6 +- usb/host/xhci-ring.c | 15 +- usb/host/xhci.c | 339 ++++++++++++++++++--------------- usb/host/xhci.h | 304 ++++++++++++++++++++---------- 10 files changed, 1087 insertions(+), 489 deletions(-) create mode 100644 usb/host/hcd.h diff --git a/usb/host/hcd.c b/usb/host/hcd.c index 4908718..16c2ba0 100644 --- a/usb/host/hcd.c +++ b/usb/host/hcd.c @@ -10,7 +10,6 @@ */ #include -#include #include #include #include @@ -214,7 +213,7 @@ int usb_add_hcd(struct usb_hcd *hcd, retval = hcd->driver->reset(hcd); if (retval < 0) { uk_pr_err("can't setup: %d\n", retval); - goto err_hcd_driver_setup; + // goto err_hcd_driver_setup; } } #if 0 diff --git a/usb/host/hcd.h b/usb/host/hcd.h new file mode 100644 index 0000000..5b9e219 --- /dev/null +++ b/usb/host/hcd.h @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_HCD_H__ +#define __LINUX_HCD_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +typedef signed long s64; +typedef unsigned long u64; +typedef u64 resource_size_t; +# define __iomem __attribute__((noderef, address_space(__iomem))) + +struct usb_hcd { + + /* + * housekeeping + */ + // struct usb_bus self; /* hcd is-a bus */ + // struct kref kref; /* reference counter */ + + const char *product_desc; /* product/vendor string */ + int speed; /* Speed for this roothub. + * May be different from + * hcd->driver->flags & HCD_MASK + */ + char irq_descr[24]; /* driver + bus # */ + + // struct timer_list rh_timer; /* drives root-hub polling */ + // struct urb *status_urb; /* the current status urb */ +#ifdef CONFIG_PM + struct work_struct wakeup_work; /* for remote wakeup */ +#endif + // struct work_struct died_work; /* for when the device dies */ + + /* + * hardware info/state + */ + const struct hc_driver *driver; /* hw-specific hooks */ + + /* + * OTG and some Host controllers need software interaction with phys; + * other external phys should be software-transparent + */ + // struct usb_phy *usb_phy; + // struct usb_phy_roothub *phy_roothub; + + /* Flags that need to be manipulated atomically because they can + * change while the host controller is running. Always use + * set_bit() or clear_bit() to change their values. + */ + unsigned long flags; +#define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */ +#define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ +#define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ +#define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ +#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ +#define HCD_FLAG_DEAD 6 /* controller has died? */ +#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ + + /* The flags can be tested using these macros; they are likely to + * be slightly faster than test_bit(). + */ +#define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE)) +#define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) +#define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) +#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) +#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) +#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) + + /* + * Specifies if interfaces are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/interface_authorized_default + */ +#define HCD_INTF_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) + + /* + * Specifies if devices are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/authorized_default + */ + // enum usb_dev_authorize_policy dev_policy; + + /* Flags that get set only during HCD registration or removal. */ + unsigned rh_registered:1;/* is root hub registered? 
*/ + unsigned rh_pollable:1; /* may we poll the root hub? */ + unsigned msix_enabled:1; /* driver has MSI-X enabled? */ + unsigned msi_enabled:1; /* driver has MSI enabled? */ + /* + * do not manage the PHY state in the HCD core, instead let the driver + * handle this (for example if the PHY can only be turned on after a + * specific event) + */ + unsigned skip_phy_initialization:1; + + /* The next flag is a stopgap, to be removed when all the HCDs + * support the new root-hub polling mechanism. */ + unsigned uses_new_polling:1; + unsigned wireless:1; /* Wireless USB HCD */ + unsigned has_tt:1; /* Integrated TT in root hub */ + unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ + unsigned can_do_streams:1; /* HC supports streams */ + unsigned tpl_support:1; /* OTG & EH TPL support */ + unsigned cant_recv_wakeups:1; + /* wakeup requests from downstream aren't received */ + + unsigned int irq; /* irq allocated */ + void __iomem *regs; /* device memory/io */ + void __iomem *ncr_regs; /* device ncr memory/io */ + resource_size_t rsrc_start; /* memory/io resource start */ + resource_size_t rsrc_len; /* memory/io resource length */ + unsigned power_budget; /* in mA, 0 = no limit */ + + bool soc_taihang; /* choose soc version */ + + // struct giveback_urb_bh high_prio_bh; + // struct giveback_urb_bh low_prio_bh; + + /* bandwidth_mutex should be taken before adding or removing + * any new bus bandwidth constraints: + * 1. Before adding a configuration for a new device. + * 2. Before removing the configuration to put the device into + * the addressed state. + * 3. Before selecting a different configuration. + * 4. Before selecting an alternate interface setting. + * + * bandwidth_mutex should be dropped after a successful control message + * to the device, or resetting the bandwidth after a failed attempt. + */ + struct uk_mutex *address0_mutex; + struct uk_mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + + +#define HCD_BUFFER_POOLS 4 + // struct dma_pool *pool[HCD_BUFFER_POOLS]; + + int state; +# define __ACTIVE 0x01 +# define __SUSPEND 0x04 +# define __TRANSIENT 0x80 + +# define HC_STATE_HALT 0 +# define HC_STATE_RUNNING (__ACTIVE) +# define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE) +# define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT) +# define HC_STATE_SUSPENDED (__SUSPEND) + +#define HC_IS_RUNNING(state) ((state) & __ACTIVE) +#define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) + + /* memory pool for HCs having local memory, or %NULL */ + // struct gen_pool *localmem_pool; + + /* more shared queuing code would be good; it should support + * smarter scheduling, handle transaction translators, etc; + * input size of periodic table to an interrupt scheduler. + * (ohci 32, uhci 1024, ehci 256/512/1024). + */ + + // ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(3); + // ANDROID_KABI_RESERVE(4); + + /* The HC driver's private data is stored at the end of + * this structure. 
+ */ + unsigned long hcd_priv[] + __attribute__ ((aligned(sizeof(s64)))); +}; + +struct hc_driver { + const char *description; /* "ehci-hcd" etc */ + const char *product_desc; /* product/vendor string */ + size_t hcd_priv_size; /* size of private data */ + + /* irq handler */ + // irqreturn_t (*irq) (struct usb_hcd *hcd); + + int flags; +#define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ +#define HCD_DMA 0x0002 /* HC uses DMA */ +#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ +#define HCD_USB11 0x0010 /* USB 1.1 */ +#define HCD_USB2 0x0020 /* USB 2.0 */ +#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ +#define HCD_USB3 0x0040 /* USB 3.0 */ +#define HCD_USB31 0x0050 /* USB 3.1 */ +#define HCD_USB32 0x0060 /* USB 3.2 */ +#define HCD_MASK 0x0070 +#define HCD_BH 0x0100 /* URB complete in BH context */ + + /* called to init HCD and root hub */ + int (*reset) (struct usb_hcd *hcd); + int (*start) (struct usb_hcd *hcd); +#if 0 + /* NOTE: these suspend/resume calls relate to the HC as + * a whole, not just the root hub; they're for PCI bus glue. + */ + /* called after suspending the hub, before entering D3 etc */ + int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup); + + /* called after entering D0 (etc), before resuming the hub */ + int (*pci_resume)(struct usb_hcd *hcd, bool hibernated); + + /* cleanly make HCD stop writing memory and doing I/O */ + void (*stop) (struct usb_hcd *hcd); + + /* shutdown HCD */ + void (*shutdown) (struct usb_hcd *hcd); + + /* return current frame number */ + int (*get_frame_number) (struct usb_hcd *hcd); + + + /* manage i/o requests, device state */ + int (*urb_enqueue)(struct usb_hcd *hcd, + struct urb *urb, gfp_t mem_flags); + int (*urb_dequeue)(struct usb_hcd *hcd, + struct urb *urb, int status); + + /* + * (optional) these hooks allow an HCD to override the default DMA + * mapping and unmapping routines. In general, they shouldn't be + * necessary unless the host controller has special DMA requirements, + * such as alignment contraints. If these are not specified, the + * general usb_hcd_(un)?map_urb_for_dma functions will be used instead + * (and it may be a good idea to call these functions in your HCD + * implementation) + */ + int (*map_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags); + void (*unmap_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb); + + /* hw synch, freeing endpoint resources that urb_dequeue can't */ + void (*endpoint_disable)(struct usb_hcd *hcd, + struct usb_host_endpoint *ep); + + /* (optional) reset any endpoint state such as sequence number + and current window */ + void (*endpoint_reset)(struct usb_hcd *hcd, + struct usb_host_endpoint *ep); + + /* root hub support */ + int (*hub_status_data) (struct usb_hcd *hcd, char *buf); + int (*hub_control) (struct usb_hcd *hcd, + u16 typeReq, u16 wValue, u16 wIndex, + char *buf, u16 wLength); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned port_num); + unsigned long (*get_resuming_ports)(struct usb_hcd *); + + /* force handover of high-speed port to full-speed companion */ + void (*relinquish_port)(struct usb_hcd *, int); + /* has a port been handed over to a companion? 
*/ + int (*port_handed_over)(struct usb_hcd *, int); + + /* CLEAR_TT_BUFFER completion callback */ + void (*clear_tt_buffer_complete)(struct usb_hcd *, + struct usb_host_endpoint *); + + /* xHCI specific functions */ + /* Called by usb_alloc_dev to alloc HC device structures */ + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + /* Called by usb_disconnect to free HC device structures */ + void (*free_dev)(struct usb_hcd *, struct usb_device *); + /* Change a group of bulk endpoints to support multiple stream IDs */ + int (*alloc_streams)(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int num_streams, gfp_t mem_flags); + /* Reverts a group of bulk endpoints back to not using stream IDs. + * Can fail if we run out of memory. + */ + int (*free_streams)(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + gfp_t mem_flags); + + /* Bandwidth computation functions */ + /* Note that add_endpoint() can only be called once per endpoint before + * check_bandwidth() or reset_bandwidth() must be called. + * drop_endpoint() can only be called once per endpoint also. + * A call to xhci_drop_endpoint() followed by a call to + * xhci_add_endpoint() will add the endpoint to the schedule with + * possibly new parameters denoted by a different endpoint descriptor + * in usb_host_endpoint. A call to xhci_add_endpoint() followed by a + * call to xhci_drop_endpoint() is not allowed. + */ + /* Allocate endpoint resources and add them to a new schedule */ + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, + struct usb_host_endpoint *); + /* Drop an endpoint from a new schedule */ + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, + struct usb_host_endpoint *); + /* Check that a new hardware configuration, set using + * endpoint_enable and endpoint_disable, does not exceed bus + * bandwidth. This must be called before any set configuration + * or set interface requests are sent to the device. + */ + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + /* Reset the device schedule to the last known good schedule, + * which was set from a previous successful call to + * check_bandwidth(). This reverts any add_endpoint() and + * drop_endpoint() calls since that last successful call. + * Used for when a check_bandwidth() call fails due to resource + * or bandwidth constraints. + */ + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + /* Returns the hardware-chosen device address */ + int (*address_device)(struct usb_hcd *, struct usb_device *udev); + /* prepares the hardware to send commands to the device */ + int (*enable_device)(struct usb_hcd *, struct usb_device *udev); + /* Notifies the HCD after a hub descriptor is fetched. + * Will block. + */ + int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + /* Notifies the HCD after a device is connected and its + * address is set + */ + int (*update_device)(struct usb_hcd *, struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); + /* USB 3.0 Link Power Management */ + /* Returns the USB3 hub-encoded value for the U1/U2 timeout. 
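A hedged sketch of the stage-then-commit sequencing these comments describe (in this port the bandwidth hooks are compiled out, so the calls are purely illustrative and the helper name is hypothetical):

static int stage_and_commit_example(const struct hc_driver *drv,
                struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        int ret;

        /* stage: drop the old endpoint entry, then add it back with the
         * (possibly new) parameters from its descriptor */
        ret = drv->drop_endpoint(hcd, udev, ep);
        if (ret)
                return ret;
        ret = drv->add_endpoint(hcd, udev, ep);
        if (ret)
                return ret;

        /* commit; on failure fall back to the last known-good schedule */
        ret = drv->check_bandwidth(hcd, udev);
        if (ret)
                drv->reset_bandwidth(hcd, udev);
        return ret;
}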
*/ + int (*enable_usb3_lpm_timeout)(struct usb_hcd *, + struct usb_device *, enum usb3_link_state state); + /* The xHCI host controller can still fail the command to + * disable the LPM timeouts, so this can return an error code. + */ + int (*disable_usb3_lpm_timeout)(struct usb_hcd *, + struct usb_device *, enum usb3_link_state state); + int (*find_raw_port_number)(struct usb_hcd *, int); + /* Call for power on/off the port if necessary */ + int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); +#endif +}; + +// static struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver __unused, +// struct device *sysdev __unused, struct device *dev __unused, +// const char *bus_name __unused, struct usb_hcd *primary_hcd __unused) +// { +// return NULL; +// } +struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, + struct device *sysdev, struct device *dev, const char *bus_name, + struct usb_hcd *primary_hcd); + +// static int usb_add_hcd(struct usb_hcd *hcd __unused, +// unsigned int irqnum __unused, unsigned long irqflags __unused) +// { +// return 0; +// } + +int usb_add_hcd(struct usb_hcd *hcd, + unsigned int irqnum, unsigned long irqflags __unused); + +static int usb_hcd_is_primary_hcd(struct usb_hcd *hcd) +{ + if (!hcd->primary_hcd) + return 1; + return hcd == hcd->primary_hcd; +} + +#endif /* __LINUX_HCD_H__ */ \ No newline at end of file diff --git a/usb/host/xhci-ext-caps.h b/usb/host/xhci-ext-caps.h index 8e8dba9..83c3f41 100644 --- a/usb/host/xhci-ext-caps.h +++ b/usb/host/xhci-ext-caps.h @@ -94,7 +94,7 @@ * Some capabilities can occur several times, e.g., the XHCI_EXT_CAPS_PROTOCOL, * and this provides a way to find them all. */ - +#if 0 static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id) { u32 val; @@ -123,3 +123,4 @@ static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id) return 0; } +#endif \ No newline at end of file diff --git a/usb/host/xhci-hub.c b/usb/host/xhci-hub.c index 15d543f..33db2bd 100644 --- a/usb/host/xhci-hub.c +++ b/usb/host/xhci-hub.c @@ -8,12 +8,12 @@ * Some code borrowed from the Linux EHCI driver. */ - -#include -#include +#if 0 +// #include +// #include #include "xhci.h" -#include "xhci-trace.h" +// #include "xhci-trace.h" #define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ @@ -1880,3 +1880,4 @@ unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd) } #endif /* CONFIG_PM */ +#endif \ No newline at end of file diff --git a/usb/host/xhci-mem.c b/usb/host/xhci-mem.c index 4324fd3..71052b2 100644 --- a/usb/host/xhci-mem.c +++ b/usb/host/xhci-mem.c @@ -8,15 +8,15 @@ * Some code borrowed from the Linux EHCI driver. 
*/ -#include -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include #include "xhci.h" -#include "xhci-trace.h" -#include "xhci-debugfs.h" +// #include "xhci-trace.h" +// #include "xhci-debugfs.h" /* * Allocates a generic ring segment from the ring pool, sets the dma address, @@ -33,12 +33,14 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, struct xhci_segment *seg; dma_addr_t dma; int i; - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + // struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); + // seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); + seg = (struct xhci_segment *)uk_calloc(uk_alloc_get_default(), 1, sizeof(*seg)); if (!seg) return NULL; +#if 0 seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma); if (!seg->trbs) { kfree(seg); @@ -54,12 +56,13 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, return NULL; } } +#endif /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ if (cycle_state == 0) { for (i = 0; i < TRBS_PER_SEGMENT; i++) seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); } - seg->dma = dma; + // seg->dma = dma; seg->next = NULL; return seg; @@ -67,14 +70,19 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) { +#if 0 if (seg->trbs) { dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); seg->trbs = NULL; } + kfree(seg->bounce_buf); kfree(seg); +#endif + uk_free(uk_alloc_get_default(), seg->bounce_buf); + uk_free(uk_alloc_get_default(), seg); } -EXPORT_SYMBOL_GPL(xhci_segment_free); +// EXPORT_SYMBOL_GPL(xhci_segment_free); void xhci_free_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment *first) @@ -119,7 +127,7 @@ void xhci_link_segments(struct xhci_segment *prev, prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); } } -EXPORT_SYMBOL_GPL(xhci_link_segments); +// EXPORT_SYMBOL_GPL(xhci_link_segments); /* * Link the ring to the new segments. @@ -154,7 +162,7 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, ring->last_seg = last; } } - +#if 0 /* * We need a radix tree for mapping physical addresses of TRBs to which stream * ID they belong to. We need to do this because the host controller won't tell @@ -294,8 +302,8 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) kfree(ring); } -EXPORT_SYMBOL_GPL(xhci_ring_free); - +// EXPORT_SYMBOL_GPL(xhci_ring_free); +#endif void xhci_initialize_ring_info(struct xhci_ring *ring, unsigned int cycle_state) { @@ -319,7 +327,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring, */ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; } -EXPORT_SYMBOL_GPL(xhci_initialize_ring_info); +// EXPORT_SYMBOL_GPL(xhci_initialize_ring_info); /* Allocate segments and link them for a ring */ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, @@ -364,7 +372,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, return 0; } - +#if 0 static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); @@ -412,7 +420,7 @@ bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci, return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index); return false; } - +#endif /* * Create a new ring with zero or more segments. 
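The allocation changes in this hunk follow one mechanical mapping from the Linux node-aware slab calls to Unikraft's default allocator; a minimal sketch of that mapping, assuming <uk/alloc.h> as used elsewhere in the patch:

#include <stddef.h>
#include <uk/alloc.h>

/* kzalloc_node(sz, flags, node) becomes a zeroed allocation from the default heap */
static void *zalloc_example(size_t sz)
{
        return uk_calloc(uk_alloc_get_default(), 1, sz);
}

/* kfree(p) becomes uk_free() against the same allocator */
static void zfree_example(void *p)
{
        uk_free(uk_alloc_get_default(), p);
}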
* @@ -426,15 +434,17 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, { struct xhci_ring *ring; int ret; - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + // struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev)); + // ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev)); + ring = (struct xhci_ring *)uk_calloc(uk_alloc_get_default(), 1, sizeof(*ring)); if (!ring) return NULL; ring->num_segs = num_segs; ring->bounce_buf_len = max_packet; - INIT_LIST_HEAD(&ring->td_list); + // INIT_LIST_HEAD(&ring->td_list); + UK_INIT_LIST_HEAD(&ring->td_list); ring->type = type; if (num_segs == 0) return ring; @@ -452,15 +462,16 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, cpu_to_le32(LINK_TOGGLE); } xhci_initialize_ring_info(ring, cycle_state); - trace_xhci_ring_alloc(ring); + // trace_xhci_ring_alloc(ring); return ring; fail: - kfree(ring); + // kfree(ring); + uk_free(uk_alloc_get_default(), ring); return NULL; } -EXPORT_SYMBOL_GPL(xhci_ring_alloc); - +// EXPORT_SYMBOL_GPL(xhci_ring_alloc); +#if 0 void xhci_free_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index) @@ -522,18 +533,19 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, return 0; } - +#endif struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags) { struct xhci_container_ctx *ctx; - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); + // struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + // struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)) return NULL; - ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev)); + // ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev)); + ctx = (struct xhci_container_ctx *)uk_calloc(uk_alloc_get_default(), 1, sizeof(*ctx)); if (!ctx) return NULL; @@ -542,19 +554,19 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, if (type == XHCI_CTX_TYPE_INPUT) ctx->size += CTX_SIZE(xhci->hcc_params); - if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) && - (ops && ops->alloc_container_ctx)) - xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags); - else - ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma); + // if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) && + // (ops && ops->alloc_container_ctx)) + // xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags); + // else + // ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma); - if (!ctx->bytes) { - kfree(ctx); - return NULL; - } + // if (!ctx->bytes) { + // kfree(ctx); + // return NULL; + // } return ctx; } - +#if 0 void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { @@ -604,10 +616,10 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); } EXPORT_SYMBOL_GPL(xhci_get_ep_ctx); - +#endif /***************** Streams structures manipulation *************************/ - +#if 0 static void xhci_free_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) @@ -851,10 +863,10 @@ void xhci_free_stream_info(struct xhci_hcd *xhci, kfree(stream_info->stream_rings); kfree(stream_info); } - +#endif /***************** Device context manipulation *************************/ - +#if 0 static void 
xhci_init_endpoint_timer(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { @@ -867,7 +879,7 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, int slot_id) { - struct list_head *tt_list_head; + struct uk_list_head *tt_list_head; struct xhci_tt_bw_info *tt_info, *next; bool slot_found = false; @@ -1000,7 +1012,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) { struct xhci_virt_device *vdev; - struct list_head *tt_list_head; + struct uk_list_head *tt_list_head; struct xhci_tt_bw_info *tt_info, *next; int i; @@ -1810,33 +1822,35 @@ static void scratchpad_free(struct xhci_hcd *xhci) kfree(xhci->scratchpad); xhci->scratchpad = NULL; } +#endif struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, bool allocate_completion, gfp_t mem_flags) { struct xhci_command *command; - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + // struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev)); + // command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev)); + command = (struct xhci_command *)uk_calloc(uk_alloc_get_default(), 1, sizeof(*command)); if (!command) return NULL; - if (allocate_completion) { - command->completion = - kzalloc_node(sizeof(struct completion), mem_flags, - dev_to_node(dev)); - if (!command->completion) { - kfree(command); - return NULL; - } - init_completion(command->completion); - } + // if (allocate_completion) { + // command->completion = + // kzalloc_node(sizeof(struct completion), mem_flags, + // dev_to_node(dev)); + // if (!command->completion) { + // kfree(command); + // return NULL; + // } + // init_completion(command->completion); + // } command->status = 0; - INIT_LIST_HEAD(&command->cmd_list); + UK_INIT_LIST_HEAD(&command->cmd_list); return command; } -EXPORT_SYMBOL_GPL(xhci_alloc_command); +// EXPORT_SYMBOL_GPL(xhci_alloc_command); struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, bool allocate_completion, gfp_t mem_flags) @@ -1850,13 +1864,14 @@ struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); if (!command->in_ctx) { - kfree(command->completion); - kfree(command); + // kfree(command->completion); + // kfree(command); + uk_free(uk_alloc_get_default(), command); return NULL; } return command; } - +#if 0 void xhci_urb_free_priv(struct urb_priv *urb_priv) { kfree(urb_priv); @@ -1871,7 +1886,7 @@ void xhci_free_command(struct xhci_hcd *xhci, kfree(command); } EXPORT_SYMBOL_GPL(xhci_free_command); - +#endif int xhci_alloc_erst(struct xhci_hcd *xhci, struct xhci_ring *evt_ring, struct xhci_erst *erst, @@ -1882,11 +1897,11 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, struct xhci_segment *seg; struct xhci_erst_entry *entry; - size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; - erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, - size, &erst->erst_dma_addr, flags); - if (!erst->entries) - return -ENOMEM; + // size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; + // erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, + // size, &erst->erst_dma_addr, flags); + // if (!erst->entries) + // return -ENOMEM; erst->num_entries = evt_ring->num_segs; @@ -1901,8 +1916,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, return 0; } -EXPORT_SYMBOL_GPL(xhci_alloc_erst); - +// 
EXPORT_SYMBOL_GPL(xhci_alloc_erst); +#if 0 void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) { size_t size; @@ -1937,7 +1952,7 @@ static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci) void xhci_mem_cleanup(struct xhci_hcd *xhci) { - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + // struct device *dev = xhci_to_hcd(xhci)->self.sysdev; int i, j, num_ports; cancel_delayed_work_sync(&xhci->cmd_timer); @@ -1962,7 +1977,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) for (i = 0; i < num_ports && xhci->rh_bw; i++) { struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; for (j = 0; j < XHCI_MAX_INTERVAL; j++) { - struct list_head *ep = &bwt->interval_bw[j].endpoints; + struct uk_list_head *ep = &bwt->interval_bw[j].endpoints; while (!list_empty(ep)) list_del_init(ep->next); } @@ -2193,10 +2208,12 @@ int xhci_check_trb_in_td_math(struct xhci_hcd *xhci) if (ret < 0) return ret; } - xhci_dbg(xhci, "TRB math tests passed.\n"); + // xhci_dbg(xhci, "TRB math tests passed.\n"); + uk_pr_info("TRB math tests passed.\n"); return 0; } EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math); +#endif static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) { @@ -2205,9 +2222,10 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, xhci->event_ring->dequeue); - if (deq == 0 && !in_interrupt()) - xhci_warn(xhci, "WARN something wrong with SW event ring " - "dequeue ptr.\n"); + // if (deq == 0 && !in_interrupt()) + // xhci_warn(xhci, "WARN something wrong with SW event ring " + // "dequeue ptr.\n"); + // uk_pr_warn("WARN something wrong with SW event ring dequeue ptr.\n"); /* Update HC event ring dequeue pointer */ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp &= ERST_PTR_MASK; @@ -2215,13 +2233,13 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) * there might be more events to service. 
*/ temp &= ~ERST_EHB; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Write event ring dequeue pointer, " - "preserving EHB bit"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Write event ring dequeue pointer, " + // "preserving EHB bit"); xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, &xhci->ir_set->erst_dequeue); } - +#if 0 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, __le32 __iomem *addr, int max_caps) { @@ -2484,54 +2502,62 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) return 0; } +#endif int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { - dma_addr_t dma; - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + // dma_addr_t dma; + // struct device *dev = xhci_to_hcd(xhci)->self.sysdev; unsigned int val, val2; u64 val_64; u32 page_size, temp; int i, ret; - INIT_LIST_HEAD(&xhci->cmd_list); + // INIT_LIST_HEAD(&xhci->cmd_list); + UK_INIT_LIST_HEAD(&xhci->cmd_list); /* init command timeout work */ - INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); - init_completion(&xhci->cmd_ring_stop_completion); + // INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); + // init_completion(&xhci->cmd_ring_stop_completion); page_size = readl(&xhci->op_regs->page_size); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size register = 0x%x", page_size); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "Supported page size register = 0x%x", page_size); for (i = 0; i < 16; i++) { if ((0x1 & page_size) != 0) break; page_size = page_size >> 1; } if (i < 16) - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (i+12)) / 1024); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "Supported page size of %iK", (1 << (i+12)) / 1024); + uk_pr_info("Supported page size of %iK", (1 << (i+12)) / 1024); else - xhci_warn(xhci, "WARN: no supported page size\n"); + // xhci_warn(xhci, "WARN: no supported page size\n"); + uk_pr_warn("WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ xhci->page_shift = 12; xhci->page_size = 1 << xhci->page_shift; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "HCD page size set to %iK", xhci->page_size / 1024); - + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "HCD page size set to %iK", xhci->page_size / 1024); + uk_pr_info("HCD page size set to %iK\n", xhci->page_size / 1024); /* * Program the Number of Device Slots Enabled field in the CONFIG * register with the max value of slots the HC can handle. */ val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1)); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// xHC can handle at most %d device slots.", val); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// xHC can handle at most %d device slots.", val); + uk_pr_info("// xHC can handle at most %d device slots.\n", val); val2 = readl(&xhci->op_regs->config_reg); val |= (val2 & ~HCS_SLOTS_MASK); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting Max device slots reg = 0x%x.", val); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Setting Max device slots reg = 0x%x.", val); + uk_pr_info("// Setting Max device slots reg = 0x%x.\n", val); writel(val, &xhci->op_regs->config_reg); + /* skip dma_pool_create/dma_alloc_coherent for now */ +#if 0 /* * xHCI section 5.4.6 - doorbell array must be * "physically contiguous and 64-byte (cache line) aligned". 
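The page-size probe in xhci_mem_init() scans the PAGESIZE register for the lowest set bit; bit i set means a supported page size of 2^(i+12) bytes. A worked sketch of the same decode (helper name hypothetical):

/* e.g. a register value of 0x1 decodes to 1 << (0 + 12) = 4096 bytes */
static unsigned int xhci_page_size_example(u32 page_size_reg)
{
        unsigned int i;

        for (i = 0; i < 16; i++) {
                if (page_size_reg & (1u << i))
                        return 1u << (i + 12);
        }
        return 0;       /* no supported page size advertised */
}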
@@ -2547,8 +2573,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) goto fail; xhci->dcbaa->dma = dma; } - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Device context base array address = 0x%llx (DMA), %p (virt)", + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Device context base array address = 0x%llx (DMA), %p (virt)", + // (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); + uk_pr_info("// Device context base array address = 0x%llx (DMA), %p (virt)\n", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr); @@ -2583,23 +2611,26 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->small_streams_pool || !xhci->medium_streams_pool) goto fail; - +#endif /* Set up the command ring to have one segments for now. */ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags); if (!xhci->cmd_ring) goto fail; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Allocated command ring at %p", xhci->cmd_ring); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx", - (unsigned long long)xhci->cmd_ring->first_seg->dma); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "Allocated command ring at %p", xhci->cmd_ring); + uk_pr_info("Allocated command ring at %p\n", xhci->cmd_ring); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx", + // (unsigned long long)xhci->cmd_ring->first_seg->dma); + uk_pr_info("First segment DMA is 0x%llx\n", (unsigned long long)xhci->cmd_ring->first_seg->dma); /* Set the address in the Command Ring Control register */ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%016llx", val_64); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Setting command ring address to 0x%016llx", val_64); + uk_pr_info("// Setting command ring address to 0x%016llx\n", val_64); xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags); @@ -2614,9 +2645,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val = readl(&xhci->cap_regs->db_off); val &= DBOFF_MASK; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Doorbell array is located at offset 0x%x" - " from cap regs base addr", val); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Doorbell array is located at offset 0x%x" + // " from cap regs base addr", val); + uk_pr_info("// Doorbell array is located at offset 0x%x from cap regs base addr\n", val); xhci->dba = (void __iomem *) xhci->cap_regs + val; /* Set ir_set to interrupt register set 0 */ xhci->ir_set = &xhci->run_regs->ir_set[0]; @@ -2625,13 +2657,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * Event ring setup: Allocate a normal ring, but also setup * the event ring segment table (ERST). Section 4.9.3. 
*/ - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring"); + uk_pr_info("// Allocating event ring\n"); xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags); if (!xhci->event_ring) goto fail; - if (xhci_check_trb_in_td_math(xhci) < 0) - goto fail; + // if (xhci_check_trb_in_td_math(xhci) < 0) + // goto fail; ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags); if (ret) @@ -2641,16 +2674,20 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val = readl(&xhci->ir_set->erst_size); val &= ERST_SIZE_MASK; val |= ERST_NUM_SEGS; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Write ERST size = %i to ir_set 0 (some bits preserved)", - val); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Write ERST size = %i to ir_set 0 (some bits preserved)", + // val); + uk_pr_info("// Write ERST size = %i to ir_set 0 (some bits preserved)\n", val); writel(val, &xhci->ir_set->erst_size); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Set ERST entries to point to event ring."); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Set ERST entries to point to event ring."); + uk_pr_info("// Set ERST entries to point to event ring.\n"); /* set the segment table base address */ - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Set ERST base address for ir_set 0 = 0x%llx", + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Set ERST base address for ir_set 0 = 0x%llx", + // (unsigned long long)xhci->erst.erst_dma_addr); + uk_pr_info("// Set ERST base address for ir_set 0 = 0x%llx\n", (unsigned long long)xhci->erst.erst_dma_addr); val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); val_64 &= ERST_PTR_MASK; @@ -2659,9 +2696,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Wrote ERST address to ir_set 0."); - + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "Wrote ERST address to ir_set 0."); + uk_pr_info("Wrote ERST address to ir_set 0.\n"); /* * XXX: Might need to set the Interrupter Moderation Register to * something other than the default (~1ms minimum between interrupts). @@ -2669,6 +2706,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) */ for (i = 0; i < MAX_HC_SLOTS; i++) xhci->devs[i] = NULL; +#if 0 for (i = 0; i < USB_MAXCHILDREN; i++) { xhci->usb2_rhub.bus_state.resume_done[i] = 0; xhci->usb3_rhub.bus_state.resume_done[i] = 0; @@ -2681,7 +2719,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) goto fail; if (xhci_setup_port_arrays(xhci, flags)) goto fail; - +#endif /* Enable USB 3.0 device notifications for function remote wake, which * is necessary for allowing USB 3.0 devices to do remote wakeup from * U3 (device suspend). @@ -2696,6 +2734,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) fail: xhci_halt(xhci); xhci_reset(xhci); - xhci_mem_cleanup(xhci); + // xhci_mem_cleanup(xhci); return -ENOMEM; } diff --git a/usb/host/xhci-plat.c b/usb/host/xhci-plat.c index 60e72ca..7ea8056 100644 --- a/usb/host/xhci-plat.c +++ b/usb/host/xhci-plat.c @@ -8,62 +8,65 @@ * A lot of code borrowed from the Linux xHCI driver. 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include #include "xhci.h" #include "xhci-plat.h" -#include "xhci-mvebu.h" -#include "xhci-rcar.h" +// #include +// #include "xhci-mvebu.h" +// #include "xhci-rcar.h" -static struct hc_driver __read_mostly xhci_plat_hc_driver; +// static struct hc_driver __read_mostly xhci_plat_hc_driver; +static struct hc_driver xhci_plat_hc_driver; static int xhci_plat_setup(struct usb_hcd *hcd); static int xhci_plat_start(struct usb_hcd *hcd); -static const struct xhci_driver_overrides xhci_plat_overrides __initconst = { +// // static const struct xhci_driver_overrides xhci_plat_overrides __initconst = { +static const struct xhci_driver_overrides xhci_plat_overrides = { .extra_priv_size = sizeof(struct xhci_plat_priv), .reset = xhci_plat_setup, .start = xhci_plat_start, }; -static void xhci_priv_plat_start(struct usb_hcd *hcd) -{ - struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +// static void xhci_priv_plat_start(struct usb_hcd *hcd) +// { +// struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); - if (priv->plat_start) - priv->plat_start(hcd); -} +// if (priv->plat_start) +// priv->plat_start(hcd); +// } -static int xhci_priv_plat_setup(struct usb_hcd *hcd) -{ - struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +// static int xhci_priv_plat_setup(struct usb_hcd *hcd) +// { +// struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); - if (!priv->plat_setup) - return 0; +// if (!priv->plat_setup) +// return 0; - return priv->plat_setup(hcd); -} +// return priv->plat_setup(hcd); +// } -static int xhci_priv_init_quirk(struct usb_hcd *hcd) -{ - struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); - - if (!priv->init_quirk) - return 0; +// static int xhci_priv_init_quirk(struct usb_hcd *hcd) +// { +// struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); - return priv->init_quirk(hcd); -} +// if (!priv->init_quirk) +// return 0; +// return priv->init_quirk(hcd); +// } +#if 0 static int xhci_priv_suspend_quirk(struct usb_hcd *hcd) { struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); @@ -83,7 +86,7 @@ static int xhci_priv_resume_quirk(struct usb_hcd *hcd) return priv->resume_quirk(hcd); } - +#endif static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) { struct xhci_plat_priv *priv = xhci_to_priv(xhci); @@ -102,16 +105,16 @@ static int xhci_plat_setup(struct usb_hcd *hcd) int ret; - ret = xhci_priv_init_quirk(hcd); - if (ret) - return ret; + // ret = xhci_priv_init_quirk(hcd); + // if (ret) + // return ret; return xhci_gen_setup(hcd, xhci_plat_quirks); } static int xhci_plat_start(struct usb_hcd *hcd) { - xhci_priv_plat_start(hcd); + // xhci_priv_plat_start(hcd); return xhci_run(hcd); } @@ -184,8 +187,9 @@ static const struct of_device_id usb_xhci_of_match[] = { MODULE_DEVICE_TABLE(of, usb_xhci_of_match); #endif -static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite; +// static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite; +#if 0 int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops) { if (vendor_ops == NULL) @@ -218,46 +222,54 @@ static void xhci_vendor_cleanup(struct xhci_hcd *xhci) xhci->vendor_ops = NULL; } +#endif -static int xhci_plat_ncr_init(struct usb_hcd *hcd, struct platform_device *pdev) +#if 0 +// static int xhci_plat_ncr_init(struct usb_hcd *hcd, struct platform_device *pdev) +int 
xhci_plat_ncr_init(struct usb_hcd *hcd, struct pf_device *pdev) { int ret; + void *dtb; - if (device_property_read_bool(&pdev->dev, "semidrive-taihang")) + dtb = (void *)ukplat_bootinfo_get()->dtb; + + // if (device_property_read_bool(&pdev->dev, "semidrive-taihang")) + if (fdt_prop_read_bool(dtb, pdev->fdt_offset, "semidrive-taihang")) hcd->soc_taihang = true; if (!hcd->soc_taihang) { if ((hcd->rsrc_start == 0x31260000 && hcd->rsrc_len == 0x8000) || (hcd->rsrc_start == 0x31220000 && hcd->rsrc_len == 0x8000)) { - hcd->ncr_regs = devm_ioremap(&pdev->dev, - hcd->rsrc_start + 0xd000, 0x1000); - if (IS_ERR(hcd->ncr_regs)) { - ret = PTR_ERR(hcd->ncr_regs); - return ret; - } + // hcd->ncr_regs = devm_ioremap(&pdev->dev, + // hcd->rsrc_start + 0xd000, 0x1000); + // if (IS_ERR(hcd->ncr_regs)) { + // ret = PTR_ERR(hcd->ncr_regs); + // return ret; + // } + hcd->ncr_regs = hcd->rsrc_start + 0xd000; } } return 0; } +#endif -static int xhci_plat_probe(struct platform_device *pdev) +int xhci_plat_probe(struct pf_device *pdev) { - const struct xhci_plat_priv *priv_match; + // const struct xhci_plat_priv *priv_match; const struct hc_driver *driver; - struct device *sysdev, *tmpdev; + // struct device *sysdev, *tmpdev; struct xhci_hcd *xhci; - struct resource *res; + // struct resource *res; struct usb_hcd *hcd; int ret; int irq; struct xhci_plat_priv *priv = NULL; - - if (usb_disabled()) - return -ENODEV; - + xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides); driver = &xhci_plat_hc_driver; + irq = pdev->irq; +#if 0 irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -296,32 +308,36 @@ static int xhci_plat_probe(struct platform_device *pdev) if (ret) return ret; } +#endif - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); - - hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, - dev_name(&pdev->dev), NULL); - if (!hcd) { - ret = -ENOMEM; - goto disable_runtime; - } - - hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (IS_ERR(hcd->regs)) { - ret = PTR_ERR(hcd->regs); - goto put_hcd; - } - - hcd->rsrc_start = res->start; - hcd->rsrc_len = resource_size(res); + /* create timer/workqueue in `__usb_create_hcd` */ + // hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, + // dev_name(&pdev->dev), NULL); + // if (!hcd) { + // ret = -ENOMEM; + // // goto disable_runtime; + // return ret; + // } + hcd = __usb_create_hcd(driver, NULL, NULL, + NULL, NULL); + + // hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + hcd->regs = pdev->base; + // if (IS_ERR(hcd->regs)) { + // ret = PTR_ERR(hcd->regs); + // goto put_hcd; + // } + + // hcd->rsrc_start = res->start; + // hcd->rsrc_len = resource_size(res); xhci = hcd_to_xhci(hcd); - if (xhci_plat_ncr_init(hcd, pdev)) - goto put_hcd; - + // if (xhci_plat_ncr_init(pdev->hcd, pdev)) + // if (xhci_plat_ncr_init(hcd, pdev)) + // // goto put_hcd; + // return -1; +#if 0 /* * Not all platforms have clks so it is not an error if the * clock do not exist. 
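The reworked probe takes its resources directly from struct pf_device (base, irq and fdt_offset, as used above); a hypothetical call site, assuming the platform glue has already parsed the device-tree node:

/* name hypothetical; only the pf_device fields consumed by the probe matter here */
static int usb_host_bringup_example(struct pf_device *pdev)
{
        /* pdev->base, pdev->irq and pdev->fdt_offset are expected to be filled in */
        return xhci_plat_probe(pdev);
}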
@@ -358,19 +374,24 @@ static int xhci_plat_probe(struct platform_device *pdev) } device_set_wakeup_capable(&pdev->dev, true); - +#endif xhci->main_hcd = hcd; - xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, - dev_name(&pdev->dev), hcd); - if (!xhci->shared_hcd) { - ret = -ENOMEM; - goto disable_clk; - } + // xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, + // dev_name(&pdev->dev), hcd); + xhci->shared_hcd = __usb_create_hcd(driver, NULL, NULL, + NULL, hcd); + + // if (!xhci->shared_hcd) { + // ret = -ENOMEM; + // // goto disable_clk; + // return ret; + // } /* imod_interval is the interrupt moderation value in nanoseconds. */ xhci->imod_interval = 40000; /* Iterate over all parent nodes for finding quirks */ +#if 0 for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) { if (device_property_read_bool(tmpdev, "usb2-lpm-disable")) @@ -416,15 +437,17 @@ static int xhci_plat_probe(struct platform_device *pdev) if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK; - - ret = usb_add_hcd(hcd, irq, IRQF_SHARED); +#endif + ret = usb_add_hcd(hcd, irq, NULL); if (ret) - goto disable_usb_phy; + // goto disable_usb_phy; + return -1; if (HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; - ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); + ret = usb_add_hcd(xhci->shared_hcd, irq, NULL); +#if 0 if (ret) goto dealloc_usb2_hcd; @@ -436,10 +459,10 @@ static int xhci_plat_probe(struct platform_device *pdev) * runtime pm using power/control in sysfs. */ pm_runtime_forbid(&pdev->dev); - +#endif return 0; - +#if 0 dealloc_usb2_hcd: usb_remove_hcd(hcd); @@ -463,8 +486,10 @@ disable_runtime: pm_runtime_disable(&pdev->dev); return ret; +#endif } +#if 0 static int xhci_plat_remove(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); @@ -596,6 +621,5 @@ static void __exit xhci_plat_exit(void) platform_driver_unregister(&usb_xhci_driver); } module_exit(xhci_plat_exit); +#endif -MODULE_DESCRIPTION("xHCI Platform Host Controller Driver"); -MODULE_LICENSE("GPL"); diff --git a/usb/host/xhci-plat.h b/usb/host/xhci-plat.h index e726a57..ba5f052 100644 --- a/usb/host/xhci-plat.h +++ b/usb/host/xhci-plat.h @@ -8,12 +8,14 @@ #ifndef _XHCI_PLAT_H #define _XHCI_PLAT_H +#include #include "xhci.h" /* for hcd_to_xhci() */ +#include "hcd.h" struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; - struct xhci_vendor_data *vendor_data; + // struct xhci_vendor_data *vendor_data; int (*plat_setup)(struct usb_hcd *); void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); @@ -30,4 +32,6 @@ struct xhci_plat_priv_overwrite { int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops); +// int xhci_plat_probe(struct platform_device *pdev); +int xhci_plat_probe(struct pf_device *pdev); #endif /* _XHCI_PLAT_H */ diff --git a/usb/host/xhci-ring.c b/usb/host/xhci-ring.c index 3b43592..9cbcc2e 100644 --- a/usb/host/xhci-ring.c +++ b/usb/host/xhci-ring.c @@ -52,11 +52,11 @@ * endpoint rings; it generates events on the event ring for these. 
*/ -#include -#include -#include +// #include +// #include +// #include #include "xhci.h" -#include "xhci-trace.h" +// #include "xhci-trace.h" static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, u32 field1, u32 field2, @@ -79,8 +79,8 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, return 0; return seg->dma + (segment_offset * sizeof(*trb)); } -EXPORT_SYMBOL_GPL(xhci_trb_virt_to_dma); - +// EXPORT_SYMBOL_GPL(xhci_trb_virt_to_dma); +#if 0 static bool trb_is_noop(union xhci_trb *trb) { return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); @@ -2536,7 +2536,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, union xhci_trb *ep_trb; int status = -EINPROGRESS; struct xhci_ep_ctx *ep_ctx; - struct list_head *tmp; + struct uk_list_head *tmp; u32 trb_comp_code; int td_num = 0; bool handling_skipped_tds = false; @@ -4405,3 +4405,4 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, return queue_command(xhci, cmd, 0, 0, 0, trb_slot_id | trb_ep_index | type, false); } +#endif \ No newline at end of file diff --git a/usb/host/xhci.c b/usb/host/xhci.c index cd2d7ca..dd9b487 100644 --- a/usb/host/xhci.c +++ b/usb/host/xhci.c @@ -8,35 +8,45 @@ * Some code borrowed from the Linux EHCI driver. */ -#include -#include -#include -#include -#include -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include #include "xhci.h" -#include "xhci-trace.h" -#include "xhci-debugfs.h" -#include "xhci-dbgcap.h" +// #include "xhci-trace.h" +// #include "xhci-debugfs.h" +// #include "xhci-dbgcap.h" -#define DRIVER_AUTHOR "Sarah Sharp" -#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" +// #define DRIVER_AUTHOR "Sarah Sharp" +// #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" -#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) +// #define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) + +static void udelay(u32 usec) +{ + u64 count = 0; + u64 limit = usec * 100; + while (count < limit) { + count++; + } +} /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ -static int link_quirk; -module_param(link_quirk, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); +// static int link_quirk; +// module_param(link_quirk, int, S_IRUGO | S_IWUSR); +// MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); -static unsigned long long quirks; -module_param(quirks, ullong, S_IRUGO); -MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); +// static unsigned long long quirks; +// module_param(quirks, ullong, S_IRUGO); +// MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); +#if 0 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) { struct xhci_segment *seg = ring->first_seg; @@ -51,7 +61,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) return false; } - +#endif /* * xhci_handshake - spin reading hc until handshake completes or fails * @ptr: address of hc register to be read @@ -68,14 +78,15 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; - int ret; + int ret = 0; - ret = readl_poll_timeout_atomic(ptr, result, - (result & mask) == done || - result == U32_MAX, - 1, usec); - if (result == U32_MAX) /* card removed */ - return -ENODEV; + // ret = 
readl_poll_timeout_atomic(ptr, result, + // (result & mask) == done || + // result == U32_MAX, + // 1, usec); + udelay(1000); + // if (result == U32_MAX) /* card removed */ + // return -ENODEV; return ret; } @@ -110,13 +121,15 @@ void xhci_quiesce(struct xhci_hcd *xhci) int xhci_halt(struct xhci_hcd *xhci) { int ret; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); + uk_pr_warn("// Halt the HC\n"); xhci_quiesce(xhci); ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); if (ret) { - xhci_warn(xhci, "Host halt failed, %d\n", ret); + // xhci_warn(xhci, "Host halt failed, %d\n", ret); + uk_pr_warn("Host halt failed, %d\n", ret); return ret; } xhci->xhc_state |= XHCI_STATE_HALTED; @@ -134,8 +147,8 @@ int xhci_start(struct xhci_hcd *xhci) temp = readl(&xhci->op_regs->command); temp |= (CMD_RUN); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", - temp); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", + // temp); writel(temp, &xhci->op_regs->command); /* @@ -144,10 +157,10 @@ int xhci_start(struct xhci_hcd *xhci) */ ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, 0, XHCI_MAX_HALT_USEC); - if (ret == -ETIMEDOUT) - xhci_err(xhci, "Host took too long to start, " - "waited %u microseconds.\n", - XHCI_MAX_HALT_USEC); + // if (ret == -ETIMEDOUT) + // xhci_err(xhci, "Host took too long to start, " + // "waited %u microseconds.\n", + // XHCI_MAX_HALT_USEC); if (!ret) /* clear state flags. Including dying, halted or removing */ xhci->xhc_state = 0; @@ -171,16 +184,19 @@ int xhci_reset(struct xhci_hcd *xhci) state = readl(&xhci->op_regs->status); if (state == ~(u32)0) { - xhci_warn(xhci, "Host not accessible, reset failed.\n"); + // xhci_warn(xhci, "Host not accessible, reset failed.\n"); + uk_pr_warn("Host not accessible, reset failed.\n"); return -ENODEV; } if ((state & STS_HALT) == 0) { - xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); + // xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); + uk_pr_warn("Host controller not halted, aborting reset.\n"); return 0; } - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); + uk_pr_warn("// Reset the HC\n"); command = readl(&xhci->op_regs->command); command |= CMD_RESET; writel(command, &xhci->op_regs->command); @@ -200,11 +216,12 @@ int xhci_reset(struct xhci_hcd *xhci) if (ret) return ret; - if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) - usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); + // if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) + // usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Wait for controller to be ready for doorbell rings"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "Wait for controller to be ready for doorbell rings"); + uk_pr_warn("// Wait for controller to be ready for doorbell rings\n"); /* * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. 
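The ported xhci_handshake() above only delays and then reports success; a closer-to-original polling sketch, hedged since it reuses the file-local udelay() helper and the readl() accessor already used throughout the port:

static int xhci_handshake_poll_example(void __iomem *ptr, u32 mask,
                                       u32 done, int usec)
{
        u32 result;

        while (usec-- > 0) {
                result = readl(ptr);
                if (result == ~(u32)0)          /* controller removed */
                        return -ENODEV;
                if ((result & mask) == done)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}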
@@ -221,10 +238,10 @@ int xhci_reset(struct xhci_hcd *xhci) return ret; } - +#if 0 static void xhci_zero_64b_regs(struct xhci_hcd *xhci) { - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + // struct device *dev = xhci_to_hcd(xhci)->self.sysdev; int err, i; u64 val; u32 intrs; @@ -246,8 +263,8 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev)) return; - xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); - + // xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); + uk_pr_info("Zeroing 64bit base registers, expecting fault\n"); /* Clear HSEIE so that faults do not get signaled */ val = readl(&xhci->op_regs->command); val &= ~CMD_HSEIE; @@ -286,9 +303,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) STS_FATAL, STS_FATAL, XHCI_MAX_HALT_USEC); if (!err) - xhci_info(xhci, "Fault detected\n"); + // xhci_info(xhci, "Fault detected\n"); + uk_pr_info("Fault detected\n"); } - +#endif #ifdef CONFIG_USB_PCI /* * Set up MSI @@ -459,7 +477,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) } #else - +#if 0 static inline int xhci_try_enable_msi(struct usb_hcd *hcd) { return 0; @@ -472,9 +490,9 @@ static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) { } - #endif - +#endif +#if 0 static void compliance_mode_recovery(struct timer_list *t) { struct xhci_hcd *xhci; @@ -566,7 +584,7 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) { return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); } - +#endif /* * Initialize memory for HCD and xHC (one-time init). @@ -580,24 +598,25 @@ static int xhci_init(struct usb_hcd *hcd) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int retval = 0; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); - spin_lock_init(&xhci->lock); - if (xhci->hci_version == 0x95 && link_quirk) { - xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, - "QUIRK: Not clearing Link TRB chain bits."); - xhci->quirks |= XHCI_LINK_TRB_QUIRK; - } else { - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "xHCI doesn't need link TRB QUIRK"); - } + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); + // spin_lock_init(&xhci->lock); + uk_spin_init(&xhci->lock); + // if (xhci->hci_version == 0x95 && link_quirk) { + // xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + // "QUIRK: Not clearing Link TRB chain bits."); + // xhci->quirks |= XHCI_LINK_TRB_QUIRK; + // } else { + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "xHCI doesn't need link TRB QUIRK"); + // } retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); /* Initializing Compliance Mode Recovery Data If Needed */ - if (xhci_compliance_mode_recovery_timer_quirk_check()) { - xhci->quirks |= XHCI_COMP_MODE_QUIRK; - compliance_mode_recovery_timer_init(xhci); - } + // if (xhci_compliance_mode_recovery_timer_quirk_check()) { + // xhci->quirks |= XHCI_COMP_MODE_QUIRK; + // compliance_mode_recovery_timer_init(xhci); + // } return retval; } @@ -614,11 +633,11 @@ static int xhci_run_finished(struct xhci_hcd *xhci) xhci->shared_hcd->state = HC_STATE_RUNNING; xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; - if (xhci->quirks & XHCI_NEC_HOST) - xhci_ring_cmd_db(xhci); + // if (xhci->quirks & XHCI_NEC_HOST) + // xhci_ring_cmd_db(xhci); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Finished xhci_run for USB3 roothub"); + // 
xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "Finished xhci_run for USB3 roothub"); return 0; } @@ -649,19 +668,19 @@ int xhci_run(struct usb_hcd *hcd) if (!usb_hcd_is_primary_hcd(hcd)) return xhci_run_finished(xhci); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); - ret = xhci_try_enable_msi(hcd); - if (ret) - return ret; + // ret = xhci_try_enable_msi(hcd); + // if (ret) + // return ret; temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp_64 &= ~ERST_PTR_MASK; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "ERST deq = 64'h%0lx", (long unsigned int) temp_64); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "ERST deq = 64'h%0lx", (long unsigned int) temp_64); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Set the interrupt modulation register"); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Set the interrupt modulation register"); temp = readl(&xhci->ir_set->irq_control); temp &= ~ER_IRQ_INTERVAL_MASK; temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; @@ -670,39 +689,40 @@ int xhci_run(struct usb_hcd *hcd) /* Set the HCD state before we enable the irqs */ temp = readl(&xhci->op_regs->command); temp |= (CMD_EIE); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enable interrupts, cmd = 0x%x.", temp); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Enable interrupts, cmd = 0x%x.", temp); writel(temp, &xhci->op_regs->command); temp = readl(&xhci->ir_set->irq_pending); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", - xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", + // xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); - if (xhci->quirks & XHCI_NEC_HOST) { - struct xhci_command *command; + // if (xhci->quirks & XHCI_NEC_HOST) { + // struct xhci_command *command; - command = xhci_alloc_command(xhci, false, GFP_KERNEL); - if (!command) - return -ENOMEM; + // command = xhci_alloc_command(xhci, false, GFP_KERNEL); + // if (!command) + // return -ENOMEM; - ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, - TRB_TYPE(TRB_NEC_GET_FW)); - if (ret) - xhci_free_command(xhci, command); - } - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Finished xhci_run for USB2 roothub"); + // ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, + // TRB_TYPE(TRB_NEC_GET_FW)); + // if (ret) + // xhci_free_command(xhci, command); + // } + // xhci_dbg_trace(xhci, trace_xhci_dbg_init, + // "Finished xhci_run for USB2 roothub"); - xhci_dbc_init(xhci); + // xhci_dbc_init(xhci); - xhci_debugfs_init(xhci); + // xhci_debugfs_init(xhci); return 0; } -EXPORT_SYMBOL_GPL(xhci_run); +// EXPORT_SYMBOL_GPL(xhci_run); +#if 0 /* * Stop xHCI driver. * @@ -794,6 +814,7 @@ void xhci_shutdown(struct usb_hcd *hcd) readl(&xhci->op_regs->status)); } EXPORT_SYMBOL_GPL(xhci_shutdown); +#endif #ifdef CONFIG_PM static void xhci_save_registers(struct xhci_hcd *xhci) @@ -1270,6 +1291,7 @@ EXPORT_SYMBOL_GPL(xhci_resume); /*-------------------------------------------------------------------------*/ +#if 0 /* * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT), * we'll copy the actual data into the TRB address register. 
This is limited to @@ -2373,7 +2395,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci, largest_mps = 0; else { struct xhci_virt_ep *virt_ep; - struct list_head *ep_entry; + struct uk_list_head *ep_entry; ep_entry = bw_table->interval_bw[i].endpoints.next; virt_ep = list_entry(ep_entry, @@ -4337,7 +4359,7 @@ bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb) return ops->usb_offload_skip_urb(xhci, urb); return false; } - +#endif #ifdef CONFIG_PM /* BESL to HIRD Encoding array for USB2 LPM */ @@ -5004,7 +5026,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, return xhci_change_max_exit_latency(xhci, udev, mel); } #else /* CONFIG_PM */ - +#if 0 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, struct usb_device *udev, int enable) { @@ -5027,10 +5049,11 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, { return 0; } +#endif #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ - +#if 0 /* Once a hub descriptor is fetched for a device, we need to update the xHC's * internal data structures for the device. */ @@ -5150,7 +5173,7 @@ static int xhci_get_frame(struct usb_hcd *hcd) /* EHCI mods by the periodic size. Why? */ return readl(&xhci->run_regs->microframe_index) >> 3; } - +#endif int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) { struct xhci_hcd *xhci; @@ -5158,18 +5181,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) * TODO: Check with DWC3 clients for sysdev according to * quirks */ - struct device *dev = hcd->self.sysdev; + // struct device *dev = hcd->self.sysdev; unsigned int minor_rev; int retval; /* Accept arbitrarily long scatter-gather lists */ - hcd->self.sg_tablesize = ~0; + // hcd->self.sg_tablesize = ~0; /* support to build packet from discontinuous buffers */ - hcd->self.no_sg_constraint = 1; + // hcd->self.no_sg_constraint = 1; /* XHCI controllers don't stop the ep queue on short packets :| */ - hcd->self.no_stop_on_short = 1; + // hcd->self.no_stop_on_short = 1; xhci = hcd_to_xhci(hcd); @@ -5180,7 +5203,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) * The xHCI driver will register the USB 3.0 roothub. */ hcd->speed = HCD_USB2; - hcd->self.root_hub->speed = USB_SPEED_HIGH; + // hcd->self.root_hub->speed = USB_SPEED_HIGH; /* * USB 2.0 roothub under xHCI has an integrated TT, * (rate matching hub) as opposed to having an OHCI/UHCI @@ -5205,19 +5228,20 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) switch (minor_rev) { case 2: hcd->speed = HCD_USB32; - hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; - hcd->self.root_hub->rx_lanes = 2; - hcd->self.root_hub->tx_lanes = 2; + // hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + // hcd->self.root_hub->rx_lanes = 2; + // hcd->self.root_hub->tx_lanes = 2; break; case 1: hcd->speed = HCD_USB31; - hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + // hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; break; } - xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", - minor_rev, - minor_rev ? "Enhanced " : ""); - + // xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", + // minor_rev, + // minor_rev ? "Enhanced " : ""); + uk_pr_info("Host supports USB 3.%x %sSuperSpeed\n", + minor_rev, minor_rev ? "Enhanced " : ""); xhci->usb3_rhub.hcd = hcd; /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. 
@@ -5225,7 +5249,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) return 0; } - mutex_init(&xhci->mutex); + // mutex_init(&xhci->mutex); + uk_mutex_init(&xhci->mutex); xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); @@ -5241,9 +5266,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (xhci->hci_version > 0x100) xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); - xhci->quirks |= quirks; + // xhci->quirks |= quirks; - get_quirks(dev, xhci); + // get_quirks(dev, xhci); /* In xhci controllers which follow xhci 1.0 spec gives a spurious * success event after a short transfer. This quirk will ignore such @@ -5257,14 +5282,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (retval) return retval; - xhci_zero_64b_regs(xhci); + // xhci_zero_64b_regs(xhci); - xhci_dbg(xhci, "Resetting HCD\n"); + // xhci_dbg(xhci, "Resetting HCD\n"); + uk_pr_info("Resetting HCD\n"); /* Reset the internal HC memory state and registers. */ retval = xhci_reset(xhci); if (retval) return retval; - xhci_dbg(xhci, "Reset complete\n"); + // xhci_dbg(xhci, "Reset complete\n"); + uk_pr_info("Reset complete\n"); /* * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) @@ -5274,40 +5301,44 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) * DMA_BIT_MASK(32)) in this xhci_gen_setup(). */ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) - xhci->hcc_params &= ~BIT(0); + xhci->hcc_params &= ~UK_BIT(0); /* Set dma_mask and coherent_dma_mask to 64-bits, * if xHC supports 64-bit addressing */ - if (HCC_64BIT_ADDR(xhci->hcc_params) && - !dma_set_mask(dev, DMA_BIT_MASK(64))) { - xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); - dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); - } else { - /* - * This is to avoid error in cases where a 32-bit USB - * controller is used on a 64-bit capable system. - */ - retval = dma_set_mask(dev, DMA_BIT_MASK(32)); - if (retval) - return retval; - xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); - dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); - } - - xhci_dbg(xhci, "Calling HCD init\n"); + // if (HCC_64BIT_ADDR(xhci->hcc_params) && + // !dma_set_mask(dev, DMA_BIT_MASK(64))) { + // xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); + // dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + // } else { + // /* + // * This is to avoid error in cases where a 32-bit USB + // * controller is used on a 64-bit capable system. + // */ + // retval = dma_set_mask(dev, DMA_BIT_MASK(32)); + // if (retval) + // return retval; + // xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); + // dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + // } + + // xhci_dbg(xhci, "Calling HCD init\n"); + uk_pr_info("Calling HCD init\n"); /* Initialize HCD and host controller data structures. 
*/ retval = xhci_init(hcd); if (retval) return retval; - xhci_dbg(xhci, "Called HCD init\n"); + // xhci_dbg(xhci, "Called HCD init\n"); + uk_pr_info("Called HCD init\n"); - xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", - xhci->hcc_params, xhci->hci_version, xhci->quirks); + // xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", + // xhci->hcc_params, xhci->hci_version, xhci->quirks); + uk_pr_info("hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", + xhci->hcc_params, xhci->hci_version, xhci->quirks); return 0; } -EXPORT_SYMBOL_GPL(xhci_gen_setup); - +// EXPORT_SYMBOL_GPL(xhci_gen_setup); +#if 0 static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { @@ -5328,7 +5359,7 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); spin_unlock_irqrestore(&xhci->lock, flags); } - +#endif static const struct hc_driver xhci_hc_driver = { .description = "xhci-hcd", .product_desc = "xHCI Host Controller", @@ -5337,7 +5368,7 @@ static const struct hc_driver xhci_hc_driver = { /* * generic hardware linkage */ - .irq = xhci_irq, + // .irq = xhci_irq, .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | HCD_BH, @@ -5346,6 +5377,7 @@ static const struct hc_driver xhci_hc_driver = { */ .reset = NULL, /* set in xhci_init_driver() */ .start = xhci_run, +#if 0 .stop = xhci_stop, .shutdown = xhci_shutdown, @@ -5393,12 +5425,13 @@ static const struct hc_driver xhci_hc_driver = { .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, .find_raw_port_number = xhci_find_raw_port_number, .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, +#endif }; void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over) { - BUG_ON(!over); + // BUG_ON(!over); /* Copy the generic table to drv then apply the overrides */ *drv = xhci_hc_driver; @@ -5409,6 +5442,7 @@ void xhci_init_driver(struct hc_driver *drv, drv->reset = over->reset; if (over->start) drv->start = over->start; +#if 0 if (over->add_endpoint) drv->add_endpoint = over->add_endpoint; if (over->drop_endpoint) @@ -5423,8 +5457,10 @@ void xhci_init_driver(struct hc_driver *drv, drv->bus_suspend = over->bus_suspend; if (over->bus_resume) drv->bus_resume = over->bus_resume; +#endif } } +#if 0 EXPORT_SYMBOL_GPL(xhci_init_driver); MODULE_DESCRIPTION(DRIVER_DESC); @@ -5470,3 +5506,4 @@ static void __exit xhci_hcd_fini(void) module_init(xhci_hcd_init); module_exit(xhci_hcd_fini); +#endif \ No newline at end of file diff --git a/usb/host/xhci.h b/usb/host/xhci.h index 71c51de..98d2a04 100644 --- a/usb/host/xhci.h +++ b/usb/host/xhci.h @@ -12,16 +12,55 @@ #ifndef __LINUX_XHCI_HCD_H #define __LINUX_XHCI_HCD_H -#include -#include -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include +// #include /* Code sharing between pci-quirks and xhci hcd */ -#include "xhci-ext-caps.h" -#include "pci-quirks.h" +// #include "xhci-ext-caps.h" +// #include "pci-quirks.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xhci-ext-caps.h" +#include "hcd.h" + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long u64; +typedef u64 dma_addr_t; +typedef u32 __le32; +typedef u64 __le64; +#ifndef __aligned +#define __aligned(x) __attribute__((__aligned__(x))) +#endif +#ifndef __iomem +#define 
__iomem +#endif + +typedef enum { + GFP_KERNEL, + GFP_ATOMIC, + __GFP_HIGHMEM, + __GFP_HIGH +} gfp_t; /* max buffer size for trace and debug messages */ #define XHCI_MSG_MAX 500 @@ -810,12 +849,12 @@ struct xhci_command { /* If completion is null, no one is waiting on this command * and the structure can be freed after the command completes. */ - struct completion *completion; + // struct completion *completion; union xhci_trb *command_trb; - struct list_head cmd_list; + struct uk_list_head cmd_list; - ANDROID_KABI_RESERVE(1); - ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(2); }; /* drop context bitmasks */ @@ -856,7 +895,7 @@ struct xhci_stream_info { unsigned int num_stream_ctxs; dma_addr_t ctx_array_dma; /* For mapping physical TRB addresses to segments in stream rings */ - struct radix_tree_root trb_address_map; + // struct radix_tree_root trb_address_map; struct xhci_command *free_streams_command; }; @@ -948,9 +987,9 @@ struct xhci_virt_ep { /* usb_hub_clear_tt_buffer is in progress */ #define EP_CLEARING_TT (1 << 8) /* ---- Related to URB cancellation ---- */ - struct list_head cancelled_td_list; + struct uk_list_head cancelled_td_list; /* Watchdog timer for stop endpoint command to cancel URBs */ - struct timer_list stop_cmd_timer; + // struct timer_list stop_cmd_timer; struct xhci_hcd *xhci; /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue * command. We'll need to update the ring's dequeue segment and dequeue @@ -968,7 +1007,7 @@ struct xhci_virt_ep { bool skip; /* Bandwidth checking storage */ struct xhci_bw_info bw_info; - struct list_head bw_endpoint_list; + struct uk_list_head bw_endpoint_list; /* Isoch Frame ID checking storage */ int next_frame_id; /* Use new Isoch TRB layout needed for extended TBC support */ @@ -986,7 +1025,7 @@ struct xhci_interval_bw { /* Sorted by max packet size. * Head of the list is the greatest max packet size. */ - struct list_head endpoints; + struct uk_list_head endpoints; /* How many endpoints of each speed are present. */ unsigned int overhead[3]; }; @@ -1006,7 +1045,7 @@ struct xhci_interval_bw_table { struct xhci_virt_device { int slot_id; - struct usb_device *udev; + // struct usb_device *udev; /* * Commands to the hardware are passed an "input context" that * tells the hardware what to change in its data structures. @@ -1038,6 +1077,7 @@ struct xhci_virt_device { void *debugfs_private; }; + /* * For each roothub, keep track of the bandwidth information for each periodic * interval. @@ -1047,13 +1087,13 @@ struct xhci_virt_device { * endpoints on the devices under that TT will appear in the TT structure. 
*/ struct xhci_root_port_bw_info { - struct list_head tts; + struct uk_list_head tts; unsigned int num_active_tts; struct xhci_interval_bw_table bw_table; }; struct xhci_tt_bw_info { - struct list_head tt_list; + struct uk_list_head tt_list; int slot_id; int ttport; struct xhci_interval_bw_table bw_table; @@ -1541,7 +1581,7 @@ struct xhci_segment { unsigned int bounce_offs; unsigned int bounce_len; - ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(1); }; enum xhci_cancelled_td_status { @@ -1552,11 +1592,11 @@ enum xhci_cancelled_td_status { }; struct xhci_td { - struct list_head td_list; - struct list_head cancelled_td_list; + struct uk_list_head td_list; + struct uk_list_head cancelled_td_list; int status; enum xhci_cancelled_td_status cancel_status; - struct urb *urb; + // struct urb *urb; struct xhci_segment *start_seg; union xhci_trb *first_trb; union xhci_trb *last_trb; @@ -1615,7 +1655,7 @@ struct xhci_ring { struct xhci_segment *enq_seg; union xhci_trb *dequeue; struct xhci_segment *deq_seg; - struct list_head td_list; + struct uk_list_head td_list; /* * Write the cycle state into the TRB cycle field to give ownership of * the TRB to the host controller (if we are the producer), or to check @@ -1630,10 +1670,10 @@ struct xhci_ring { unsigned int bounce_buf_len; enum xhci_ring_type type; bool last_td_was_short; - struct radix_tree_root *trb_address_map; + // struct radix_tree_root *trb_address_map; - ANDROID_KABI_RESERVE(1); - ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(2); }; struct xhci_erst_entry { @@ -1652,7 +1692,7 @@ struct xhci_erst { /* Num entries the ERST can contain */ unsigned int erst_size; - ANDROID_KABI_RESERVE(1); + // ANDROID_KABI_RESERVE(1); }; struct xhci_scratchpad { @@ -1698,9 +1738,10 @@ struct s3_save { /* Use for lpm */ struct dev_info { u32 dev_id; - struct list_head list; + struct uk_list_head list; }; +#define USB_MAXCHILDREN 31 struct xhci_bus_state { unsigned long bus_suspended; unsigned long next_statechange; @@ -1715,8 +1756,8 @@ struct xhci_bus_state { unsigned long resuming_ports; /* Which ports are waiting on RExit to U0 transition. 
*/ unsigned long rexit_ports; - struct completion rexit_done[USB_MAXCHILDREN]; - struct completion u3exit_done[USB_MAXCHILDREN]; + // struct completion rexit_done[USB_MAXCHILDREN]; + // struct completion u3exit_done[USB_MAXCHILDREN]; }; @@ -1771,7 +1812,8 @@ struct xhci_hcd { __u32 hcc_params; __u32 hcc_params2; - spinlock_t lock; + // spinlock_t lock; + uk_spinlock lock; /* packed release number */ u8 sbrn; @@ -1790,10 +1832,10 @@ struct xhci_hcd { /* msi-x vectors */ int msix_count; /* optional clocks */ - struct clk *clk; - struct clk *reg_clk; + // struct clk *clk; + // struct clk *reg_clk; /* optional reset controller */ - struct reset_control *reset; + // struct reset_control *reset; /* data structures */ struct xhci_device_context_array *dcbaa; struct xhci_ring *cmd_ring; @@ -1801,21 +1843,21 @@ struct xhci_hcd { #define CMD_RING_STATE_RUNNING (1 << 0) #define CMD_RING_STATE_ABORTED (1 << 1) #define CMD_RING_STATE_STOPPED (1 << 2) - struct list_head cmd_list; + struct uk_list_head cmd_list; unsigned int cmd_ring_reserved_trbs; - struct delayed_work cmd_timer; - struct completion cmd_ring_stop_completion; + // struct delayed_work cmd_timer; + // struct completion cmd_ring_stop_completion; struct xhci_command *current_cmd; struct xhci_ring *event_ring; struct xhci_erst erst; /* Scratchpad */ struct xhci_scratchpad *scratchpad; /* Store LPM test failed devices' information */ - struct list_head lpm_failed_devs; + struct uk_list_head lpm_failed_devs; /* slot enabling and address device helpers */ /* these are not thread safe so use mutex */ - struct mutex mutex; + struct uk_mutex mutex; /* For USB 3.0 LPM enable/disable. */ struct xhci_command *lpm_command; /* Internal mirror of the HW's dcbaa */ @@ -1824,10 +1866,10 @@ struct xhci_hcd { struct xhci_root_port_bw_info *rh_bw; /* DMA pools */ - struct dma_pool *device_pool; - struct dma_pool *segment_pool; - struct dma_pool *small_streams_pool; - struct dma_pool *medium_streams_pool; + // struct dma_pool *device_pool; + // struct dma_pool *segment_pool; + // struct dma_pool *small_streams_pool; + // struct dma_pool *medium_streams_pool; /* Host controller watchdog timer structures */ unsigned int xhc_state; @@ -1850,11 +1892,11 @@ struct xhci_hcd { #define XHCI_STATE_HALTED (1 << 1) #define XHCI_STATE_REMOVING (1 << 2) unsigned long long quirks; -#define XHCI_LINK_TRB_QUIRK BIT_ULL(0) -#define XHCI_RESET_EP_QUIRK BIT_ULL(1) -#define XHCI_NEC_HOST BIT_ULL(2) -#define XHCI_AMD_PLL_FIX BIT_ULL(3) -#define XHCI_SPURIOUS_SUCCESS BIT_ULL(4) +#define XHCI_LINK_TRB_QUIRK UK_BIT_ULL(0) +#define XHCI_RESET_EP_QUIRK UK_BIT_ULL(1) +#define XHCI_NEC_HOST UK_BIT_ULL(2) +#define XHCI_AMD_PLL_FIX UK_BIT_ULL(3) +#define XHCI_SPURIOUS_SUCCESS UK_BIT_ULL(4) /* * Certain Intel host controllers have a limit to the number of endpoint * contexts they can handle. Ideally, they would signal that they can't handle @@ -1864,44 +1906,44 @@ struct xhci_hcd { * commands, reset device commands, disable slot commands, and address device * commands. 
*/ -#define XHCI_EP_LIMIT_QUIRK BIT_ULL(5) -#define XHCI_BROKEN_MSI BIT_ULL(6) -#define XHCI_RESET_ON_RESUME BIT_ULL(7) -#define XHCI_SW_BW_CHECKING BIT_ULL(8) -#define XHCI_AMD_0x96_HOST BIT_ULL(9) -#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) -#define XHCI_LPM_SUPPORT BIT_ULL(11) -#define XHCI_INTEL_HOST BIT_ULL(12) -#define XHCI_SPURIOUS_REBOOT BIT_ULL(13) -#define XHCI_COMP_MODE_QUIRK BIT_ULL(14) -#define XHCI_AVOID_BEI BIT_ULL(15) -#define XHCI_PLAT BIT_ULL(16) -#define XHCI_SLOW_SUSPEND BIT_ULL(17) -#define XHCI_SPURIOUS_WAKEUP BIT_ULL(18) +#define XHCI_EP_LIMIT_QUIRK UK_BIT_ULL(5) +#define XHCI_BROKEN_MSI UK_BIT_ULL(6) +#define XHCI_RESET_ON_RESUME UK_BIT_ULL(7) +#define XHCI_SW_BW_CHECKING UK_BIT_ULL(8) +#define XHCI_AMD_0x96_HOST UK_BIT_ULL(9) +#define XHCI_TRUST_TX_LENGTH UK_BIT_ULL(10) +#define XHCI_LPM_SUPPORT UK_BIT_ULL(11) +#define XHCI_INTEL_HOST UK_BIT_ULL(12) +#define XHCI_SPURIOUS_REBOOT UK_BIT_ULL(13) +#define XHCI_COMP_MODE_QUIRK UK_BIT_ULL(14) +#define XHCI_AVOID_BEI UK_BIT_ULL(15) +#define XHCI_PLAT UK_BIT_ULL(16) +#define XHCI_SLOW_SUSPEND UK_BIT_ULL(17) +#define XHCI_SPURIOUS_WAKEUP UK_BIT_ULL(18) /* For controllers with a broken beyond repair streams implementation */ -#define XHCI_BROKEN_STREAMS BIT_ULL(19) -#define XHCI_PME_STUCK_QUIRK BIT_ULL(20) -#define XHCI_MTK_HOST BIT_ULL(21) -#define XHCI_SSIC_PORT_UNUSED BIT_ULL(22) -#define XHCI_NO_64BIT_SUPPORT BIT_ULL(23) -#define XHCI_MISSING_CAS BIT_ULL(24) +#define XHCI_BROKEN_STREAMS UK_BIT_ULL(19) +#define XHCI_PME_STUCK_QUIRK UK_BIT_ULL(20) +#define XHCI_MTK_HOST UK_BIT_ULL(21) +#define XHCI_SSIC_PORT_UNUSED UK_BIT_ULL(22) +#define XHCI_NO_64BIT_SUPPORT UK_BIT_ULL(23) +#define XHCI_MISSING_CAS UK_BIT_ULL(24) /* For controller with a broken Port Disable implementation */ -#define XHCI_BROKEN_PORT_PED BIT_ULL(25) -#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 BIT_ULL(26) -#define XHCI_U2_DISABLE_WAKE BIT_ULL(27) -#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL BIT_ULL(28) -#define XHCI_HW_LPM_DISABLE BIT_ULL(29) -#define XHCI_SUSPEND_DELAY BIT_ULL(30) -#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) -#define XHCI_ZERO_64B_REGS BIT_ULL(32) -#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) -#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) -#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) -#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36) -#define XHCI_SKIP_PHY_INIT BIT_ULL(37) -#define XHCI_DISABLE_SPARSE BIT_ULL(38) -#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) -#define XHCI_NO_SOFT_RETRY BIT_ULL(40) +#define XHCI_BROKEN_PORT_PED UK_BIT_ULL(25) +#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 UK_BIT_ULL(26) +#define XHCI_U2_DISABLE_WAKE UK_BIT_ULL(27) +#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL UK_BIT_ULL(28) +#define XHCI_HW_LPM_DISABLE UK_BIT_ULL(29) +#define XHCI_SUSPEND_DELAY UK_BIT_ULL(30) +#define XHCI_INTEL_USB_ROLE_SW UK_BIT_ULL(31) +#define XHCI_ZERO_64B_REGS UK_BIT_ULL(32) +#define XHCI_DEFAULT_PM_RUNTIME_ALLOW UK_BIT_ULL(33) +#define XHCI_RESET_PLL_ON_DISCONNECT UK_BIT_ULL(34) +#define XHCI_SNPS_BROKEN_SUSPEND UK_BIT_ULL(35) +#define XHCI_RENESAS_FW_QUIRK UK_BIT_ULL(36) +#define XHCI_SKIP_PHY_INIT UK_BIT_ULL(37) +#define XHCI_DISABLE_SPARSE UK_BIT_ULL(38) +#define XHCI_SG_TRB_CACHE_SIZE_QUIRK UK_BIT_ULL(39) +#define XHCI_NO_SOFT_RETRY UK_BIT_ULL(40) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1919,24 +1961,24 @@ struct xhci_hcd { struct xhci_port_cap *port_caps; unsigned int num_port_caps; /* Compliance Mode Recovery Data */ - struct timer_list comp_mode_recovery_timer; + // struct timer_list comp_mode_recovery_timer; u32 
port_status_u0; u16 test_mode; /* Compliance Mode Timer Triggered every 2 seconds */ #define COMP_MODE_RCVRY_MSECS 2000 - struct dentry *debugfs_root; - struct dentry *debugfs_slots; - struct list_head regset_list; + // struct dentry *debugfs_root; + // struct dentry *debugfs_slots; + struct uk_list_head regset_list; void *dbc; /* Used for bug 194461020 */ - ANDROID_KABI_USE(1, struct xhci_vendor_ops *vendor_ops); + // ANDROID_KABI_USE(1, struct xhci_vendor_ops *vendor_ops); - ANDROID_KABI_RESERVE(2); - ANDROID_KABI_RESERVE(3); - ANDROID_KABI_RESERVE(4); + // ANDROID_KABI_RESERVE(2); + // ANDROID_KABI_RESERVE(3); + // ANDROID_KABI_RESERVE(4); /* platform-specific data -- must come last */ unsigned long priv[] __aligned(sizeof(s64)); @@ -1947,6 +1989,7 @@ struct xhci_driver_overrides { size_t extra_priv_size; int (*reset)(struct usb_hcd *hcd); int (*start)(struct usb_hcd *hcd); +#if 0 int (*add_endpoint)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); int (*drop_endpoint)(struct usb_hcd *hcd, struct usb_device *udev, @@ -1956,6 +1999,7 @@ struct xhci_driver_overrides { int (*address_device)(struct usb_hcd *hcd, struct usb_device *udev); int (*bus_suspend)(struct usb_hcd *hcd); int (*bus_resume)(struct usb_hcd *hcd); +#endif }; #define XHCI_CFC_DELAY 10 @@ -1978,6 +2022,7 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) return xhci->main_hcd; } +#if 0 #define xhci_dbg(xhci, fmt, args...) \ dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_err(xhci, fmt, args...) \ @@ -1988,7 +2033,7 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_info(xhci, fmt, args...) \ dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args) - +#endif /* * Registers should always be accessed with double word or quad word accesses. * @@ -1998,31 +2043,57 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) * xHCI implementations that do not support 64-bit address pointers will ignore * the high dword, and write order is irrelevant. 
*/ -static inline u64 xhci_read_64(const struct xhci_hcd *xhci, +static inline u64 xhci_read_64(const struct xhci_hcd *xhci __unused, __le64 __iomem *regs) { - return lo_hi_readq(regs); + // return lo_hi_readq(regs); + return ioreg_read64(regs); } -static inline void xhci_write_64(struct xhci_hcd *xhci, +static inline void xhci_write_64(struct xhci_hcd *xhci __unused, const u64 val, __le64 __iomem *regs) { - lo_hi_writeq(val, regs); + // lo_hi_writeq(val, regs); + ioreg_write64(regs, val); } +static inline u32 readl(__le32 __iomem *regs) +{ + return ioreg_read32(regs); +} +static inline void writel(const u32 val, __le32 __iomem *regs) +{ + ioreg_write32(regs, val); +} + +#define cpu_to_le32(x) ((uint32_t)(x)) +#define cpu_to_le64(x) ((uint64_t)(x)) +#define le32_to_cpu(x) ((uint32_t)(x)) +#define le64_to_cpu(x) ((uint64_t)(x)) +#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) +#define lower_32_bits(n) ((uint32_t)(n)) + static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) { return xhci->quirks & XHCI_LINK_TRB_QUIRK; } +static inline int readl_poll_timeout_atomic(void) +{ + return 0; +} + /* xHCI debugging */ +#if 0 char *xhci_get_slot_state(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), const char *fmt, ...); +#endif /* xHCI memory management */ -void xhci_mem_cleanup(struct xhci_hcd *xhci); +// void xhci_mem_cleanup(struct xhci_hcd *xhci); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); +#if 0 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); @@ -2050,19 +2121,24 @@ void xhci_slot_copy(struct xhci_hcd *xhci, int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags); +#endif struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, unsigned int cycle_state, enum xhci_ring_type type, unsigned int max_packet, gfp_t flags); void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); +#if 0 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_trbs, gfp_t flags); +#endif int xhci_alloc_erst(struct xhci_hcd *xhci, struct xhci_ring *evt_ring, struct xhci_erst *erst, gfp_t flags); void xhci_initialize_ring_info(struct xhci_ring *ring, unsigned int cycle_state); +#if 0 void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); + void xhci_free_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index); @@ -2082,17 +2158,22 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, struct xhci_ring *xhci_dma_to_transfer_ring( struct xhci_virt_ep *ep, u64 address); +#endif struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, bool allocate_completion, gfp_t mem_flags); struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, bool allocate_completion, gfp_t mem_flags); +#if 0 void xhci_urb_free_priv(struct urb_priv *urb_priv); void xhci_free_command(struct xhci_hcd *xhci, struct xhci_command *command); +#endif struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags); +#if 0 void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +#endif /* xHCI host controller glue */ typedef void 
(*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); @@ -2103,9 +2184,10 @@ int xhci_start(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); -void xhci_shutdown(struct usb_hcd *hcd); +// void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); +#if 0 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, @@ -2126,9 +2208,11 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); - +#endif /* xHCI ring, segment, TRB, and TD functions */ + dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); +#if 0 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_segment *start_seg, union xhci_trb *start_trb, union xhci_trb *end_trb, dma_addr_t suspect_dma, bool debug); @@ -2174,8 +2258,9 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, void xhci_cleanup_command_queue(struct xhci_hcd *xhci); void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring); unsigned int count_trbs(u64 addr, u64 len); - +#endif /* xHCI roothub code */ +#if 0 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, u32 link_state); void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port, @@ -2187,7 +2272,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); void xhci_hc_died(struct xhci_hcd *xhci); - +#endif #ifdef CONFIG_PM int xhci_bus_suspend(struct usb_hcd *hcd); int xhci_bus_resume(struct usb_hcd *hcd); @@ -2198,16 +2283,21 @@ unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd); #define xhci_get_resuming_ports NULL #endif /* CONFIG_PM */ +#if 0 u32 xhci_port_state_to_neutral(u32 state); int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, u16 port); void xhci_ring_device(struct xhci_hcd *xhci, int slot_id); +#endif /* xHCI contexts */ +#if 0 struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx); struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); +#endif +#if 0 struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id); @@ -2219,7 +2309,9 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id); } +#endif +#if 0 /** * struct xhci_vendor_ops - function callbacks for vendor specific operations * @vendor_init: called for vendor init process @@ -2266,7 +2358,9 @@ void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index); bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index); +#endif +#if 0 /* * TODO: As per spec Isochronous IDT transmissions are supported. 
We bypass * them anyways as we where unable to find a device that matches the @@ -2300,6 +2394,7 @@ static inline char *xhci_slot_state_string(u32 state) } } + static inline const char *xhci_decode_trb(char *str, size_t size, u32 field0, u32 field1, u32 field2, u32 field3) { @@ -2829,5 +2924,6 @@ static inline const char *xhci_decode_ep_context(char *str, u32 info, return str; } +#endif #endif /* __LINUX_XHCI_HCD_H */ -- Gitee From 28ee0b8f99ff5830d2cd771cf1901e5b3c1732a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=94=A1=E9=93=96?= Date: Wed, 5 Feb 2025 12:29:31 +0800 Subject: [PATCH 4/5] import semidrive phy driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 蔡铖 --- usb/phy/semidrive/phy-semidrive-usb.c | 108 ++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 usb/phy/semidrive/phy-semidrive-usb.c diff --git a/usb/phy/semidrive/phy-semidrive-usb.c b/usb/phy/semidrive/phy-semidrive-usb.c new file mode 100644 index 0000000..2b879a2 --- /dev/null +++ b/usb/phy/semidrive/phy-semidrive-usb.c @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2020 Semidrive Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include + +#define USB_PHY_NCR_CTRL0 0x10000 +#define USB_PHY_NCR_CTRL3 0x1000C +#define USB_PHY_NCR_CTRL6 0x10018 + +struct phy_semidrive_usb_priv { + void __iomem *base; + bool soc_taihang; +}; + +static int phy_semidrive_usb_init(struct phy *phy) +{ + struct phy_semidrive_usb_priv *priv = phy_get_drvdata(phy); + unsigned int data; + + /* use internal phy clock and reset usb phy, reset high effective */ + data = readl(priv->base + USB_PHY_NCR_CTRL0); + data &= ~(1<<18); + data |= (1<<0); + writel(data, priv->base + USB_PHY_NCR_CTRL0); + + if (priv->soc_taihang) { + data = readl(priv->base + USB_PHY_NCR_CTRL6); + data |= (1<<0); + writel(data, priv->base + USB_PHY_NCR_CTRL6); + + data = readl(priv->base + USB_PHY_NCR_CTRL3); + data |= (1<<8); + writel(data, priv->base + USB_PHY_NCR_CTRL3); + } + + udelay(30); + + data = readl(priv->base + USB_PHY_NCR_CTRL0); + data &= ~(1<<0); + writel(data, priv->base + USB_PHY_NCR_CTRL0); + + return 0; +} + +static const struct phy_ops phy_semidrive_usb_ops = { + .init = phy_semidrive_usb_init, + .owner = THIS_MODULE, +}; + +static int phy_semidrive_usb_probe(struct platform_device *pdev) +{ + struct phy_semidrive_usb_priv *priv; + struct resource *res; + struct phy *phy; + struct phy_provider *phy_provider; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + if (device_property_read_bool(&pdev->dev, "SOC_Thang")) + priv->soc_taihang = true; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + phy = devm_phy_create(&pdev->dev, NULL, &phy_semidrive_usb_ops); + if (IS_ERR(phy)) { + dev_err(&pdev->dev, "failed to create PHY\n"); + return PTR_ERR(phy); + } + + phy_set_drvdata(phy, priv); + + phy_provider = + devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id phy_semidrive_usb_of_match[] = { + { .compatible = "semidrive,usb-phy" }, + { }, +}; +MODULE_DEVICE_TABLE(of, phy_semidrive_usb_of_match); + +static struct platform_driver 
phy_semidrive_usb_driver = { + .probe = phy_semidrive_usb_probe, + .driver = { + .name = "semidrive-usb-phy", + .of_match_table = phy_semidrive_usb_of_match, + }, +}; +module_platform_driver(phy_semidrive_usb_driver); + +MODULE_AUTHOR("Dejin Zheng "); +MODULE_DESCRIPTION("Semidrive PHY driver for USB"); +MODULE_LICENSE("GPL"); -- Gitee From 9e3519fc180ded3e06d773cbcc6c254d64ad4bb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=94=A1=E9=93=96?= Date: Wed, 5 Feb 2025 14:06:58 +0800 Subject: [PATCH 5/5] modify semidriver phy driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 蔡铖 --- usb/dwc3/core.c | 55 +++++++------ usb/dwc3/core.h | 3 +- usb/phy/semidrive/phy-semidrive-usb.c | 113 ++++++++++++++++++++++---- usb/usb_platform.c | 6 +- 4 files changed, 133 insertions(+), 44 deletions(-) diff --git a/usb/dwc3/core.c b/usb/dwc3/core.c index 01b24ca..5d2697b 100644 --- a/usb/dwc3/core.c +++ b/usb/dwc3/core.c @@ -45,6 +45,7 @@ #include #include #include +#include // #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */ @@ -920,8 +921,8 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GCTL, reg); } -#if 0 static int dwc3_core_get_phy(struct dwc3 *dwc); +#if 0 static int dwc3_core_ulpi_init(struct dwc3 *dwc); /* set global incr burst type configuration registers */ @@ -1050,26 +1051,30 @@ static int dwc3_core_init(struct dwc3 *dwc) goto err0; dwc->ulpi_ready = true; } +#endif if (!dwc->phys_ready) { ret = dwc3_core_get_phy(dwc); + // if (ret) + // goto err0a; if (ret) - goto err0a; + return ret; dwc->phys_ready = true; } - usb_phy_init(dwc->usb2_phy); - usb_phy_init(dwc->usb3_phy); - ret = phy_init(dwc->usb2_generic_phy); - if (ret < 0) - goto err0a; + // usb_phy_init(dwc->usb2_phy); + // usb_phy_init(dwc->usb3_phy); + // ret = phy_init(dwc->usb2_generic_phy); + // if (ret < 0) + // goto err0a; + + // ret = phy_init(dwc->usb3_generic_phy); + ret = tn_phy_init(dwc->usb3_generic_phy); + // if (ret < 0) { + // phy_exit(dwc->usb2_generic_phy); + // goto err0a; + // } - ret = phy_init(dwc->usb3_generic_phy); - if (ret < 0) { - phy_exit(dwc->usb2_generic_phy); - goto err0a; - } -#endif ret = dwc3_core_soft_reset(dwc); if (ret) @@ -1238,9 +1243,9 @@ static int dwc3_core_init(struct dwc3 *dwc) return ret; } -#if 0 static int dwc3_core_get_phy(struct dwc3 *dwc) { +#if 0 struct device *dev = dwc->dev; struct device_node *node = dev->of_node; int ret; @@ -1280,20 +1285,22 @@ static int dwc3_core_get_phy(struct dwc3 *dwc) return dev_err_probe(dev, ret, "no usb2 phy configured\n"); } } - - dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy"); - if (IS_ERR(dwc->usb3_generic_phy)) { - ret = PTR_ERR(dwc->usb3_generic_phy); - if (ret == -ENOSYS || ret == -ENODEV) { - dwc->usb3_generic_phy = NULL; - } else { - return dev_err_probe(dev, ret, "no usb3 phy configured\n"); - } +#endif + // dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy"); + dwc->usb3_generic_phy = tn_get_phy_by_node(0xf4b0); + if (!(dwc->usb3_generic_phy)) { + // ret = PTR_ERR(dwc->usb3_generic_phy); + // if (ret == -ENOSYS || ret == -ENODEV) { + // dwc->usb3_generic_phy = NULL; + // } else { + // return dev_err_probe(dev, ret, "no usb3 phy configured\n"); + // } + uk_pr_err("Fail to get usb phy device.\n"); + return -EAGAIN; } return 0; } -#endif static int dwc3_core_init_mode(struct dwc3 *dwc) { diff --git a/usb/dwc3/core.h b/usb/dwc3/core.h index 344c028..4cfcbc1 100644 --- a/usb/dwc3/core.h +++ b/usb/dwc3/core.h @@ -45,6 +45,7 @@ #include 
#include #include +#include typedef unsigned char u8; typedef unsigned short u16; @@ -1216,7 +1217,7 @@ struct dwc3 { // struct usb_phy *usb3_phy; // struct phy *usb2_generic_phy; - // struct phy *usb3_generic_phy; + struct tn_phy *usb3_generic_phy; bool phys_ready; diff --git a/usb/phy/semidrive/phy-semidrive-usb.c b/usb/phy/semidrive/phy-semidrive-usb.c index 2b879a2..14ae4ba 100644 --- a/usb/phy/semidrive/phy-semidrive-usb.c +++ b/usb/phy/semidrive/phy-semidrive-usb.c @@ -6,33 +6,74 @@ * warranty of any kind, whether express or implied. */ +#ifndef __PHY_SEMIDRIVE_USB_H__ +#define __PHY_SEMIDRIVE_USB_H__ + +#if 0 #include #include #include #include #include #include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +# define __iomem __attribute__((noderef, address_space(__iomem))) #define USB_PHY_NCR_CTRL0 0x10000 #define USB_PHY_NCR_CTRL3 0x1000C #define USB_PHY_NCR_CTRL6 0x10018 +#if 0 struct phy_semidrive_usb_priv { void __iomem *base; bool soc_taihang; }; +#endif + +static inline uint32_t readl(uint32_t __iomem *regs) +{ + return ioreg_read32(regs); +} + +static inline void writel(const uint32_t val, uint32_t __iomem *regs) +{ + ioreg_write32(regs, val); +} -static int phy_semidrive_usb_init(struct phy *phy) +static void udelay(uint8_t usec) { - struct phy_semidrive_usb_priv *priv = phy_get_drvdata(phy); + uint64_t count = 0; + uint64_t limit = usec * 100; + while (count < limit) { + count++; + } +} + +static int phy_semidrive_usb_ops_init(struct tn_phy *phy) +{ + // struct phy_semidrive_usb_priv *priv = phy_get_drvdata(phy); unsigned int data; /* use internal phy clock and reset usb phy, reset high effective */ - data = readl(priv->base + USB_PHY_NCR_CTRL0); + // data = readl(priv->base + USB_PHY_NCR_CTRL0); + data = readl(phy->base + USB_PHY_NCR_CTRL0); data &= ~(1<<18); data |= (1<<0); - writel(data, priv->base + USB_PHY_NCR_CTRL0); - + // writel(data, priv->base + USB_PHY_NCR_CTRL0); + writel(data, phy->base + USB_PHY_NCR_CTRL0); +#if 0 if (priv->soc_taihang) { data = readl(priv->base + USB_PHY_NCR_CTRL6); data |= (1<<0); @@ -42,28 +83,34 @@ static int phy_semidrive_usb_init(struct phy *phy) data |= (1<<8); writel(data, priv->base + USB_PHY_NCR_CTRL3); } - +#endif udelay(30); - data = readl(priv->base + USB_PHY_NCR_CTRL0); + // data = readl(priv->base + USB_PHY_NCR_CTRL0); + data = readl(phy->base + USB_PHY_NCR_CTRL0); data &= ~(1<<0); - writel(data, priv->base + USB_PHY_NCR_CTRL0); + // writel(data, priv->base + USB_PHY_NCR_CTRL0); + writel(data, phy->base + USB_PHY_NCR_CTRL0); return 0; } -static const struct phy_ops phy_semidrive_usb_ops = { - .init = phy_semidrive_usb_init, - .owner = THIS_MODULE, +static const struct tn_phy_ops phy_semidrive_usb_ops = { + .init = phy_semidrive_usb_ops_init, + // .owner = THIS_MODULE, }; -static int phy_semidrive_usb_probe(struct platform_device *pdev) +static int phy_semidrive_usb_probe(struct pf_device *pfdev) +// static int phy_semidrive_usb_probe(struct platform_device *pdev) { - struct phy_semidrive_usb_priv *priv; - struct resource *res; - struct phy *phy; - struct phy_provider *phy_provider; - + // struct phy_semidrive_usb_priv *priv; + // struct resource *res; + struct tn_phy *phy; + // struct phy_provider *phy_provider; + int offs; + __u64 reg_base, reg_size; + void *dtb; +#if 0 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -86,8 +133,24 @@ static int phy_semidrive_usb_probe(struct platform_device *pdev) 
phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); return PTR_ERR_OR_ZERO(phy_provider); +#endif + offs = pfdev->fdt_offset; + if (unlikely(offs < 0)) + return -EINVAL; + + dtb = (void *)ukplat_bootinfo_get()->dtb; + fdt_get_address(dtb, offs, 0, ®_base, ®_size); + + pfdev->base = reg_base; + pfdev->size = reg_size; + phy = tn_phy_create(pfdev, &phy_semidrive_usb_ops); + if (!phy) + uk_pr_err("fail to create phy"); + + return 0; } +#if 0 static const struct of_device_id phy_semidrive_usb_of_match[] = { { .compatible = "semidrive,usb-phy" }, { }, @@ -106,3 +169,19 @@ module_platform_driver(phy_semidrive_usb_driver); MODULE_AUTHOR("Dejin Zheng "); MODULE_DESCRIPTION("Semidrive PHY driver for USB"); MODULE_LICENSE("GPL"); +#endif + +static const struct tn_phy_matcher semidrive_phy_matcher = +{ + .phandle = 0x80, + .node_name = "semidrive,usb-phy", +}; + +static struct tn_phy_info semidrive_phy_info = +{ + .matcher = &semidrive_phy_matcher, + .alloc_data = NULL, +}; + +PHY_REGISTER_DRIVER(&semidrive_phy_info, &phy_semidrive_usb_ops); +#endif /* __PHY_SEMIDRIVE_USB_H__ */ diff --git a/usb/usb_platform.c b/usb/usb_platform.c index ee7e5ec..456451a 100644 --- a/usb/usb_platform.c +++ b/usb/usb_platform.c @@ -60,8 +60,10 @@ static const struct device_match_table usb_platform_match_table[] = { static int usb_platform_match_compatible(const char *compatible) { for (int i = 0; usb_platform_match_table[i].compatible != NULL; i++) - if (strcmp(usb_platform_match_table[i].compatible, compatible) == 0) - return usb_platform_count++; + if (strcmp(usb_platform_match_table[i].compatible, compatible) == 0) { + usb_platform_id.device_id++; + return ++usb_platform_count; + } return -1; } -- Gitee
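The udelay() helper introduced for the phy driver above busy-loops for usec * 100 iterations, so the actual delay depends on the CPU clock and on compiler optimisation rather than on wall-clock time, and its uint8_t parameter silently truncates larger requests. A time-based variant is sketched below; it assumes Unikraft's ukplat_monotonic_clock() from uk/plat/time.h (nanosecond resolution) is available to this port and widens the parameter type — both are assumptions on top of what the patches themselves provide.

#include <uk/plat/time.h>

/* Sketch only: wait roughly 'usec' microseconds against the platform
 * monotonic clock instead of counting loop iterations.
 */
static void udelay(unsigned int usec)
{
	__nsec deadline = ukplat_monotonic_clock() + (__nsec)usec * 1000;

	while (ukplat_monotonic_clock() < deadline)
		; /* busy-wait until the deadline passes */
}

With a clock-based delay, the 30 µs settle time after asserting the phy reset in phy_semidrive_usb_ops_init() no longer varies with core frequency or optimisation level.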