@@ -2369,7 +2369,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
-	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+	if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+				TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
+	else
+		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
 			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
@@ -334,6 +334,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
xhci->quirks |= XHCI_NO_SOFT_RETRY;
+	if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN &&
+	    (pdev->device == 0x9202 ||
+	     pdev->device == 0x9203))
+		xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
@@ -1905,6 +1905,7 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
+#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
unsigned int num_active_eps;
unsigned int limit_active_eps;
On some Zhaoxin platforms, the xHCI controller prefetches TRBs to improve
performance. However, this TRB prefetch can cross a page boundary and access
memory that was not allocated by the xHCI driver. To fix this issue, allocate
two pages for each TRB segment and use only the first page.

Signed-off-by: Weitao Wang <WeitaoWang-oc@zhaoxin.com>
---
 drivers/usb/host/xhci-mem.c | 6 +++++-
 drivers/usb/host/xhci-pci.c | 5 +++++
 drivers/usb/host/xhci.h     | 1 +
 3 files changed, 11 insertions(+), 1 deletion(-)
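
For reference, a minimal sketch of the idea behind the quirk. The helper name
below is hypothetical and not part of this patch; it assumes the usual
definitions of struct xhci_hcd, TRB_SEGMENT_SIZE and the quirk bit from
drivers/usb/host/xhci.h:

#include <linux/device.h>
#include <linux/dmapool.h>

#include "xhci.h"	/* struct xhci_hcd, TRB_SEGMENT_SIZE, XHCI_ZHAOXIN_TRB_FETCH */

/*
 * Sketch only: with XHCI_ZHAOXIN_TRB_FETCH set, every pool object is
 * TRB_SEGMENT_SIZE * 2 bytes with doubled alignment and boundary.  The ring
 * code keeps using only the first TRB_SEGMENT_SIZE bytes (the first page) of
 * each object, so a controller prefetch that runs past the end of a segment
 * still lands in memory owned by this driver.
 */
static struct dma_pool *zhaoxin_segment_pool_create(struct xhci_hcd *xhci,
						    struct device *dev)
{
	if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
		return dma_pool_create("xHCI ring segments", dev,
				       TRB_SEGMENT_SIZE * 2,
				       TRB_SEGMENT_SIZE * 2,
				       xhci->page_size * 2);

	return dma_pool_create("xHCI ring segments", dev,
			       TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
			       xhci->page_size);
}

Callers would keep allocating segments with dma_pool_zalloc() exactly as
before; only the size of the object handed back by the pool changes.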