return 0;
}
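+/*
+ * Detect an AMD SB800 southbridge: the SBx00 parts share SMBus
+ * controller device ID 0x4385, and a silicon revision of 0x40 or
+ * above identifies SB800.  Returns 1 when the L1 fix is needed.
+ */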
+static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
+{
+	struct pci_dev *amd_smbus_dev;
+	u8 rev = 0;
+
+	amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+	if (!amd_smbus_dev)
+		return 0;
+
+	pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+	if (rev < 0x40) {
+		pci_dev_put(amd_smbus_dev);
+		return 0;
+	}
+
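+	/*
+	 * Cache the AMD northbridge handle once; if it cannot be found,
+	 * ehci_quirk_amd_L1() simply skips the NB-side register updates.
+	 */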
+	if (!amd_nb_dev)
+		amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+	if (!amd_nb_dev)
+		ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
+
+	ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
+
+	pci_dev_put(amd_smbus_dev);
+
+	return 1;
+}
+
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+	if (ehci_quirk_amd_SB800(ehci))
+		ehci->amd_l1_fix = 1;
+
retval = ehci_halt(ehci);
if (retval)
return retval;
ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
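+/*
+ * AB_REG_BAR_LOW/HIGH are SB800 PM register indices (reached through
+ * I/O ports 0xCD6/0xCD7) holding the base of the "AB" index/data
+ * register pair.  The NB_PCIE_INDX_* values are the northbridge's
+ * PCIe index/data config offsets, and NB_PIF0_PWRDOWN_* the indirect
+ * addresses of the PIF0 power-down registers reached through them.
+ */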
+#define AB_REG_BAR_LOW 0xe0
+#define AB_REG_BAR_HIGH 0xe1
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define NB_PIF0_PWRDOWN_0 0x01100012
+#define NB_PIF0_PWRDOWN_1 0x01100013
+
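+/*
+ * Toggle L1 entry on the SB800 EHCI link.  disable=1 blocks L1 while
+ * isochronous transfers are in flight, disable=0 restores it.  The SB
+ * side is always programmed; the NB side only when amd_nb_dev exists.
+ */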
+static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
+{
+	u32 addr, addr_low, addr_high, val;
+
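+	/* read the AB register base out of PM space via ports 0xCD6/0xCD7 */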
+	outb_p(AB_REG_BAR_LOW, 0xcd6);
+	addr_low = inb_p(0xcd7);
+	outb_p(AB_REG_BAR_HIGH, 0xcd6);
+	addr_high = inb_p(0xcd7);
+	addr = addr_high << 8 | addr_low;
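+	/* write 0x40 to AB indexed reg 0x30, then read back reg 0x34 */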
+	outl_p(0x30, AB_INDX(addr));
+	outl_p(0x40, AB_DATA(addr));
+	outl_p(0x34, AB_INDX(addr));
+	val = inl_p(AB_DATA(addr));
+
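+	/*
+	 * Bit 3 appears to gate L1 entry: clearing it (and setting bits 4
+	 * and 9) blocks L1, the inverse allows it again.  The bit-level
+	 * semantics are AMD-internal.
+	 */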
+	if (disable) {
+		val &= ~0x8;
+		val |= (1 << 4) | (1 << 9);
+	} else {
+		val |= 0x8;
+		val &= ~((1 << 4) | (1 << 9));
+	}
+	outl_p(val, AB_DATA(addr));
+
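+	/*
+	 * NB side: toggle the 6-bit field at bits 12:7 of both PIF0
+	 * power-down registers through the PCIe index/data pair.
+	 */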
+	if (amd_nb_dev) {
+		addr = NB_PIF0_PWRDOWN_0;
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
+		if (disable)
+			val &= ~(0x3f << 7);
+		else
+			val |= 0x3f << 7;
+
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
+
+		addr = NB_PIF0_PWRDOWN_1;
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
+		if (disable)
+			val &= ~(0x3f << 7);
+		else
+			val |= 0x3f << 7;
+
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
+	}
+}
+
/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
next_uframe >> 3, next_uframe & 0x7);
stream->start = jiffies;
}
+
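+	/* first active ISOC request: block L1 on SB800 for the duration */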
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 1);
+	}
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
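+	/* last active ISOC request retired: allow L1 again */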
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 0);
+	}
+
if (unlikely(list_is_singular(&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
stream->interval, hc32_to_cpu(ehci, stream->splits));
stream->start = jiffies;
}
+
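+	/* same L1 blocking for the split-transaction (sITD) path */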
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 1);
+	}
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
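+	/* no ISOC requests left on the sITD path either: re-enable L1 */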
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 0);
+	}
+
if (list_is_singular(&stream->td_list)) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;