/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/delay.h>
18 #include <linux/string.h>
19 #include <linux/pci.h>
28 #include <pcie_core.h>
34 sbpcieregs_t *pcieregs;
35 struct sbpciregs *pciregs;
36 } regs; /* Memory mapped register to the core */
38 si_t *sih; /* System interconnect handle */
40 u8 pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
43 u8 pcie_war_aspm_ovr; /* Override ASPM/Clkreq settings */
45 u8 pmecap_offset; /* PM Capability offset in the config space */
46 bool pmecap; /* Capable of generating PME */
50 #define PCI_ERROR(args)
51 #define PCIE_PUB(sih) \
52 (((sih)->bustype == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))
54 /* routines to access mdio slave device registers */
55 static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk);
56 static int pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr,
57 bool write, uint *val);
58 static int pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint readdr,
60 static int pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint readdr,
63 static void pcie_extendL1timer(pcicore_info_t *pi, bool extend);
64 static void pcie_clkreq_upd(pcicore_info_t *pi, uint state);
66 static void pcie_war_aspm_clkreq(pcicore_info_t *pi);
67 static void pcie_war_serdes(pcicore_info_t *pi);
68 static void pcie_war_noplldown(pcicore_info_t *pi);
69 static void pcie_war_polarity(pcicore_info_t *pi);
70 static void pcie_war_pci_setup(pcicore_info_t *pi);
72 static bool pcicore_pmecap(pcicore_info_t *pi);
74 #define PCIE_ASPM(sih) ((PCIE_PUB(sih)) && (((sih)->buscorerev >= 3) && ((sih)->buscorerev <= 5)))
77 /* delay needed between the mdio control/ mdiodata register data access */
78 #define PR28829_DELAY() udelay(10)
80 /* Initialize the PCI core. It's caller's responsibility to make sure that this is done
/*
 * pcicore_init - allocate and initialize the pcicore_info_t state for this
 * PCI/PCIe bus core.  Caller owns the returned handle and must release it
 * with pcicore_deinit().
 * NOTE(review): this extract is truncated; the allocation-failure return,
 * pi->sih/pi->dev assignments and closing braces are not visible here.
 */
83 void *pcicore_init(si_t *sih, void *pdev, void *regs)
87 /* alloc pcicore_info_t */
88 pi = kzalloc(sizeof(pcicore_info_t), GFP_ATOMIC);
90 PCI_ERROR(("pci_attach: malloc failed!\n"));
/* PCIe core: cache the mapped register block and locate the PCIe
 * capability so the Link Control register offset can be computed once. */
97 if (sih->buscoretype == PCIE_CORE_ID) {
99 pi->regs.pcieregs = (sbpcieregs_t *) regs;
100 cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
102 pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
/* else: plain PCI core — only the register block is recorded */
104 pi->regs.pciregs = (struct sbpciregs *) regs;
/*
 * pcicore_deinit - release the handle allocated by pcicore_init().
 * NOTE(review): body truncated in this extract; presumably frees pi —
 * confirm against the full source.
 */
109 void pcicore_deinit(void *pch)
111 pcicore_info_t *pi = (pcicore_info_t *) pch;
118 /* return cap_offset if requested capability exists in the PCI config space */
119 /* Note that it's caller's responsibility to make sure it's a pci bus */
121 pcicore_find_pci_capability(void *dev, u8 req_cap_id,
122 unsigned char *buf, u32 *buflen)
/* Walk the PCI capability linked list in config space looking for
 * req_cap_id; optionally copy the capability body into buf/buflen.
 * NOTE(review): extract truncated — early-return paths and the loop's
 * termination bookkeeping are only partially visible. */
129 /* check for Header type 0 */
130 pci_read_config_byte(dev, PCI_HEADER_TYPE, &byte_val);
131 if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
134 /* check if the capability pointer field exists */
135 pci_read_config_byte(dev, PCI_STATUS, &byte_val);
136 if (!(byte_val & PCI_STATUS_CAP_LIST))
139 pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
140 /* check if the capability pointer is 0x00 */
144 /* loop through the capability list and see if the requested capability exists */
146 pci_read_config_byte(dev, cap_ptr, &cap_id);
/* each capability node is {cap_id, next_ptr, data...}; byte at
 * cap_ptr+1 is the link to the next node */
148 while (cap_id != req_cap_id) {
149 pci_read_config_byte(dev, cap_ptr + 1, &cap_ptr);
152 pci_read_config_byte(dev, cap_ptr, &cap_id);
154 if (cap_id != req_cap_id) {
157 /* found the caller requested capability */
158 if ((buf != NULL) && (buflen != NULL)) {
165 /* copy the capability data excluding cap ID and next ptr */
166 cap_data = cap_ptr + 2;
/* clamp the copy so it never runs past the config-space size */
167 if ((bufsize + cap_data) > PCI_SZPCR)
168 bufsize = PCI_SZPCR - cap_data;
171 pci_read_config_byte(dev, cap_data, buf);
180 /* ***** Register Access API */
/*
 * pcie_readreg - read a PCIe core register through the indirect
 * address/data register pairs (config-space registers vs. PCIe protocol
 * registers, selected by addrtype).
 * NOTE(review): extract truncated — the switch header, break statements
 * and final return are not visible here.
 */
182 pcie_readreg(sbpcieregs_t *pcieregs, uint addrtype,
185 uint retval = 0xFFFFFFFF;
188 case PCIE_CONFIGREGS:
189 W_REG((&pcieregs->configaddr), offset);
/* read-back flushes the posted address write before reading data */
190 (void)R_REG((&pcieregs->configaddr));
191 retval = R_REG(&(pcieregs->configdata));
/* PCIE_PCIEREGS path: same pattern on the indirect PCIe registers */
194 W_REG(&(pcieregs->pcieindaddr), offset);
195 (void)R_REG((&pcieregs->pcieindaddr));
196 retval = R_REG(&(pcieregs->pcieinddata));
/*
 * pcie_writereg - write a PCIe core register through the indirect
 * address/data pairs; counterpart of pcie_readreg.
 * NOTE(review): extract truncated — switch header, breaks and return
 * are not visible here.
 */
206 pcie_writereg(sbpcieregs_t *pcieregs, uint addrtype,
207 uint offset, uint val)
210 case PCIE_CONFIGREGS:
211 W_REG((&pcieregs->configaddr), offset);
212 W_REG((&pcieregs->configdata), val);
/* PCIE_PCIEREGS path */
215 W_REG((&pcieregs->pcieindaddr), offset);
216 W_REG((&pcieregs->pcieinddata), val);
/*
 * pcie_mdiosetblock - select an MDIO register block (two-level address
 * mapping used by newer serdes, core rev >= 10) by writing the block
 * address, then spin until the hardware signals completion.
 * Returns true on success, false on timeout (visible tail truncated).
 */
224 static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
226 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
227 uint mdiodata, i = 0;
228 uint pcie_serdes_spinwait = 200;
/* compose the block-select MDIO write frame */
231 MDIODATA_START | MDIODATA_WRITE | (MDIODATA_DEV_ADDR <<
232 MDIODATA_DEVADDR_SHF) |
233 (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | MDIODATA_TA | (blk <<
235 W_REG(&pcieregs->mdiodata, mdiodata);
238 /* retry till the transaction is complete */
239 while (i < pcie_serdes_spinwait) {
240 if (R_REG(&(pcieregs->mdiocontrol)) &
241 MDIOCTL_ACCESS_DONE) {
/* loop exhausted without ACCESS_DONE -> timeout */
248 if (i >= pcie_serdes_spinwait) {
249 PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
/*
 * pcie_mdioop - perform one MDIO read or write against a serdes slave
 * device register.  For write==true, *val supplies the data; for reads the
 * result is (per the visible polling code) pulled from the mdiodata
 * register once ACCESS_DONE is seen.  Returns 0 on success, non-zero on
 * timeout (exact return values truncated from this extract).
 */
257 pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
260 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
263 uint pcie_serdes_spinwait = 10;
265 /* enable mdio access to SERDES */
266 W_REG((&pcieregs->mdiocontrol),
267 MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
269 if (pi->sih->buscorerev >= 10) {
270 /* new serdes is slower in rw, using two layers of reg address mapping */
271 if (!pcie_mdiosetblock(pi, physmedia))
273 mdiodata = (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
274 (regaddr << MDIODATA_REGADDR_SHF);
/* slower serdes: give the polling loop 20x the iterations */
275 pcie_serdes_spinwait *= 20;
/* older serdes uses the flat (single-level) address layout */
277 mdiodata = (physmedia << MDIODATA_DEVADDR_SHF_OLD) |
278 (regaddr << MDIODATA_REGADDR_SHF_OLD);
/* tag the frame as a read, or as a write carrying *val */
282 mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
285 (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val);
287 W_REG(&pcieregs->mdiodata, mdiodata);
291 /* retry till the transaction is complete */
292 while (i < pcie_serdes_spinwait) {
293 if (R_REG(&(pcieregs->mdiocontrol)) &
294 MDIOCTL_ACCESS_DONE) {
/* read path: extract the data field from mdiodata */
298 (R_REG(&(pcieregs->mdiodata)) &
301 /* Disable mdio access to SERDES */
302 W_REG((&pcieregs->mdiocontrol), 0);
/* timeout path: log and shut MDIO access back off */
309 PCI_ERROR(("pcie_mdioop: timed out op: %d\n", write));
310 /* Disable mdio access to SERDES */
311 W_REG((&pcieregs->mdiocontrol), 0);
315 /* use the mdio interface to read from mdio slaves */
317 pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint regaddr, uint *regval)
319 return pcie_mdioop(pi, physmedia, regaddr, false, regval);
322 /* use the mdio interface to write to mdio slaves */
324 pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint regaddr, uint val)
326 return pcie_mdioop(pi, physmedia, regaddr, true, &val);
329 /* ***** Support functions ***** */
330 u8 pcie_clkreq(void *pch, u32 mask, u32 val)
332 pcicore_info_t *pi = (pcicore_info_t *) pch;
336 offset = pi->pciecap_lcreg_offset;
340 pci_read_config_dword(pi->dev, offset, ®_val);
344 reg_val |= PCIE_CLKREQ_ENAB;
346 reg_val &= ~PCIE_CLKREQ_ENAB;
347 pci_write_config_dword(pi->dev, offset, reg_val);
348 pci_read_config_dword(pi->dev, offset, ®_val);
350 if (reg_val & PCIE_CLKREQ_ENAB)
/*
 * pcie_extendL1timer - set or clear the ASPM-timer-extend bit in the DLLP
 * PM threshold register.  Only applies to PCIe cores rev >= 7.
 * NOTE(review): extract truncated — local declarations ('w', 'sih') and
 * the return after the guard are not visible.
 */
356 static void pcie_extendL1timer(pcicore_info_t *pi, bool extend)
360 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
362 if (!PCIE_PUB(sih) || sih->buscorerev < 7)
/* read-modify-write PCIE_ASPMTIMER_EXTEND according to 'extend' */
365 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
367 w |= PCIE_ASPMTIMER_EXTEND;
369 w &= ~PCIE_ASPMTIMER_EXTEND;
370 pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
/* final read-back; value discarded — presumably a posted-write flush */
371 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
374 /* centralized clkreq control policy */
/*
 * pcie_clkreq_upd - centralized CLKREQ policy applied on driver state
 * transitions ('state' selects the branch; the switch/case lines are
 * truncated from this extract).  Core rev 6 parts additionally toggle the
 * serdes PLL-down chip control via chipcommon.
 */
375 static void pcie_clkreq_upd(pcicore_info_t *pi, uint state)
/* (visible) disable CLKREQ for this state */
382 pcie_clkreq((void *)pi, 1, 0);
385 if (sih->buscorerev == 6) { /* turn on serdes PLL down */
386 ai_corereg(sih, SI_CC_IDX,
387 offsetof(chipcregs_t, chipcontrol_addr), ~0,
389 ai_corereg(sih, SI_CC_IDX,
390 offsetof(chipcregs_t, chipcontrol_data),
/* PR42767 workaround boards: re-enable CLKREQ instead */
392 } else if (pi->pcie_pr42767) {
393 pcie_clkreq((void *)pi, 1, 1);
397 if (sih->buscorerev == 6) { /* turn off serdes PLL down */
398 ai_corereg(sih, SI_CC_IDX,
399 offsetof(chipcregs_t, chipcontrol_addr), ~0,
401 ai_corereg(sih, SI_CC_IDX,
402 offsetof(chipcregs_t, chipcontrol_data),
404 } else if (PCIE_ASPM(sih)) { /* disable clkreq */
405 pcie_clkreq((void *)pi, 1, 0);
413 /* ***** PCI core WARs ***** */
414 /* Done only once at attach time */
/*
 * pcie_war_polarity - sample the serdes polarity-inversion status once at
 * attach and latch the forced polarity setting into pi->pcie_polarity
 * (later applied by pcie_war_serdes).  Idempotent: returns early if the
 * polarity was already latched.
 */
415 static void pcie_war_polarity(pcicore_info_t *pi)
419 if (pi->pcie_polarity != 0)
/* read the PLP status register (register name truncated from extract) */
422 w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS,
425 /* Detect the current polarity at attach and force that polarity and
426 * disable changing the polarity
428 if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
429 pi->pcie_polarity = (SERDES_RX_CTRL_FORCE);
/* inverted polarity seen: force it with the POLARITY bit set too */
432 (SERDES_RX_CTRL_FORCE | SERDES_RX_CTRL_POLARITY);
435 /* enable ASPM and CLKREQ if srom doesn't have it */
436 /* Needs to happen when update to shadow SROM is needed
437 * : Coming out of 'standby'/'hibernate'
438 * : If pcie_war_aspm_ovr state changed
/*
 * pcie_war_aspm_clkreq - push the pcie_war_aspm_ovr policy into both the
 * SROM shadow (sprom[] backdoor registers) and the PCIe Link Control
 * register, then enable/disable CLKREQ in the rev-5 SROM shadow word.
 * Called at attach, on override changes, and coming out of standby.
 * NOTE(review): extract truncated — local declarations and several
 * W_REG write-backs of val16 are not visible.
 */
440 static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
442 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
450 /* bypass this on QT or VSIM */
451 if (!ISSIM_ENAB(sih)) {
/* mirror the ASPM override into the SROM shadow ASPM word */
453 reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
454 val16 = R_REG(reg16);
456 val16 &= ~SRSH_ASPM_ENB;
457 if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
458 val16 |= SRSH_ASPM_ENB;
459 else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
460 val16 |= SRSH_ASPM_L1_ENB;
461 else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
462 val16 |= SRSH_ASPM_L0s_ENB;
/* and into the Link Control register ASPM field in config space */
466 pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset,
468 w &= ~PCIE_ASPM_ENAB;
469 w |= pi->pcie_war_aspm_ovr;
470 pci_write_config_dword(pi->dev,
471 pi->pciecap_lcreg_offset, w);
/* CLKREQ: enabled whenever ASPM is not fully disabled */
474 reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
475 val16 = R_REG(reg16);
477 if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
478 val16 |= SRSH_CLKREQ_ENB;
/* remember that the PR42767 CLKREQ workaround is armed */
479 pi->pcie_pr42767 = true;
481 val16 &= ~SRSH_CLKREQ_ENB;
486 /* Apply the polarity determined at the start */
487 /* Needs to happen when coming out of 'standby'/'hibernate' */
/*
 * pcie_war_serdes - apply the polarity latched by pcie_war_polarity() to
 * the serdes RX control register, and disable the PLL frequency detector
 * if it is enabled.  Re-run when coming out of standby/hibernate.
 */
488 static void pcie_war_serdes(pcicore_info_t *pi)
492 if (pi->pcie_polarity != 0)
493 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL,
/* clear FREQDET_EN in the serdes PLL control register if set */
496 pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
497 if (w & PLL_CTRL_FREQDET_EN) {
498 w &= ~PLL_CTRL_FREQDET_EN;
499 pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
503 /* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
504 /* Needs to happen when coming out of 'standby'/'hibernate' */
/*
 * pcie_misc_config_fixup - set SRSH_L23READY_EXIT_NOPERST in the SROM
 * shadow misc-config word so the core can leave L2/L3-Ready without PERST.
 * NOTE(review): the W_REG write-back of val16 is truncated from this view.
 */
505 static void pcie_misc_config_fixup(pcicore_info_t *pi)
507 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
510 reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
511 val16 = R_REG(reg16);
/* only touch the register when the bit is not already set */
513 if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
514 val16 |= SRSH_L23READY_EXIT_NOPERST;
519 /* quick hack for testing */
520 /* Needs to happen when coming out of 'standby'/'hibernate' */
/*
 * pcie_war_noplldown - keep the serdes PLL from powering down: set the
 * 4321 PLL_DOWN chip-control bit via chipcommon and clear the SROM shadow
 * backdoor word.  (Original author flagged this as a quick test hack.)
 */
521 static void pcie_war_noplldown(pcicore_info_t *pi)
523 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
526 /* turn off serdes PLL down */
527 ai_corereg(pi->sih, SI_CC_IDX, offsetof(chipcregs_t, chipcontrol),
528 CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
530 /* clear srom shadow backdoor */
531 reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
535 /* Needs to happen when coming out of 'standby'/'hibernate' */
/*
 * pcie_war_pci_setup - master sequence of per-revision PCIe workarounds,
 * run on hardware-up (and when coming out of standby/hibernate):
 *  - rev 0/1: TLP workarounds register tweak
 *  - rev 1:   DLLP link-control tweak
 *  - rev 0:   serdes RX timer/CDR fixups via MDIO
 *  - rev 3-5 (PCIE_ASPM): L1 threshold + serdes + ASPM/CLKREQ WARs
 *  - rev 7:   no-PLL-down workaround
 *  - rev >=6: misc-config L23Ready fixup (the real fix lives in SROM)
 * NOTE(review): extract truncated — the OR-ed constants between the
 * read-modify-write pairs are not visible.
 */
536 static void pcie_war_pci_setup(pcicore_info_t *pi)
539 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
542 if ((sih->buscorerev == 0) || (sih->buscorerev == 1)) {
543 w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
544 PCIE_TLP_WORKAROUNDSREG);
546 pcie_writereg(pcieregs, PCIE_PCIEREGS,
547 PCIE_TLP_WORKAROUNDSREG, w);
550 if (sih->buscorerev == 1) {
551 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
553 pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
556 if (sih->buscorerev == 0) {
557 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
558 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
559 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
560 } else if (PCIE_ASPM(sih)) {
561 /* Change the L1 threshold for better performance */
562 w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
563 PCIE_DLLP_PMTHRESHREG);
564 w &= ~(PCIE_L1THRESHOLDTIME_MASK);
565 w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
566 pcie_writereg(pcieregs, PCIE_PCIEREGS,
567 PCIE_DLLP_PMTHRESHREG, w);
/* (truncated) serdes WAR presumably runs before this on ASPM parts */
571 pcie_war_aspm_clkreq(pi);
572 } else if (pi->sih->buscorerev == 7)
573 pcie_war_noplldown(pi);
575 /* Note that the fix is actually in the SROM, that's why this is open-ended */
576 if (pi->sih->buscorerev >= 6)
577 pcie_misc_config_fixup(pi);
/*
 * pcie_war_ovr_aspm_update - change the ASPM override policy at runtime
 * and immediately re-apply it.  Ignored on cores outside the PCIE_ASPM
 * revision window and for out-of-range 'aspm' values (early returns
 * truncated from this extract).
 */
580 void pcie_war_ovr_aspm_update(void *pch, u8 aspm)
582 pcicore_info_t *pi = (pcicore_info_t *) pch;
584 if (!PCIE_ASPM(pi->sih))
588 if (aspm > PCIE_ASPM_ENAB)
591 pi->pcie_war_aspm_ovr = aspm;
593 /* Update the current state */
594 pcie_war_aspm_clkreq(pi);
597 /* ***** Functions called during driver state changes ***** */
/*
 * pcicore_attach - attach-time setup: derive the ASPM override from the
 * board's "boardflags2" SROM variable, then run the polarity/serdes/ASPM
 * workarounds in their required order and apply the CLKREQ policy for
 * 'state'.  NOTE(review): extract truncated — pcie_war_serdes() is
 * presumably called between the visible WAR calls; confirm in full source.
 */
598 void pcicore_attach(void *pch, char *pvars, int state)
600 pcicore_info_t *pi = (pcicore_info_t *) pch;
603 /* Determine if this board needs override */
604 if (PCIE_ASPM(sih)) {
605 if ((u32) getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR) {
606 pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
608 pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
612 /* These need to happen in this order only */
613 pcie_war_polarity(pi);
617 pcie_war_aspm_clkreq(pi);
619 pcie_clkreq_upd(pi, state);
623 void pcicore_hwup(void *pch)
625 pcicore_info_t *pi = (pcicore_info_t *) pch;
627 if (!pi || !PCIE_PUB(pi->sih))
630 pcie_war_pci_setup(pi);
633 void pcicore_up(void *pch, int state)
635 pcicore_info_t *pi = (pcicore_info_t *) pch;
637 if (!pi || !PCIE_PUB(pi->sih))
640 /* Restore L1 timer for better performance */
641 pcie_extendL1timer(pi, true);
643 pcie_clkreq_upd(pi, state);
646 /* When the device is going to enter D3 state (or the system is going to enter S3/S4 states */
647 void pcicore_sleep(void *pch)
649 pcicore_info_t *pi = (pcicore_info_t *) pch;
652 if (!pi || !PCIE_ASPM(pi->sih))
655 pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
656 w &= ~PCIE_CAP_LCREG_ASPML1;
657 pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
659 pi->pcie_pr42767 = false;
662 void pcicore_down(void *pch, int state)
664 pcicore_info_t *pi = (pcicore_info_t *) pch;
666 if (!pi || !PCIE_PUB(pi->sih))
669 pcie_clkreq_upd(pi, state);
671 /* Reduce L1 timer for better power savings */
672 pcie_extendL1timer(pi, false);
675 /* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
676 /* Just uses PCI config accesses to find out, when needed before sb_attach is done */
677 bool pcicore_pmecap_fast(void *pch)
679 pcicore_info_t *pi = (pcicore_info_t *) pch;
683 cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_PM, NULL,
689 pci_read_config_dword(pi->dev, cap_ptr, &pmecap);
691 return (pmecap & (PCI_PM_CAP_PME_MASK << 16)) != 0;
694 /* return true if PM capability exists in the pci config space
695 * Uses and caches the information using core handle
/*
 * pcicore_pmecap - like pcicore_pmecap_fast() but caches the PM capability
 * offset and the capability verdict in pi (pmecap_offset / pmecap) so
 * subsequent PME helpers can reuse them.
 * NOTE(review): extract truncated — the early-return on a missing cap_ptr
 * and the final 'return pi->pmecap' are not visible here.
 */
697 static bool pcicore_pmecap(pcicore_info_t *pi)
702 if (!pi->pmecap_offset) {
703 cap_ptr = pcicore_find_pci_capability(pi->dev,
/* cache the discovered capability offset */
709 pi->pmecap_offset = cap_ptr;
711 pci_read_config_dword(pi->dev, pi->pmecap_offset,
714 /* At least one state can generate PME */
715 pi->pmecap = (pmecap & (PCI_PM_CAP_PME_MASK << 16)) != 0;
721 /* Enable PME generation */
722 void pcicore_pmeen(void *pch)
724 pcicore_info_t *pi = (pcicore_info_t *) pch;
727 /* if not pmecapable return */
728 if (!pcicore_pmecap(pi))
731 pci_read_config_dword(pi->dev, pi->pmecap_offset + PCI_PM_CTRL,
733 w |= (PCI_PM_CTRL_PME_ENABLE);
734 pci_write_config_dword(pi->dev,
735 pi->pmecap_offset + PCI_PM_CTRL, w);
739 * Return true if PME status set
741 bool pcicore_pmestat(void *pch)
743 pcicore_info_t *pi = (pcicore_info_t *) pch;
746 if (!pcicore_pmecap(pi))
749 pci_read_config_dword(pi->dev, pi->pmecap_offset + PCI_PM_CTRL,
752 return (w & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
755 /* Disable PME generation, clear the PME status bit if set
757 void pcicore_pmeclr(void *pch)
759 pcicore_info_t *pi = (pcicore_info_t *) pch;
762 if (!pcicore_pmecap(pi))
765 pci_read_config_dword(pi->dev, pi->pmecap_offset + PCI_PM_CTRL,
768 PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));
770 /* PMESTAT is cleared by writing 1 to it */
771 w &= ~(PCI_PM_CTRL_PME_ENABLE);
773 pci_write_config_dword(pi->dev,
774 pi->pmecap_offset + PCI_PM_CTRL, w);
/*
 * pcie_lcreg - read/modify the PCIe Link Control register in config space.
 * With a non-zero mask the value is written, then read back and returned.
 * NOTE(review): extract truncated — local declarations, the zero-offset
 * guard, the 'if (mask)' wrapper and the final return are not visible.
 */
777 u32 pcie_lcreg(void *pch, u32 mask, u32 val)
779 pcicore_info_t *pi = (pcicore_info_t *) pch;
783 offset = pi->pciecap_lcreg_offset;
/* write path (guarded by mask in the full source) */
789 pci_write_config_dword(pi->dev, offset, val);
/* always return the current register contents */
791 pci_read_config_dword(pi->dev, offset, &tmpval);
/*
 * pcicore_pciereg - debug accessor: write (when mask set, per full source)
 * and/or read a PCIe core register of the given 'type'
 * (PCIE_CONFIGREGS/PCIE_PCIEREGS) via pcie_writereg/pcie_readreg.
 * NOTE(review): extract truncated — the mask check and final return of
 * reg_val are not visible here.
 */
796 pcicore_pciereg(void *pch, u32 offset, u32 mask, u32 val, uint type)
799 pcicore_info_t *pi = (pcicore_info_t *) pch;
800 sbpcieregs_t *pcieregs = pi->regs.pcieregs;
803 PCI_ERROR(("PCIEREG: 0x%x writeval 0x%x\n", offset, val));
804 pcie_writereg(pcieregs, type, offset, val);
807 /* Should not read register 0x154 */
/* hardware erratum: reading PCIE_DLLP_PCIE11 on rev <= 5 is unsafe */
808 if (pi->sih->buscorerev <= 5 && offset == PCIE_DLLP_PCIE11
809 && type == PCIE_PCIEREGS)
812 reg_val = pcie_readreg(pcieregs, type, offset);
813 PCI_ERROR(("PCIEREG: 0x%x readval is 0x%x\n", offset, reg_val));
819 pcicore_pcieserdesreg(void *pch, u32 mdioslave, u32 offset, u32 mask,
823 pcicore_info_t *pi = (pcicore_info_t *) pch;
826 PCI_ERROR(("PCIEMDIOREG: 0x%x writeval 0x%x\n", offset, val));
827 pcie_mdiowrite(pi, mdioslave, offset, val);
830 if (pcie_mdioread(pi, mdioslave, offset, ®_val))
831 reg_val = 0xFFFFFFFF;
832 PCI_ERROR(("PCIEMDIOREG: dev 0x%x offset 0x%x read 0x%x\n", mdioslave,