2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
5 * See LICENSE.qlcnic for copyright and licensing details.
10 #include <linux/slab.h>
12 #include <linux/bitops.h>
/* Helpers for decoding offsets in the 128M CRB space and mapping them
 * into the 2M PCI BAR window. */
14 #define MASK(n) ((1ULL<<(n))-1)
/* OCM window selector bits for P3P parts (bits 27:18 of the address). */
15 #define OCM_WIN_P3P(addr) (addr & 0xffc0000)
/* Offset within a 256K OCM window. */
17 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
/* Block index (bits 25:20) and sub-block index (bits 19:16) used to look
 * up crb_128M_2M_map[]. */
19 #define CRB_BLK(off) ((off >> 20) & 0x3f)
20 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
/* CRB register that selects the indirect window. */
21 #define CRB_WINDOW_2M (0x130060)
/* High part of the CRB address: hub/agent id from crb_hub_agt[] plus the
 * 64K-aligned offset. */
22 #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
/* Base of the indirect-access region within the 2M BAR. */
23 #define CRB_INDIRECT_2M (0x1e0000UL)
/*
 * 64-bit MMIO read composed of two 32-bit reads (low word first).
 * NOTE(review): not atomic with respect to the device; callers must
 * tolerate the two halves being read at different instants.
 */
27 static inline u64 readq(void __iomem *addr)
29 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
/*
 * 64-bit MMIO write composed of two 32-bit writes (low word first).
 * Mirrors readq() above; likewise non-atomic on the bus.
 */
34 static inline void writeq(u64 val, void __iomem *addr)
36 writel(((u32) (val)), (addr));
37 writel(((u32) (val >> 32)), (addr + 4));
/*
 * Static map from the legacy 128M CRB layout to windows inside the 2M
 * PCI BAR.  Indexed by CRB_BLK() (0-63); each block holds up to 16
 * sub-blocks indexed by CRB_SUBBLK().  Per sub-block entry:
 *   { valid, start_128M, end_128M, start_2M }.
 * Entries with valid == 0 have no direct 2M mapping and must go through
 * the indirect window.
 * NOTE(review): the stale numbering in this listing skips (e.g. blocks
 * 35-39 and 52 and the closing brace are not visible here) — confirm
 * against the full file before relying on table completeness.
 */
41 static struct crb_128M_2M_block_map
42 crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
43 {{{0, 0, 0, 0} } }, /* 0: PCI */
44 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
45 {1, 0x0110000, 0x0120000, 0x130000},
46 {1, 0x0120000, 0x0122000, 0x124000},
47 {1, 0x0130000, 0x0132000, 0x126000},
48 {1, 0x0140000, 0x0142000, 0x128000},
49 {1, 0x0150000, 0x0152000, 0x12a000},
50 {1, 0x0160000, 0x0170000, 0x110000},
51 {1, 0x0170000, 0x0172000, 0x12e000},
52 {0, 0x0000000, 0x0000000, 0x000000},
53 {0, 0x0000000, 0x0000000, 0x000000},
54 {0, 0x0000000, 0x0000000, 0x000000},
55 {0, 0x0000000, 0x0000000, 0x000000},
56 {0, 0x0000000, 0x0000000, 0x000000},
57 {0, 0x0000000, 0x0000000, 0x000000},
58 {1, 0x01e0000, 0x01e0800, 0x122000},
59 {0, 0x0000000, 0x0000000, 0x000000} } },
60 {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
61 {{{0, 0, 0, 0} } }, /* 3: */
62 {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
63 {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
64 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
65 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
66 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
67 {0, 0x0000000, 0x0000000, 0x000000},
68 {0, 0x0000000, 0x0000000, 0x000000},
69 {0, 0x0000000, 0x0000000, 0x000000},
70 {0, 0x0000000, 0x0000000, 0x000000},
71 {0, 0x0000000, 0x0000000, 0x000000},
72 {0, 0x0000000, 0x0000000, 0x000000},
73 {0, 0x0000000, 0x0000000, 0x000000},
74 {0, 0x0000000, 0x0000000, 0x000000},
75 {0, 0x0000000, 0x0000000, 0x000000},
76 {0, 0x0000000, 0x0000000, 0x000000},
77 {0, 0x0000000, 0x0000000, 0x000000},
78 {0, 0x0000000, 0x0000000, 0x000000},
79 {0, 0x0000000, 0x0000000, 0x000000},
80 {0, 0x0000000, 0x0000000, 0x000000},
81 {1, 0x08f0000, 0x08f2000, 0x172000} } },
82 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
83 {0, 0x0000000, 0x0000000, 0x000000},
84 {0, 0x0000000, 0x0000000, 0x000000},
85 {0, 0x0000000, 0x0000000, 0x000000},
86 {0, 0x0000000, 0x0000000, 0x000000},
87 {0, 0x0000000, 0x0000000, 0x000000},
88 {0, 0x0000000, 0x0000000, 0x000000},
89 {0, 0x0000000, 0x0000000, 0x000000},
90 {0, 0x0000000, 0x0000000, 0x000000},
91 {0, 0x0000000, 0x0000000, 0x000000},
92 {0, 0x0000000, 0x0000000, 0x000000},
93 {0, 0x0000000, 0x0000000, 0x000000},
94 {0, 0x0000000, 0x0000000, 0x000000},
95 {0, 0x0000000, 0x0000000, 0x000000},
96 {0, 0x0000000, 0x0000000, 0x000000},
97 {1, 0x09f0000, 0x09f2000, 0x176000} } },
98 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
99 {0, 0x0000000, 0x0000000, 0x000000},
100 {0, 0x0000000, 0x0000000, 0x000000},
101 {0, 0x0000000, 0x0000000, 0x000000},
102 {0, 0x0000000, 0x0000000, 0x000000},
103 {0, 0x0000000, 0x0000000, 0x000000},
104 {0, 0x0000000, 0x0000000, 0x000000},
105 {0, 0x0000000, 0x0000000, 0x000000},
106 {0, 0x0000000, 0x0000000, 0x000000},
107 {0, 0x0000000, 0x0000000, 0x000000},
108 {0, 0x0000000, 0x0000000, 0x000000},
109 {0, 0x0000000, 0x0000000, 0x000000},
110 {0, 0x0000000, 0x0000000, 0x000000},
111 {0, 0x0000000, 0x0000000, 0x000000},
112 {0, 0x0000000, 0x0000000, 0x000000},
113 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
114 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
115 {0, 0x0000000, 0x0000000, 0x000000},
116 {0, 0x0000000, 0x0000000, 0x000000},
117 {0, 0x0000000, 0x0000000, 0x000000},
118 {0, 0x0000000, 0x0000000, 0x000000},
119 {0, 0x0000000, 0x0000000, 0x000000},
120 {0, 0x0000000, 0x0000000, 0x000000},
121 {0, 0x0000000, 0x0000000, 0x000000},
122 {0, 0x0000000, 0x0000000, 0x000000},
123 {0, 0x0000000, 0x0000000, 0x000000},
124 {0, 0x0000000, 0x0000000, 0x000000},
125 {0, 0x0000000, 0x0000000, 0x000000},
126 {0, 0x0000000, 0x0000000, 0x000000},
127 {0, 0x0000000, 0x0000000, 0x000000},
128 {0, 0x0000000, 0x0000000, 0x000000},
129 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
130 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
131 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
132 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
133 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
134 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
135 {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
136 {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
137 {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
138 {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
139 {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
140 {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
141 {{{0, 0, 0, 0} } }, /* 23: */
142 {{{0, 0, 0, 0} } }, /* 24: */
143 {{{0, 0, 0, 0} } }, /* 25: */
144 {{{0, 0, 0, 0} } }, /* 26: */
145 {{{0, 0, 0, 0} } }, /* 27: */
146 {{{0, 0, 0, 0} } }, /* 28: */
147 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
148 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
149 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
150 {{{0} } }, /* 32: PCI */
151 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
152 {1, 0x2110000, 0x2120000, 0x130000},
153 {1, 0x2120000, 0x2122000, 0x124000},
154 {1, 0x2130000, 0x2132000, 0x126000},
155 {1, 0x2140000, 0x2142000, 0x128000},
156 {1, 0x2150000, 0x2152000, 0x12a000},
157 {1, 0x2160000, 0x2170000, 0x110000},
158 {1, 0x2170000, 0x2172000, 0x12e000},
159 {0, 0x0000000, 0x0000000, 0x000000},
160 {0, 0x0000000, 0x0000000, 0x000000},
161 {0, 0x0000000, 0x0000000, 0x000000},
162 {0, 0x0000000, 0x0000000, 0x000000},
163 {0, 0x0000000, 0x0000000, 0x000000},
164 {0, 0x0000000, 0x0000000, 0x000000},
165 {0, 0x0000000, 0x0000000, 0x000000},
166 {0, 0x0000000, 0x0000000, 0x000000} } },
167 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
173 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
174 {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
175 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
176 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
177 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
178 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
179 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
180 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
181 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
182 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
183 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
184 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
186 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
187 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
188 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
189 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
190 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
191 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
192 {{{0} } }, /* 59: I2C0 */
193 {{{0} } }, /* 60: I2C1 */
194 {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
195 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
196 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
/*
 * Hub/agent id for each CRB block; consumed by CRB_HI() to build the
 * top 12 bits of the indirect CRB window address.  Indexed by CRB_BLK().
 * NOTE(review): the stale numbering skips in this listing, so several
 * entries (and the array terminator) are not visible here.
 */
200 * top 12 bits of crb internal address (hub, agent)
202 static const unsigned crb_hub_agt[64] = {
204 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
205 QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
206 QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
208 QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
209 QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
210 QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
211 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
212 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
213 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
214 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
215 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
216 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
217 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
218 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
219 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
220 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
221 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
222 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
223 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
224 QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
225 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
226 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
227 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
228 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
229 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
231 QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
232 QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
234 QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
236 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
237 QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
243 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
245 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
246 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
247 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
248 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
249 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
250 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
251 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
252 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
253 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
254 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
256 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
257 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
258 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
259 QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
261 QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
262 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
263 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
265 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
/*
 * Read a 32-bit register for the firmware dump path: program the dump
 * window (upper 16 bits of addr) into REG1, then read the value back
 * through REG2 + low 16 bits of addr.
 */
269 static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
272 void __iomem *window_reg;
274 dest = addr & 0xFFFF0000;
275 window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
276 writel(dest, window_reg);
278 window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
279 *data = readl(window_reg);
/*
 * Write counterpart of qlcnic_read_dump_reg(): select the dump window
 * via REG1, then write 'data' through REG2 + low 16 bits of addr.
 */
282 static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
285 void __iomem *window_reg;
287 dest = addr & 0xFFFF0000;
288 window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
289 writel(dest, window_reg);
291 window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
292 writel(data, window_reg);
296 /* PCI Windowing for DDR regions. */
/* Iteration bound for the hardware semaphore acquire loop below. */
298 #define QLCNIC_PCIE_SEM_TIMEOUT 10000
/*
 * Acquire PCIe hardware semaphore 'sem', polling the lock register until
 * it is granted or QLCNIC_PCIE_SEM_TIMEOUT iterations elapse; on timeout
 * the current holder (from id_reg, if provided) is logged.  On success
 * the caller's port number is recorded in id_reg.
 * NOTE(review): the loop structure and return statements are elided in
 * this listing (stale numbering skips) — see the full source.
 */
301 qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
303 int done = 0, timeout = 0;
306 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
309 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
310 dev_err(&adapter->pdev->dev,
311 "Failed to acquire sem=%d lock; holdby=%d\n",
312 sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
319 QLCWR32(adapter, id_reg, adapter->portnum);
/* Release PCIe hardware semaphore 'sem'; reading the unlock register
 * performs the release. */
325 qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
327 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
/*
 * Copy nr_desc pre-built command descriptors onto the adapter's Tx ring
 * and kick the producer, all under the netdev Tx lock.  Bails out when
 * the firmware is not attached or when the ring lacks room (stopping
 * the queue and bumping the xmit_off counter in that case).
 */
331 qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
332 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
334 u32 i, producer, consumer;
335 struct qlcnic_cmd_buffer *pbuf;
336 struct cmd_desc_type0 *cmd_desc;
337 struct qlcnic_host_tx_ring *tx_ring;
/* Nothing to do if the firmware is not attached. */
341 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
344 tx_ring = adapter->tx_ring;
345 __netif_tx_lock_bh(tx_ring->txq);
347 producer = tx_ring->producer;
348 consumer = tx_ring->sw_consumer;
/* Not enough free descriptors: stop the queue; re-wake it if space
 * appeared in the meantime, otherwise give up on this request. */
350 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
351 netif_tx_stop_queue(tx_ring->txq);
353 if (qlcnic_tx_avail(tx_ring) > nr_desc) {
354 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
355 netif_tx_wake_queue(tx_ring->txq);
357 adapter->stats.xmit_off++;
358 __netif_tx_unlock_bh(tx_ring->txq);
/* Copy each descriptor into the ring; pbuf->frag_count = 0 marks the
 * slot as holding no skb fragments to unmap on completion. */
364 cmd_desc = &cmd_desc_arr[i];
366 pbuf = &tx_ring->cmd_buf_arr[producer];
368 pbuf->frag_count = 0;
370 memcpy(&tx_ring->desc_head[producer],
371 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
373 producer = get_next_index(producer, tx_ring->num_desc);
376 } while (i != nr_desc);
378 tx_ring->producer = producer;
/* Tell the hardware about the new producer index. */
380 qlcnic_update_cmd_producer(tx_ring);
382 __netif_tx_unlock_bh(tx_ring->txq);
/*
 * Build a QLCNIC_MAC_EVENT request (add/delete MAC, optionally with a
 * VLAN id, per 'op') and submit it to the firmware via the Tx ring.
 */
388 qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
389 __le16 vlan_id, unsigned op)
391 struct qlcnic_nic_req req;
392 struct qlcnic_mac_req *mac_req;
393 struct qlcnic_vlan_req *vlan_req;
396 memset(&req, 0, sizeof(struct qlcnic_nic_req));
397 req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
399 word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
400 req.req_hdr = cpu_to_le64(word);
/* words[0] carries the op + MAC address, words[1] the VLAN id. */
402 mac_req = (struct qlcnic_mac_req *)&req.words[0];
404 memcpy(mac_req->mac_addr, addr, 6);
406 vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
407 vlan_req->vlan_id = vlan_id;
409 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
/*
 * Program 'addr' as a unicast filter: skip if it is already on
 * adapter->mac_list, otherwise allocate a tracking entry (GFP_ATOMIC —
 * may run from the set-multicast path), push QLCNIC_MAC_ADD to the
 * firmware, and append the entry to the list.
 */
412 static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
414 struct list_head *head;
415 struct qlcnic_mac_list_s *cur;
417 /* look up if already exists */
418 list_for_each(head, &adapter->mac_list) {
419 cur = list_entry(head, struct qlcnic_mac_list_s, list);
420 if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
424 cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
426 dev_err(&adapter->netdev->dev,
427 "failed to add mac address filter\n");
430 memcpy(cur->mac_addr, addr, ETH_ALEN);
/* Only track the address once the firmware accepted it. */
432 if (qlcnic_sre_macaddr_change(adapter,
433 cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
438 list_add_tail(&cur->list, &adapter->mac_list);
/*
 * ndo_set_rx_mode handler: re-program the device MAC and broadcast
 * filters, add each multicast address (unless promisc/allmulti forces a
 * catch-all mode), enable MAC learning only for accept-all mode, and
 * finally push the resulting receive mode to the firmware.
 */
442 void qlcnic_set_multi(struct net_device *netdev)
444 struct qlcnic_adapter *adapter = netdev_priv(netdev);
445 struct netdev_hw_addr *ha;
446 static const u8 bcast_addr[ETH_ALEN] = {
447 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
449 u32 mode = VPORT_MISS_MODE_DROP;
451 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
454 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
455 qlcnic_nic_add_mac(adapter, bcast_addr);
/* Promiscuous requested and not administratively disabled. */
457 if (netdev->flags & IFF_PROMISC) {
458 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
459 mode = VPORT_MISS_MODE_ACCEPT_ALL;
/* Too many multicast entries to filter individually. */
463 if ((netdev->flags & IFF_ALLMULTI) ||
464 (netdev_mc_count(netdev) > adapter->max_mc_count)) {
465 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
469 if (!netdev_mc_empty(netdev)) {
470 netdev_for_each_mc_addr(ha, netdev) {
471 qlcnic_nic_add_mac(adapter, ha->addr);
/* MAC learning (LB filters) is only meaningful in accept-all mode. */
476 if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
477 qlcnic_alloc_lb_filters_mem(adapter);
478 adapter->mac_learn = 1;
480 adapter->mac_learn = 0;
483 qlcnic_nic_set_promisc(adapter, mode);
/*
 * Send a SET_MAC_RECEIVE_MODE request carrying 'mode' (one of the
 * VPORT_MISS_MODE_* values) to the firmware for this port.
 */
486 int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
488 struct qlcnic_nic_req req;
491 memset(&req, 0, sizeof(struct qlcnic_nic_req));
493 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
495 word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
496 ((u64)adapter->portnum << 16);
497 req.req_hdr = cpu_to_le64(word);
499 req.words[0] = cpu_to_le64(mode);
501 return qlcnic_send_cmd_descs(adapter,
502 (struct cmd_desc_type0 *)&req, 1);
/*
 * Tear down every tracked MAC filter: ask the firmware to delete each
 * address, then unlink (and, per the elided lines, free) the entry.
 */
505 void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
507 struct qlcnic_mac_list_s *cur;
508 struct list_head *head = &adapter->mac_list;
510 while (!list_empty(head)) {
511 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
512 qlcnic_sre_macaddr_change(adapter,
513 cur->mac_addr, 0, QLCNIC_MAC_DEL);
514 list_del(&cur->list);
/*
 * Walk the loopback-filter hash and expire entries older than
 * QLCNIC_FILTER_AGE: tell the firmware to drop the MAC (VLAN-aware),
 * then remove the entry from the hash under mac_learn_lock.
 * NOTE(review): the age comparison's left-hand side is elided in this
 * listing — confirm the jiffies test against the full source.
 */
519 void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
521 struct qlcnic_filter *tmp_fil;
522 struct hlist_node *tmp_hnode, *n;
523 struct hlist_head *head;
526 for (i = 0; i < adapter->fhash.fmax; i++) {
527 head = &(adapter->fhash.fhead[i]);
529 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
532 (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
533 qlcnic_sre_macaddr_change(adapter,
534 tmp_fil->faddr, tmp_fil->vlan_id,
535 tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
537 spin_lock_bh(&adapter->mac_learn_lock);
538 adapter->fhash.fnum--;
539 hlist_del(&tmp_fil->fnode);
540 spin_unlock_bh(&adapter->mac_learn_lock);
/*
 * Unconditional variant of qlcnic_prune_lb_filters(): delete every
 * loopback filter from the firmware and the hash, regardless of age.
 */
547 void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
549 struct qlcnic_filter *tmp_fil;
550 struct hlist_node *tmp_hnode, *n;
551 struct hlist_head *head;
554 for (i = 0; i < adapter->fhash.fmax; i++) {
555 head = &(adapter->fhash.fhead[i]);
557 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
558 qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
559 tmp_fil->vlan_id, tmp_fil->vlan_id ?
560 QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
561 spin_lock_bh(&adapter->mac_learn_lock);
562 adapter->fhash.fnum--;
563 hlist_del(&tmp_fil->fnode);
564 spin_unlock_bh(&adapter->mac_learn_lock);
/*
 * Enable (flag != 0) or disable firmware loopback mode via a
 * CONFIG_LOOPBACK request; logs on submission failure.
 */
570 static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
572 struct qlcnic_nic_req req;
575 memset(&req, 0, sizeof(struct qlcnic_nic_req));
577 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
578 req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
579 ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
581 req.words[0] = cpu_to_le64(flag);
583 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
585 dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
586 flag ? "Set" : "Reset");
/*
 * Enter loopback test mode: set firmware loopback, then switch the port
 * to accept-all; rolls loopback back if the promisc change fails.
 */
590 int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
592 if (qlcnic_set_fw_loopback(adapter, mode))
595 if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
596 qlcnic_set_fw_loopback(adapter, 0);
/*
 * Leave loopback test mode: disable firmware loopback and restore the
 * receive mode implied by the netdev flags (promisc > allmulti > drop).
 */
604 void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
606 int mode = VPORT_MISS_MODE_DROP;
607 struct net_device *netdev = adapter->netdev;
609 qlcnic_set_fw_loopback(adapter, 0);
611 if (netdev->flags & IFF_PROMISC)
612 mode = VPORT_MISS_MODE_ACCEPT_ALL;
613 else if (netdev->flags & IFF_ALLMULTI)
614 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
616 qlcnic_nic_set_promisc(adapter, mode);
621 * Send the interrupt coalescing parameter set by ethtool to the card.
/*
 * Packs coal.flag / rx_packets / rx_time_us / timer_out / type /
 * sts_ring_mask from adapter->ahw->coal into a CONFIG_INTR_COALESCE
 * request and submits it; logs on failure.
 */
623 int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
625 struct qlcnic_nic_req req;
628 memset(&req, 0, sizeof(struct qlcnic_nic_req));
630 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
632 req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
633 ((u64) adapter->portnum << 16));
635 req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
636 req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
637 ((u64) adapter->ahw->coal.rx_time_us) << 16);
638 req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
639 ((u64) adapter->ahw->coal.type) << 32 |
640 ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
641 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
643 dev_err(&adapter->netdev->dev,
644 "Could not send interrupt coalescing parameters\n");
/*
 * Enable/disable hardware LRO via a CONFIG_HW_LRO request; no-op while
 * the firmware is not attached.
 */
648 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
650 struct qlcnic_nic_req req;
654 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
657 memset(&req, 0, sizeof(struct qlcnic_nic_req));
659 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
661 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
662 req.req_hdr = cpu_to_le64(word);
664 req.words[0] = cpu_to_le64(enable);
666 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
668 dev_err(&adapter->netdev->dev,
669 "Could not send configure hw lro request\n");
/*
 * Toggle firmware bridging mode.  Skips the request when the adapter is
 * already in the requested state; on success flips the
 * QLCNIC_BRIDGE_ENABLED flag to track the new state.
 */
674 int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
676 struct qlcnic_nic_req req;
/* Already in the requested state — nothing to send. */
680 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
683 memset(&req, 0, sizeof(struct qlcnic_nic_req));
685 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
687 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
688 ((u64)adapter->portnum << 16);
689 req.req_hdr = cpu_to_le64(word);
691 req.words[0] = cpu_to_le64(enable);
693 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
695 dev_err(&adapter->netdev->dev,
696 "Could not send configure bridge mode request\n");
698 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
/* Hash type value: TCP-over-IP hashing for both IPv4 and IPv6. */
704 #define RSS_HASHTYPE_IP_TCP 0x3
/*
 * Enable/disable RSS: send the hash configuration word plus a fixed
 * 40-byte Toeplitz hash key to the firmware.
 */
706 int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
708 struct qlcnic_nic_req req;
/* Well-known static RSS key (same constant used by several drivers). */
712 static const u64 key[] = {
713 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
714 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
715 0x255b0ec26d5a56daULL
718 memset(&req, 0, sizeof(struct qlcnic_nic_req));
719 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
721 word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
722 req.req_hdr = cpu_to_le64(word);
/* Bit layout of the configuration word (see fields below). */
726 * bits 3-0: hash_method
727 * 5-4: hash_type_ipv4
728 * 7-6: hash_type_ipv6
730 * 9: use indirection table
732 * 63-48: indirection table mask
734 word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
735 ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
736 ((u64)(enable & 0x1) << 8) |
738 req.words[0] = cpu_to_le64(word);
739 for (i = 0; i < 5; i++)
740 req.words[i+1] = cpu_to_le64(key[i]);
742 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
744 dev_err(&adapter->netdev->dev, "could not configure RSS\n");
/*
 * Notify the firmware that an IP address was added (QLCNIC_IP_UP) or
 * removed, so it can steer matching traffic; logs on failure.
 * NOTE(review): the error string below misspells "request" as
 * "reuqest" — runtime string, left untouched here; fix in a code change.
 */
749 int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
751 struct qlcnic_nic_req req;
752 struct qlcnic_ipaddr *ipa;
756 memset(&req, 0, sizeof(struct qlcnic_nic_req));
757 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
759 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
760 req.req_hdr = cpu_to_le64(word);
762 req.words[0] = cpu_to_le64(cmd);
763 ipa = (struct qlcnic_ipaddr *)&req.words[1];
766 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
768 dev_err(&adapter->netdev->dev,
769 "could not notify %s IP 0x%x reuqest\n",
770 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
/*
 * Ask the firmware to start (enable != 0) or stop sending link-state
 * change notifications for this port.
 */
775 int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
777 struct qlcnic_nic_req req;
781 memset(&req, 0, sizeof(struct qlcnic_nic_req));
782 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
784 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
785 req.req_hdr = cpu_to_le64(word);
786 req.words[0] = cpu_to_le64(enable | (enable << 8));
788 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
790 dev_err(&adapter->netdev->dev,
791 "could not configure link notification\n");
/*
 * Ask the firmware to flush all in-flight LRO flows (LRO_REQUEST with
 * the CLEANUP sub-opcode); no-op while the firmware is not attached.
 */
796 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
798 struct qlcnic_nic_req req;
802 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
805 memset(&req, 0, sizeof(struct qlcnic_nic_req));
806 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
808 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
809 ((u64)adapter->portnum << 16) |
810 ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
812 req.req_hdr = cpu_to_le64(word);
814 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
816 dev_err(&adapter->netdev->dev,
817 "could not cleanup lro flows\n");
823 * qlcnic_change_mtu - Change the Maximum Transfer Unit
824 * @returns 0 on success, negative on failure
/*
 * Validates the requested MTU against the P3P limits, then asks the
 * firmware to apply it.
 * NOTE(review): the dev_err below passes (P3P_MAX_MTU, P3P_MIN_MTU)
 * into a "min < mtu < max"-shaped format — the arguments appear
 * swapped; worth fixing in a code change.
 */
827 int qlcnic_change_mtu(struct net_device *netdev, int mtu)
829 struct qlcnic_adapter *adapter = netdev_priv(netdev);
832 if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
833 dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
834 " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
838 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
/*
 * ndo_fix_features: with an eswitch enabled, refuse checksum-offload
 * feature changes (revert them to the current state); LRO additionally
 * requires RX checksum offload.
 */
847 netdev_features_t qlcnic_fix_features(struct net_device *netdev,
848 netdev_features_t features)
850 struct qlcnic_adapter *adapter = netdev_priv(netdev);
852 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
853 netdev_features_t changed = features ^ netdev->features;
854 features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
857 if (!(features & NETIF_F_RXCSUM))
858 features &= ~NETIF_F_LRO;
/*
 * ndo_set_features: apply an LRO toggle to the hardware.  Other feature
 * bits need no device programming.  On disable, also flush any LRO
 * flows still held by the firmware.
 */
864 int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
866 struct qlcnic_adapter *adapter = netdev_priv(netdev);
867 netdev_features_t changed = netdev->features ^ features;
868 int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
870 if (!(changed & NETIF_F_LRO))
873 netdev->features = features ^ NETIF_F_LRO;
875 if (qlcnic_config_hw_lro(adapter, hw_lro))
878 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
885 * Changes the CRB window to the specified window.
887 /* Returns < 0 if off is not valid,
888 * 1 if window access is needed. 'off' is set to offset from
889 * CRB space in 128M pci map
890 * 0 if no window access is needed. 'off' is set to 2M addr
891 * In: 'off' is offset from base in 128M pci map
/*
 * Translate a 128M-map CRB offset into a 2M BAR address via
 * crb_128M_2M_map[]; falls back to the indirect window region when the
 * offset has no direct mapping.
 */
894 qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
895 ulong off, void __iomem **addr)
897 const struct crb_128M_2M_sub_block_map *m;
/* Reject offsets outside the CRB space. */
899 if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
902 off -= QLCNIC_PCI_CRBSPACE;
907 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
/* Direct mapping exists: compute the BAR address straight away. */
909 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
910 *addr = adapter->ahw->pci_base0 + m->start_2M +
911 (off - m->start_128M);
916 * Not in direct map, use crb window
918 *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
923 * In: 'off' is offset from CRB space in 128M pci map
924 * Out: 'off' is 2M pci map addr
925 * side effect: lock crb window
/*
 * Program the CRB window register (CRB_WINDOW_2M) with the hub/agent
 * address derived from 'off', and read it back to verify the window
 * actually switched; warns (rate-limited) on mismatch.
 */
928 qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
931 void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
933 off -= QLCNIC_PCI_CRBSPACE;
935 window = CRB_HI(off);
937 dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
941 writel(window, addr);
/* Read-back both flushes the write and confirms the switch took. */
942 if (readl(addr) != window) {
943 if (printk_ratelimit())
944 dev_warn(&adapter->pdev->dev,
945 "failed to set CRB window to %d off 0x%lx\n",
/*
 * Write a CRB register: direct BAR write when the offset maps directly,
 * otherwise switch the CRB window under crb_lock + the hardware window
 * lock and write through the indirect region.  (The direct-path writel
 * is elided from this listing.)
 */
953 qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
957 void __iomem *addr = NULL;
959 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
967 /* indirect access */
968 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
969 crb_win_lock(adapter);
970 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
973 crb_win_unlock(adapter);
974 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
978 dev_err(&adapter->pdev->dev,
979 "%s: invalid offset: 0x%016lx\n", __func__, off);
/*
 * Read counterpart of qlcnic_hw_write_wx_2M(): direct BAR read when the
 * offset maps directly, otherwise indirect access with the CRB window
 * switched under crb_lock + the hardware window lock.  (The readl calls
 * are elided from this listing.)
 */
985 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
990 void __iomem *addr = NULL;
992 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
998 /* indirect access */
999 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
1000 crb_win_lock(adapter);
1001 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
1003 crb_win_unlock(adapter);
1004 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
1008 dev_err(&adapter->pdev->dev,
1009 "%s: invalid offset: 0x%016lx\n", __func__, off);
/*
 * Resolve a CRB offset to its mapped BAR address; the offset is
 * expected to always map directly (WARN_ON on translation failure).
 */
1016 qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
1018 void __iomem *addr = NULL;
1020 WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
/*
 * Select the OCM window covering 'addr': write the window bits to the
 * OCM window CRB register (read back to flush the posted write), and
 * return the corresponding offset within the 2M BAR via *start.
 */
1027 qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
1028 u64 addr, u32 *start)
1032 window = OCM_WIN_P3P(addr);
1034 writel(window, adapter->ahw->ocm_win_crb);
1035 /* read back to flush */
1036 readl(adapter->ahw->ocm_win_crb);
1038 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
/*
 * 64-bit OCM access through the windowed BAR: set the OCM window for
 * 'off' under mem_lock, then readq (op == 0) or writeq (*data) at the
 * resulting address.
 */
1043 qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
1050 mutex_lock(&adapter->ahw->mem_lock);
1052 ret = qlcnic_pci_set_window_2M(adapter, off, &start);
1056 addr = adapter->ahw->pci_base0 + start;
1058 if (op == 0) /* read */
1059 *data = readq(addr);
1061 writeq(*data, addr);
1064 mutex_unlock(&adapter->ahw->mem_lock);
/*
 * 64-bit read from the CAM/QM region, serialized by mem_lock; the CAMQM
 * area has a fixed mapping in the 2M BAR, so no window switch is needed.
 */
1070 qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1072 void __iomem *addr = adapter->ahw->pci_base0 +
1073 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1075 mutex_lock(&adapter->ahw->mem_lock);
1076 *data = readq(addr);
1077 mutex_unlock(&adapter->ahw->mem_lock);
/*
 * 64-bit write to the CAM/QM region, serialized by mem_lock.
 * NOTE(review): the writeq call itself is elided from this listing
 * (stale numbering skips between the lock and unlock).
 */
1081 qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1083 void __iomem *addr = adapter->ahw->pci_base0 +
1084 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1086 mutex_lock(&adapter->ahw->mem_lock);
1088 mutex_unlock(&adapter->ahw->mem_lock);
/* Poll bound for the MIU test-agent busy bit. */
1091 #define MAX_CTL_CHECK 1000
/*
 * Write a 64-bit word to adapter memory (QDR/DDR via the MIU test
 * agent, or OCM directly via qlcnic_pci_mem_access_direct).  For the
 * test-agent path: the 128-bit agent line is first read back so the
 * untouched 64-bit half can be preserved, then the new data is written
 * and the agent kicked; the busy bit is polled up to MAX_CTL_CHECK
 * times after each operation.  Requires 64-bit aligned 'off'.
 */
1094 qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
1099 void __iomem *mem_crb;
1101 /* Only 64-bit aligned access */
1105 /* P3 onward, test agent base for MIU and SIU is same */
1106 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1107 QLCNIC_ADDR_QDR_NET_MAX)) {
1108 mem_crb = qlcnic_get_ioaddr(adapter,
1109 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1113 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1114 mem_crb = qlcnic_get_ioaddr(adapter,
1115 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
/* OCM bypasses the test agent entirely. */
1119 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1120 return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
1127 mutex_lock(&adapter->ahw->mem_lock);
/* Latch the (16-byte aligned) target address and start a read. */
1129 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1130 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1133 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1134 writel((TA_CTL_START | TA_CTL_ENABLE),
1135 (mem_crb + TEST_AGT_CTRL));
1137 for (j = 0; j < MAX_CTL_CHECK; j++) {
1138 temp = readl(mem_crb + TEST_AGT_CTRL);
1139 if ((temp & TA_CTL_BUSY) == 0)
1143 if (j >= MAX_CTL_CHECK) {
/* Preserve the 64-bit half we are NOT writing, then stage the new
 * data into the other half's write registers. */
1148 i = (off & 0xf) ? 0 : 2;
1149 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1150 mem_crb + MIU_TEST_AGT_WRDATA(i));
1151 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1152 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1153 i = (off & 0xf) ? 2 : 0;
1155 writel(data & 0xffffffff,
1156 mem_crb + MIU_TEST_AGT_WRDATA(i));
1157 writel((data >> 32) & 0xffffffff,
1158 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
/* Kick the write and poll the busy bit again. */
1160 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1161 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1162 (mem_crb + TEST_AGT_CTRL));
1164 for (j = 0; j < MAX_CTL_CHECK; j++) {
1165 temp = readl(mem_crb + TEST_AGT_CTRL);
1166 if ((temp & TA_CTL_BUSY) == 0)
1170 if (j >= MAX_CTL_CHECK) {
1171 if (printk_ratelimit())
1172 dev_err(&adapter->pdev->dev,
1173 "failed to write through agent\n");
1179 mutex_unlock(&adapter->ahw->mem_lock);
/*
 * Read a 64-bit word from adapter memory (QDR/DDR via the MIU test
 * agent, or OCM directly).  Mirrors qlcnic_pci_mem_write_2M(): latch
 * the address, start the agent, poll the busy bit up to MAX_CTL_CHECK
 * times, then pick the upper or lower 64-bit half of the 128-bit agent
 * line according to 'off'.  Requires 64-bit aligned 'off'.
 */
1185 qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1191 void __iomem *mem_crb;
1193 /* Only 64-bit aligned access */
1197 /* P3 onward, test agent base for MIU and SIU is same */
1198 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1199 QLCNIC_ADDR_QDR_NET_MAX)) {
1200 mem_crb = qlcnic_get_ioaddr(adapter,
1201 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1205 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1206 mem_crb = qlcnic_get_ioaddr(adapter,
1207 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
/* OCM bypasses the test agent entirely. */
1211 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
1212 return qlcnic_pci_mem_access_direct(adapter,
1221 mutex_lock(&adapter->ahw->mem_lock);
1223 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1224 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1225 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1226 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1228 for (j = 0; j < MAX_CTL_CHECK; j++) {
1229 temp = readl(mem_crb + TEST_AGT_CTRL);
1230 if ((temp & TA_CTL_BUSY) == 0)
1234 if (j >= MAX_CTL_CHECK) {
1235 if (printk_ratelimit())
1236 dev_err(&adapter->pdev->dev,
1237 "failed to read through agent\n");
/* Select lower or upper 64-bit half of the agent's 128-bit line. */
1240 off8 = MIU_TEST_AGT_RDDATA_LO;
1242 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1244 temp = readl(mem_crb + off8 + 4);
1245 val = (u64)temp << 32;
1246 val |= readl(mem_crb + off8);
1251 mutex_unlock(&adapter->ahw->mem_lock);
/*
 * Identify the board from flash: verify the BDINFO magic, read the
 * board type (with a GPIO-based override that turns a 4_GB_MM board
 * into 10G_TP), and set ahw->port_type to QLCNIC_XGBE or QLCNIC_GBE
 * accordingly; unknown types log an error and default to XGBE.
 */
1256 int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1258 int offset, board_type, magic;
1259 struct pci_dev *pdev = adapter->pdev;
1261 offset = QLCNIC_FW_MAGIC_OFFSET;
1262 if (qlcnic_rom_fast_read(adapter, offset, &magic))
1265 if (magic != QLCNIC_BDINFO_MAGIC) {
1266 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1271 offset = QLCNIC_BRDTYPE_OFFSET;
1272 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1275 adapter->ahw->board_type = board_type;
/* GPIO strap distinguishes 10G_TP from 4_GB_MM on this board id. */
1277 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1278 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1279 if ((gpio & 0x8000) == 0)
1280 board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1283 switch (board_type) {
1284 case QLCNIC_BRDTYPE_P3P_HMEZ:
1285 case QLCNIC_BRDTYPE_P3P_XG_LOM:
1286 case QLCNIC_BRDTYPE_P3P_10G_CX4:
1287 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
1288 case QLCNIC_BRDTYPE_P3P_IMEZ:
1289 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
1290 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
1291 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1292 case QLCNIC_BRDTYPE_P3P_10G_XFP:
1293 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1294 adapter->ahw->port_type = QLCNIC_XGBE;
1296 case QLCNIC_BRDTYPE_P3P_REF_QG:
1297 case QLCNIC_BRDTYPE_P3P_4_GB:
1298 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1299 adapter->ahw->port_type = QLCNIC_GBE;
/* 10G_TP boards: first two ports are 10G, the rest 1G. */
1301 case QLCNIC_BRDTYPE_P3P_10G_TP:
1302 adapter->ahw->port_type = (adapter->portnum < 2) ?
1303 QLCNIC_XGBE : QLCNIC_GBE;
1306 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1307 adapter->ahw->port_type = QLCNIC_XGBE;
/*
 * qlcnic_wol_supported - check whether Wake-on-LAN is enabled for this port.
 *
 * Tests the per-port bit first in the non-volatile WOL configuration and
 * then in the active WOL configuration register; presumably both must be
 * set for WOL to be reported as supported (return statements are not
 * visible in this view — TODO confirm).
 */
1315 qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1319 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1320 if (wol_cfg & (1UL << adapter->portnum)) {
1321 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG)
1322 if (wol_cfg & (1 << adapter->portnum))
/*
 * qlcnic_config_led - ask firmware to set the port LED state/blink rate.
 *
 * Builds a host-to-card request (opcode CONFIG_LED, port number in the
 * header, rate and state in the payload words) and submits it as a single
 * command descriptor.  All multi-byte fields are little-endian as required
 * by the firmware interface.
 */
1329 int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1331 struct qlcnic_nic_req req;
1335 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1336 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
/* Encode the opcode plus the issuing port in the request header. */
1338 word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1339 req.req_hdr = cpu_to_le64(word);
1341 req.words[0] = cpu_to_le64((u64)rate << 32);
1342 req.words[1] = cpu_to_le64(state);
1344 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
/* NOTE(review): rv check before this error print is not visible here. */
1346 dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1351 /* FW dump related functions */
/*
 * qlcnic_dump_crb - FW-dump handler: capture a strided range of CRB regs.
 *
 * Emits (address, value) pairs into the dump buffer for crb->no_ops
 * registers spaced crb->stride apart.  Returns the number of bytes
 * written (two u32s per register).
 * NOTE(review): the initialisation of 'addr' (presumably from crb->addr)
 * is not visible in this view.
 */
1352 static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
1353 struct qlcnic_dump_entry *entry, __le32 *buffer)
1357 struct __crb *crb = &entry->region.crb;
1358 void __iomem *base = adapter->ahw->pci_base0;
1362 for (i = 0; i < crb->no_ops; i++) {
1363 qlcnic_read_dump_reg(addr, base, &data);
/* Dump format stores the register address followed by its value. */
1364 *buffer++ = cpu_to_le32(addr);
1365 *buffer++ = cpu_to_le32(data);
1366 addr += crb->stride;
1368 return crb->no_ops * 2 * sizeof(u32);
/*
 * qlcnic_dump_ctrl - FW-dump handler: execute a template "control" entry.
 *
 * Each control op packs up to 8 opcode bits; for every set bit the
 * corresponding sub-operation is run against the current address, then
 * the address advances by ctr->stride.  Opcodes cover plain writes,
 * read-modify-writes, polling, and save/restore through the template
 * header's saved_state[] scratch area.
 * NOTE(review): the switch statement head, break statements and several
 * operand lines are not visible in this sampled view.
 */
1371 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
1372 struct qlcnic_dump_entry *entry, __le32 *buffer)
1374 int i, k, timeout = 0;
1375 void __iomem *base = adapter->ahw->pci_base0;
1378 struct __ctrl *ctr = &entry->region.ctrl;
1379 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
1382 no_ops = ctr->no_ops;
1384 for (i = 0; i < no_ops; i++) {
/* Walk the 8 opcode bits; skip the ones not requested. */
1387 for (k = 0; k < 8; k++) {
1388 if (!(ctr->opcode & (1 << k)))
/* Write an immediate value to the register. */
1391 case QLCNIC_DUMP_WCRB:
1392 qlcnic_write_dump_reg(addr, base, ctr->val1);
/* Read-then-write-back (read side effect / latch). */
1394 case QLCNIC_DUMP_RWCRB:
1395 qlcnic_read_dump_reg(addr, base, &data);
1396 qlcnic_write_dump_reg(addr, base, data);
/* Read-modify-write: AND with a mask (operand line not visible). */
1398 case QLCNIC_DUMP_ANDCRB:
1399 qlcnic_read_dump_reg(addr, base, &data);
1400 qlcnic_write_dump_reg(addr, base,
/* Read-modify-write: OR with a mask (operand line not visible). */
1403 case QLCNIC_DUMP_ORCRB:
1404 qlcnic_read_dump_reg(addr, base, &data);
1405 qlcnic_write_dump_reg(addr, base,
/* Poll until (reg & val2) == val1 or ctr->timeout is exceeded. */
1408 case QLCNIC_DUMP_POLLCRB:
1409 while (timeout <= ctr->timeout) {
1410 qlcnic_read_dump_reg(addr, base, &data);
1411 if ((data & ctr->val2) == ctr->val1)
1416 if (timeout > ctr->timeout) {
1417 dev_info(&adapter->pdev->dev,
1418 "Timed out, aborting poll CRB\n");
/* Read a register and stash the value in saved_state[index_v]. */
1422 case QLCNIC_DUMP_RD_SAVE:
1424 addr = t_hdr->saved_state[ctr->index_a];
1425 qlcnic_read_dump_reg(addr, base, &data);
1426 t_hdr->saved_state[ctr->index_v] = data;
/* Write a previously saved value back to a register. */
1428 case QLCNIC_DUMP_WRT_SAVED:
1430 data = t_hdr->saved_state[ctr->index_v];
1434 addr = t_hdr->saved_state[ctr->index_a];
1435 qlcnic_write_dump_reg(addr, base, data);
/* Shift/modify a saved value in place (middle lines not visible). */
1437 case QLCNIC_DUMP_MOD_SAVE_ST:
1438 data = t_hdr->saved_state[ctr->index_v];
1439 data <<= ctr->shl_val;
1440 data >>= ctr->shr_val;
1445 t_hdr->saved_state[ctr->index_v] = data;
1448 dev_info(&adapter->pdev->dev,
1449 "Unknown opcode\n");
1453 addr += ctr->stride;
/*
 * qlcnic_dump_mux - FW-dump handler: read a register through a mux.
 *
 * For each operation, writes a select value to mux->addr, reads the
 * muxed result from mux->read_addr, and records the (select, value)
 * pair.  The select value advances by mux->val_stride each iteration.
 * NOTE(review): initialisation of 'val' is not visible in this view.
 */
1458 static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
1459 struct qlcnic_dump_entry *entry, __le32 *buffer)
1463 struct __mux *mux = &entry->region.mux;
1464 void __iomem *base = adapter->ahw->pci_base0;
1467 for (loop = 0; loop < mux->no_ops; loop++) {
1468 qlcnic_write_dump_reg(mux->addr, base, val);
1469 qlcnic_read_dump_reg(mux->read_addr, base, &data);
1470 *buffer++ = cpu_to_le32(val);
1471 *buffer++ = cpu_to_le32(data);
1472 val += mux->val_stride;
1474 return 2 * mux->no_ops * sizeof(u32);
/*
 * qlcnic_dump_que - FW-dump handler: capture hardware queue state.
 *
 * Selects each queue by writing que_id to que->sel_addr, then reads
 * 'cnt' words starting at que->read_addr (stride read_addr_stride).
 * que_id advances by que->stride per queue.  Returns total bytes
 * written: no_ops queues x cnt words each.
 */
1477 static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
1478 struct qlcnic_dump_entry *entry, __le32 *buffer)
1481 u32 cnt, addr, data, que_id = 0;
1482 void __iomem *base = adapter->ahw->pci_base0;
1483 struct __queue *que = &entry->region.que;
1485 addr = que->read_addr;
1486 cnt = que->read_addr_cnt;
1488 for (loop = 0; loop < que->no_ops; loop++) {
/* Select the queue to be dumped. */
1489 qlcnic_write_dump_reg(que->sel_addr, base, que_id);
/* Reset the read cursor for each queue. */
1490 addr = que->read_addr;
1491 for (i = 0; i < cnt; i++) {
1492 qlcnic_read_dump_reg(addr, base, &data);
1493 *buffer++ = cpu_to_le32(data);
1494 addr += que->read_addr_stride;
1496 que_id += que->stride;
1498 return que->no_ops * cnt * sizeof(u32);
/*
 * qlcnic_dump_ocm - FW-dump handler: copy on-chip memory into the buffer.
 *
 * Walks ocm->no_ops words starting at pci_base0 + ocm->read_addr with
 * stride read_addr_stride.  NOTE(review): the actual MMIO read that
 * fills 'data' (presumably a readl of 'addr') is not visible in this view.
 */
1501 static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
1502 struct qlcnic_dump_entry *entry, __le32 *buffer)
1507 struct __ocm *ocm = &entry->region.ocm;
1509 addr = adapter->ahw->pci_base0 + ocm->read_addr;
1510 for (i = 0; i < ocm->no_ops; i++) {
1512 *buffer++ = cpu_to_le32(data);
1513 addr += ocm->read_addr_stride;
1515 return ocm->no_ops * sizeof(u32);
/*
 * qlcnic_read_rom - FW-dump handler: read words from flash ROM.
 *
 * Acquires flash semaphore 2 (reading SEM2_LK; presumably retried up to
 * MAX_CTL_CHECK times — retry body not visible), records the lock owner,
 * then reads 'size' words by programming the flash window register and
 * reading through FLASH_ROM_DATA.  The semaphore is released by reading
 * SEM2_ULK at the end.
 */
1518 static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
1519 struct qlcnic_dump_entry *entry, __le32 *buffer)
1522 u32 fl_addr, size, val, lck_val, addr;
1523 struct __mem *rom = &entry->region.mem;
1524 void __iomem *base = adapter->ahw->pci_base0;
1526 fl_addr = rom->addr;
/* Take the flash hardware semaphore before touching the ROM window. */
1529 lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
1530 if (!lck_val && count < MAX_CTL_CHECK) {
/* Record which PCI function owns the lock. */
1535 writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
1536 for (i = 0; i < size; i++) {
/* Window select uses the upper 16 address bits... */
1537 addr = fl_addr & 0xFFFF0000;
1538 qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
/* ...and the low 16 bits index into the data window. */
1539 addr = LSW(fl_addr) + FLASH_ROM_DATA;
1540 qlcnic_read_dump_reg(addr, base, &val);
1542 *buffer++ = cpu_to_le32(val);
/* Release the flash semaphore (unlock happens on read). */
1544 readl(base + QLCNIC_FLASH_SEM2_ULK);
/*
 * qlcnic_dump_l1_cache - FW-dump handler: dump L1 cache tag/data arrays.
 *
 * For each of no_ops cache lines: writes the tag select value, kicks the
 * controller via ctrl_addr, then reads read_addr_num words at stride
 * read_addr_stride.  NOTE(review): the per-iteration advance of 'val'
 * and the inner loop head are not visible in this sampled view.
 */
1548 static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
1549 struct qlcnic_dump_entry *entry, __le32 *buffer)
1552 u32 cnt, val, data, addr;
1553 void __iomem *base = adapter->ahw->pci_base0;
1554 struct __cache *l1 = &entry->region.cache;
1556 val = l1->init_tag_val;
1558 for (i = 0; i < l1->no_ops; i++) {
/* Select the cache line, then trigger the read via the control reg. */
1559 qlcnic_write_dump_reg(l1->addr, base, val);
1560 qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
1561 addr = l1->read_addr;
1562 cnt = l1->read_addr_num;
1564 qlcnic_read_dump_reg(addr, base, &data);
1565 *buffer++ = cpu_to_le32(data);
1566 addr += l1->read_addr_stride;
1571 return l1->no_ops * l1->read_addr_num * sizeof(u32);
/*
 * qlcnic_dump_l2_cache - FW-dump handler: dump L2 cache tag/data arrays.
 *
 * Like the L1 variant, but the controller is polled for completion:
 * poll_mask and poll_to are unpacked from the upper half of ctrl_val
 * (poll_mask = low byte of MSW, poll_to = high byte of MSW).  Aborts the
 * whole entry with a timeout error if the controller stays busy.
 * NOTE(review): several loop heads/operands are not visible in this view.
 */
1574 static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
1575 struct qlcnic_dump_entry *entry, __le32 *buffer)
1578 u32 cnt, val, data, addr;
1579 u8 poll_mask, poll_to, time_out = 0;
1580 void __iomem *base = adapter->ahw->pci_base0;
1581 struct __cache *l2 = &entry->region.cache;
1583 val = l2->init_tag_val;
/* Completion mask and timeout budget are packed into ctrl_val's MSW. */
1584 poll_mask = LSB(MSW(l2->ctrl_val));
1585 poll_to = MSB(MSW(l2->ctrl_val));
1587 for (i = 0; i < l2->no_ops; i++) {
1588 qlcnic_write_dump_reg(l2->addr, base, val);
/* Only kick the controller if a control command is specified. */
1589 if (LSW(l2->ctrl_val))
1590 qlcnic_write_dump_reg(l2->ctrl_addr, base,
/* Poll the control register until the busy bits in poll_mask clear. */
1595 qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
1596 if (!(data & poll_mask))
1600 } while (time_out <= poll_to);
1602 if (time_out > poll_to) {
1603 dev_err(&adapter->pdev->dev,
1604 "Timeout exceeded in %s, aborting dump\n",
1609 addr = l2->read_addr;
1610 cnt = l2->read_addr_num;
1612 qlcnic_read_dump_reg(addr, base, &data);
1613 *buffer++ = cpu_to_le32(data);
1614 addr += l2->read_addr_stride;
1619 return l2->no_ops * l2->read_addr_num * sizeof(u32);
/*
 * qlcnic_read_memory - FW-dump handler: read adapter memory via the MIU
 * test agent.
 *
 * Requires a 16-byte-aligned address and a size that is a multiple of 16,
 * because each agent transaction returns four 32-bit words
 * (MIU_TEST_READ_DATA[0..3]).  Serialised against other memory-agent
 * users with ahw->mem_lock.  Per transaction: program the address, start
 * the agent, spin up to MAX_CTL_CHECK on the busy bit, then drain the
 * four data registers into the dump buffer.
 * NOTE(review): initialisation of 'addr' and the loop's decrement of
 * reg_read are not visible in this sampled view.
 */
1622 static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
1623 struct qlcnic_dump_entry *entry, __le32 *buffer)
1625 u32 addr, data, test, ret = 0;
1627 struct __mem *mem = &entry->region.mem;
1628 void __iomem *base = adapter->ahw->pci_base0;
1630 reg_read = mem->size;
1632 /* check for data size of multiple of 16 and 16 byte alignment */
1633 if ((addr & 0xf) || (reg_read%16)) {
1634 dev_info(&adapter->pdev->dev,
1635 "Unaligned memory addr:0x%x size:0x%x\n",
/* Exclusive access to the MIU test agent. */
1640 mutex_lock(&adapter->ahw->mem_lock);
1642 while (reg_read != 0) {
1643 qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
1644 qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
1645 qlcnic_write_dump_reg(MIU_TEST_CTR, base,
1646 TA_CTL_ENABLE | TA_CTL_START);
/* Wait for the agent to finish the transaction. */
1648 for (i = 0; i < MAX_CTL_CHECK; i++) {
1649 qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
1650 if (!(test & TA_CTL_BUSY))
1653 if (i == MAX_CTL_CHECK) {
/* Rate-limit: this can fire repeatedly on a dead agent. */
1654 if (printk_ratelimit()) {
1655 dev_err(&adapter->pdev->dev,
1656 "failed to read through agent\n");
/* Each completed transaction yields 4 x 32-bit data words. */
1661 for (i = 0; i < 4; i++) {
1662 qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
1664 *buffer++ = cpu_to_le32(data);
1671 mutex_unlock(&adapter->ahw->mem_lock);
/*
 * qlcnic_dump_nop - FW-dump handler for entries that capture no data
 * (NOP, template header, end marker): mark the entry skipped so it is
 * not counted against the capture size.
 */
1675 static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
1676 struct qlcnic_dump_entry *entry, __le32 *buffer)
1678 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
/*
 * Dispatch table mapping firmware dump-template opcodes to their capture
 * handlers.  qlcnic_dump_fw() scans this table linearly for each template
 * entry; unknown opcodes abort the dump.
 */
1682 static const struct qlcnic_dump_operations fw_dump_ops[] = {
1683 { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
1684 { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
1685 { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
1686 { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
1687 { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
1688 { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
1689 { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
1690 { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
1691 { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
1692 { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
1693 { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
1694 { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
1695 { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
1696 { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
1697 { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
1698 { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
1699 { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
1700 { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
1701 { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
1702 { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
1705 /* Walk the template and collect dump for each entry in the dump template */
/*
 * qlcnic_valid_dump_entry - sanity-check a captured entry's size.
 *
 * Compares the byte count actually produced by a handler against the
 * entry's declared cap_size; on mismatch, logs details and flags that
 * further capture should be aborted (return statements not visible in
 * this view — presumably non-zero on failure).
 */
1707 qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
1711 if (size != entry->hdr.cap_size) {
1713 "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
1714 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
1715 dev_info(dev, "Aborting further dump capture\n");
1721 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1725 char *msg[] = {mesg, NULL};
1726 int i, k, ops_cnt, ops_index, dump_size = 0;
1727 u32 entry_offset, dump, no_entries, buf_offset = 0;
1728 struct qlcnic_dump_entry *entry;
1729 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1730 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1733 dev_info(&adapter->pdev->dev,
1734 "Previous dump not cleared, not capturing dump\n");
1737 /* Calculate the size for dump data area only */
1738 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1739 if (i & tmpl_hdr->drv_cap_mask)
1740 dump_size += tmpl_hdr->cap_sizes[k];
1744 fw_dump->data = vzalloc(dump_size);
1745 if (!fw_dump->data) {
1746 dev_info(&adapter->pdev->dev,
1747 "Unable to allocate (%d KB) for fw dump\n",
1751 buffer = fw_dump->data;
1752 fw_dump->size = dump_size;
1753 no_entries = tmpl_hdr->num_entries;
1754 ops_cnt = ARRAY_SIZE(fw_dump_ops);
1755 entry_offset = tmpl_hdr->offset;
1756 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
1757 tmpl_hdr->sys_info[1] = adapter->fw_version;
1759 for (i = 0; i < no_entries; i++) {
1760 entry = (void *)tmpl_hdr + entry_offset;
1761 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
1762 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1763 entry_offset += entry->hdr.offset;
1766 /* Find the handler for this entry */
1768 while (ops_index < ops_cnt) {
1769 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1773 if (ops_index == ops_cnt) {
1774 dev_info(&adapter->pdev->dev,
1775 "Invalid entry type %d, exiting dump\n",
1779 /* Collect dump for this entry */
1780 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1781 if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
1783 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1784 buf_offset += entry->hdr.cap_size;
1785 entry_offset += entry->hdr.offset;
1786 buffer = fw_dump->data + buf_offset;
1788 if (dump_size != buf_offset) {
1789 dev_info(&adapter->pdev->dev,
1790 "Captured(%d) and expected size(%d) do not match\n",
1791 buf_offset, dump_size);
1795 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
1796 adapter->netdev->name);
1797 dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
1799 /* Send a udev event to notify availability of FW dump */
1800 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1804 vfree(fw_dump->data);