/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP2_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP2_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
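/* Example: with a ring of 8 descriptors, q->last_desc is 7, so
 * MVPP2_QUEUE_NEXT_DESC(q, 6) returns 7 and MVPP2_QUEUE_NEXT_DESC(q, 7)
 * wraps back around to 0.
 */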
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4 byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
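/* Example: a received frame in memory is 2 bytes of Marvell header
 * followed by the 14 byte Ethernet header, so the IP header starts at
 * offset 16, which is 4 byte aligned.
 */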
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ			8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4
/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM	(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD		128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD		1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK	64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE	256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE	32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN	(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
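/* Worked example (assuming a 64 byte cache line and NET_SKB_PAD of 64,
 * which are typical but configuration dependent): for an MTU of 1500,
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536,
 * MVPP2_RX_BUF_SIZE(1536) = 1600, and MVPP2_RX_TOTAL_SIZE() adds the
 * skb_shared_info footprint on top of that.
 */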
#define MVPP2_BIT_TO_BYTE(bit)	((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)	\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
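/* Example of the byte interleaving done by the two macros above: data
 * bytes and their enable bytes share each 32-bit TCAM word in pairs,
 * so offs 0..3 map to data bytes 0, 1, 4, 5 and to enable bytes
 * 2, 3, 6, 7 respectively.
 */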
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3
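/* Example: with MVPP2_MAX_PORTS = 4, ports 0, 1 and 2 get long pools
 * 0, 1 and 2, port 3 shares long pool 2, and all ports share short
 * pool 3.
 */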
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512.
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
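/* Expanded, MVPP2_BM_SHORT_PKT_SIZE is 512 - NET_SKB_PAD -
 * MVPP2_SKB_SHINFO_SIZE, so the packet data, the headroom and the
 * skb_shared_info of a short-pool buffer add up to exactly 512 bytes.
 */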
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	u8 id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	int pkt_size;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */
#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

struct mvpp2_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};
struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */
/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = tx_desc->data_size;
	tx_buf->dma = tx_desc->buf_dma_addr + tx_desc->packet_offset;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
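/* Example: with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, physical
 * TXQs 0-127 belong to the PON T-CONTs, and TXQ 2 of Ethernet port 1
 * is physical TXQ (16 + 1) * 8 + 2 = 138.
 */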
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
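/* Note on the inverted encoding used above: the TCAM enable byte
 * stores the complement of the port map, so
 * mvpp2_prs_tcam_port_map_set(pe, 0) writes 0xff to the enable byte
 * (the entry matches no port) and mvpp2_prs_tcam_port_map_get() then
 * returns 0.
 */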
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
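/* The two bytes are combined little-endian (low byte first), which is
 * why callers such as mvpp2_prs_vlan_find() below pass swab16(tpid)
 * rather than the raw big-endian TPID.
 */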
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
							(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
	     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	       (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
		     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */
	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exist - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);

	return ret;
}
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);
	return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
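/* Offset arithmetic for the entries above (a worked example, assuming the
 * usual 20-byte option-less IPv4 header): the preceding L2 stage already
 * advanced the parser 4 bytes into the IP header, so the L4 header begins
 * sizeof(struct iphdr) - 4 = 16 bytes from the current position - exactly
 * the UDF offset programmed here.
 */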
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
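/* Shift arithmetic above (illustrative): when this entry fires the parser
 * sits at the IPv6 destination address, offset 24 into the header (8 fixed
 * bytes plus a 16-byte source address). Shifting by -18 lands it on offset
 * 24 - 18 = 6, i.e. offsetof(struct ipv6hdr, nexthdr), so the next lookup
 * reads the next-header chain.
 */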
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
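/* Packing sketch (an assumption for illustration, consistent with the masks
 * used above): each port owns a 4-bit field at bit offset port * 4 in
 * MVPP2_PRS_INIT_LOOKUP_REG, so the read-modify-write sequence only touches
 * that port's nibble and leaves the other ports' initial lookup IDs intact.
 * The max-loop and init-offset registers follow the same per-port packing.
 */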
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
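/* Note (illustrative): MVPP2_MH_SIZE is the short 2-byte Marvell header
 * that can prepend the Ethernet frame on these controllers. The default
 * entry above simply skips it and chains to the MAC lookup, so parsing
 * proper always starts at the destination MAC address.
 */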
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);
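	/* TCAM byte sketch for the entry above (assuming the conventional
	 * MVPP2_PRS_IPV4_HEAD/IHL values 0x40/0x05 with masks 0xf0/0x0f):
	 * the byte right after the ethertype is the IPv4 version/IHL octet,
	 * so requiring 0x45 selects "version 4, 20-byte header", i.e. IPv4
	 * without options. The "with options" entry below relaxes the match
	 * to the version nibble only.
	 */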
	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible combinations:
 *	0x8100, 0x88A8
 *	0x8100, 0x8100
 *	0x8100
 *	0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
					      MVPP2_PRS_DBL_VLANS_MAX,
					      sizeof(bool), GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
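/* Chain sketch (illustrative): the tpid-specific entries added above stay
 * in the VLAN lookup and set ai bits, so MVPP2_PE_VLAN_DBL fires on the
 * second pass (double-tag case) and records MVPP2_PRS_RI_VLAN_DOUBLE, while
 * an untagged frame falls through to MVPP2_PE_VLAN_NONE. Both defaults then
 * continue at the L2 (ethertype) lookup with the ai bits cleared.
 */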
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
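/* Two-pass IPv4 parsing (sketch of the ai-bit handshake used above):
 * protocol entries from mvpp2_prs_ip4_proto() match with tcam ai == 0, set
 * MVPP2_PRS_IPV4_DIP_AI_BIT in sram and loop back into the IP4 lookup; on
 * that second pass only the address entries - which require the DIP ai bit,
 * like MVPP2_PE_IP4_ADDR_UN above - can hit, classifying the destination
 * address before flow generation.
 */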
2748 /* Initialize entries for IPv6 */
2749 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2751 struct mvpp2_prs_entry pe;
2754 /* Set entries for TCP, UDP and ICMP over IPv6 */
2755 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2756 MVPP2_PRS_RI_L4_TCP,
2757 MVPP2_PRS_RI_L4_PROTO_MASK);
2761 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2762 MVPP2_PRS_RI_L4_UDP,
2763 MVPP2_PRS_RI_L4_PROTO_MASK);
2767 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2768 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2769 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2770 MVPP2_PRS_RI_CPU_CODE_MASK |
2771 MVPP2_PRS_RI_UDF3_MASK);
2775 /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
2776 /* Result Info: UDF7=1, DS lite */
2777 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2778 MVPP2_PRS_RI_UDF7_IP6_LITE,
2779 MVPP2_PRS_RI_UDF7_MASK);
2783 /* IPv6 multicast */
2784 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2788 /* Entry for checking hop limit */
2789 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2790 MVPP2_PE_LAST_FREE_TID);
2794 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2795 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2798 /* Finished: go to flowid generation */
2799 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2800 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2801 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2802 MVPP2_PRS_RI_DROP_MASK,
2803 MVPP2_PRS_RI_L3_PROTO_MASK |
2804 MVPP2_PRS_RI_DROP_MASK);
2806 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2807 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2808 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2810 /* Update shadow table and hw entry */
2811 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2812 mvpp2_prs_hw_write(priv, &pe);
2814 /* Default IPv6 entry for unknown protocols */
2815 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2816 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2817 pe.index = MVPP2_PE_IP6_PROTO_UN;
2819 /* Finished: go to flowid generation */
2820 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2821 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2822 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2823 MVPP2_PRS_RI_L4_PROTO_MASK);
2824 /* Set L4 offset relatively to our current place */
2825 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2826 sizeof(struct ipv6hdr) - 4,
2827 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2829 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2830 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2831 /* Unmask all ports */
2832 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2834 /* Update shadow table and hw entry */
2835 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2836 mvpp2_prs_hw_write(priv, &pe);
2838 /* Default IPv6 entry for unknown ext protocols */
2839 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2840 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2841 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2843 /* Finished: go to flowid generation */
2844 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2845 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2846 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2847 MVPP2_PRS_RI_L4_PROTO_MASK);
2849 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2850 MVPP2_PRS_IPV6_EXT_AI_BIT);
2851 /* Unmask all ports */
2852 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2854 /* Update shadow table and hw entry */
2855 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2856 mvpp2_prs_hw_write(priv, &pe);
2858 /* Default IPv6 entry for unicast address */
2859 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2860 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2861 pe.index = MVPP2_PE_IP6_ADDR_UN;
2863 /* Finished: go to IPv6 again */
2864 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2865 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2866 MVPP2_PRS_RI_L3_ADDR_MASK);
2867 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2868 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2869 /* Shift back to IPV6 NH */
2870 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2872 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2873 /* Unmask all ports */
2874 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2876 /* Update shadow table and hw entry */
2877 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2878 mvpp2_prs_hw_write(priv, &pe);
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
3121 /* Delete all port's multicast simple (not range) entries */
3122 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3124 struct mvpp2_prs_entry pe;
3127 for (tid = MVPP2_PE_FIRST_FREE_TID;
3128 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3129 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3131 if (!priv->prs_shadow[tid].valid ||
3132 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3133 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3136 /* Only simple mac entries */
3138 mvpp2_prs_hw_read(priv, &pe);
3140 /* Read mac addr from entry */
3141 for (index = 0; index < ETH_ALEN; index++)
3142 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3145 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3146 /* Delete this entry */
3147 mvpp2_prs_mac_da_accept(priv, port, da, false);
static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
3201 /* Set prs flow for the port */
3202 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3204 struct mvpp2_prs_entry *pe;
3207 pe = mvpp2_prs_flow_find(port->priv, port->id);
3209 /* Such entry not exist */
3211 /* Go through the all entires from last to first */
3212 tid = mvpp2_prs_tcam_first_free(port->priv,
3213 MVPP2_PE_LAST_FREE_TID,
3214 MVPP2_PE_FIRST_FREE_TID);
3218 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3222 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3226 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3227 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3229 /* Update shadow table */
3230 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3233 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3234 mvpp2_prs_hw_write(port->priv, pe);
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
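/* Allocation strategy note: buffers that fit in a page come from the
 * per-cpu page-fragment allocator (netdev_alloc_frag()), which is cheaper
 * than kmalloc() for RX buffers; larger buffers fall back to kmalloc() with
 * GFP_ATOMIC. mvpp2_frag_free() mirrors the same size threshold so each
 * buffer is always released through the allocator that produced it.
 */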
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	int size_bytes;
	u32 val;

	size_bytes = sizeof(u32) * size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    bm_pool->dma_addr);
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
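/* Example (assuming the 32-byte hardware granularity implied by
 * MVPP2_POOL_BUF_SIZE_OFFSET == 5): a requested buf_size of 1700 is
 * rounded up to ALIGN(1700, 32) = 1728 before being written, so the value
 * the BM hands out is never smaller than what the driver asked for.
 */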
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		buf_dma_addr = mvpp2_read(priv,
					  MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
		buf_phys_addr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
		MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
		MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
3569 /* Set pool number in a BM cookie */
3570 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3574 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3575 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3580 /* Get pool number from a BM cookie */
3581 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
3583 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
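/* Cookie layout sketch (field offsets assumed from the MVPP2_BM_COOKIE_*
 * definitions earlier in this file): the pool number occupies an 8-bit
 * field at MVPP2_BM_COOKIE_POOL_OFFS, so
 *
 *	mvpp2_bm_cookie_pool_get(mvpp2_bm_cookie_pool_set(cookie, 3)) == 3
 *
 * for any cookie, and the setter leaves all other cookie bits (e.g. the
 * originating CPU) untouched.
 */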
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX descriptor. Instead of
	 * storing the virtual address, we store the physical address.
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);

	return i;
}
/* Notify the driver that BM pool is being used as specific type and return
 * the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port,
					  MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
		MVPP2_SKB_SHINFO_SIZE;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
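/* Register semantics note (an assumption based on the paired ENABLE/DISABLE
 * macros above): MVPP2_ISR_ENABLE_REG appears to expose separate set and
 * clear fields, so writing a CPU mask through MVPP2_ISR_ENABLE_INTERRUPT()
 * enables those CPUs while MVPP2_ISR_DISABLE_INTERRUPT() disables them,
 * without either helper needing a read-modify-write cycle.
 */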
3800 /* Mask the current CPU's Rx/Tx interrupts */
3801 static void mvpp2_interrupts_mask(void *arg)
3803 struct mvpp2_port *port = arg;
3805 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3808 /* Unmask the current CPU's Rx/Tx interrupts */
3809 static void mvpp2_interrupts_unmask(void *arg)
3811 struct mvpp2_port *port = arg;
3813 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3814 (MVPP2_CAUSE_MISC_SUM_MASK |
3815 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3818 /* Port configuration routines */
3820 static void mvpp2_port_mii_set(struct mvpp2_port *port)
3824 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3826 switch (port->phy_interface) {
3827 case PHY_INTERFACE_MODE_SGMII:
3828 val |= MVPP2_GMAC_INBAND_AN_MASK;
3830 case PHY_INTERFACE_MODE_RGMII:
3831 val |= MVPP2_GMAC_PORT_RGMII_MASK;
3833 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3836 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3839 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3843 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3844 val |= MVPP2_GMAC_FC_ADV_EN;
3845 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3848 static void mvpp2_port_enable(struct mvpp2_port *port)
3852 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3853 val |= MVPP2_GMAC_PORT_EN_MASK;
3854 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3855 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3858 static void mvpp2_port_disable(struct mvpp2_port *port)
3862 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3863 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3864 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
	      ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
3910 /* Change maximum receive size of the port */
3911 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3915 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3916 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3917 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3918 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3919 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
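/* Arithmetic note (illustrative): the GMAC max-RX-size field counts 2-byte
 * units, hence the divide by 2 after dropping the 2-byte Marvell header;
 * e.g. a 1518-byte frame limit would be programmed as 1518 / 2 = 759.
 */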
3922 /* Set defaults to the MVPP2 port */
3923 static void mvpp2_defaults_set(struct mvpp2_port *port)
3925 int tx_port_num, val, queue, ptxq, lrxq;
3927 /* Configure port to loopback if needed */
3928 if (port->flags & MVPP2_F_LOOPBACK)
3929 mvpp2_port_loopback_set(port);
3931 /* Update TX FIFO MIN Threshold */
3932 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3933 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3934 /* Min. TX threshold must be less than minimal packet length */
3935 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3936 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3938 /* Disable Legacy WRR, Disable EJP, Release from reset */
3939 tx_port_num = mvpp2_egress_port(port);
3940 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3942 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3944 /* Close bandwidth for all queues */
3945 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3946 ptxq = mvpp2_txq_phys(port->id, queue);
3947 mvpp2_write(port->priv,
3948 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3951 /* Set refill period to 1 usec, refill tokens
3952 * and bucket size to maximum
3954 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3955 port->priv->tclk / USEC_PER_SEC);
3956 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3957 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3958 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3959 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3960 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3961 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3962 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3964 /* Set MaximumLowLatencyPacketSize value to 256 */
3965 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3966 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3967 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3969 /* Enable Rx cache snoop */
3970 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3971 queue = port->rxqs[lrxq]->id;
3972 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3973 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3974 MVPP2_SNOOP_BUF_HDR_MASK;
3975 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3978 /* By default, mask all interrupts on all present CPUs */
3979 mvpp2_interrupts_disable(port);
3982 /* Enable/disable receiving packets */
3983 static void mvpp2_ingress_enable(struct mvpp2_port *port)
3988 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3989 queue = port->rxqs[lrxq]->id;
3990 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3991 val &= ~MVPP2_RXQ_DISABLE_MASK;
3992 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3996 static void mvpp2_ingress_disable(struct mvpp2_port *port)
4001 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4002 queue = port->rxqs[lrxq]->id;
4003 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4004 val |= MVPP2_RXQ_DISABLE_MASK;
4005 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4009 /* Enable transmit via physical egress queue
4010 * - HW starts taking descriptors from DRAM
4012 static void mvpp2_egress_enable(struct mvpp2_port *port)
4016 int tx_port_num = mvpp2_egress_port(port);
4018 /* Enable all initialized TXs. */
4020 for (queue = 0; queue < txq_number; queue++) {
4021 struct mvpp2_tx_queue *txq = port->txqs[queue];
4023 if (txq->descs != NULL)
4024 qmap |= (1 << queue);
4027 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4028 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4031 /* Disable transmit via physical egress queue
4032 * - HW stops taking descriptors from DRAM
4034 static void mvpp2_egress_disable(struct mvpp2_port *port)
4038 int tx_port_num = mvpp2_egress_port(port);
4040 /* Issue stop command for active channels only */
4041 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4042 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4043 MVPP2_TXP_SCHED_ENQ_MASK;
4045 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4046 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4048 /* Wait for all Tx activity to terminate. */
4051 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4052 netdev_warn(port->dev,
4053 "Tx stop timed out, status=0x%08x\n",
4060 /* Check the port TX Command register to verify that all
4061 * Tx queues are stopped
4063 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4064 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4067 /* Rx descriptors helper methods */
4069 /* Get number of Rx descriptors occupied by received packets */
4071 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4073 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4075 return val & MVPP2_RXQ_OCCUPIED_MASK;
4078 /* Update Rx queue status with the number of occupied and available
4079 * Rx descriptor slots.
4082 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4083 int used_count, int free_count)
4085 /* Decrement the number of used descriptors and increment the
4086 * number of free descriptors.
4088 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4090 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
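
/* Illustrative note: a single register write updates both counters at
 * once. For example, mvpp2_rxq_init() below calls
 * mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size) to mark the
 * whole ring as available to hardware without consuming any
 * descriptors.
 */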
4093 /* Get pointer to next RX descriptor to be processed by SW */
4094 static inline struct mvpp2_rx_desc *
4095 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4097 int rx_desc = rxq->next_desc_to_proc;
4099 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4100 prefetch(rxq->descs + rxq->next_desc_to_proc);
4101 return rxq->descs + rx_desc;
4104 /* Set rx queue offset */
4105 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4106 int prxq, int offset)
4110 /* Convert offset from bytes to units of 32 bytes */
4111 offset = offset >> 5;
4113 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4114 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4117 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4118 MVPP2_RXQ_PACKET_OFFSET_MASK);
4120 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
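
/* Illustrative example: the 3-bit packet-offset field is in 32-byte
 * units, so the 64-byte NET_SKB_PAD passed from mvpp2_rxq_init() on
 * many configurations becomes 64 >> 5 = 2 in the register (64 is
 * assumed here; NET_SKB_PAD is architecture dependent).
 */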
4123 /* Obtain BM cookie information from descriptor */
4124 static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4126 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4127 MVPP2_RXD_BM_POOL_ID_OFFS;
4128 int cpu = smp_processor_id();
4130 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4131 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
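
/* Sketch (not part of the original driver) of the inverse decode,
 * mirroring the mvpp2_bm_cookie_pool_get() helper used later in this
 * file; the 8-bit field width is assumed from the 0xFF masks above.
 */
static inline int mvpp2_bm_cookie_pool_sketch(u32 bm)
{
	return (bm >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}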
4134 /* Tx descriptors helper methods */
4136 /* Get pointer to next Tx descriptor to be processed (send) by HW */
4137 static struct mvpp2_tx_desc *
4138 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4140 int tx_desc = txq->next_desc_to_proc;
4142 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4143 return txq->descs + tx_desc;
4146 /* Update HW with number of aggregated Tx descriptors to be sent */
4147 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4149 /* aggregated access - relevant TXQ number is written in TX desc */
4150 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4154 /* Check if there are enough free descriptors in aggregated txq.
4155 * If not, update the number of occupied descriptors and repeat the check.
4157 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4158 struct mvpp2_tx_queue *aggr_txq, int num)
4160 if ((aggr_txq->count + num) > aggr_txq->size) {
4161 /* Update number of occupied aggregated Tx descriptors */
4162 int cpu = smp_processor_id();
4163 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4165 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4168 if ((aggr_txq->count + num) > aggr_txq->size)
4174 /* Reserved Tx descriptors allocation request */
4175 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4176 struct mvpp2_tx_queue *txq, int num)
4180 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4181 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4183 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4185 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4188 /* Check if there are enough reserved descriptors for transmission.
4189 * If not, request chunk of reserved descriptors and check again.
4191 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4192 struct mvpp2_tx_queue *txq,
4193 struct mvpp2_txq_pcpu *txq_pcpu,
4196 int req, cpu, desc_count;
4198 if (txq_pcpu->reserved_num >= num)
4201 /* Not enough descriptors reserved! Update the reserved descriptor
4202 * count and check again.
4206 /* Compute the total number of used descriptors */
4207 for_each_present_cpu(cpu) {
4208 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4210 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4211 desc_count += txq_pcpu_aux->count;
4212 desc_count += txq_pcpu_aux->reserved_num;
4215 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4219 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4222 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4224 /* OK, the descriptor count has been updated: check again. */
4225 if (txq_pcpu->reserved_num < num)
4230 /* Release the last allocated Tx descriptor. Useful to handle DMA
4231 * mapping failures in the Tx path.
4233 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4235 if (txq->next_desc_to_proc == 0)
4236 txq->next_desc_to_proc = txq->last_desc - 1;
4238 txq->next_desc_to_proc--;
4241 /* Set Tx descriptors fields relevant for CSUM calculation */
4242 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4243 int ip_hdr_len, int l4_proto)
4247 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4248 * G_L4_chk, L4_type are required only for checksum calculation
4250 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4251 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4252 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4254 if (l3_proto == swab16(ETH_P_IP)) {
4255 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4256 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4258 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4261 if (l4_proto == IPPROTO_TCP) {
4262 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4263 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4264 } else if (l4_proto == IPPROTO_UDP) {
4265 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4266 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4268 command |= MVPP2_TXD_L4_CSUM_NOT;
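
/* Illustrative example call (values assumed): a TCP/IPv4 frame with a
 * 14-byte Ethernet header and a 5-word (20-byte) IP header would be
 * described as
 *
 *	command = mvpp2_txq_desc_csum(14, swab16(ETH_P_IP), 5, IPPROTO_TCP);
 *
 * which clears both MVPP2_TXD_IP_CSUM_DISABLE and
 * MVPP2_TXD_L4_CSUM_FRAG so the hardware generates the IPv4 and TCP
 * checksums.
 */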
4274 /* Get number of sent descriptors and decrement counter.
4275 * The number of sent descriptors is returned.
4278 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4279 struct mvpp2_tx_queue *txq)
4283 /* Reading status reg resets transmitted descriptor counter */
4284 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4286 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4287 MVPP2_TRANSMITTED_COUNT_OFFSET;
4290 static void mvpp2_txq_sent_counter_clear(void *arg)
4292 struct mvpp2_port *port = arg;
4295 for (queue = 0; queue < txq_number; queue++) {
4296 int id = port->txqs[queue]->id;
4298 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4302 /* Set max sizes for Tx queues */
4303 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4306 int txq, tx_port_num;
4308 mtu = port->pkt_size * 8;
4309 if (mtu > MVPP2_TXP_MTU_MAX)
4310 mtu = MVPP2_TXP_MTU_MAX;
4312 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4315 /* Indirect access to registers */
4316 tx_port_num = mvpp2_egress_port(port);
4317 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4320 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4321 val &= ~MVPP2_TXP_MTU_MAX;
4323 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4325 /* TXP token size and all TXQs token size must be larger than the MTU */
4326 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4327 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4330 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4332 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4335 for (txq = 0; txq < txq_number; txq++) {
4336 val = mvpp2_read(port->priv,
4337 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4338 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4342 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4344 mvpp2_write(port->priv,
4345 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
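
/* Illustrative walk-through (pkt_size assumed): for pkt_size = 1632 the
 * routine starts from 1632 * 8 = 13056, triples it to 39168 per the
 * token-bucket workaround above, and then raises the TXP and per-TXQ
 * token sizes to at least that value wherever they are smaller.
 */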
4351 /* Set the number of packets that will be received before an Rx
4352 * interrupt is generated by HW.
4354 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4355 struct mvpp2_rx_queue *rxq)
4357 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4358 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
4360 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4361 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
4365 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4367 u64 tmp = (u64)clk_hz * usec;
4369 do_div(tmp, USEC_PER_SEC);
4371 return tmp > U32_MAX ? U32_MAX : tmp;
4374 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4376 u64 tmp = (u64)cycles * USEC_PER_SEC;
4378 do_div(tmp, clk_hz);
4380 return tmp > U32_MAX ? U32_MAX : tmp;
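
/* Illustrative conversion (tclk value assumed): with a 250 MHz tclk,
 * mvpp2_usec_to_cycles(100, 250000000) = 250000000 * 100 / 1000000 =
 * 25000 cycles, and mvpp2_cycles_to_usec(25000, 250000000) = 100 usec.
 */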
4383 /* Set the time delay in usec before Rx interrupt */
4384 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4385 struct mvpp2_rx_queue *rxq)
4387 unsigned long freq = port->priv->tclk;
4388 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4390 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4392 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4394 /* re-evaluate to get actual register value */
4395 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4398 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4401 /* Free Tx queue skbuffs */
4402 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4403 struct mvpp2_tx_queue *txq,
4404 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4408 for (i = 0; i < num; i++) {
4409 struct mvpp2_txq_pcpu_buf *tx_buf =
4410 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4412 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
4413 tx_buf->size, DMA_TO_DEVICE);
4415 dev_kfree_skb_any(tx_buf->skb);
4417 mvpp2_txq_inc_get(txq_pcpu);
4421 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4424 int queue = fls(cause) - 1;
4426 return port->rxqs[queue];
4429 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4432 int queue = fls(cause) - 1;
4434 return port->txqs[queue];
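
/* Illustrative note: fls(cause) - 1 selects the highest set bit, so for
 * cause = 0x4 the helpers above return queue 2. The callers
 * (mvpp2_tx_done() and mvpp2_poll()) then clear that bit and iterate
 * until no cause bits remain.
 */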
4437 /* Handle end of transmission */
4438 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4439 struct mvpp2_txq_pcpu *txq_pcpu)
4441 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4444 if (txq_pcpu->cpu != smp_processor_id())
4445 netdev_err(port->dev, "wrong CPU at the end of Tx processing\n");
4447 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4450 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4452 txq_pcpu->count -= tx_done;
4454 if (netif_tx_queue_stopped(nq))
4455 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4456 netif_tx_wake_queue(nq);
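
/* Illustrative note: the MAX_SKB_FRAGS + 1 threshold mirrors the stop
 * condition in mvpp2_tx(): a worst-case skb needs one descriptor per
 * fragment plus one for the linear part, so the queue is woken only
 * when at least that many slots are free again.
 */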
4459 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4461 struct mvpp2_tx_queue *txq;
4462 struct mvpp2_txq_pcpu *txq_pcpu;
4463 unsigned int tx_todo = 0;
4466 txq = mvpp2_get_tx_queue(port, cause);
4470 txq_pcpu = this_cpu_ptr(txq->pcpu);
4472 if (txq_pcpu->count) {
4473 mvpp2_txq_done(port, txq, txq_pcpu);
4474 tx_todo += txq_pcpu->count;
4477 cause &= ~(1 << txq->log_id);
4482 /* Rx/Tx queue initialization/cleanup methods */
4484 /* Allocate and initialize descriptors for aggr TXQ */
4485 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4486 struct mvpp2_tx_queue *aggr_txq,
4487 int desc_num, int cpu,
4490 /* Allocate memory for TX descriptors */
4491 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4492 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4493 &aggr_txq->descs_dma, GFP_KERNEL);
4494 if (!aggr_txq->descs)
4497 aggr_txq->last_desc = aggr_txq->size - 1;
4499 /* Workaround: the aggregated TXQ index is not reset by HW, so resume from it */
4500 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4501 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4503 /* Set Tx descriptors queue starting address */
4504 /* indirect access */
4505 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4506 aggr_txq->descs_dma);
4507 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4512 /* Create a specified Rx queue */
4513 static int mvpp2_rxq_init(struct mvpp2_port *port,
4514 struct mvpp2_rx_queue *rxq)
4517 rxq->size = port->rx_ring_size;
4519 /* Allocate memory for RX descriptors */
4520 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4521 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4522 &rxq->descs_dma, GFP_KERNEL);
4526 rxq->last_desc = rxq->size - 1;
4528 /* Zero occupied and non-occupied counters - direct access */
4529 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4531 /* Set Rx descriptors queue starting address - indirect access */
4532 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4533 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
4534 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4535 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4538 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4540 /* Set coalescing pkts and time */
4541 mvpp2_rx_pkts_coal_set(port, rxq);
4542 mvpp2_rx_time_coal_set(port, rxq);
4544 /* Add number of descriptors ready for receiving packets */
4545 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4550 /* Push packets received by the RXQ to BM pool */
4551 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4552 struct mvpp2_rx_queue *rxq)
4556 rx_received = mvpp2_rxq_received(port, rxq->id);
4560 for (i = 0; i < rx_received; i++) {
4561 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4562 u32 bm = mvpp2_bm_cookie_build(rx_desc);
4564 mvpp2_pool_refill(port, bm, rx_desc->buf_dma_addr,
4565 rx_desc->buf_cookie);
4567 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4570 /* Cleanup Rx queue */
4571 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4572 struct mvpp2_rx_queue *rxq)
4574 mvpp2_rxq_drop_pkts(port, rxq);
4577 dma_free_coherent(port->dev->dev.parent,
4578 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4584 rxq->next_desc_to_proc = 0;
4587 /* Clear the Rx descriptor queue's starting address, size, and
4588 * free-descriptor count
4590 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4591 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4592 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4593 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4596 /* Create and initialize a Tx queue */
4597 static int mvpp2_txq_init(struct mvpp2_port *port,
4598 struct mvpp2_tx_queue *txq)
4601 int cpu, desc, desc_per_txq, tx_port_num;
4602 struct mvpp2_txq_pcpu *txq_pcpu;
4604 txq->size = port->tx_ring_size;
4606 /* Allocate memory for Tx descriptors */
4607 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4608 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4609 &txq->descs_dma, GFP_KERNEL);
4613 txq->last_desc = txq->size - 1;
4615 /* Set Tx descriptors queue starting address - indirect access */
4616 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4617 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
4618 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4619 MVPP2_TXQ_DESC_SIZE_MASK);
4620 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4621 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4622 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4623 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4624 val &= ~MVPP2_TXQ_PENDING_MASK;
4625 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4627 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4628 * for each existing TXQ.
4629 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT.
4630 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
4633 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4634 (txq->log_id * desc_per_txq);
4636 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4637 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4638 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
4640 /* WRR / EJP configuration - indirect access */
4641 tx_port_num = mvpp2_egress_port(port);
4642 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4644 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4645 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4646 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4647 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4648 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4650 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4651 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4654 for_each_present_cpu(cpu) {
4655 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4656 txq_pcpu->size = txq->size;
4657 txq_pcpu->buffs = kmalloc(txq_pcpu->size *
4658 sizeof(struct mvpp2_txq_pcpu_buf),
4660 if (!txq_pcpu->buffs)
4663 txq_pcpu->count = 0;
4664 txq_pcpu->reserved_num = 0;
4665 txq_pcpu->txq_put_index = 0;
4666 txq_pcpu->txq_get_index = 0;
4672 for_each_present_cpu(cpu) {
4673 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4674 kfree(txq_pcpu->buffs);
4677 dma_free_coherent(port->dev->dev.parent,
4678 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4679 txq->descs, txq->descs_dma);
4684 /* Free allocated TXQ resources */
4685 static void mvpp2_txq_deinit(struct mvpp2_port *port,
4686 struct mvpp2_tx_queue *txq)
4688 struct mvpp2_txq_pcpu *txq_pcpu;
4691 for_each_present_cpu(cpu) {
4692 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4693 kfree(txq_pcpu->buffs);
4697 dma_free_coherent(port->dev->dev.parent,
4698 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4699 txq->descs, txq->descs_dma);
4703 txq->next_desc_to_proc = 0;
4706 /* Set minimum bandwidth for disabled TXQs */
4707 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4709 /* Clear Tx descriptors queue starting address and size */
4710 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4711 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4712 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4715 /* Drain and clean up a Tx queue */
4716 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4718 struct mvpp2_txq_pcpu *txq_pcpu;
4719 int delay, pending, cpu;
4722 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4723 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4724 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4725 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4727 /* The napi queue has been stopped, so wait for all packets
4728 * to be transmitted.
4732 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4733 netdev_warn(port->dev,
4734 "port %d: cleaning queue %d timed out\n",
4735 port->id, txq->log_id);
4741 pending = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG) &
4742 MVPP2_TXQ_PENDING_MASK;
4745 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4746 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4748 for_each_present_cpu(cpu) {
4749 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4751 /* Release all packets */
4752 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4755 txq_pcpu->count = 0;
4756 txq_pcpu->txq_put_index = 0;
4757 txq_pcpu->txq_get_index = 0;
4761 /* Cleanup all Tx queues */
4762 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4764 struct mvpp2_tx_queue *txq;
4768 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4770 /* Reset Tx ports and delete Tx queues */
4771 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4772 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4774 for (queue = 0; queue < txq_number; queue++) {
4775 txq = port->txqs[queue];
4776 mvpp2_txq_clean(port, txq);
4777 mvpp2_txq_deinit(port, txq);
4780 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4782 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4783 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4786 /* Cleanup all Rx queues */
4787 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4791 for (queue = 0; queue < rxq_number; queue++)
4792 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4795 /* Init all Rx queues for port */
4796 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4800 for (queue = 0; queue < rxq_number; queue++) {
4801 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4808 mvpp2_cleanup_rxqs(port);
4812 /* Init all Tx queues for port */
4813 static int mvpp2_setup_txqs(struct mvpp2_port *port)
4815 struct mvpp2_tx_queue *txq;
4818 for (queue = 0; queue < txq_number; queue++) {
4819 txq = port->txqs[queue];
4820 err = mvpp2_txq_init(port, txq);
4825 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4829 mvpp2_cleanup_txqs(port);
4833 /* The callback for per-port interrupt */
4834 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4836 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4838 mvpp2_interrupts_disable(port);
4840 napi_schedule(&port->napi);
4846 static void mvpp2_link_event(struct net_device *dev)
4848 struct mvpp2_port *port = netdev_priv(dev);
4849 struct phy_device *phydev = dev->phydev;
4850 int status_change = 0;
4854 if ((port->speed != phydev->speed) ||
4855 (port->duplex != phydev->duplex)) {
4858 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4859 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4860 MVPP2_GMAC_CONFIG_GMII_SPEED |
4861 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4862 MVPP2_GMAC_AN_SPEED_EN |
4863 MVPP2_GMAC_AN_DUPLEX_EN);
4866 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4868 if (phydev->speed == SPEED_1000)
4869 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4870 else if (phydev->speed == SPEED_100)
4871 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4873 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4875 port->duplex = phydev->duplex;
4876 port->speed = phydev->speed;
4880 if (phydev->link != port->link) {
4881 if (!phydev->link) {
4886 port->link = phydev->link;
4890 if (status_change) {
4892 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4893 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4894 MVPP2_GMAC_FORCE_LINK_DOWN);
4895 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4896 mvpp2_egress_enable(port);
4897 mvpp2_ingress_enable(port);
4899 mvpp2_ingress_disable(port);
4900 mvpp2_egress_disable(port);
4902 phy_print_status(phydev);
4906 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4910 if (!port_pcpu->timer_scheduled) {
4911 port_pcpu->timer_scheduled = true;
4912 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
4913 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4914 HRTIMER_MODE_REL_PINNED);
4918 static void mvpp2_tx_proc_cb(unsigned long data)
4920 struct net_device *dev = (struct net_device *)data;
4921 struct mvpp2_port *port = netdev_priv(dev);
4922 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4923 unsigned int tx_todo, cause;
4925 if (!netif_running(dev))
4927 port_pcpu->timer_scheduled = false;
4929 /* Process all the Tx queues */
4930 cause = (1 << txq_number) - 1;
4931 tx_todo = mvpp2_tx_done(port, cause);
4933 /* Set the timer in case not all the packets were processed */
4935 mvpp2_timer_set(port_pcpu);
4938 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4940 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4941 struct mvpp2_port_pcpu,
4944 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4946 return HRTIMER_NORESTART;
4949 /* Main RX/TX processing routines */
4951 /* Display more error info */
4952 static void mvpp2_rx_error(struct mvpp2_port *port,
4953 struct mvpp2_rx_desc *rx_desc)
4955 u32 status = rx_desc->status;
4957 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4958 case MVPP2_RXD_ERR_CRC:
4959 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4960 status, rx_desc->data_size);
4962 case MVPP2_RXD_ERR_OVERRUN:
4963 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
4964 status, rx_desc->data_size);
4966 case MVPP2_RXD_ERR_RESOURCE:
4967 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
4968 status, rx_desc->data_size);
4973 /* Handle RX checksum offload */
4974 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
4975 struct sk_buff *skb)
4977 if (((status & MVPP2_RXD_L3_IP4) &&
4978 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
4979 (status & MVPP2_RXD_L3_IP6))
4980 if (((status & MVPP2_RXD_L4_UDP) ||
4981 (status & MVPP2_RXD_L4_TCP)) &&
4982 (status & MVPP2_RXD_L4_CSUM_OK)) {
4984 skb->ip_summed = CHECKSUM_UNNECESSARY;
4988 skb->ip_summed = CHECKSUM_NONE;
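
/* Illustrative note: CHECKSUM_UNNECESSARY is set only for an error-free
 * L3 header (IPv4 without MVPP2_RXD_IP4_HEADER_ERR, or IPv6) combined
 * with a TCP/UDP checksum the hardware marked MVPP2_RXD_L4_CSUM_OK; any
 * other combination falls through to CHECKSUM_NONE so the stack
 * verifies the checksum in software.
 */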
4991 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
4992 static int mvpp2_rx_refill(struct mvpp2_port *port,
4993 struct mvpp2_bm_pool *bm_pool, u32 bm)
4995 dma_addr_t dma_addr;
4996 phys_addr_t phys_addr;
4999 /* No recycle or too many buffers are in use, so allocate a new skb */
5000 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5005 mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
5010 /* Handle tx checksum */
5011 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5013 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5017 if (skb->protocol == htons(ETH_P_IP)) {
5018 struct iphdr *ip4h = ip_hdr(skb);
5020 /* Calculate IPv4 checksum and L4 checksum */
5021 ip_hdr_len = ip4h->ihl;
5022 l4_proto = ip4h->protocol;
5023 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5024 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5026 /* Read l4_protocol from one of IPv6 extra headers */
5027 if (skb_network_header_len(skb) > 0)
5028 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5029 l4_proto = ip6h->nexthdr;
5031 return MVPP2_TXD_L4_CSUM_NOT;
5034 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5035 skb->protocol, ip_hdr_len, l4_proto);
5038 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5041 /* Main rx processing */
5042 static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5043 struct mvpp2_rx_queue *rxq)
5045 struct net_device *dev = port->dev;
5051 /* Get the number of received packets and clamp the number to process */
5052 rx_received = mvpp2_rxq_received(port, rxq->id);
5053 if (rx_todo > rx_received)
5054 rx_todo = rx_received;
5056 while (rx_done < rx_todo) {
5057 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5058 struct mvpp2_bm_pool *bm_pool;
5059 struct sk_buff *skb;
5060 unsigned int frag_size;
5061 dma_addr_t dma_addr;
5063 int pool, rx_bytes, err;
5067 rx_status = rx_desc->status;
5068 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5069 dma_addr = rx_desc->buf_dma_addr;
5070 data = (void *)phys_to_virt(rx_desc->buf_cookie);
5072 bm = mvpp2_bm_cookie_build(rx_desc);
5073 pool = mvpp2_bm_cookie_pool_get(bm);
5074 bm_pool = &port->priv->bm_pools[pool];
5076 /* In case of an error, release the requested buffer pointer
5077 * to the Buffer Manager. This request process is controlled
5078 * by the hardware, and the information about the buffer is
5079 * provided by the RX descriptor.
5081 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5083 dev->stats.rx_errors++;
5084 mvpp2_rx_error(port, rx_desc);
5085 /* Return the buffer to the pool */
5087 mvpp2_pool_refill(port, bm, rx_desc->buf_dma_addr,
5088 rx_desc->buf_cookie);
5092 if (bm_pool->frag_size > PAGE_SIZE)
5095 frag_size = bm_pool->frag_size;
5097 skb = build_skb(data, frag_size);
5099 netdev_warn(port->dev, "skb build failed\n");
5100 goto err_drop_frame;
5103 err = mvpp2_rx_refill(port, bm_pool, bm);
5105 netdev_err(port->dev, "failed to refill BM pools\n");
5106 goto err_drop_frame;
5109 dma_unmap_single(dev->dev.parent, dma_addr,
5110 bm_pool->buf_size, DMA_FROM_DEVICE);
5113 rcvd_bytes += rx_bytes;
5115 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
5116 skb_put(skb, rx_bytes);
5117 skb->protocol = eth_type_trans(skb, dev);
5118 mvpp2_rx_csum(port, rx_status, skb);
5120 napi_gro_receive(&port->napi, skb);
5124 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5126 u64_stats_update_begin(&stats->syncp);
5127 stats->rx_packets += rcvd_pkts;
5128 stats->rx_bytes += rcvd_bytes;
5129 u64_stats_update_end(&stats->syncp);
5132 /* Update Rx queue management counters */
5134 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5140 tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5141 struct mvpp2_tx_desc *desc)
5143 dma_unmap_single(dev, desc->buf_dma_addr,
5144 desc->data_size, DMA_TO_DEVICE);
5145 mvpp2_txq_desc_put(txq);
5148 /* Handle tx fragmentation processing */
5149 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5150 struct mvpp2_tx_queue *aggr_txq,
5151 struct mvpp2_tx_queue *txq)
5153 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5154 struct mvpp2_tx_desc *tx_desc;
5156 dma_addr_t buf_dma_addr;
5158 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5159 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5160 void *addr = page_address(frag->page.p) + frag->page_offset;
5162 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5163 tx_desc->phys_txq = txq->id;
5164 tx_desc->data_size = frag->size;
5166 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
5169 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
5170 mvpp2_txq_desc_put(txq);
5174 tx_desc->packet_offset = buf_dma_addr & MVPP2_TX_DESC_ALIGN;
5175 tx_desc->buf_dma_addr = buf_dma_addr & (~MVPP2_TX_DESC_ALIGN);
5177 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5178 /* Last descriptor */
5179 tx_desc->command = MVPP2_TXD_L_DESC;
5180 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5182 /* Descriptor in the middle: Not First, Not Last */
5183 tx_desc->command = 0;
5184 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5191 /* Release all descriptors that were used to map fragments of
5192 * this packet, as well as the corresponding DMA mappings
5194 for (i = i - 1; i >= 0; i--) {
5195 tx_desc = txq->descs + i;
5196 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5202 /* Main tx processing */
5203 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5205 struct mvpp2_port *port = netdev_priv(dev);
5206 struct mvpp2_tx_queue *txq, *aggr_txq;
5207 struct mvpp2_txq_pcpu *txq_pcpu;
5208 struct mvpp2_tx_desc *tx_desc;
5209 dma_addr_t buf_dma_addr;
5214 txq_id = skb_get_queue_mapping(skb);
5215 txq = port->txqs[txq_id];
5216 txq_pcpu = this_cpu_ptr(txq->pcpu);
5217 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5219 frags = skb_shinfo(skb)->nr_frags + 1;
5221 /* Check number of available descriptors */
5222 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5223 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5229 /* Get a descriptor for the first part of the packet */
5230 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5231 tx_desc->phys_txq = txq->id;
5232 tx_desc->data_size = skb_headlen(skb);
5234 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
5235 tx_desc->data_size, DMA_TO_DEVICE);
5236 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
5237 mvpp2_txq_desc_put(txq);
5241 tx_desc->packet_offset = buf_dma_addr & MVPP2_TX_DESC_ALIGN;
5242 tx_desc->buf_dma_addr = buf_dma_addr & ~MVPP2_TX_DESC_ALIGN;
5244 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5247 /* First and Last descriptor */
5248 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5249 tx_desc->command = tx_cmd;
5250 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5252 /* First but not Last */
5253 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5254 tx_desc->command = tx_cmd;
5255 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5257 /* Continue with other skb fragments */
5258 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5259 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5265 txq_pcpu->reserved_num -= frags;
5266 txq_pcpu->count += frags;
5267 aggr_txq->count += frags;
5269 /* Enable transmit */
5271 mvpp2_aggr_txq_pend_desc_add(port, frags);
5273 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5274 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5276 netif_tx_stop_queue(nq);
5280 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5282 u64_stats_update_begin(&stats->syncp);
5283 stats->tx_packets++;
5284 stats->tx_bytes += skb->len;
5285 u64_stats_update_end(&stats->syncp);
5287 dev->stats.tx_dropped++;
5288 dev_kfree_skb_any(skb);
5291 /* Finalize TX processing */
5292 if (txq_pcpu->count >= txq->done_pkts_coal)
5293 mvpp2_txq_done(port, txq, txq_pcpu);
5295 /* Set the timer in case not all frags were processed */
5296 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5297 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5299 mvpp2_timer_set(port_pcpu);
5302 return NETDEV_TX_OK;
5305 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5307 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5308 netdev_err(dev, "FCS error\n");
5309 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5310 netdev_err(dev, "rx fifo overrun error\n");
5311 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5312 netdev_err(dev, "tx fifo underrun error\n");
5315 static int mvpp2_poll(struct napi_struct *napi, int budget)
5317 u32 cause_rx_tx, cause_rx, cause_misc;
5319 struct mvpp2_port *port = netdev_priv(napi->dev);
5321 /* Rx/Tx cause register
5323 * Bits 0-15: each bit indicates received packets on the Rx queue
5324 * (bit 0 is for Rx queue 0).
5326 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5327 * (bit 16 is for Tx queue 0).
5329 * Each CPU has its own Rx/Tx cause register
5331 cause_rx_tx = mvpp2_read(port->priv,
5332 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5333 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5334 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5337 mvpp2_cause_error(port->dev, cause_misc);
5339 /* Clear the cause register */
5340 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5341 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5342 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5345 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5347 /* Process RX packets */
5348 cause_rx |= port->pending_cause_rx;
5349 while (cause_rx && budget > 0) {
5351 struct mvpp2_rx_queue *rxq;
5353 rxq = mvpp2_get_rx_queue(port, cause_rx);
5357 count = mvpp2_rx(port, budget, rxq);
5361 /* Clear the bit associated with this Rx queue
5362 * so that the next iteration will continue from
5363 * the next Rx queue.
5365 cause_rx &= ~(1 << rxq->logic_rxq);
5371 napi_complete_done(napi, rx_done);
5373 mvpp2_interrupts_enable(port);
5375 port->pending_cause_rx = cause_rx;
5379 /* Set hw internals when starting port */
5380 static void mvpp2_start_dev(struct mvpp2_port *port)
5382 struct net_device *ndev = port->dev;
5384 mvpp2_gmac_max_rx_size_set(port);
5385 mvpp2_txp_max_tx_size_set(port);
5387 napi_enable(&port->napi);
5389 /* Enable interrupts on all CPUs */
5390 mvpp2_interrupts_enable(port);
5392 mvpp2_port_enable(port);
5393 phy_start(ndev->phydev);
5394 netif_tx_start_all_queues(port->dev);
5397 /* Set hw internals when stopping port */
5398 static void mvpp2_stop_dev(struct mvpp2_port *port)
5400 struct net_device *ndev = port->dev;
5402 /* Stop new packets from arriving at the RXQs */
5403 mvpp2_ingress_disable(port);
5407 /* Disable interrupts on all CPUs */
5408 mvpp2_interrupts_disable(port);
5410 napi_disable(&port->napi);
5412 netif_carrier_off(port->dev);
5413 netif_tx_stop_all_queues(port->dev);
5415 mvpp2_egress_disable(port);
5416 mvpp2_port_disable(port);
5417 phy_stop(ndev->phydev);
5420 static int mvpp2_check_ringparam_valid(struct net_device *dev,
5421 struct ethtool_ringparam *ring)
5423 u16 new_rx_pending = ring->rx_pending;
5424 u16 new_tx_pending = ring->tx_pending;
5426 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5429 if (ring->rx_pending > MVPP2_MAX_RXD)
5430 new_rx_pending = MVPP2_MAX_RXD;
5431 else if (!IS_ALIGNED(ring->rx_pending, 16))
5432 new_rx_pending = ALIGN(ring->rx_pending, 16);
5434 if (ring->tx_pending > MVPP2_MAX_TXD)
5435 new_tx_pending = MVPP2_MAX_TXD;
5436 else if (!IS_ALIGNED(ring->tx_pending, 32))
5437 new_tx_pending = ALIGN(ring->tx_pending, 32);
5439 if (ring->rx_pending != new_rx_pending) {
5440 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5441 ring->rx_pending, new_rx_pending);
5442 ring->rx_pending = new_rx_pending;
5445 if (ring->tx_pending != new_tx_pending) {
5446 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5447 ring->tx_pending, new_tx_pending);
5448 ring->tx_pending = new_tx_pending;
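
/* Illustrative example: a request of rx_pending = 100 is rounded up to
 * ALIGN(100, 16) = 112 and tx_pending = 100 up to ALIGN(100, 32) = 128;
 * both adjustments are reported through the netdev_info() calls above.
 */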
5454 static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5456 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5458 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5459 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5460 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5461 addr[0] = (mac_addr_h >> 24) & 0xFF;
5462 addr[1] = (mac_addr_h >> 16) & 0xFF;
5463 addr[2] = (mac_addr_h >> 8) & 0xFF;
5464 addr[3] = mac_addr_h & 0xFF;
5465 addr[4] = mac_addr_m & 0xFF;
5466 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
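
/* Illustrative example (register values assumed): if
 * MVPP2_SRC_ADDR_HIGH reads 0x00515253, MVPP2_SRC_ADDR_MIDDLE reads
 * 0x54 and the SA_LOW field of MVPP2_GMAC_CTRL_1_REG holds 0x55, the
 * bytes above assemble to the MAC address 00:51:52:53:54:55.
 */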
5469 static int mvpp2_phy_connect(struct mvpp2_port *port)
5471 struct phy_device *phy_dev;
5473 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5474 port->phy_interface);
5476 netdev_err(port->dev, "cannot connect to phy\n");
5479 phy_dev->supported &= PHY_GBIT_FEATURES;
5480 phy_dev->advertising = phy_dev->supported;
5489 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5491 struct net_device *ndev = port->dev;
5493 phy_disconnect(ndev->phydev);
5496 static int mvpp2_open(struct net_device *dev)
5498 struct mvpp2_port *port = netdev_priv(dev);
5499 unsigned char mac_bcast[ETH_ALEN] = {
5500 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5503 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5505 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5508 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5509 dev->dev_addr, true);
5511 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
5514 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5516 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5519 err = mvpp2_prs_def_flow(port);
5521 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5525 /* Allocate the Rx/Tx queues */
5526 err = mvpp2_setup_rxqs(port);
5528 netdev_err(port->dev, "cannot allocate Rx queues\n");
5532 err = mvpp2_setup_txqs(port);
5534 netdev_err(port->dev, "cannot allocate Tx queues\n");
5535 goto err_cleanup_rxqs;
5538 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5540 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5541 goto err_cleanup_txqs;
5544 /* By default, the link is down */
5545 netif_carrier_off(port->dev);
5547 err = mvpp2_phy_connect(port);
5551 /* Unmask interrupts on all CPUs */
5552 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5554 mvpp2_start_dev(port);
5559 free_irq(port->irq, port);
5561 mvpp2_cleanup_txqs(port);
5563 mvpp2_cleanup_rxqs(port);
5567 static int mvpp2_stop(struct net_device *dev)
5569 struct mvpp2_port *port = netdev_priv(dev);
5570 struct mvpp2_port_pcpu *port_pcpu;
5573 mvpp2_stop_dev(port);
5574 mvpp2_phy_disconnect(port);
5576 /* Mask interrupts on all CPUs */
5577 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5579 free_irq(port->irq, port);
5580 for_each_present_cpu(cpu) {
5581 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5583 hrtimer_cancel(&port_pcpu->tx_done_timer);
5584 port_pcpu->timer_scheduled = false;
5585 tasklet_kill(&port_pcpu->tx_done_tasklet);
5587 mvpp2_cleanup_rxqs(port);
5588 mvpp2_cleanup_txqs(port);
5593 static void mvpp2_set_rx_mode(struct net_device *dev)
5595 struct mvpp2_port *port = netdev_priv(dev);
5596 struct mvpp2 *priv = port->priv;
5597 struct netdev_hw_addr *ha;
5599 bool allmulti = dev->flags & IFF_ALLMULTI;
5601 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5602 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5603 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5605 /* Remove all of port->id's mcast entries */
5606 mvpp2_prs_mcast_del_all(priv, id);
5608 if (allmulti && !netdev_mc_empty(dev)) {
5609 netdev_for_each_mc_addr(ha, dev)
5610 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5614 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5616 struct mvpp2_port *port = netdev_priv(dev);
5617 const struct sockaddr *addr = p;
5620 if (!is_valid_ether_addr(addr->sa_data)) {
5621 err = -EADDRNOTAVAIL;
5625 if (!netif_running(dev)) {
5626 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5629 /* Reconfigure parser to accept the original MAC address */
5630 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5635 mvpp2_stop_dev(port);
5637 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5641 /* Reconfigure parser to accept the original MAC address */
5642 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5646 mvpp2_start_dev(port);
5647 mvpp2_egress_enable(port);
5648 mvpp2_ingress_enable(port);
5652 netdev_err(dev, "failed to change MAC address\n");
5656 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5658 struct mvpp2_port *port = netdev_priv(dev);
5661 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5662 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5663 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5664 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5667 if (!netif_running(dev)) {
5668 err = mvpp2_bm_update_mtu(dev, mtu);
5670 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5674 /* Reconfigure BM to the original MTU */
5675 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5680 mvpp2_stop_dev(port);
5682 err = mvpp2_bm_update_mtu(dev, mtu);
5684 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5688 /* Reconfigure BM to the original MTU */
5689 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5694 mvpp2_start_dev(port);
5695 mvpp2_egress_enable(port);
5696 mvpp2_ingress_enable(port);
5701 netdev_err(dev, "failed to change MTU\n");
5706 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5708 struct mvpp2_port *port = netdev_priv(dev);
5712 for_each_possible_cpu(cpu) {
5713 struct mvpp2_pcpu_stats *cpu_stats;
5719 cpu_stats = per_cpu_ptr(port->stats, cpu);
5721 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5722 rx_packets = cpu_stats->rx_packets;
5723 rx_bytes = cpu_stats->rx_bytes;
5724 tx_packets = cpu_stats->tx_packets;
5725 tx_bytes = cpu_stats->tx_bytes;
5726 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5728 stats->rx_packets += rx_packets;
5729 stats->rx_bytes += rx_bytes;
5730 stats->tx_packets += tx_packets;
5731 stats->tx_bytes += tx_bytes;
5734 stats->rx_errors = dev->stats.rx_errors;
5735 stats->rx_dropped = dev->stats.rx_dropped;
5736 stats->tx_dropped = dev->stats.tx_dropped;
5739 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5746 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
5748 mvpp2_link_event(dev);
5753 /* Ethtool methods */
5755 /* Set interrupt coalescing for ethtool */
5756 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5757 struct ethtool_coalesce *c)
5759 struct mvpp2_port *port = netdev_priv(dev);
5762 for (queue = 0; queue < rxq_number; queue++) {
5763 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5765 rxq->time_coal = c->rx_coalesce_usecs;
5766 rxq->pkts_coal = c->rx_max_coalesced_frames;
5767 mvpp2_rx_pkts_coal_set(port, rxq);
5768 mvpp2_rx_time_coal_set(port, rxq);
5771 for (queue = 0; queue < txq_number; queue++) {
5772 struct mvpp2_tx_queue *txq = port->txqs[queue];
5774 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5780 /* Get interrupt coalescing for ethtool */
5781 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5782 struct ethtool_coalesce *c)
5784 struct mvpp2_port *port = netdev_priv(dev);
5786 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5787 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5788 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5792 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5793 struct ethtool_drvinfo *drvinfo)
5795 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5796 sizeof(drvinfo->driver));
5797 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5798 sizeof(drvinfo->version));
5799 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5800 sizeof(drvinfo->bus_info));
5803 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5804 struct ethtool_ringparam *ring)
5806 struct mvpp2_port *port = netdev_priv(dev);
5808 ring->rx_max_pending = MVPP2_MAX_RXD;
5809 ring->tx_max_pending = MVPP2_MAX_TXD;
5810 ring->rx_pending = port->rx_ring_size;
5811 ring->tx_pending = port->tx_ring_size;
5814 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5815 struct ethtool_ringparam *ring)
5817 struct mvpp2_port *port = netdev_priv(dev);
5818 u16 prev_rx_ring_size = port->rx_ring_size;
5819 u16 prev_tx_ring_size = port->tx_ring_size;
5822 err = mvpp2_check_ringparam_valid(dev, ring);
5826 if (!netif_running(dev)) {
5827 port->rx_ring_size = ring->rx_pending;
5828 port->tx_ring_size = ring->tx_pending;
5832 /* The interface is running, so we have to force a
5833 * reallocation of the queues
5835 mvpp2_stop_dev(port);
5836 mvpp2_cleanup_rxqs(port);
5837 mvpp2_cleanup_txqs(port);
5839 port->rx_ring_size = ring->rx_pending;
5840 port->tx_ring_size = ring->tx_pending;
5842 err = mvpp2_setup_rxqs(port);
5844 /* Reallocate Rx queues with the original ring size */
5845 port->rx_ring_size = prev_rx_ring_size;
5846 ring->rx_pending = prev_rx_ring_size;
5847 err = mvpp2_setup_rxqs(port);
5851 err = mvpp2_setup_txqs(port);
5853 /* Reallocate Tx queues with the original ring size */
5854 port->tx_ring_size = prev_tx_ring_size;
5855 ring->tx_pending = prev_tx_ring_size;
5856 err = mvpp2_setup_txqs(port);
5858 goto err_clean_rxqs;
5861 mvpp2_start_dev(port);
5862 mvpp2_egress_enable(port);
5863 mvpp2_ingress_enable(port);
5868 mvpp2_cleanup_rxqs(port);
5870 netdev_err(dev, "failed to change ring parameters\n");
5876 static const struct net_device_ops mvpp2_netdev_ops = {
5877 .ndo_open = mvpp2_open,
5878 .ndo_stop = mvpp2_stop,
5879 .ndo_start_xmit = mvpp2_tx,
5880 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5881 .ndo_set_mac_address = mvpp2_set_mac_address,
5882 .ndo_change_mtu = mvpp2_change_mtu,
5883 .ndo_get_stats64 = mvpp2_get_stats64,
5884 .ndo_do_ioctl = mvpp2_ioctl,
5887 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5888 .nway_reset = phy_ethtool_nway_reset,
5889 .get_link = ethtool_op_get_link,
5890 .set_coalesce = mvpp2_ethtool_set_coalesce,
5891 .get_coalesce = mvpp2_ethtool_get_coalesce,
5892 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5893 .get_ringparam = mvpp2_ethtool_get_ringparam,
5894 .set_ringparam = mvpp2_ethtool_set_ringparam,
5895 .get_link_ksettings = phy_ethtool_get_link_ksettings,
5896 .set_link_ksettings = phy_ethtool_set_link_ksettings,
5899 /* Driver initialization */
5901 static void mvpp2_port_power_up(struct mvpp2_port *port)
5903 mvpp2_port_mii_set(port);
5904 mvpp2_port_periodic_xon_disable(port);
5905 mvpp2_port_fc_adv_enable(port);
5906 mvpp2_port_reset(port);
5909 /* Initialize port HW */
5910 static int mvpp2_port_init(struct mvpp2_port *port)
5912 struct device *dev = port->dev->dev.parent;
5913 struct mvpp2 *priv = port->priv;
5914 struct mvpp2_txq_pcpu *txq_pcpu;
5915 int queue, cpu, err;
5917 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
5921 mvpp2_egress_disable(port);
5922 mvpp2_port_disable(port);
5924 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
5929 /* Associate physical Tx queues with this port and initialize them.
5930 * The mapping is predefined.
5932 for (queue = 0; queue < txq_number; queue++) {
5933 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5934 struct mvpp2_tx_queue *txq;
5936 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5939 goto err_free_percpu;
5942 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5945 goto err_free_percpu;
5948 txq->id = queue_phy_id;
5949 txq->log_id = queue;
5950 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5951 for_each_present_cpu(cpu) {
5952 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5953 txq_pcpu->cpu = cpu;
5956 port->txqs[queue] = txq;
5959 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
5963 goto err_free_percpu;
5966 /* Allocate and initialize Rx queues for this port */
5967 for (queue = 0; queue < rxq_number; queue++) {
5968 struct mvpp2_rx_queue *rxq;
5970 /* Map physical Rx queue to port's logical Rx queue */
5971 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5974 goto err_free_percpu;
5976 /* Map this Rx queue to a physical queue */
5977 rxq->id = port->first_rxq + queue;
5978 rxq->port = port->id;
5979 rxq->logic_rxq = queue;
5981 port->rxqs[queue] = rxq;
5984 /* Configure Rx queue group interrupt for this port */
5985 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
5987 /* Create Rx descriptor rings */
5988 for (queue = 0; queue < rxq_number; queue++) {
5989 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5991 rxq->size = port->rx_ring_size;
5992 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5993 rxq->time_coal = MVPP2_RX_COAL_USEC;
5996 mvpp2_ingress_disable(port);
5998 /* Port default configuration */
5999 mvpp2_defaults_set(port);
6001 /* Port's classifier configuration */
6002 mvpp2_cls_oversize_rxq_set(port);
6003 mvpp2_cls_port_config(port);
6005 /* Provide an initial Rx packet size */
6006 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6008 /* Initialize pools for software forwarding (swf) */
6009 err = mvpp2_swf_bm_pool_init(port);
6011 goto err_free_percpu;
6016 for (queue = 0; queue < txq_number; queue++) {
6017 if (!port->txqs[queue])
6019 free_percpu(port->txqs[queue]->pcpu);
6024 /* Ports initialization */
6025 static int mvpp2_port_probe(struct platform_device *pdev,
6026 struct device_node *port_node,
6028 int *next_first_rxq)
6030 struct device_node *phy_node;
6031 struct mvpp2_port *port;
6032 struct mvpp2_port_pcpu *port_pcpu;
6033 struct net_device *dev;
6034 struct resource *res;
6035 const char *dt_mac_addr;
6036 const char *mac_from;
6037 char hw_mac_addr[ETH_ALEN];
6041 int priv_common_regs_num = 2;
6044 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6049 phy_node = of_parse_phandle(port_node, "phy", 0);
6051 dev_err(&pdev->dev, "missing phy\n");
6053 goto err_free_netdev;
6056 phy_mode = of_get_phy_mode(port_node);
6058 dev_err(&pdev->dev, "incorrect phy mode\n");
6060 goto err_free_netdev;
6063 if (of_property_read_u32(port_node, "port-id", &id)) {
6065 dev_err(&pdev->dev, "missing port-id value\n");
6066 goto err_free_netdev;
6069 dev->tx_queue_len = MVPP2_MAX_TXD;
6070 dev->watchdog_timeo = 5 * HZ;
6071 dev->netdev_ops = &mvpp2_netdev_ops;
6072 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6074 port = netdev_priv(dev);
6076 port->irq = irq_of_parse_and_map(port_node, 0);
6077 if (port->irq <= 0) {
6079 goto err_free_netdev;
6082 if (of_property_read_bool(port_node, "marvell,loopback"))
6083 port->flags |= MVPP2_F_LOOPBACK;
6087 port->first_rxq = *next_first_rxq;
6088 port->phy_node = phy_node;
6089 port->phy_interface = phy_mode;
6091 res = platform_get_resource(pdev, IORESOURCE_MEM,
6092 priv_common_regs_num + id);
6093 port->base = devm_ioremap_resource(&pdev->dev, res);
6094 if (IS_ERR(port->base)) {
6095 err = PTR_ERR(port->base);
6099 /* Alloc per-cpu stats */
6100 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6106 dt_mac_addr = of_get_mac_address(port_node);
6107 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6108 mac_from = "device tree";
6109 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6111 mvpp2_get_mac_address(port, hw_mac_addr);
6112 if (is_valid_ether_addr(hw_mac_addr)) {
6113 mac_from = "hardware";
6114 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6116 mac_from = "random";
6117 eth_hw_addr_random(dev);
6121 port->tx_ring_size = MVPP2_MAX_TXD;
6122 port->rx_ring_size = MVPP2_MAX_RXD;
6124 SET_NETDEV_DEV(dev, &pdev->dev);
6126 err = mvpp2_port_init(port);
6128 dev_err(&pdev->dev, "failed to init port %d\n", id);
6129 goto err_free_stats;
6131 mvpp2_port_power_up(port);
6133 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6136 goto err_free_txq_pcpu;
6139 for_each_present_cpu(cpu) {
6140 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6142 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6143 HRTIMER_MODE_REL_PINNED);
6144 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6145 port_pcpu->timer_scheduled = false;
6147 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6148 (unsigned long)dev);
6151 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6152 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6153 dev->features = features | NETIF_F_RXCSUM;
6154 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6155 dev->vlan_features |= features;
6157 /* MTU range: 68 - 9676 */
6158 dev->min_mtu = ETH_MIN_MTU;
6159 /* 9676 == 9700 - 20 and rounding to 8 */
6160 dev->max_mtu = 9676;
6162 err = register_netdev(dev);
6164 dev_err(&pdev->dev, "failed to register netdev\n");
6165 goto err_free_port_pcpu;
6167 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6169 /* Increment the first Rx queue number to be used by the next port */
6170 *next_first_rxq += rxq_number;
6171 priv->port_list[id] = port;
6175 free_percpu(port->pcpu);
6177 for (i = 0; i < txq_number; i++)
6178 free_percpu(port->txqs[i]->pcpu);
6180 free_percpu(port->stats);
6182 irq_dispose_mapping(port->irq);
6184 of_node_put(phy_node);
6189 /* Ports removal routine */
6190 static void mvpp2_port_remove(struct mvpp2_port *port)
6194 unregister_netdev(port->dev);
6195 of_node_put(port->phy_node);
6196 free_percpu(port->pcpu);
6197 free_percpu(port->stats);
6198 for (i = 0; i < txq_number; i++)
6199 free_percpu(port->txqs[i]->pcpu);
6200 irq_dispose_mapping(port->irq);
6201 free_netdev(port->dev);
6204 /* Initialize decoding windows */
6205 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6211 for (i = 0; i < 6; i++) {
6212 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6213 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6216 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6221 for (i = 0; i < dram->num_cs; i++) {
6222 const struct mbus_dram_window *cs = dram->cs + i;
6224 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6225 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6226 dram->mbus_dram_target_id);
6228 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6229 (cs->size - 1) & 0xffff0000);
6231 win_enable |= (1 << i);
6234 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
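
/* Illustrative example (window layout assumed): a single 1 GB DRAM
 * chip-select at base 0 programs MVPP2_WIN_BASE(0) with the target and
 * attribute bits, MVPP2_WIN_SIZE(0) with (0x40000000 - 1) & 0xffff0000
 * = 0x3fff0000, and finally sets bit 0 of MVPP2_BASE_ADDR_ENABLE.
 */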
6237 /* Initialize Rx FIFOs */
6238 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6242 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6243 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6244 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6245 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6246 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6249 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6250 MVPP2_RX_FIFO_PORT_MIN_PKT);
6251 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6254 /* Initialize the common part of the network controller HW */
6255 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6257 const struct mbus_dram_target_info *dram_target_info;
6261 /* Check hardware constraints */
6262 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6263 (txq_number > MVPP2_MAX_TXQ)) {
6264 dev_err(&pdev->dev, "invalid queue size parameter\n");
	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
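	/* The read-modify-write above stops the controller's SMI0
	 * autopolling engine, leaving PHY link management entirely to
	 * software (phylib) via the MDIO bus instead of the hardware
	 * poller.
	 */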
	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}
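	/* One aggregated TXQ is instantiated per present CPU, with the
	 * queue id equal to the CPU number: each CPU can then post TX
	 * descriptors to its own aggregated queue without locking, and the
	 * hardware drains them into the per-port TX queues.
	 */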
	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);

	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
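/* The order of mvpp2_init() follows the RX pipeline: decoding windows and
 * FIFOs first, then the Buffer Manager that owns packet buffers, then the
 * parser whose default entries feed the classifier, which is configured
 * last. Only the common, port-independent part of the controller is
 * touched here; per-port setup happens in mvpp2_port_probe().
 */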
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);
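	/* Two MMIO regions are mapped: resource 0 is the packet processor's
	 * main register window (all mvpp2_write()/mvpp2_read() accesses go
	 * through priv->base), while resource 1 is the LMS region used by
	 * mvpp2_init() for PHY autonegotiation control and the extended
	 * global control register.
	 */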
	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;
	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}
	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
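/* With the OF table registered above, a device tree node along these
 * lines binds against the driver (illustrative sketch only, not a
 * complete binding):
 *
 *	ethernet@f0000 {
 *		compatible = "marvell,armada-375-pp2";
 *		reg = <...>, <...>;	(main registers, LMS registers)
 *		clock-names = "pp_clk", "gop_clk";
 *		...
 *	};
 */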
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");