drivers/net/dsa/bcm_sf2_cfp.c
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_udf_layout {
        u8 slices[UDF_NUM_SLICES];
        u32 mask_value;
};

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
        .slices = {
                /* End of L2, byte offset 12, src IP[0:15] */
                CFG_UDF_EOL2 | 6,
                /* End of L2, byte offset 14, src IP[16:31] */
                CFG_UDF_EOL2 | 7,
                /* End of L2, byte offset 16, dst IP[0:15] */
                CFG_UDF_EOL2 | 8,
                /* End of L2, byte offset 18, dst IP[16:31] */
                CFG_UDF_EOL2 | 9,
                /* End of L3, byte offset 0, src port */
                CFG_UDF_EOL3 | 0,
                /* End of L3, byte offset 2, dst port */
                CFG_UDF_EOL3 | 1,
                0, 0, 0
        },
        .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
};

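/* Count how many UDF slices of a layout are actually programmed */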
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
        unsigned int i, count = 0;

        for (i = 0; i < UDF_NUM_SLICES; i++) {
                if (layout[i] != 0)
                        count++;
        }

        return count;
}

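/* Write a UDF layout into the hardware registers of the given slice */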
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
                                unsigned int slice_num,
                                const u8 *layout)
{
        u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
        unsigned int i;

        for (i = 0; i < UDF_NUM_SLICES; i++)
                core_writel(priv, layout[i], offset + i * 4);
}

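/* Kick off a CFP operation and busy-wait for the hardware to clear
 * OP_STR_DONE, which indicates completion.
 */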
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
        reg |= OP_STR_DONE | op;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                reg = core_readl(priv, CORE_CFP_ACC);
                if (!(reg & OP_STR_DONE))
                        break;

                cpu_relax();
        } while (--timeout);

        if (!timeout)
                return -ETIMEDOUT;

        return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
                                             unsigned int addr)
{
        u32 reg;

        WARN_ON(addr >= CFP_NUM_RULES);

        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
        reg |= addr << XCESS_ADDR_SHIFT;
        core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
        /* Entry #0 is reserved */
        return CFP_NUM_RULES - 1;
}

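/* Translate an ethtool rx_flow_spec into a CFP TCAM entry and forwarding
 * action for this port.
 */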
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
                                struct ethtool_rx_flow_spec *fs)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        struct ethtool_tcpip4_spec *v4_spec;
        const struct cfp_udf_layout *layout;
        unsigned int slice_num, rule_index;
        unsigned int queue_num, port_num;
        u8 ip_proto, ip_frag;
        u8 num_udf;
        u32 reg;
        int ret;

        /* Check for unsupported extensions */
        if ((fs->flow_type & FLOW_EXT) &&
            (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
                return -EINVAL;

        if (fs->location != RX_CLS_LOC_ANY &&
            test_bit(fs->location, priv->cfp.used))
                return -EBUSY;

        if (fs->location != RX_CLS_LOC_ANY &&
            fs->location > bcm_sf2_cfp_rule_size(priv))
                return -EINVAL;

        ip_frag = be32_to_cpu(fs->m_ext.data[0]);

        /* We do not support discarding packets; check that the
         * destination port is enabled and that we are within the
         * number of ports supported by the switch.
         */
        port_num = fs->ring_cookie / 8;

        if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
            !(BIT(port_num) & ds->enabled_port_mask) ||
            port_num >= priv->hw_params.num_ports)
                return -EINVAL;

        switch (fs->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
                ip_proto = IPPROTO_TCP;
                v4_spec = &fs->h_u.tcp_ip4_spec;
                break;
        case UDP_V4_FLOW:
                ip_proto = IPPROTO_UDP;
                v4_spec = &fs->h_u.udp_ip4_spec;
                break;
        default:
                return -EINVAL;
        }

        /* We only use one UDF slice for now */
        slice_num = 1;
        layout = &udf_tcpip4_layout;
        num_udf = bcm_sf2_get_num_udf_slices(layout->slices);

        /* Apply the UDF layout for this filter */
        bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);

        /* Apply to all packets received through this port */
        core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

        /* S-Tag status         [31:30]
         * C-Tag status         [29:28]
         * L2 framing           [27:26]
         * L3 framing           [25:24]
         * IP ToS               [23:16]
         * IP proto             [15:08]
         * IP Fragm             [7]
         * Non 1st frag         [6]
         * IP Authen            [5]
         * TTL range            [4:3]
         * PPPoE session        [2]
         * Reserved             [1]
         * UDF_Valid[8]         [0]
         */
        core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
                    CORE_CFP_DATA_PORT(6));

        /* UDF_Valid[7:0]       [31:24]
         * S-Tag                [23:8]
         * C-Tag                [7:0]
         */
        core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));

        /* C-Tag                [31:24]
         * UDF_n_A8             [23:8]
         * UDF_n_A7             [7:0]
         */
        core_writel(priv, 0, CORE_CFP_DATA_PORT(4));

        /* UDF_n_A7             [31:24]
         * UDF_n_A6             [23:8]
         * UDF_n_A5             [7:0]
         */
        core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
                    CORE_CFP_DATA_PORT(3));

        /* UDF_n_A5             [31:24]
         * UDF_n_A4             [23:8]
         * UDF_n_A3             [7:0]
         */
        reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
              (u32)be16_to_cpu(v4_spec->psrc) << 8 |
              (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(2));

        /* UDF_n_A3             [31:24]
         * UDF_n_A2             [23:8]
         * UDF_n_A1             [7:0]
         */
        reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
              (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
              (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(1));

        /* UDF_n_A1             [31:24]
         * UDF_n_A0             [23:8]
         * Reserved             [7:4]
         * Slice ID             [3:2]
         * Slice valid          [1:0]
         */
        reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
              (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
              SLICE_NUM(slice_num) | SLICE_VALID;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

        /* Source port map match */
        core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

        /* Mask with the specific layout for IPv4 packets */
        core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));

        /* Mask all but valid UDFs */
        core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));

        /* Mask all */
        core_writel(priv, 0, CORE_CFP_MASK_PORT(4));

        /* All other UDFs should be matched with the filter */
        core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
        core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
        core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
        core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));

        /* Locate the first rule available */
        if (fs->location == RX_CLS_LOC_ANY)
                rule_index = find_first_zero_bit(priv->cfp.used,
                                                 bcm_sf2_cfp_rule_size(priv));
        else
                rule_index = fs->location;

        /* Insert into TCAM now */
        bcm_sf2_cfp_rule_addr_set(priv, rule_index);

        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
        if (ret) {
                pr_err("TCAM entry at addr %d failed\n", rule_index);
                return ret;
        }

        /* Replace the ARL derived destination with the DST_MAP derived one
         * and define which port and queue this should be forwarded to.
         *
         * We have a small oddity where Port 6 just does not have a
         * valid bit here (so we subtract by one).
         */
        queue_num = fs->ring_cookie % 8;
        if (port_num >= 7)
                port_num -= 1;

        reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
                CHANGE_TC | queue_num << NEW_TC_SHIFT;

        core_writel(priv, reg, CORE_ACT_POL_DATA0);

        /* Set classification ID that needs to be put in Broadcom tag */
        core_writel(priv, rule_index << CHAIN_ID_SHIFT,
                    CORE_ACT_POL_DATA1);

        core_writel(priv, 0, CORE_ACT_POL_DATA2);

        /* Configure policer RAM now */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
        if (ret) {
                pr_err("Policer entry at %d failed\n", rule_index);
                return ret;
        }

        /* Disable the policer */
        core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

        /* Now the rate meter */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
        if (ret) {
                pr_err("Meter entry at %d failed\n", rule_index);
                return ret;
        }

        /* Turn on CFP for this rule now */
        reg = core_readl(priv, CORE_CFP_CTL_REG);
        reg |= BIT(port);
        core_writel(priv, reg, CORE_CFP_CTL_REG);

        /* Flag the rule as being used and return it */
        set_bit(rule_index, priv->cfp.used);
        fs->location = rule_index;

        return 0;
}

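/* Remove a rule by clearing the valid bits of its TCAM entry */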
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
                                u32 loc)
{
        int ret;
        u32 reg;

        /* Refuse deletion of unused rules, and the default reserved rule */
        if (!test_bit(loc, priv->cfp.used) || loc == 0)
                return -EINVAL;

        /* Indicate which rule we want to read */
        bcm_sf2_cfp_rule_addr_set(priv, loc);

        ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
        if (ret)
                return ret;

        /* Clear its valid bits */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
        reg &= ~SLICE_VALID;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

        /* Write back this entry into the TCAM now */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
        if (ret)
                return ret;

        clear_bit(loc, priv->cfp.used);

        return 0;
}

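/* Invert every mask bit of a flow specification before reporting it back */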
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
        unsigned int i;

        for (i = 0; i < sizeof(flow->m_u); i++)
                flow->m_u.hdata[i] ^= 0xff;

        flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
        flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
        flow->m_ext.data[0] ^= cpu_to_be32(~0);
        flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

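/* Read a rule back from the hardware and convert it into an ethtool
 * rx_flow_spec.
 */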
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
                                struct ethtool_rxnfc *nfc, bool search)
{
        struct ethtool_tcpip4_spec *v4_spec;
        unsigned int queue_num;
        u16 src_dst_port;
        u32 reg, ipv4;
        int ret;

        if (!search) {
                bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

                ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
                if (ret)
                        return ret;

                reg = core_readl(priv, CORE_ACT_POL_DATA0);

                ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
                if (ret)
                        return ret;
        } else {
                reg = core_readl(priv, CORE_ACT_POL_DATA0);
        }

        /* Extract the destination port */
        nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
                                  DST_MAP_IB_MASK) - 1;

        /* There is no Port 6, so we compensate for that here */
        if (nfc->fs.ring_cookie >= 6)
                nfc->fs.ring_cookie++;
        nfc->fs.ring_cookie *= 8;

        /* Extract the destination queue */
        queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
        nfc->fs.ring_cookie += queue_num;

        /* Extract the IP protocol */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
        switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
        case IPPROTO_TCP:
                nfc->fs.flow_type = TCP_V4_FLOW;
                v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
                break;
        case IPPROTO_UDP:
                nfc->fs.flow_type = UDP_V4_FLOW;
                v4_spec = &nfc->fs.h_u.udp_ip4_spec;
                break;
        default:
                /* Clear to exit the search process */
                if (search)
                        core_readl(priv, CORE_CFP_DATA_PORT(7));
                return -EINVAL;
        }

        v4_spec->tos = (reg >> 16) & 0xff;
        nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);

        reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
        /* dst port [15:8] */
        src_dst_port = reg << 8;

        reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
        /* dst port [7:0] */
        src_dst_port |= (reg >> 24);

        v4_spec->pdst = cpu_to_be16(src_dst_port);
        nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
        v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
        nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

        /* IPv4 dst [15:8] */
        ipv4 = (reg & 0xff) << 8;
        reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
        /* IPv4 dst [31:16] */
        ipv4 |= ((reg >> 8) & 0xffff) << 16;
        /* IPv4 dst [7:0] */
        ipv4 |= (reg >> 24) & 0xff;
        v4_spec->ip4dst = cpu_to_be32(ipv4);
        nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

        /* IPv4 src [15:8] */
        ipv4 = (reg & 0xff) << 8;
        reg = core_readl(priv, CORE_CFP_DATA_PORT(0));

        if (!(reg & SLICE_VALID))
                return -EINVAL;

        /* IPv4 src [7:0] */
        ipv4 |= (reg >> 24) & 0xff;
        /* IPv4 src [31:16] */
        ipv4 |= ((reg >> 8) & 0xffff) << 16;
        v4_spec->ip4src = cpu_to_be32(ipv4);
        nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

        /* Read last to avoid next entry clobbering the results during search
         * operations
         */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
        if (!(reg & 1 << port))
                return -EINVAL;

        bcm_sf2_invert_masks(&nfc->fs);

        /* Put the TCAM size here */
        nfc->data = bcm_sf2_cfp_rule_size(priv);

        return 0;
}

/* We implement the search using a TCAM search operation */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
                                    int port, struct ethtool_rxnfc *nfc,
                                    u32 *rule_locs)
{
        unsigned int index = 1, rules_cnt = 0;
        int ret;
        u32 reg;

        /* Do not poll on OP_STR_DONE to be self-clearing for search
         * operations; we cannot use bcm_sf2_cfp_op() here because it
         * completes on OP_STR_DONE being cleared, and that will not happen
         * until the entire search operation is over.
         */
        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
        reg |= index << XCESS_ADDR_SHIFT;
        reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
        reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                /* Wait for results to be ready */
                reg = core_readl(priv, CORE_CFP_ACC);

                /* Extract the address we are searching */
                index = reg >> XCESS_ADDR_SHIFT;
                index &= XCESS_ADDR_MASK;

                /* We have a valid search result, so flag it accordingly */
                if (reg & SEARCH_STS) {
                        ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
                        if (ret)
                                continue;

                        rule_locs[rules_cnt] = index;
                        rules_cnt++;
                }

                /* Search is over, break out */
                if (!(reg & OP_STR_DONE))
                        break;

        } while (index < CFP_NUM_RULES);

        /* Put the TCAM size here */
        nfc->data = bcm_sf2_cfp_rule_size(priv);
        nfc->rule_cnt = rules_cnt;

        return 0;
}

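/* Handle the ethtool get_rxnfc queries, serialized by the CFP mutex */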
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
                      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        int ret = 0;

        mutex_lock(&priv->cfp.lock);

        switch (nfc->cmd) {
        case ETHTOOL_GRXCLSRLCNT:
                /* Subtract the default, unusable rule */
                nfc->rule_cnt = bitmap_weight(priv->cfp.used,
                                              CFP_NUM_RULES) - 1;
                /* We support specifying rule locations */
                nfc->data |= RX_CLS_LOC_SPECIAL;
                break;
        case ETHTOOL_GRXCLSRULE:
                ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
                break;
        case ETHTOOL_GRXCLSRLALL:
                ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&priv->cfp.lock);

        return ret;
}

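/* Handle the ethtool set_rxnfc rule insertion and deletion requests */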
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
                      struct ethtool_rxnfc *nfc)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        int ret = 0;

        mutex_lock(&priv->cfp.lock);

        switch (nfc->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&priv->cfp.lock);

        return ret;
}

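/* Reset the CFP TCAM and wait for the reset bit to self-clear */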
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_CFP_ACC);
        reg |= TCAM_RESET;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                reg = core_readl(priv, CORE_CFP_ACC);
                if (!(reg & TCAM_RESET))
                        break;

                cpu_relax();
        } while (--timeout);

        if (!timeout)
                return -ETIMEDOUT;

        return 0;
}