/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/

/*!  \file  octeon_network.h
 *   \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU

struct oct_nic_stats_resp {
        u64     rh;
        struct oct_link_stats stats;
        u64     status;
};

struct oct_nic_stats_ctrl {
        struct completion complete;
        struct net_device *netdev;
};
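
/* Illustrative sketch (not part of the driver API): a stats request
 * typically pairs this structure with a completion that the response
 * callback signals. Everything other than the two structures above is
 * an assumption for illustration only.
 *
 *      struct oct_nic_stats_ctrl ctrl;
 *
 *      ctrl.netdev = netdev;
 *      init_completion(&ctrl.complete);
 *      // ... send the stats request, passing &ctrl to the callback ...
 *      wait_for_completion_timeout(&ctrl.complete, msecs_to_jiffies(1000));
 */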

/** LiquidIO per-interface network private data */
struct lio {
        /** State of the interface. Rx/Tx happens only in the RUNNING state.  */
        atomic_t ifstate;

        /** Octeon Interface index number. This device will be represented as
         *  oct<ifidx> in the system.
         */
        int ifidx;

        /** Octeon Input queue to use to transmit for this network interface. */
        int txq;

        /** Octeon Output queue from which pkts arrive
         * for this network interface.
         */
        int rxq;

        /** Guards each glist */
        spinlock_t *glist_lock;

        /** Array of gather component linked lists */
        struct list_head *glist;

        /** Pointer to the NIC properties for the Octeon device this network
         *  interface is associated with.
         */
        struct octdev_props *octprops;

        /** Pointer to the octeon device structure. */
        struct octeon_device *oct_dev;

        struct net_device *netdev;

        /** Link information sent by the core application for this interface. */
        struct oct_link_info linfo;

        /** counter of link changes */
        u64 link_changes;

        /** Size of Tx queue for this octeon device. */
        u32 tx_qsize;

        /** Size of Rx queue for this octeon device. */
        u32 rx_qsize;

        /** MTU size for this octeon device. */
        u32 mtu;

        /** msg level flag per interface. */
        u32 msg_enable;

        /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
        u64 dev_capability;

        /* Copy of transmit encapsulation capabilities:
         * TSO, TSO6, Checksums for this device for Kernel
         * 3.10.0 onwards
         */
        u64 enc_dev_capability;

        /** Copy of beacon reg in phy */
        u32 phy_beacon_val;

        /** Copy of ctrl reg in phy */
        u32 led_ctrl_val;

        /* PTP clock information */
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        s64 ptp_adjust;

        /* for atomic access to Octeon PTP reg and data struct */
        spinlock_t ptp_lock;

        /* Interface info */
        u32     intf_open;

        /* work queue for txq status */
        struct cavium_wq        txq_status_wq;

        /* work queue for link status */
        struct cavium_wq        link_status_wq;

        int netdev_uc_count;
};

#define LIO_SIZE         (sizeof(struct lio))
#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))
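
/* Illustrative sketch: GET_LIO retrieves the per-interface private data
 * embedded in the net_device at allocation time, e.g. from a
 * net_device_ops handler (the handler name here is hypothetical):
 *
 *      static int lio_example_open(struct net_device *netdev)
 *      {
 *              struct lio *lio = GET_LIO(netdev);
 *
 *              return lio->intf_open ? 0 : -ENODEV;
 *      }
 */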

#define CIU3_WDOG(c)                 (0x1010000020000ULL + ((c) << 3))
#define CIU3_WDOG_MASK               12ULL
#define LIO_MONITOR_WDOG_EXPIRE      1
#define LIO_MONITOR_CORE_STUCK_MSGD  2
#define LIO_MAX_CORES                12

/**
 * \brief Enable or disable feature
 * @param netdev    pointer to network device
 * @param cmd       Command that just requires acknowledgment
 * @param param1    Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
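
/* Illustrative sketch: a feature toggle is sent as a control command to
 * the firmware. The command and flag names below are assumptions for
 * illustration; the actual values live in the LiquidIO common headers.
 *
 *      if (liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
 *                               OCTNIC_LROIPV4 | OCTNIC_LROIPV6))
 *              netdev_info(netdev, "LRO enable command failed\n");
 */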

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

/**
 * \brief Register ethtool operations
 * @param netdev    pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);

#define SKB_ADJ_MASK  0x3F
#define SKB_ADJ       (SKB_ADJ_MASK + 1)
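
/* Worked example: SKB_ADJ_MASK is 0x3F, so SKB_ADJ is 0x40 (64). The
 * receive-buffer allocators below use these to round skb->data up to the
 * next 64-byte boundary. E.g. if skb->data ends in 0x28, then
 * r = 0x40 - (0x28 & 0x3F) = 0x18, and skb_reserve(skb, 0x18) moves the
 * data pointer to a 64-byte-aligned address.
 */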

#define MIN_SKB_SIZE       256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ    2048

static inline void *recv_buffer_alloc(struct octeon_device *oct,
                                      struct octeon_skb_page_info *pg_info)
{
        struct page *page;
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        page = alloc_page(GFP_ATOMIC | __GFP_COLD);
        if (unlikely(!page))
                return NULL;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                __free_page(page);
                pg_info->page = NULL;
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        /* Get DMA info */
        pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
                                    PAGE_SIZE, DMA_FROM_DEVICE);

        /* Mapping failed!! */
        if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
                __free_page(page);
                dev_kfree_skb_any(skb);
                pg_info->page = NULL;
                return NULL;
        }

        pg_info->page = page;
        pg_info->page_offset = 0;
        skb_pg_info->page = page;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = pg_info->dma;

        return (void *)skb;
}
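
/* Illustrative sketch of the receive-buffer lifecycle implied above: the
 * page is what the hardware DMAs packet data into, while the small skb
 * carries the page/offset/dma bookkeeping in skb->cb:
 *
 *      struct octeon_skb_page_info pg_info;
 *      struct sk_buff *skb = recv_buffer_alloc(oct, &pg_info);
 *
 *      if (skb)
 *              // post pg_info.dma (+ page_offset) to the RX ring;
 *              // on completion, get_rbd(skb) returns the packet data.
 */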

static inline void *recv_buffer_fast_alloc(u32 size)
{
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        skb = dev_alloc_skb(size + SKB_ADJ);
        if (unlikely(!skb))
                return NULL;

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        skb_pg_info->page = NULL;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = 0;

        return skb;
}

static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf;

        if (!pg_info->page) {
                dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
                        __func__);
                return -ENOMEM;
        }

        if (unlikely(page_count(pg_info->page) != 1) ||
            unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, PAGE_SIZE,
                               DMA_FROM_DEVICE);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
                return -ENOMEM;
        }

        /* Flip to other half of the buffer */
        if (pg_info->page_offset == 0)
                pg_info->page_offset = LIO_RXBUFFER_SZ;
        else
                pg_info->page_offset = 0;
        page_ref_inc(pg_info->page);

        return 0;
}
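
/* Worked example: with a 4 KiB PAGE_SIZE, each page provides two
 * LIO_RXBUFFER_SZ (2048-byte) halves. recv_buffer_recycle() flips
 * page_offset between 0 and 2048 on each pass, so the same DMA-mapped
 * page services two receive buffers before it must be remapped. The
 * page_count() != 1 check bails out when the network stack still holds
 * a reference to the page; the NUMA-node check avoids recycling a page
 * onto a remote node.
 */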

static inline void *recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
        struct sk_buff *skb;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, PAGE_SIZE,
                               DMA_FROM_DEVICE);
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        skb_pg_info->page = pg_info->page;
        skb_pg_info->page_offset = pg_info->page_offset;
        skb_pg_info->dma = pg_info->dma;

        return skb;
}

static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;

        put_page(pg_info->page);
        pg_info->dma = 0;
        pg_info->page = NULL;
        pg_info->page_offset = 0;

        if (skb)
                dev_kfree_skb_any(skb);
}

static inline void recv_buffer_free(void *buffer)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;
        struct octeon_skb_page_info *pg_info;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);

        if (pg_info->page) {
                put_page(pg_info->page);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
        }

        dev_kfree_skb_any(skb);
}

static inline void
recv_buffer_fast_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

#define lio_dma_alloc(oct, size, dma_addr) \
        dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
        dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)

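/* Illustrative sketch: the wrappers above pair a coherent allocation
 * with its matching free against the Octeon PCI device. The variable
 * names are for illustration only:
 *
 *      dma_addr_t ring_dma;
 *      void *ring = lio_dma_alloc(oct, ring_size, &ring_dma);
 *
 *      if (!ring)
 *              return -ENOMEM;
 *      // ... hand ring_dma to the hardware, use ring from the CPU ...
 *      lio_dma_free(oct, ring_size, ring, ring_dma);
 */
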
static inline void *get_rbd(struct sk_buff *skb)
{
        struct octeon_skb_page_info *pg_info;
        unsigned char *va;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);
        va = page_address(pg_info->page) + pg_info->page_offset;

        return va;
}

static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
        dma_addr_t dma_addr;
        struct octeon_device *oct = droq->oct_dev;

        dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
                                  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);

        WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));

        return (u64)dma_addr;
}

static inline void
lio_unmap_ring_info(struct pci_dev *pci_dev,
                    u64 info_ptr, u32 size)
{
        dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
}

static inline u64
lio_map_ring(void *buf)
{
        dma_addr_t dma_addr;

        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octeon_skb_page_info *pg_info;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);
        if (!pg_info->page) {
                pr_err("%s: pg_info->page NULL\n", __func__);
                WARN_ON(1);
        }

        /* Get DMA info */
        dma_addr = pg_info->dma;
        if (!pg_info->dma) {
                pr_err("%s: ERROR it should be already available\n",
                       __func__);
                WARN_ON(1);
        }
        dma_addr += pg_info->page_offset;

        return (u64)dma_addr;
}

static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
               u64 buf_ptr)
{
        dma_unmap_page(&pci_dev->dev,
                       buf_ptr, PAGE_SIZE,
                       DMA_FROM_DEVICE);
}

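/* Note on pairing: lio_map_ring() does not create a new mapping; it
 * returns the page's existing DMA address plus page_offset (0 or
 * LIO_RXBUFFER_SZ), so the value posted to the ring points at the half
 * of the page currently in use. lio_unmap_ring() later tears down the
 * whole-page mapping created in recv_buffer_alloc().
 */
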
static inline void *octeon_fast_packet_alloc(u32 size)
{
        return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
                                           struct sk_buff *nicbuf,
                                           int copy_len,
                                           int idx)
{
        memcpy(skb_put(nicbuf, copy_len),
               get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
}

#endif /* __OCTEON_NETWORK_H__ */