/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
/*! \file octeon_network.h
 * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */
#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__

#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
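
/* LIO_MAX_MTU_SIZE is whatever remains of the largest Octeon frame after
 * the firmware's front header is subtracted. Sketch of how these bounds
 * would typically be enforced (hypothetical .ndo_change_mtu handler, not
 * part of this header):
 *
 *	if (new_mtu < LIO_MIN_MTU_SIZE || new_mtu > LIO_MAX_MTU_SIZE)
 *		return -EINVAL;
 */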
struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};
struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};
/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
	atomic_t ifstate;

	/** Octeon Interface index number. This device will be represented as
	 * oct<ifidx> in the system.
	 */
	int ifidx;

	/** Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/** Octeon Output queue from which pkts arrive
	 * for this network interface.
	 */
	int rxq;

	/** Guards each glist */
	spinlock_t *glist_lock;

	/** Array of gather component linked lists */
	struct list_head *glist;

	/** Pointer to the NIC properties for the Octeon device this network
	 * interface is associated with.
	 */
	struct octdev_props *octprops;

	/** Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/** Link information sent by the core application for this interface. */
	struct oct_link_info linfo;

	/** counter of link changes */
	u64 link_changes;

	/** Size of Tx queue for this octeon device. */
	u32 tx_qsize;

	/** Size of Rx queue for this octeon device. */
	u32 rx_qsize;

	/** Size of MTU of this octeon device. */
	u32 mtu;

	/** msg level flag per interface. */
	u32 msg_enable;

	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, Checksums for this device for Kernel
	 */
	u64 enc_dev_capability;

	/** Copy of beacon reg in phy */
	u32 phy_beacon_val;

	/** Copy of ctrl reg in phy */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;

	/* for atomic access to Octeon PTP reg and data struct */
	spinlock_t ptp_lock;

	/* work queue for txq status */
	struct cavium_wq txq_status_wq;

	/* work queue for link status */
	struct cavium_wq link_status_wq;
};
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
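
/* Usage sketch (hypothetical caller, not part of this header): struct lio
 * lives in the netdev private area sized by LIO_SIZE at allocation time,
 * so any ndo handler can recover the per-interface state with a cast:
 *
 *	struct lio *lio = GET_LIO(netdev);
 *	struct octeon_device *oct = lio->oct_dev;
 */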
#define CIU3_WDOG(c)                 (0x1010000020000ULL + ((c) << 3))
#define CIU3_WDOG_MASK               12ULL
#define LIO_MONITOR_WDOG_EXPIRE      1
#define LIO_MONITOR_CORE_STUCK_MSGD  2
#define LIO_MAX_CORES                12
/**
 * \brief Enable or disable feature
 * @param netdev  pointer to network device
 * @param cmd     Command that just requires acknowledgment
 * @param param1  Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
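
/* Usage sketch: liquidio_set_feature() is the fire-and-acknowledge path
 * for simple control commands. For example (assuming the command codes
 * defined in liquidio_common.h), enabling LRO from .ndo_set_features
 * might look like:
 *
 *	liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
 *			     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 */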
/**
 * \brief Link control command completion callback
 * @param nctrl_ptr  pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
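
/* Typical wiring (a sketch, assuming struct octnic_ctrl_pkt from
 * octeon_nic.h): the sender stores this routine as the completion
 * callback before posting the control packet, e.g.:
 *
 *	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 *	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 */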
/**
 * \brief Register ethtool operations
 * @param netdev  pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);
#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ      (SKB_ADJ_MASK + 1)
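
/* Worked example of the alignment math used below: SKB_ADJ is 0x40, so
 * if skb->data lands at an address ending in 0x23, the low bits are
 * 0x23 and the code reserves r = 0x40 - 0x23 = 0x1d (29) bytes, leaving
 * skb->data on a 64-byte boundary. */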
#define MIN_SKB_SIZE    256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ 2048
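
/* LIO_RXBUFFER_SZ is half of a page by design (assuming 4 KiB pages):
 * each receive page is carved into two 2048-byte halves, and
 * recv_buffer_recycle() below flips page_offset between 0 and
 * LIO_RXBUFFER_SZ so the other half can be reused without remapping. */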
static inline void
*recv_buffer_alloc(struct octeon_device *oct,
		   struct octeon_skb_page_info *pg_info)
{
	struct page *page;
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
	if (unlikely(!page))
		return NULL;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		__free_page(page);
		pg_info->page = NULL;
		return NULL;
	}

	/* Align skb->data on a SKB_ADJ (64-byte) boundary. */
	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	/* Get DMA info */
	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

	/* Mapping failed!! */
	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
		__free_page(page);
		dev_kfree_skb_any((struct sk_buff *)skb);
		pg_info->page = NULL;
		return NULL;
	}

	pg_info->page = page;
	pg_info->page_offset = 0;
	skb_pg_info->page = page;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = pg_info->dma;

	return (void *)skb;
}
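
/* Design note: the page bookkeeping is stashed in skb->cb (the 48-byte
 * scratch area every sk_buff carries), so no extra allocation is needed
 * to remember the page/offset/DMA address of a receive buffer.
 * Retrieval is just a cast:
 *
 *	struct octeon_skb_page_info *pg = (void *)skb->cb;
 */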
static inline void
*recv_buffer_fast_alloc(u32 size)
{
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	skb = dev_alloc_skb(size + SKB_ADJ);
	if (unlikely(!skb))
		return NULL;

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	/* No receive page attached; mark the cb info empty. */
	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = NULL;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = 0;

	return skb;
}
static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	/* Give the page up if someone else still holds a reference to it
	 * or if it sits on a remote NUMA node.
	 */
	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to other half of the buffer */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}
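
/* Recycle walkthrough (assuming 4 KiB pages): a freshly allocated page
 * starts at page_offset 0; the first recycle flips it to 2048, the next
 * back to 0, each pass taking an extra page reference so the page stays
 * alive while both halves are in flight. The page is only unmapped and
 * dropped when another reference is outstanding or the page is remote. */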
static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	/* Hand the still-mapped page over to the new skb. */
	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = pg_info->page;
	skb_pg_info->page_offset = pg_info->page_offset;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}
static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	if (skb)
		dev_kfree_skb_any(skb);
}
static inline void recv_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	dev_kfree_skb_any((struct sk_buff *)buffer);
}
static inline void
recv_buffer_fast_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}
static inline void tx_buffer_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}
#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
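
/* Usage sketch (names hypothetical): these wrappers only pin the DMA
 * device argument to the Octeon PCI function, e.g. for a descriptor
 * ring:
 *
 *	ring = lio_dma_alloc(oct, ring_size, &ring_dma);
 *	...
 *	lio_dma_free(oct, ring_size, ring, ring_dma);
 */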
static inline
void *get_rbd(struct sk_buff *skb)
{
	struct octeon_skb_page_info *pg_info;
	unsigned char *va;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	va = page_address(pg_info->page) + pg_info->page_offset;

	return va;
}
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
	dma_addr_t dma_addr;
	struct octeon_device *oct = droq->oct_dev;

	dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
				  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);

	WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));

	return (u64)dma_addr;
}
static inline void
lio_unmap_ring_info(struct pci_dev *pci_dev,
		    u64 info_ptr, u32 size)
{
	dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
}
static inline u64
lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;
	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* The page was mapped at allocation time; just add the offset. */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}
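
/* Note: lio_map_ring() maps nothing itself; the page was mapped once in
 * recv_buffer_alloc(), and this helper only returns the device address
 * of the half-page currently in use, e.g. when refilling a descriptor
 * (desc/i hypothetical):
 *
 *	desc[i].buffer_ptr = lio_map_ring(droq->recv_buf_list[i].buffer);
 *
 * The matching teardown is lio_unmap_ring() below. */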
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
	       u64 buf_ptr)
{
	dma_unmap_page(&pci_dev->dev,
		       buf_ptr, (PAGE_SIZE << 0),
		       DMA_FROM_DEVICE);
}
static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
					   struct sk_buff *nicbuf,
					   int copy_len,
					   int idx)
{
	memcpy(skb_put(nicbuf, copy_len),
	       get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
}

#endif /* __OCTEON_NETWORK_H__ */